loongarch64: Add icamin/izamin optimization functions.

Hao Chen 2023-12-28 20:07:58 +08:00 committed by Shiyou Yin
parent d97272cb35
commit fbd612f8c4
5 changed files with 988 additions and 0 deletions
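ICAMIN (single precision) and IZAMIN (double precision) return the 1-based index of the complex vector element with the smallest |real| + |imag|. As a reference for what the vectorized LSX/LASX kernels below implement, here is a minimal C sketch of that scalar semantics; the function name and layout are illustrative, not the actual generic kernel source:

#include <math.h>

/* Illustrative reference: 1-based index of the complex element (interleaved
 * re/im pairs) whose |re| + |im| is smallest; returns 0 for invalid input. */
static long icamin_ref(long n, const float *x, long inc_x)
{
    if (n <= 0 || inc_x <= 0) return 0;

    long min_i = 1;
    float min_v = fabsf(x[0]) + fabsf(x[1]);

    for (long i = 1; i < n; i++) {
        const float *p = x + 2 * i * inc_x;   /* stride counted in complex elements */
        float v = fabsf(p[0]) + fabsf(p[1]);
        if (v < min_v) {                      /* strict '<' keeps the first minimum on ties */
            min_v = v;
            min_i = i + 1;                    /* BLAS index results are 1-based */
        }
    }
    return min_i;
}

The assembly kernels compute the same quantity several elements at a time, carrying a vector of candidate indices alongside the running per-lane minima and reducing both to a single index at the end.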


@@ -133,6 +133,7 @@ static inline int WhereAmI(void){
#define XVFSUB xvfsub.d
#define XVFADD xvfadd.d
#define XVFMUL xvfmul.d
#define XVFMADD xvfmadd.d
#define XVFMIN xvfmin.d
#define XVFMINA xvfmina.d
@@ -146,6 +147,7 @@ static inline int WhereAmI(void){
#define VFSUB vfsub.d
#define VFADD vfadd.d
#define VFMUL vfmul.d
#define VFMADD vfmadd.d
#define VFMIN vfmin.d
#define VFMINA vfmina.d
@@ -185,6 +187,7 @@ static inline int WhereAmI(void){
#define XVFSUB xvfsub.s
#define XVFADD xvfadd.s
#define XVFMUL xvfmul.s
#define XVFMADD xvfmadd.s
#define XVFMIN xvfmin.s
#define XVFMINA xvfmina.s
@@ -198,6 +201,7 @@ static inline int WhereAmI(void){
#define VFSUB vfsub.s
#define VFADD vfadd.s
#define VFMUL vfmul.s
#define VFMADD vfmadd.s
#define VFMIN vfmin.s
#define VFMINA vfmina.s


@@ -38,6 +38,8 @@ IZAMAXKERNEL = icamax_lsx.S
ISAMINKERNEL = iamin_lsx.S
IDAMINKERNEL = iamin_lsx.S
ICAMINKERNEL = icamin_lsx.S
IZAMINKERNEL = icamin_lsx.S
SCOPYKERNEL = copy_lsx.S
DCOPYKERNEL = copy_lsx.S


@@ -38,6 +38,8 @@ IZAMAXKERNEL = icamax_lasx.S
ISAMINKERNEL = iamin_lasx.S
IDAMINKERNEL = iamin_lasx.S
ICAMINKERNEL = icamin_lasx.S
IZAMINKERNEL = icamin_lasx.S
SCOPYKERNEL = copy_lasx.S
DCOPYKERNEL = copy_lasx.S


@@ -0,0 +1,555 @@
/***************************************************************************
Copyright (c) 2023, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#define ASSEMBLER
#include "common.h"
#define N $r4
#define X $r5
#define INCX $r6
#define I $r12
#define t1 $r13
#define t2 $r15
#define t3 $r18
#define t4 $r16
#define i0 $r17
#define i1 $r14
#define TEMP $r19
#define a0 $f12
#define a1 $f13
#define s1 $f15
#define x1 $xr9
#define x2 $xr10
#define x3 $xr11
#define x4 $xr12
#define VX0 $xr13
#define VX1 $xr14
#define VM0 $xr15
#define VM1 $xr16
#define VINC4 $xr17
#define VINC8 $xr18
#define VI0 $xr20
#define VI1 $xr21
#define VI2 $xr22
#define VI3 $xr8
#define VI4 $xr19
#define VT0 $xr23
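// ICAMIN/IZAMIN (LASX): return the 1-based index of the complex element with the
// smallest |real| + |imag|. N, X and INCX follow the BLAS convention; the result is
// returned in $r4.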
PROLOGUE
li.d i0, 0
bge $r0, N, .L999
bge $r0, INCX, .L999
li.d TEMP, 1
slli.d TEMP, TEMP, ZBASE_SHIFT
slli.d INCX, INCX, ZBASE_SHIFT
LD a0, X, 0 * SIZE
LD a1, X, 1 * SIZE
FABS a0, a0
FABS a1, a1
ADD s1, a1, a0
#ifdef DOUBLE
xvreplve0.d VM0, VM0
xvxor.v VI3, VI3, VI3 // 0
li.d I, -1
xvreplgr2vr.d VI4, I
xvffint.d.l VI4, VI4 // -1
bne INCX, TEMP, .L20
addi.d i0, i0, 1
srai.d I, N, 2
bge $r0, I, .L21
slli.d i0, i0, 2 //4
xvreplgr2vr.d VINC4, i0
addi.d i0, i0, -7
xvinsgr2vr.d VI1, i0, 0 //initialize the index value for vectorization
addi.d i0, i0, 2
xvinsgr2vr.d VI1, i0, 1
addi.d i0, i0, -1
xvinsgr2vr.d VI1, i0, 2
addi.d i0, i0, 2
xvinsgr2vr.d VI1, i0, 3
addi.d i0, i0, 1
xvinsgr2vr.d VI0, i0, 0 //1
addi.d i0, i0, 2
xvinsgr2vr.d VI0, i0, 1 //3
addi.d i0, i0, -1
xvinsgr2vr.d VI0, i0, 2 //2
addi.d i0, i0, 2
xvinsgr2vr.d VI0, i0, 3 //4
#else
xvreplve0.w VM0, VM0
xvxor.v VI3, VI3, VI3 // 0
li.w I, -1
xvreplgr2vr.w VI4, I
xvffint.s.w VI4, VI4 // -1
bne INCX, TEMP, .L20
addi.w i0, i0, 1
srai.d I, N, 3
bge $r0, I, .L21
slli.w i0, i0, 3 //8
xvreplgr2vr.w VINC8, i0
addi.w i0, i0, -15
xvinsgr2vr.w VI1, i0, 0 //initialize the index value for vectorization
addi.w i0, i0, 1
xvinsgr2vr.w VI1, i0, 1
addi.w i0, i0, 3
xvinsgr2vr.w VI1, i0, 2
addi.w i0, i0, 1
xvinsgr2vr.w VI1, i0, 3
addi.w i0, i0, -3
xvinsgr2vr.w VI1, i0, 4
addi.w i0, i0, 1
xvinsgr2vr.w VI1, i0, 5
addi.w i0, i0, 3
xvinsgr2vr.w VI1, i0, 6
addi.w i0, i0, 1
xvinsgr2vr.w VI1, i0, 7
addi.w i0, i0, 1
xvinsgr2vr.w VI0, i0, 0 //1
addi.w i0, i0, 1
xvinsgr2vr.w VI0, i0, 1 //2
addi.w i0, i0, 3
xvinsgr2vr.w VI0, i0, 2 //5
addi.w i0, i0, 1
xvinsgr2vr.w VI0, i0, 3 //6
addi.w i0, i0, -3
xvinsgr2vr.w VI0, i0, 4 //3
addi.w i0, i0, 1
xvinsgr2vr.w VI0, i0, 5 //4
addi.w i0, i0, 3
xvinsgr2vr.w VI0, i0, 6 //7
addi.w i0, i0, 1
xvinsgr2vr.w VI0, i0, 7 //8
#endif
.align 3
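// Main loop, contiguous X: load a block of complex elements, split real and imaginary
// parts with pickev/pickod, take |.| by selecting between x and -x, add the parts,
// then update the running per-lane minimum (VM0) and its index vector (VI0).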
.L10:
xvld VX0, X, 0 * SIZE
#ifdef DOUBLE
xvadd.d VI1, VI1, VINC4
xvld VX1, X, 4 * SIZE
addi.d I, I, -1
xvpickev.d x1, VX1, VX0
xvpickod.d x2, VX1, VX0
xvfmul.d x3, VI4, x1
xvfmul.d x4, VI4, x2
xvfcmp.clt.d VT0, x1, VI3
xvfcmp.clt.d VINC8, x2, VI3
xvbitsel.v x1, x1, x3, VT0
xvbitsel.v x2, x2, x4, VINC8
#else
xvadd.w VI1, VI1, VINC8
xvld VX1, X, 8 * SIZE
addi.d I, I, -1
xvpickev.w x1, VX1, VX0
xvpickod.w x2, VX1, VX0
xvfmul.s x3, VI4, x1
xvfmul.s x4, VI4, x2
xvfcmp.clt.s VT0, x1, VI3
xvfcmp.clt.s VINC4, x2, VI3
xvbitsel.v x1, x1, x3, VT0
xvbitsel.v x2, x2, x4, VINC4
#endif
XVFADD x1, x1, x2
XVFMIN x3, VM0, x1
XVCMPEQ VT0, x3, VM0
#ifdef DOUBLE
addi.d X, X, 8 * SIZE
#else
addi.d X, X, 16 * SIZE // single precision consumes 8 complex (16 float) elements per iteration
#endif
xvbitsel.v VM0, x3, VM0, VT0
xvbitsel.v VI0, VI1, VI0, VT0
blt $r0, I, .L10
.align 3
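// Reduce the per-lane minima in VM0 and their indices in VI0 to a single result.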
.L15:
#ifdef DOUBLE
xvpickve.d VI1, VI0, 0
xvpickve.d VI2, VI0, 1
xvpickve.d VI3, VI0, 2
xvpickve.d VI4, VI0, 3
xvpickve.d x1, VM0, 0
xvpickve.d x2, VM0, 1
xvpickve.d x3, VM0, 2
xvpickve.d x4, VM0, 3
xvfmin.d VM1, x1, x2
xvfcmp.ceq.d VT0, VM1, x1
xvbitsel.v VINC4, VI2, VI1, VT0
xvfmin.d VM0, x3, x4
xvfcmp.ceq.d VT0, x3, VM0
xvbitsel.v VINC8, VI4, VI3, VT0
xvfmin.d VM0, VM0, VM1
xvfcmp.ceq.d VT0, VM0, VM1
xvbitsel.v VI0, VINC8, VINC4, VT0
#else
xvxor.v VX0, VX0, VX0
xvor.v VX0, VI0, VX0
xvxor.v VX1, VX1, VX1
xvor.v VX1, VM0, VX1
xvpickve.w VI1, VI0, 0
xvpickve.w VI2, VI0, 1
xvpickve.w VI3, VI0, 2
xvpickve.w VI4, VI0, 3
xvpickve.w x1, VM0, 0
xvpickve.w x2, VM0, 1
xvpickve.w x3, VM0, 2
xvpickve.w x4, VM0, 3
xvfcmp.clt.s VT0, x2, x1
xvbitsel.v VM1, x1, x2, VT0
xvbitsel.v VINC4, VI1, VI2, VT0
xvfcmp.clt.s VT0, x4, x3
xvbitsel.v VM0, x3, x4, VT0
xvbitsel.v VINC8, VI3, VI4, VT0
xvfcmp.clt.s VT0, VM1, VM0
xvbitsel.v VM0, VM0, VM1, VT0
xvbitsel.v VI0, VINC8, VINC4, VT0
#endif
fcmp.ceq.d $fcc0, $f15, $f9
bceqz $fcc0, .L26
XVCMPLT VT0, VI1, VI0
xvbitsel.v VI0, VI0, VI1, VT0
b .L26
.align 3
.L20: // INCX!=1
#ifdef DOUBLE
addi.d i0, i0, 1
srai.d I, N, 2
bge $r0, I, .L21
slli.d i0, i0, 2 //4
xvreplgr2vr.d VINC4, i0
addi.d i0, i0, -7
xvinsgr2vr.d VI1, i0, 0 //initialize the index value for vectorization
addi.d i0, i0, 2
xvinsgr2vr.d VI1, i0, 1
addi.d i0, i0, -1
xvinsgr2vr.d VI1, i0, 2
addi.d i0, i0, 2
xvinsgr2vr.d VI1, i0, 3
addi.d i0, i0, 1
xvinsgr2vr.d VI0, i0, 0 //1
addi.d i0, i0, 2
xvinsgr2vr.d VI0, i0, 1 //3
addi.d i0, i0, -1
xvinsgr2vr.d VI0, i0, 2 //2
addi.d i0, i0, 2
xvinsgr2vr.d VI0, i0, 3 //4
#else
addi.w i0, i0, 1
srai.d I, N, 3
bge $r0, I, .L21
slli.w i0, i0, 3 //8
xvreplgr2vr.w VINC8, i0
addi.w i0, i0, -15
xvinsgr2vr.w VI1, i0, 0 //initialize the index value for vectorization
addi.w i0, i0, 1
xvinsgr2vr.w VI1, i0, 1
addi.w i0, i0, 3
xvinsgr2vr.w VI1, i0, 2
addi.w i0, i0, 1
xvinsgr2vr.w VI1, i0, 3
addi.w i0, i0, -3
xvinsgr2vr.w VI1, i0, 4
addi.w i0, i0, 1
xvinsgr2vr.w VI1, i0, 5
addi.w i0, i0, 3
xvinsgr2vr.w VI1, i0, 6
addi.w i0, i0, 1
xvinsgr2vr.w VI1, i0, 7
addi.w i0, i0, 1
xvinsgr2vr.w VI0, i0, 0 //1
addi.w i0, i0, 1
xvinsgr2vr.w VI0, i0, 1 //2
addi.w i0, i0, 3
xvinsgr2vr.w VI0, i0, 2 //5
addi.w i0, i0, 1
xvinsgr2vr.w VI0, i0, 3 //6
addi.w i0, i0, -3
xvinsgr2vr.w VI0, i0, 4 //3
addi.w i0, i0, 1
xvinsgr2vr.w VI0, i0, 5 //4
addi.w i0, i0, 3
xvinsgr2vr.w VI0, i0, 6 //7
addi.w i0, i0, 1
xvinsgr2vr.w VI0, i0, 7 //8
#endif
.align 3
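// Strided X: gather elements with scalar loads and vector inserts, then apply the
// same abs/add/min/index update as the contiguous loop.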
.L24:
#ifdef DOUBLE
ld.d t1, X, 0 * SIZE
ld.d t2, X, 1 * SIZE
add.d X, X, INCX
ld.d t3, X, 0 * SIZE
ld.d t4, X, 1 * SIZE
add.d X, X, INCX
xvinsgr2vr.d x1, t1, 0
xvinsgr2vr.d x2, t2, 0
xvinsgr2vr.d x1, t3, 1
xvinsgr2vr.d x2, t4, 1
xvadd.d VI1, VI1, VINC4
ld.d t1, X, 0 * SIZE
ld.d t2, X, 1 * SIZE
add.d X, X, INCX
ld.d t3, X, 0 * SIZE
ld.d t4, X, 1 * SIZE
add.d X, X, INCX
xvinsgr2vr.d x1, t1, 2
xvinsgr2vr.d x2, t2, 2
xvinsgr2vr.d x1, t3, 3
xvinsgr2vr.d x2, t4, 3
#else
ld.w t1, X, 0 * SIZE
ld.w t2, X, 1 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
ld.w t4, X, 1 * SIZE
add.d X, X, INCX
xvinsgr2vr.w x1, t1, 0
xvinsgr2vr.w x2, t2, 0
xvinsgr2vr.w x1, t3, 1
xvinsgr2vr.w x2, t4, 1
ld.w t1, X, 0 * SIZE
ld.w t2, X, 1 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
ld.w t4, X, 1 * SIZE
add.d X, X, INCX
xvinsgr2vr.w x1, t1, 2
xvinsgr2vr.w x2, t2, 2
xvinsgr2vr.w x1, t3, 3
xvinsgr2vr.w x2, t4, 3
xvadd.w VI1, VI1, VINC8
ld.w t1, X, 0 * SIZE
ld.w t2, X, 1 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
ld.w t4, X, 1 * SIZE
add.d X, X, INCX
xvinsgr2vr.w x1, t1, 4
xvinsgr2vr.w x2, t2, 4
xvinsgr2vr.w x1, t3, 5
xvinsgr2vr.w x2, t4, 5
xvadd.w VI1, VI1, VINC8
ld.w t1, X, 0 * SIZE
ld.w t2, X, 1 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
ld.w t4, X, 1 * SIZE
add.d X, X, INCX
xvinsgr2vr.w x1, t1, 6
xvinsgr2vr.w x2, t2, 6
xvinsgr2vr.w x1, t3, 7
xvinsgr2vr.w x2, t4, 7
#endif
addi.d I, I, -1
XVFMUL x3, VI4, x1
XVFMUL x4, VI4, x2
XVCMPLT VT0, x1, VI3
XVCMPLT VINC8, x2, VI3
xvbitsel.v x1, x1, x3, VT0
xvbitsel.v x2, x2, x4, VINC8
XVFADD x1, x1, x2
XVFMIN x3, VM0, x1
XVCMPEQ VT0, x3, VM0
xvbitsel.v VM0, x3, VM0, VT0
xvbitsel.v VI0, VI1, VI0, VT0
blt $r0, I, .L24
.align 3
.L25:
#ifdef DOUBLE
xvpickve.d VI1, VI0, 0
xvpickve.d VI2, VI0, 1
xvpickve.d VI3, VI0, 2
xvpickve.d VI4, VI0, 3
xvpickve.d x1, VM0, 0
xvpickve.d x2, VM0, 1
xvpickve.d x3, VM0, 2
xvpickve.d x4, VM0, 3
xvfmina.d VM1, x1, x2
xvfcmp.ceq.d VT0, VM1, x1
xvbitsel.v VINC4, VI2, VI1, VT0
xvfmina.d VM0, x3, x4
xvfcmp.ceq.d VT0, x3, VM0
xvbitsel.v VINC8, VI4, VI3, VT0
xvfmina.d VM0, VM0, VM1
xvfcmp.ceq.d VT0, VM0, VM1
#else
xvxor.v VX0, VX0, VX0
xvor.v VX0, VI0, VX0
xvxor.v VX1, VX1, VX1
xvor.v VX1, VM0, VX1
xvpickve.w VI1, VI0, 0
xvpickve.w VI2, VI0, 1
xvpickve.w VI3, VI0, 2
xvpickve.w VI4, VI0, 3
xvpickve.w x1, VM0, 0
xvpickve.w x2, VM0, 1
xvpickve.w x3, VM0, 2
xvpickve.w x4, VM0, 3
xvfcmp.clt.s VT0, x2, x1
xvbitsel.v VM1, x1, x2, VT0
xvbitsel.v VINC4, VI1, VI2, VT0
xvfcmp.clt.s VT0, x4, x3
xvbitsel.v VM0, x3, x4, VT0
xvbitsel.v VINC8, VI3, VI4, VT0
xvfcmp.clt.s VT0, VM1, VM0
xvbitsel.v VM0, VM0, VM1, VT0
#endif
xvbitsel.v VI0, VINC8, VINC4, VT0
fcmp.ceq.d $fcc0, $f15, $f9
bceqz $fcc0, .L26
XVCMPLT VT0, VI1, VI0
xvbitsel.v VI0, VI0, VI1, VT0
.align 3
.L26:
fcmp.ceq.d $fcc0, $f15, $f10
bceqz $fcc0, .L27
XVCMPLT VT0, VI2, VI0
xvbitsel.v VI0, VI0, VI2, VT0
.align 3
.L27:
fcmp.ceq.d $fcc0, $f15, $f11
bceqz $fcc0, .L28
XVCMPLT VT0, VI3, VI0
xvbitsel.v VI0, VI0, VI3, VT0
.align 3
.L28:
fcmp.ceq.d $fcc0, $f15, $f12
bceqz $fcc0, .L29
XVCMPLT VT0, VI4, VI0
xvbitsel.v VI0, VI0, VI4, VT0
.align 3
.L29:
#ifdef DOUBLE
movfr2gr.d i0, $f20
.align 3
.L21: //N<4
andi I, N, 3
bge $r0, I, .L999
srai.d i1, N, 2
slli.d i1, i1, 2
#else
fmov.s $f16, $f20
.align 3
.L252:
xvxor.v VI0, VI0, VI0
xvor.v VI0, VI0, VX0
fmov.s $f13, $f15
xvxor.v VM0, VM0, VM0
xvor.v VM0, VM0, VX1
xvpickve.w VI1, VI0, 4
xvpickve.w VI2, VI0, 5
xvpickve.w VI3, VI0, 6
xvpickve.w VI4, VI0, 7
xvpickve.w x1, VM0, 4
xvpickve.w x2, VM0, 5
xvpickve.w x3, VM0, 6
xvpickve.w x4, VM0, 7
xvfcmp.clt.s VT0, x2, x1
xvbitsel.v x1, x1, x2, VT0
xvbitsel.v VINC4, VI1, VI2, VT0
xvfcmp.clt.s VT0, x4, x3
xvbitsel.v VM0, x3, x4, VT0
xvbitsel.v VINC8, VI3, VI4, VT0
xvfcmp.clt.s VT0, x1, VM0
xvbitsel.v VM0, VM0, x1, VT0
xvbitsel.v VI0, VINC8, VINC4, VT0
fcmp.ceq.d $fcc0, $f15, $f9
bceqz $fcc0, .L262
xvfcmp.clt.s VT0, VI1, VI0
xvbitsel.v VI0, VI0, VI1, VT0
.align 3
.L262:
fcmp.ceq.d $fcc0, $f15, $f10
bceqz $fcc0, .L272
xvfcmp.clt.s VT0, VI2, VI0
xvbitsel.v VI0, VI0, VI2, VT0
.align 3
.L272:
fcmp.ceq.d $fcc0, $f15, $f11
bceqz $fcc0, .L282
xvfcmp.clt.s VT0, VI3, VI0
xvbitsel.v VI0, VI0, VI3, VT0
.align 3
.L282:
fcmp.ceq.d $fcc0, $f15, $f12
bceqz $fcc0, .L292
xvfcmp.clt.s VT0, VI4, VI0
xvbitsel.v VI0, VI0, VI4, VT0
.align 3
.L292:
fcmp.clt.s $fcc0, $f13, $f15
fsel $f15, $f15, $f13, $fcc0
fsel $f20, $f20, $f16, $fcc0
movfr2gr.s i0, $f20
.L21: //N<8
andi I, N, 7
bge $r0, I, .L999
srai.d i1, N, 3
slli.d i1, i1, 3
#endif
addi.d i1, i1, 1 //current index
movgr2fr.d $f21, i1
movgr2fr.d $f20, i0
.align 3
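// Scalar tail: process the remaining complex elements one at a time.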
.L22:
LD a0, X, 0 * SIZE
LD a1, X, 1 * SIZE
addi.d I, I, -1
FABS a0, a0
FABS a1, a1
ADD a0, a0, a1
FMIN a1, s1, a0
CMPEQ $fcc0, s1, a1
add.d X, X, INCX
fsel s1, a1, s1, $fcc0
fsel $f20, $f21, $f20, $fcc0
addi.d i1, i1, 1
movgr2fr.d $f21, i1
blt $r0, I, .L22
MTG i0, $f20
.align 3
.L999:
move $r4, $r17
jirl $r0, $r1, 0x0
.align 3
EPILOGUE


@@ -0,0 +1,425 @@
/***************************************************************************
Copyright (c) 2023, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#define ASSEMBLER
#include "common.h"
#define N $r4
#define X $r5
#define INCX $r6
#define I $r12
#define t1 $r13
#define t2 $r15
#define t3 $r18
#define t4 $r16
#define i0 $r17
#define i1 $r14
#define TEMP $r19
#define a0 $f12
#define a1 $f13
#define s1 $f15
#define x1 $vr9
#define x2 $vr10
#define x3 $vr11
#define x4 $vr12
#define VX0 $vr13
#define VX1 $vr14
#define VM0 $vr15
#define VM1 $vr16
#define VINC4 $vr17
#define VINC8 $vr18
#define VI0 $vr20
#define VI1 $vr21
#define VI2 $vr22
#define VI3 $vr8
#define VI4 $vr19
#define VT0 $vr23
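// ICAMIN/IZAMIN (LSX): same algorithm as the LASX kernel above, using 128-bit vectors.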
PROLOGUE
li.d i0, 0
bge $r0, N, .L999
bge $r0, INCX, .L999
li.d TEMP, 1
slli.d TEMP, TEMP, ZBASE_SHIFT
slli.d INCX, INCX, ZBASE_SHIFT
LD a0, X, 0 * SIZE
LD a1, X, 1 * SIZE
FABS a0, a0
FABS a1, a1
ADD s1, a1, a0
#ifdef DOUBLE
vreplvei.d VM0, VM0, 0
#else
vreplvei.w VM0, VM0, 0
#endif
vxor.v VI3, VI3, VI3 // 0
#ifdef DOUBLE
li.d I, -1
vreplgr2vr.d VI4, I
vffint.d.l VI4, VI4 // -1
bne INCX, TEMP, .L20
addi.d i0, i0, 1
srai.d I, N, 2
bge $r0, I, .L21
slli.d i0, i0, 1 //2
vreplgr2vr.d VINC4, i0
addi.d i0, i0, -3
vinsgr2vr.d VI1, i0, 0 //initialize the index value for vectorization
addi.d i0, i0, 1
vinsgr2vr.d VI1, i0, 1
addi.d i0, i0, 1
vinsgr2vr.d VI0, i0, 0 //1
addi.d i0, i0, 1
vinsgr2vr.d VI0, i0, 1 //2
#else
li.w I, -1
vreplgr2vr.w VI4, I
vffint.s.w VI4, VI4 // -1
bne INCX, TEMP, .L20
addi.w i0, i0, 1
srai.d I, N, 2
bge $r0, I, .L21
slli.w i0, i0, 2 //4
vreplgr2vr.w VINC4, i0
addi.w i0, i0, -7
vinsgr2vr.w VI1, i0, 0 //initialize the index value for vectorization
addi.w i0, i0, 1
vinsgr2vr.w VI1, i0, 1
addi.w i0, i0, 1
vinsgr2vr.w VI1, i0, 2
addi.w i0, i0, 1
vinsgr2vr.w VI1, i0, 3
addi.w i0, i0, 1
vinsgr2vr.w VI0, i0, 0 //1
addi.w i0, i0, 1
vinsgr2vr.w VI0, i0, 1 //2
addi.w i0, i0, 1
vinsgr2vr.w VI0, i0, 2 //3
addi.w i0, i0, 1
vinsgr2vr.w VI0, i0, 3 //4
#endif
.align 3
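// Main loop, contiguous X: 4 complex elements per iteration (two 2-element batches
// for double precision); split real/imaginary parts, take absolute values, add,
// and track the running minimum and its index.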
.L10:
vld VX0, X, 0 * SIZE
#ifdef DOUBLE
vadd.d VI1, VI1, VINC4
vld VX1, X, 2 * SIZE
addi.d I, I, -1
vpickev.d x1, VX1, VX0
vpickod.d x2, VX1, VX0
vfmul.d x3, VI4, x1
vfmul.d x4, VI4, x2
vfcmp.clt.d VT0, x1, VI3
vfcmp.clt.d VINC8, x2, VI3
vbitsel.v x1, x1, x3, VT0
vbitsel.v x2, x2, x4, VINC8
vfadd.d x1, x1, x2
vfmin.d x3, VM0, x1
vfcmp.ceq.d VT0, x3, VM0
vbitsel.v VM0, x3, VM0, VT0
vbitsel.v VI0, VI1, VI0, VT0
vld VX0, X, 4 * SIZE
vadd.d VI1, VI1, VINC4
vld VX1, X, 6 * SIZE
vpickev.d x1, VX1, VX0
vpickod.d x2, VX1, VX0
#else
vadd.w VI1, VI1, VINC4
vld VX1, X, 4 * SIZE
addi.d I, I, -1
vpickev.w x1, VX1, VX0
vpickod.w x2, VX1, VX0
#endif
VFMUL x3, VI4, x1
VFMUL x4, VI4, x2
VCMPLT VT0, x1, VI3
VCMPLT VINC8, x2, VI3
vbitsel.v x1, x1, x3, VT0
vbitsel.v x2, x2, x4, VINC8
VFADD x1, x1, x2
VFMIN x3, VM0, x1
VCMPEQ VT0, x3, VM0
addi.d X, X, 8 * SIZE
vbitsel.v VM0, x3, VM0, VT0
vbitsel.v VI0, VI1, VI0, VT0
blt $r0, I, .L10
.align 3
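// Reduce the per-lane minima and indices to a single result.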
.L15:
#ifdef DOUBLE
vreplvei.d VI1, VI0, 0
vreplvei.d VI2, VI0, 1
vreplvei.d x1, VM0, 0
vreplvei.d x2, VM0, 1
fcmp.ceq.d $fcc0, $f10, $f9
bceqz $fcc0, .L26
vfcmp.clt.d VT0, VI1, VI2
vbitsel.v VI0, VI2, VI1, VT0
b .L27
#else
vreplvei.w VI1, VI0, 0
vreplvei.w VI2, VI0, 1
vreplvei.w VI3, VI0, 2
vreplvei.w VI4, VI0, 3
vreplvei.w x1, VM0, 0
vreplvei.w x2, VM0, 1
vreplvei.w x3, VM0, 2
vreplvei.w x4, VM0, 3
vfmina.s VM1, x1, x2
vfcmp.ceq.s VT0, VM1, x1
vbitsel.v VINC4, VI2, VI1, VT0
vfmina.s VM0, x3, x4
vfcmp.ceq.s VT0, x3, VM0
vbitsel.v VINC8, VI4, VI3, VT0
vfmina.s VM0, VM0, VM1
vfcmp.ceq.s VT0, VM0, VM1
vbitsel.v VI0, VINC8, VINC4, VT0
fcmp.ceq.d $fcc0, $f15, $f9
bceqz $fcc0, .L26
vfcmp.clt.s VT0, VI1, VI0
vbitsel.v VI0, VI0, VI1, VT0
b .L26
#endif
.align 3
.L20: // INCX!=1
#ifdef DOUBLE
addi.d i0, i0, 1
srai.d I, N, 2
bge $r0, I, .L21
slli.d i0, i0, 1 //2
vreplgr2vr.d VINC4, i0
addi.d i0, i0, -3
vinsgr2vr.d VI1, i0, 0 //initialize the index value for vectorization
addi.d i0, i0, 1
vinsgr2vr.d VI1, i0, 1
addi.d i0, i0, 1
vinsgr2vr.d VI0, i0, 0 //1
addi.d i0, i0, 1
vinsgr2vr.d VI0, i0, 1 //2
#else
addi.w i0, i0, 1
srai.d I, N, 2
bge $r0, I, .L21
slli.w i0, i0, 2 //4
vreplgr2vr.w VINC4, i0
addi.w i0, i0, -7
vinsgr2vr.w VI1, i0, 0 //initialize the index value for vectorization
addi.w i0, i0, 1
vinsgr2vr.w VI1, i0, 1
addi.w i0, i0, 1
vinsgr2vr.w VI1, i0, 2
addi.w i0, i0, 1
vinsgr2vr.w VI1, i0, 3
addi.w i0, i0, 1
vinsgr2vr.w VI0, i0, 0 //1
addi.w i0, i0, 1
vinsgr2vr.w VI0, i0, 1 //2
addi.w i0, i0, 1
vinsgr2vr.w VI0, i0, 2 //3
addi.w i0, i0, 1
vinsgr2vr.w VI0, i0, 3 //4
#endif
.align 3
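// Strided X: gather with scalar loads, then the same update as the contiguous loop.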
.L24:
#ifdef DOUBLE
ld.d t1, X, 0 * SIZE
ld.d t2, X, 1 * SIZE
add.d X, X, INCX
ld.d t3, X, 0 * SIZE
ld.d t4, X, 1 * SIZE
add.d X, X, INCX
vinsgr2vr.d x1, t1, 0
vinsgr2vr.d x2, t2, 0
vinsgr2vr.d x1, t3, 1
vinsgr2vr.d x2, t4, 1
vadd.d VI1, VI1, VINC4
vfmul.d x3, VI4, x1
vfmul.d x4, VI4, x2
vfcmp.clt.d VT0, x1, VI3
vfcmp.clt.d VINC8, x2, VI3
vbitsel.v x1, x1, x3, VT0
vbitsel.v x2, x2, x4, VINC8
vfadd.d x1, x1, x2
vfmin.d x3, VM0, x1
ld.d t1, X, 0 * SIZE
vfcmp.ceq.d VT0, x3, VM0
ld.d t2, X, 1 * SIZE
vbitsel.v VM0, x3, VM0, VT0
vbitsel.v VI0, VI1, VI0, VT0
add.d X, X, INCX
ld.d t3, X, 0 * SIZE
ld.d t4, X, 1 * SIZE
add.d X, X, INCX
vinsgr2vr.d x1, t1, 0
vinsgr2vr.d x2, t2, 0
vinsgr2vr.d x1, t3, 1
vinsgr2vr.d x2, t4, 1
vadd.d VI1, VI1, VINC4
#else
ld.w t1, X, 0 * SIZE
ld.w t2, X, 1 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
ld.w t4, X, 1 * SIZE
add.d X, X, INCX
vinsgr2vr.w x1, t1, 0
vinsgr2vr.w x2, t2, 0
vinsgr2vr.w x1, t3, 1
vinsgr2vr.w x2, t4, 1
vadd.w VI1, VI1, VINC4
ld.w t1, X, 0 * SIZE
ld.w t2, X, 1 * SIZE
add.d X, X, INCX
ld.w t3, X, 0 * SIZE
ld.w t4, X, 1 * SIZE
add.d X, X, INCX
vinsgr2vr.w x1, t1, 2
vinsgr2vr.w x2, t2, 2
vinsgr2vr.w x1, t3, 3
vinsgr2vr.w x2, t4, 3
#endif
addi.d I, I, -1
VFMUL x3, VI4, x1
VFMUL x4, VI4, x2
VCMPLT VT0, x1, VI3
VCMPLT VINC8, x2, VI3
vbitsel.v x1, x1, x3, VT0
vbitsel.v x2, x2, x4, VINC8
VFADD x1, x1, x2
VFMIN x3, VM0, x1
VCMPEQ VT0, x3, VM0
vbitsel.v VM0, x3, VM0, VT0
vbitsel.v VI0, VI1, VI0, VT0
blt $r0, I, .L24
.align 3
.L25:
#ifdef DOUBLE
vreplvei.d VI1, VI0, 0
vreplvei.d VI2, VI0, 1
vreplvei.d x1, VM0, 0
vreplvei.d x2, VM0, 1
fcmp.ceq.d $fcc0, $f10, $f9
bceqz $fcc0, .L26
vfcmp.clt.d VT0, VI1, VI2
vbitsel.v VI0, VI2, VI1, VT0
b .L27
#else
vreplvei.w VI1, VI0, 0
vreplvei.w VI2, VI0, 1
vreplvei.w VI3, VI0, 2
vreplvei.w VI4, VI0, 3
vreplvei.w x1, VM0, 0
vreplvei.w x2, VM0, 1
vreplvei.w x3, VM0, 2
vreplvei.w x4, VM0, 3
vfmina.s VM1, x1, x2
vfcmp.ceq.s VT0, VM1, x1
vbitsel.v VINC4, VI2, VI1, VT0
vfmina.s VM0, x3, x4
vfcmp.ceq.s VT0, x3, VM0
vbitsel.v VINC8, VI4, VI3, VT0
vfmina.s VM0, VM0, VM1
vfcmp.ceq.s VT0, VM0, VM1
vbitsel.v VI0, VINC8, VINC4, VT0
fcmp.ceq.d $fcc0, $f15, $f9
bceqz $fcc0, .L26
vfcmp.clt.s VT0, VI1, VI0
vbitsel.v VI0, VI0, VI1, VT0
#endif
.align 3
.L26:
#ifdef DOUBLE
vfmina.d VM0, x1, x2
vfcmp.ceq.d VT0, x2, VM0
#else
fcmp.ceq.d $fcc0, $f15, $f10
bceqz $fcc0, .L27
vfcmp.clt.s VT0, VI2, VI0
#endif
vbitsel.v VI0, VI0, VI2, VT0
.align 3
.L27:
#ifdef DOUBLE
movfr2gr.d i0, $f20
.align 3
#else
fcmp.ceq.d $fcc0, $f15, $f11
bceqz $fcc0, .L28
vfcmp.clt.s VT0, VI3, VI0
vbitsel.v VI0, VI0, VI3, VT0
.align 3
.L28:
fcmp.ceq.d $fcc0, $f15, $f12
bceqz $fcc0, .L29
vfcmp.clt.s VT0, VI4, VI0
vbitsel.v VI0, VI0, VI4, VT0
.align 3
.L29:
movfr2gr.s i0, $f20
.align 3
#endif
.L21: //N<4
andi I, N, 3
bge $r0, I, .L999
srai.d i1, N, 2
slli.d i1, i1, 2
addi.d i1, i1, 1 //current index
movgr2fr.d $f21, i1
movgr2fr.d $f20, i0
.align 3
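// Scalar tail: process the remaining complex elements one at a time.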
.L22:
LD a0, X, 0 * SIZE
LD a1, X, 1 * SIZE
addi.d I, I, -1
FABS a0, a0
FABS a1, a1
ADD a0, a0, a1
FMIN a1, s1, a0
CMPEQ $fcc0, s1, a1
add.d X, X, INCX
fsel s1, a1, s1, $fcc0
fsel $f20, $f21, $f20, $fcc0
addi.d i1, i1, 1
movgr2fr.d $f21, i1
blt $r0, I, .L22
MTG i0, $f20
.align 3
.L999:
move $r4, $r17
jirl $r0, $r1, 0x0
.align 3
EPILOGUE