From 702fc1d56dc8278a856c322a5fc1804067d01fd2 Mon Sep 17 00:00:00 2001
From: yancheng
Date: Thu, 7 Dec 2023 11:51:19 +0800
Subject: [PATCH] loongarch64: Add optimization for min.

---
 kernel/loongarch64/KERNEL.LOONGSON2K1000 |   3 +
 kernel/loongarch64/KERNEL.LOONGSON3R5    |   3 +
 kernel/loongarch64/dmin_lasx.S           | 175 +++++++++++++++++++
 kernel/loongarch64/dmin_lsx.S            | 143 ++++++++++++++++
 kernel/loongarch64/smin_lasx.S           | 205 +++++++++++++++++++++++
 kernel/loongarch64/smin_lsx.S            | 174 +++++++++++++++++++
 6 files changed, 703 insertions(+)
 create mode 100644 kernel/loongarch64/dmin_lasx.S
 create mode 100644 kernel/loongarch64/dmin_lsx.S
 create mode 100644 kernel/loongarch64/smin_lasx.S
 create mode 100644 kernel/loongarch64/smin_lsx.S

diff --git a/kernel/loongarch64/KERNEL.LOONGSON2K1000 b/kernel/loongarch64/KERNEL.LOONGSON2K1000
index e00893b72..0ff73c2db 100644
--- a/kernel/loongarch64/KERNEL.LOONGSON2K1000
+++ b/kernel/loongarch64/KERNEL.LOONGSON2K1000
@@ -16,4 +16,7 @@ DAMINKERNEL = damin_lsx.S
 SMAXKERNEL = smax_lsx.S
 DMAXKERNEL = dmax_lsx.S
 
+SMINKERNEL = smin_lsx.S
+DMINKERNEL = dmin_lsx.S
+
 endif

diff --git a/kernel/loongarch64/KERNEL.LOONGSON3R5 b/kernel/loongarch64/KERNEL.LOONGSON3R5
index f238436f5..71f53d9d7 100644
--- a/kernel/loongarch64/KERNEL.LOONGSON3R5
+++ b/kernel/loongarch64/KERNEL.LOONGSON3R5
@@ -16,6 +16,9 @@ DAMINKERNEL = damin_lasx.S
 SMAXKERNEL = smax_lasx.S
 DMAXKERNEL = dmax_lasx.S
 
+SMINKERNEL = smin_lasx.S
+DMINKERNEL = dmin_lasx.S
+
 DGEMMKERNEL = dgemm_kernel_16x4.S
 DGEMMINCOPY = dgemm_ncopy_16.S
 DGEMMITCOPY = dgemm_tcopy_16.S

diff --git a/kernel/loongarch64/dmin_lasx.S b/kernel/loongarch64/dmin_lasx.S
new file mode 100644
index 000000000..e76056565
--- /dev/null
+++ b/kernel/loongarch64/dmin_lasx.S
@@ -0,0 +1,175 @@
+#define ASSEMBLER
+
+#include "common.h"
+
+#define N $r4
+#define X $r5
+#define INCX $r6
+#define I $r12
+#define J $r13
+#define t1 $r14
+#define t2 $r18
+#define t3 $r15
+#define t4 $r17
+#define TEMP $r16
+#define m0 $xr8
+#define x1 $xr9
+#define x2 $xr10
+#define x3 $xr11
+#define x4 $xr12
+#define VX0 $xr20
+#define VX1 $xr21
+#define VM0 $xr22
+#define VM1 $xr23
+#define VM2 $xr19
+
+    PROLOGUE
+
+    bge $r0, N, .L999
+    bge $r0, INCX, .L999
+    li.d TEMP, 1
+    slli.d TEMP, TEMP, BASE_SHIFT
+    slli.d INCX, INCX, BASE_SHIFT
+    bne INCX, TEMP, .L20
+    xvld VM0, X, 0
+    srai.d I, N, 3
+    bge $r0, I, .L12
+    .align 3
+
+.L10:
+    xvld VX0, X, 0 * SIZE
+    xvld VX1, X, 4 * SIZE
+    addi.d I, I, -1
+    xvfmin.d VM1, VX1, VX0
+    addi.d X, X, 8 * SIZE
+    xvfmin.d VM0, VM0, VM1
+    blt $r0, I, .L10
+    .align 3
+
+.L11:
+    xvpickve.d x1, VM0, 0
+    xvpickve.d x2, VM0, 1
+    xvpickve.d x3, VM0, 2
+    xvpickve.d x4, VM0, 3
+    xvfmin.d VM1, x1, x2
+    xvfmin.d VM2, x3, x4
+    xvfmin.d VM0, VM1, VM2
+    .align 3
+
+.L12: //INCX==1 and N<8
+    andi I, N, 7
+    li.d J, 4
+    bge J, I, .L13 // 4
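
Reference sketch (reviewer note, not part of the patch): the dmin_lasx.S kernel above keeps a 4-lane running minimum in VM0, folds 8 doubles per iteration through two 256-bit xvld loads and two xvfmin.d ops, collapses the lanes with an xvpickve.d/xvfmin.d tree, and finishes the N & 7 leftovers one element at a time. The C below restates that flow for the INCX==1 path; the name dmin_ref and the loop layout are illustrative, and it assumes plain minimum semantics (xvfmin.d's NaN handling is not modeled).

#include <stddef.h>

/* dmin_ref: illustrative name, not in the patch. Plain-C restatement of
 * the LASX kernel's structure for the unit-stride (INCX==1) path. */
static double dmin_ref(size_t n, const double *x)
{
    /* 4-lane running minimum, the role VM0 ($xr22) plays above. The
     * kernel seeds VM0 with the first four elements via xvld; seeding
     * every lane with x[0] is equivalent for a minimum. */
    double vm[4] = { x[0], x[0], x[0], x[0] };
    size_t i = 0;

    /* Main loop (.L10): 8 doubles per iteration.
     * VM1 = xvfmin.d(VX0, VX1), then VM0 = xvfmin.d(VM0, VM1). */
    for (; i + 8 <= n; i += 8) {
        for (int k = 0; k < 4; k++) {
            double a = x[i + k], b = x[i + 4 + k];
            double lo = a < b ? a : b;   /* VM1 lane */
            if (lo < vm[k]) vm[k] = lo;  /* VM0 lane */
        }
    }

    /* Cross-lane reduction (.L11): xvpickve.d extracts the four lanes,
     * three xvfmin.d ops reduce them to a single value. */
    double m01 = vm[0] < vm[1] ? vm[0] : vm[1];
    double m23 = vm[2] < vm[3] ? vm[2] : vm[3];
    double m   = m01 < m23 ? m01 : m23;

    /* Tail (.L12 onward): the remaining n & 7 elements, handled
     * scalar after the vector loop, as in the kernel. */
    for (; i < n; i++)
        if (x[i] < m) m = x[i];

    return m;
}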