From 04593bb27c803d947a5901a1bc6e8ed57e452658 Mon Sep 17 00:00:00 2001 From: Honglin Zhu Date: Mon, 13 Jun 2022 17:05:43 +0800 Subject: [PATCH 1/5] neoverse n2 sbgemm: init file --- kernel/arm64/KERNEL.NEOVERSEN2 | 11 ++++ kernel/arm64/sbgemm_beta_neoversen2.c | 83 +++++++++++++++++++++++++ kernel/arm64/sbgemm_kernel_neoversen2.c | 34 ++++++++++ kernel/arm64/sbgemm_ncopy_neoversen2.c | 33 ++++++++++ kernel/arm64/sbgemm_tcopy_neoversen2.c | 33 ++++++++++ 5 files changed, 194 insertions(+) create mode 100644 kernel/arm64/sbgemm_beta_neoversen2.c create mode 100644 kernel/arm64/sbgemm_kernel_neoversen2.c create mode 100644 kernel/arm64/sbgemm_ncopy_neoversen2.c create mode 100644 kernel/arm64/sbgemm_tcopy_neoversen2.c diff --git a/kernel/arm64/KERNEL.NEOVERSEN2 b/kernel/arm64/KERNEL.NEOVERSEN2 index ea010db42..f880f9692 100644 --- a/kernel/arm64/KERNEL.NEOVERSEN2 +++ b/kernel/arm64/KERNEL.NEOVERSEN2 @@ -187,3 +187,14 @@ ZGEMMONCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_N).c ZGEMMOTCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_N).c ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) + +SBGEMM_BETA = sbgemm_beta_neoversen2.c +SBGEMMKERNEL = sbgemm_kernel_neoversen2.c +SBGEMMINCOPY = sbgemm_ncopy_neoversen2.c +SBGEMMITCOPY = sbgemm_tcopy_neoversen2.c +SBGEMMONCOPY = sbgemm_ncopy_neoversen2.c +SBGEMMOTCOPY = sbgemm_tcopy_neoversen2.c +SBGEMMINCOPYOBJ = sbgemm_incopy$(TSUFFIX).$(SUFFIX) +SBGEMMITCOPYOBJ = sbgemm_itcopy$(TSUFFIX).$(SUFFIX) +SBGEMMONCOPYOBJ = sbgemm_oncopy$(TSUFFIX).$(SUFFIX) +SBGEMMOTCOPYOBJ = sbgemm_otcopy$(TSUFFIX).$(SUFFIX) diff --git a/kernel/arm64/sbgemm_beta_neoversen2.c b/kernel/arm64/sbgemm_beta_neoversen2.c new file mode 100644 index 000000000..ab787bfc6 --- /dev/null +++ b/kernel/arm64/sbgemm_beta_neoversen2.c @@ -0,0 +1,83 @@ +/*************************************************************************** + * Copyright (c) 2022, The OpenBLAS Project + * All rights reserved. 
+ * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the OpenBLAS project nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * *****************************************************************************/ + +#include "common.h" + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT beta, IFLOAT *dummy2, + BLASLONG dummy3, IFLOAT *dummy4, BLASLONG dummy5, FLOAT *c, + BLASLONG ldc) { + + BLASLONG i, j; + BLASLONG chunk, remain; + FLOAT *c_offset1, *c_offset; + c_offset = c; + chunk = m >> 3; + remain = m & 7; + if (beta == ZERO) { + for (j = n; j > 0; j--) { + c_offset1 = c_offset; + c_offset += ldc; + for (i = chunk; i > 0; i--) { + *(c_offset1 + 0) = ZERO; + *(c_offset1 + 1) = ZERO; + *(c_offset1 + 2) = ZERO; + *(c_offset1 + 3) = ZERO; + *(c_offset1 + 4) = ZERO; + *(c_offset1 + 5) = ZERO; + *(c_offset1 + 6) = ZERO; + *(c_offset1 + 7) = ZERO; + c_offset1 += 8; + } + for (i = remain; i > 0; i--) { + *c_offset1 = ZERO; + c_offset1++; + } + } + } else { + for (j = n; j > 0; j--) { + c_offset1 = c_offset; + c_offset += ldc; + for (i = chunk; i > 0; i--) { + *(c_offset1 + 0) *= beta; + *(c_offset1 + 1) *= beta; + *(c_offset1 + 2) *= beta; + *(c_offset1 + 3) *= beta; + *(c_offset1 + 4) *= beta; + *(c_offset1 + 5) *= beta; + *(c_offset1 + 6) *= beta; + *(c_offset1 + 7) *= beta; + c_offset1 += 8; + } + for (i = remain; i > 0; i--) { + *c_offset1 *= beta; + c_offset1++; + } + } + } + return 0; +}; \ No newline at end of file diff --git a/kernel/arm64/sbgemm_kernel_neoversen2.c b/kernel/arm64/sbgemm_kernel_neoversen2.c new file mode 100644 index 000000000..f1022c9d6 --- /dev/null +++ b/kernel/arm64/sbgemm_kernel_neoversen2.c @@ -0,0 +1,34 @@ +/*************************************************************************** + * Copyright (c) 2022, The OpenBLAS Project + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the OpenBLAS project nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * *****************************************************************************/ + +#include "common.h" + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, + FLOAT *C, BLASLONG ldc) { + return 0; +} diff --git a/kernel/arm64/sbgemm_ncopy_neoversen2.c b/kernel/arm64/sbgemm_ncopy_neoversen2.c new file mode 100644 index 000000000..608b8895c --- /dev/null +++ b/kernel/arm64/sbgemm_ncopy_neoversen2.c @@ -0,0 +1,33 @@ +/*************************************************************************** + * Copyright (c) 2022, The OpenBLAS Project + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the OpenBLAS project nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * *****************************************************************************/ + +#include "common.h" + +int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) { + return 0; +} diff --git a/kernel/arm64/sbgemm_tcopy_neoversen2.c b/kernel/arm64/sbgemm_tcopy_neoversen2.c new file mode 100644 index 000000000..608b8895c --- /dev/null +++ b/kernel/arm64/sbgemm_tcopy_neoversen2.c @@ -0,0 +1,33 @@ +/*************************************************************************** + * Copyright (c) 2022, The OpenBLAS Project + * All rights reserved. 
+ * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the OpenBLAS project nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * *****************************************************************************/ + +#include "common.h" + +int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) { + return 0; +} From 55d686d41e553174f455cde2a0a6ae0a4f81519a Mon Sep 17 00:00:00 2001 From: Honglin Zhu Date: Wed, 15 Jun 2022 14:20:25 +0800 Subject: [PATCH 2/5] neoverse n2 sbgemm: implement ncopy tcopy kernel_8x4 --- Makefile.arm64 | 2 +- kernel/arm64/KERNEL.NEOVERSEN2 | 2 +- kernel/arm64/sbgemm_kernel_8x4_neoversen2.c | 314 ++++++++++++++++++++ kernel/arm64/sbgemm_kernel_neoversen2.c | 34 --- kernel/arm64/sbgemm_ncopy_neoversen2.c | 82 +++++ kernel/arm64/sbgemm_tcopy_neoversen2.c | 84 ++++++ param.h | 3 + 7 files changed, 485 insertions(+), 36 deletions(-) create mode 100644 kernel/arm64/sbgemm_kernel_8x4_neoversen2.c delete mode 100644 kernel/arm64/sbgemm_kernel_neoversen2.c diff --git a/Makefile.arm64 b/Makefile.arm64 index 9844d2083..c88728e8d 100644 --- a/Makefile.arm64 +++ b/Makefile.arm64 @@ -121,7 +121,7 @@ ifeq ($(CORE), NEOVERSEN2) ifeq (1, $(filter 1,$(GCCVERSIONGTEQ7) $(ISCLANG))) ifeq ($(GCCVERSIONGTEQ9), 1) ifeq (1, $(filter 1,$(GCCMINORVERSIONGTEQ4) $(GCCVERSIONGTEQ10))) -CCOMMON_OPT += -march=armv8.5-a -mtune=neoverse-n2 +CCOMMON_OPT += -march=armv8.5-a+sve+sve2+bf16 -mtune=neoverse-n2 ifneq ($(F_COMPILER), NAG) FCOMMON_OPT += -march=armv8.5-a -mtune=neoverse-n2 endif diff --git a/kernel/arm64/KERNEL.NEOVERSEN2 b/kernel/arm64/KERNEL.NEOVERSEN2 index f880f9692..07a94a043 100644 --- a/kernel/arm64/KERNEL.NEOVERSEN2 +++ b/kernel/arm64/KERNEL.NEOVERSEN2 @@ -189,7 +189,7 @@ ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) SBGEMM_BETA = sbgemm_beta_neoversen2.c -SBGEMMKERNEL = sbgemm_kernel_neoversen2.c +SBGEMMKERNEL = sbgemm_kernel_$(SBGEMM_UNROLL_M)x$(SBGEMM_UNROLL_N)_neoversen2.c SBGEMMINCOPY = sbgemm_ncopy_neoversen2.c SBGEMMITCOPY = sbgemm_tcopy_neoversen2.c SBGEMMONCOPY = sbgemm_ncopy_neoversen2.c diff --git 
a/kernel/arm64/sbgemm_kernel_8x4_neoversen2.c b/kernel/arm64/sbgemm_kernel_8x4_neoversen2.c new file mode 100644 index 000000000..01a96cd60 --- /dev/null +++ b/kernel/arm64/sbgemm_kernel_8x4_neoversen2.c @@ -0,0 +1,314 @@ +/*************************************************************************** + * Copyright (c) 2022, The OpenBLAS Project + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the OpenBLAS project nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+#include <arm_sve.h> +#include "common.h"
mb1 = svld1_bf16(pg16, (bfloat16_t *) ptr_b1); + + mc00 = svbfmmla(mc00, ma0, mb0); + mc10 = svbfmmla(mc10, ma1, mb0); + mc20 = svbfmmla(mc20, ma2, mb0); + mc30 = svbfmmla(mc30, ma3, mb0); + mc01 = svbfmmla(mc01, ma0, mb1); + mc11 = svbfmmla(mc11, ma1, mb1); + mc21 = svbfmmla(mc21, ma2, mb1); + mc31 = svbfmmla(mc31, ma3, mb1); + + ptr_a0 += 8; + ptr_a1 += 8; + ptr_a2 += 8; + ptr_a3 += 8; + ptr_b0 += 8; + ptr_b1 += 8; + } + svst1_scatter_index(pg32, ptr_c00, off_vc, mc00); + svst1_scatter_index(pg32, ptr_c10, off_vc, mc10); + svst1_scatter_index(pg32, ptr_c20, off_vc, mc20); + svst1_scatter_index(pg32, ptr_c30, off_vc, mc30); + svst1_scatter_index(pg32, ptr_c01, off_vc, mc01); + svst1_scatter_index(pg32, ptr_c11, off_vc, mc11); + svst1_scatter_index(pg32, ptr_c21, off_vc, mc21); + svst1_scatter_index(pg32, ptr_c31, off_vc, mc31); + + ptr_c00 += 8; + ptr_c10 += 8; + ptr_c20 += 8; + ptr_c30 += 8; + ptr_c01 += 8; + ptr_c11 += 8; + ptr_c21 += 8; + ptr_c31 += 8; + } + + if (padm & 4) { + // rest 4 or 6 + ptr_a0 = ptr_a; + ptr_a1 = ptr_a0 + 2 * padk; + ptr_a += 4 * padk; + + ptr_b0 = ptr_b; + ptr_b1 = ptr_b0 + 2 * padk; + + mc00 = svdup_f32(0); mc01 = svdup_f32(0); + mc10 = svdup_f32(0); mc11 = svdup_f32(0); + for (BLASLONG p = 0; p < padk/4; p++) { + ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0); + ma1 = svld1_bf16(pg16, (bfloat16_t *) ptr_a1); + mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0); + mb1 = svld1_bf16(pg16, (bfloat16_t *) ptr_b1); + + mc00 = svbfmmla(mc00, ma0, mb0); + mc10 = svbfmmla(mc10, ma1, mb0); + mc01 = svbfmmla(mc01, ma0, mb1); + mc11 = svbfmmla(mc11, ma1, mb1); + + ptr_a0 += 8; + ptr_a1 += 8; + ptr_b0 += 8; + ptr_b1 += 8; + } + svst1_scatter_index(pg32, ptr_c00, off_vc, mc00); + svst1_scatter_index(pg32, ptr_c10, off_vc, mc10); + svst1_scatter_index(pg32, ptr_c01, off_vc, mc01); + svst1_scatter_index(pg32, ptr_c11, off_vc, mc11); + + ptr_c00 += 4; + ptr_c10 += 4; + ptr_c01 += 4; + ptr_c11 += 4; + } + + if (padm & 2) { + // rest 2 + ptr_a0 = ptr_a; + + 
ptr_b0 = ptr_b; + ptr_b1 = ptr_b0 + 2 * padk; + + mc00 = svdup_f32(0); mc01 = svdup_f32(0); + for (BLASLONG p = 0; p < padk/4; p++) { + ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0); + mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0); + mb1 = svld1_bf16(pg16, (bfloat16_t *) ptr_b1); + mc00 = svbfmmla(mc00, ma0, mb0); + mc01 = svbfmmla(mc01, ma0, mb1); + ptr_a0 += 8; + ptr_b0 += 8; + ptr_b1 += 8; + } + svst1_scatter_index(pg32, ptr_c00, off_vc, mc00); + svst1_scatter_index(pg32, ptr_c01, off_vc, mc01); + ptr_c00 += 2; + ptr_c01 += 2; + } + + ptr_b += 4 * padk; + + } + + if (padn & 2) { + // rest 2 + ptr_c00 = ptr_c; + ptr_c10 = ptr_c00 + 2; + ptr_c20 = ptr_c10 + 2; + ptr_c30 = ptr_c20 + 2; + ptr_c += 2 * nldc; + + ptr_a = A; + + for (BLASLONG i = 0; i < padm/8; i++) { + ptr_a0 = ptr_a; + ptr_a1 = ptr_a0 + 2 * padk; + ptr_a2 = ptr_a1 + 2 * padk; + ptr_a3 = ptr_a2 + 2 * padk; + ptr_a += 8 * padk; + + ptr_b0 = ptr_b; + + mc00 = svdup_f32(0); + mc10 = svdup_f32(0); + mc20 = svdup_f32(0); + mc30 = svdup_f32(0); + + for (BLASLONG p = 0; p < padk/4; p++) { + ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0); + ma1 = svld1_bf16(pg16, (bfloat16_t *) ptr_a1); + ma2 = svld1_bf16(pg16, (bfloat16_t *) ptr_a2); + ma3 = svld1_bf16(pg16, (bfloat16_t *) ptr_a3); + mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0); + mc00 = svbfmmla(mc00, ma0, mb0); + mc10 = svbfmmla(mc10, ma1, mb0); + mc20 = svbfmmla(mc20, ma2, mb0); + mc30 = svbfmmla(mc30, ma3, mb0); + ptr_a0 += 8; + ptr_a1 += 8; + ptr_a2 += 8; + ptr_a3 += 8; + ptr_b0 += 8; + } + svst1_scatter_index(pg32, ptr_c00, off_vc, mc00); + svst1_scatter_index(pg32, ptr_c10, off_vc, mc10); + svst1_scatter_index(pg32, ptr_c20, off_vc, mc20); + svst1_scatter_index(pg32, ptr_c30, off_vc, mc30); + ptr_c00 += 8; + ptr_c10 += 8; + ptr_c20 += 8; + ptr_c30 += 8; + } + + if (padm & 4) { + ptr_a0 = ptr_a; + ptr_a1 = ptr_a0 + 2 * padk; + ptr_a += 4 * padk; + + ptr_b0 = ptr_b; + + mc00 = svdup_f32(0); + mc10 = svdup_f32(0); + for (BLASLONG p = 0; p < padk/4; p++) 
{ + ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0); + ma1 = svld1_bf16(pg16, (bfloat16_t *) ptr_a1); + mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0); + mc00 = svbfmmla(mc00, ma0, mb0); + mc10 = svbfmmla(mc10, ma1, mb0); + ptr_a0 += 8; + ptr_a1 += 8; + ptr_b0 += 8; + } + svst1_scatter_index(pg32, ptr_c00, off_vc, mc00); + svst1_scatter_index(pg32, ptr_c10, off_vc, mc10); + ptr_c00 += 4; + ptr_c10 += 4; + } + + if (padm & 2) { + ptr_a0 = ptr_a; + ptr_a += 2 * padk; + ptr_b0 = ptr_b; + mc00 = svdup_f32(0); + for (BLASLONG p = 0; p < padk/4; p++) { + ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0); + mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0); + mc00 = svbfmmla(mc00, ma0, mb0); + ptr_a0 += 8; + ptr_b0 += 8; + } + svst1_scatter_index(pg32, ptr_c00, off_vc, mc00); + ptr_c00 += 2; + } + + ptr_b += 2 * padk; + } + + FLOAT *org_c = C; + FLOAT *raw_c = RC; + FLOAT *org_c0, *raw_c0; + svfloat32_t org_vc0, raw_vc0; + for (BLASLONG j = 0; j < n; j++) { + org_c0 = org_c; + raw_c0 = raw_c; + org_c += ldc; + raw_c += nldc; + BLASLONG i; + for (i = 0; i < m/4; i++) { + org_vc0 = svld1_f32(pg32, org_c0); + raw_vc0 = svld1_f32(pg32, raw_c0); + org_vc0 = svmad_z(pg32, svalpha, raw_vc0, org_vc0); // alpha * raw + org, raw -> a * b + svst1_f32(pg32, org_c0, org_vc0); + org_c0 += 4; + raw_c0 += 4; + } + for (i = 0; i < (m & 3); i++) { + *org_c0 += alpha * (*raw_c0); + org_c0++; + raw_c0++; + } + } + return 0; +} diff --git a/kernel/arm64/sbgemm_kernel_neoversen2.c b/kernel/arm64/sbgemm_kernel_neoversen2.c deleted file mode 100644 index f1022c9d6..000000000 --- a/kernel/arm64/sbgemm_kernel_neoversen2.c +++ /dev/null @@ -1,34 +0,0 @@ -/*************************************************************************** - * Copyright (c) 2022, The OpenBLAS Project - * All rights reserved. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * 3. Neither the name of the OpenBLAS project nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- * *****************************************************************************/ - -#include "common.h" - -int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, - FLOAT *C, BLASLONG ldc) { - return 0; -} diff --git a/kernel/arm64/sbgemm_ncopy_neoversen2.c b/kernel/arm64/sbgemm_ncopy_neoversen2.c index 608b8895c..183106f7f 100644 --- a/kernel/arm64/sbgemm_ncopy_neoversen2.c +++ b/kernel/arm64/sbgemm_ncopy_neoversen2.c @@ -29,5 +29,87 @@ #include "common.h" int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) { + IFLOAT *a_offset, *a_offset1, *a_offset2; + IFLOAT *b_offset; + + a_offset = a; + b_offset = b; + + BLASLONG m4 = m & ~3; + BLASLONG n2 = n & ~1; + + BLASLONG j = 0; + for (; j < n2; j += 2) { + a_offset1 = a_offset; + a_offset2 = a_offset1 + lda; + a_offset += 2 * lda; + + BLASLONG i = 0; + for (; i < m4; i += 4) { + *(b_offset + 0) = *(a_offset1 + 0); + *(b_offset + 1) = *(a_offset1 + 1); + *(b_offset + 2) = *(a_offset1 + 2); + *(b_offset + 3) = *(a_offset1 + 3); + *(b_offset + 4) = *(a_offset2 + 0); + *(b_offset + 5) = *(a_offset2 + 1); + *(b_offset + 6) = *(a_offset2 + 2); + *(b_offset + 7) = *(a_offset2 + 3); + + a_offset1 += 4; + a_offset2 += 4; + b_offset += 8; + } + if (i < m) { + *(b_offset + 0) = *(a_offset1 + 0); + *(b_offset + 4) = *(a_offset2 + 0); + + if (i + 1 < m) { + *(b_offset + 1) = *(a_offset1 + 1); + *(b_offset + 5) = *(a_offset2 + 1); + } else { + *(b_offset + 1) = 0; + *(b_offset + 5) = 0; + } + + if (i + 2 < m) { + *(b_offset + 2) = *(a_offset1 + 2); + *(b_offset + 6) = *(a_offset2 + 2); + } else { + *(b_offset + 2) = 0; + *(b_offset + 6) = 0; + } + + *(b_offset + 3) = 0; + *(b_offset + 7) = 0; + + b_offset += 8; + } + } + if (j < n) { + BLASLONG i = 0; + for (; i < m4; i += 4) { + *(b_offset + 0) = *(a_offset + 0); + *(b_offset + 1) = *(a_offset + 1); + *(b_offset + 2) = *(a_offset + 2); + *(b_offset + 3) = *(a_offset + 3); + *(b_offset + 4) = 0; + *(b_offset + 5) = 0; + *(b_offset + 6) = 
0; + *(b_offset + 7) = 0; + a_offset += 4; + b_offset += 8; + } + if (i < m) { + *(b_offset + 4) = 0; + *(b_offset + 5) = 0; + *(b_offset + 6) = 0; + *(b_offset + 7) = 0; + + *(b_offset + 0) = *(a_offset + 0); + *(b_offset + 1) = (i + 1 < m) ? *(a_offset + 1) : 0; + *(b_offset + 2) = (i + 2 < m) ? *(a_offset + 2) : 0; + *(b_offset + 3) = 0; + } + } return 0; } diff --git a/kernel/arm64/sbgemm_tcopy_neoversen2.c b/kernel/arm64/sbgemm_tcopy_neoversen2.c index 608b8895c..60e6855a6 100644 --- a/kernel/arm64/sbgemm_tcopy_neoversen2.c +++ b/kernel/arm64/sbgemm_tcopy_neoversen2.c @@ -29,5 +29,89 @@ #include "common.h" int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) { + IFLOAT *a_offset, *a_offset1, *a_offset2, *a_offset3, *a_offset4; + IFLOAT *b_offset; + a_offset = a; + b_offset = b; + + BLASLONG m4 = m & ~3; + BLASLONG n2 = n & ~1; + + BLASLONG j = 0; + for (; j < n2; j += 2) { + a_offset1 = a_offset; + a_offset2 = a_offset1 + lda; + a_offset3 = a_offset2 + lda; + a_offset4 = a_offset3 + lda; + a_offset += 2; + + BLASLONG i = 0; + for (; i < m4; i += 4) { + *(b_offset + 0) = *(a_offset1 + 0); + *(b_offset + 1) = *(a_offset2 + 0); + *(b_offset + 2) = *(a_offset3 + 0); + *(b_offset + 3) = *(a_offset4 + 0); + *(b_offset + 4) = *(a_offset1 + 1); + *(b_offset + 5) = *(a_offset2 + 1); + *(b_offset + 6) = *(a_offset3 + 1); + *(b_offset + 7) = *(a_offset4 + 1); + + b_offset += 8; + a_offset1 += 4 * lda; + a_offset2 += 4 * lda; + a_offset3 += 4 * lda; + a_offset4 += 4 * lda; + } + if (i < m) { // padding 4 + *(b_offset + 0) = *(a_offset1 + 0); + *(b_offset + 4) = *(a_offset1 + 1); + + if (i + 1 < m) { + *(b_offset + 1) = *(a_offset2 + 0); + *(b_offset + 5) = *(a_offset2 + 1); + } else { + *(b_offset + 1) = 0; + *(b_offset + 5) = 0; + } + + if (i + 2 < m) { + *(b_offset + 2) = *(a_offset3 + 0); + *(b_offset + 6) = *(a_offset3 + 1); + } else { + *(b_offset + 2) = 0; + *(b_offset + 6) = 0; + } + + *(b_offset + 3) = 0; + *(b_offset + 7) = 0; + b_offset += 8; + 
} + } + if (j < n) { // padding 2 + BLASLONG i = 0; + for (; i < m4; i += 4) { + *(b_offset + 0) = *(a_offset + 0); + *(b_offset + 1) = *(a_offset + 1 * lda); + *(b_offset + 2) = *(a_offset + 2 * lda); + *(b_offset + 3) = *(a_offset + 3 * lda); + *(b_offset + 4) = 0; + *(b_offset + 5) = 0; + *(b_offset + 6) = 0; + *(b_offset + 7) = 0; + b_offset += 8; + a_offset += 4 * lda; + } + if (i < m) { + *(b_offset + 4) = 0; + *(b_offset + 5) = 0; + *(b_offset + 6) = 0; + *(b_offset + 7) = 0; + + *(b_offset + 0) = *(a_offset + 0); + *(b_offset + 1) = (i + 1 < m) ? *(a_offset + 1 * lda) : 0; + *(b_offset + 2) = (i + 2 < m) ? *(a_offset + 2 * lda) : 0; + *(b_offset + 3) = 0; + } + } return 0; } diff --git a/param.h b/param.h index 4e2497b1c..5fa829986 100644 --- a/param.h +++ b/param.h @@ -3330,6 +3330,9 @@ is a big desktop or server with abundant cache rather than a phone or embedded d #elif defined(NEOVERSEN2) +#define SBGEMM_DEFAULT_UNROOL_M 8 +#define SBGEMM_DEFAULT_UNROOL_N 4 + #define SGEMM_DEFAULT_UNROLL_M 16 #define SGEMM_DEFAULT_UNROLL_N 4 From bc3728475fc329eb29adfb5954864d7763c37284 Mon Sep 17 00:00:00 2001 From: Honglin Zhu Date: Thu, 16 Jun 2022 19:36:22 +0800 Subject: [PATCH 3/5] format code --- kernel/arm64/sbgemm_kernel_8x4_neoversen2.c | 103 +++++++++++--------- kernel/arm64/sbgemm_ncopy_neoversen2.c | 2 +- kernel/arm64/sbgemm_tcopy_neoversen2.c | 5 +- 3 files changed, 60 insertions(+), 50 deletions(-) diff --git a/kernel/arm64/sbgemm_kernel_8x4_neoversen2.c b/kernel/arm64/sbgemm_kernel_8x4_neoversen2.c index 01a96cd60..c97ad81a2 100644 --- a/kernel/arm64/sbgemm_kernel_8x4_neoversen2.c +++ b/kernel/arm64/sbgemm_kernel_8x4_neoversen2.c @@ -27,15 +27,16 @@ * *****************************************************************************/ #include + #include "common.h" -int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, - FLOAT *C, BLASLONG ldc) { +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, FLOAT 
*C, + BLASLONG ldc) { // printf("m: %d, n: %d, k: %d\n", m, n, k); BLASLONG padk = (k + 3) & ~3; BLASLONG padm = (m + 1) & ~1; BLASLONG padn = (n + 1) & ~1; - FLOAT *RC = (FLOAT *) calloc(padm * padn, sizeof(float)); + FLOAT *RC = (FLOAT *)calloc(padm * padn, sizeof(float)); BLASLONG nldc = padm; IFLOAT *ptr_a = A; @@ -52,10 +53,10 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, svbool_t pg32 = svptrue_b32(); svfloat32_t svalpha = svdup_f32(alpha); - uint32_t off_c[] = {0, (uint32_t) nldc, 1, (uint32_t) nldc + 1}; // 00 01 10 11 + uint32_t off_c[] = {0, (uint32_t)nldc, 1, (uint32_t)nldc + 1}; // 00 01 10 11 svuint32_t off_vc = svld1_u32(pg32, off_c); - for (BLASLONG j = 0; j < padn/4; j++) { + for (BLASLONG j = 0; j < padn / 4; j++) { ptr_c00 = ptr_c; ptr_c10 = ptr_c00 + 2; ptr_c20 = ptr_c10 + 2; @@ -68,7 +69,7 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, ptr_a = A; - for (BLASLONG i = 0; i < padm/8; i++) { + for (BLASLONG i = 0; i < padm / 8; i++) { ptr_a0 = ptr_a; ptr_a1 = ptr_a0 + 2 * padk; ptr_a2 = ptr_a1 + 2 * padk; @@ -78,18 +79,22 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, ptr_b0 = ptr_b; ptr_b1 = ptr_b0 + 2 * padk; - mc00 = svdup_f32(0); mc01 = svdup_f32(0); - mc10 = svdup_f32(0); mc11 = svdup_f32(0); - mc20 = svdup_f32(0); mc21 = svdup_f32(0); - mc30 = svdup_f32(0); mc31 = svdup_f32(0); - - for (BLASLONG p = 0; p < padk/4; p++) { - ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0); - ma1 = svld1_bf16(pg16, (bfloat16_t *) ptr_a1); - ma2 = svld1_bf16(pg16, (bfloat16_t *) ptr_a2); - ma3 = svld1_bf16(pg16, (bfloat16_t *) ptr_a3); - mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0); - mb1 = svld1_bf16(pg16, (bfloat16_t *) ptr_b1); + mc00 = svdup_f32(0); + mc01 = svdup_f32(0); + mc10 = svdup_f32(0); + mc11 = svdup_f32(0); + mc20 = svdup_f32(0); + mc21 = svdup_f32(0); + mc30 = svdup_f32(0); + mc31 = svdup_f32(0); + + for (BLASLONG p = 0; p < padk / 4; p++) 
{ + ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0); + ma1 = svld1_bf16(pg16, (bfloat16_t *)ptr_a1); + ma2 = svld1_bf16(pg16, (bfloat16_t *)ptr_a2); + ma3 = svld1_bf16(pg16, (bfloat16_t *)ptr_a3); + mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0); + mb1 = svld1_bf16(pg16, (bfloat16_t *)ptr_b1); mc00 = svbfmmla(mc00, ma0, mb0); mc10 = svbfmmla(mc10, ma1, mb0); @@ -135,13 +140,15 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, ptr_b0 = ptr_b; ptr_b1 = ptr_b0 + 2 * padk; - mc00 = svdup_f32(0); mc01 = svdup_f32(0); - mc10 = svdup_f32(0); mc11 = svdup_f32(0); - for (BLASLONG p = 0; p < padk/4; p++) { - ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0); - ma1 = svld1_bf16(pg16, (bfloat16_t *) ptr_a1); - mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0); - mb1 = svld1_bf16(pg16, (bfloat16_t *) ptr_b1); + mc00 = svdup_f32(0); + mc01 = svdup_f32(0); + mc10 = svdup_f32(0); + mc11 = svdup_f32(0); + for (BLASLONG p = 0; p < padk / 4; p++) { + ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0); + ma1 = svld1_bf16(pg16, (bfloat16_t *)ptr_a1); + mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0); + mb1 = svld1_bf16(pg16, (bfloat16_t *)ptr_b1); mc00 = svbfmmla(mc00, ma0, mb0); mc10 = svbfmmla(mc10, ma1, mb0); @@ -171,11 +178,12 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, ptr_b0 = ptr_b; ptr_b1 = ptr_b0 + 2 * padk; - mc00 = svdup_f32(0); mc01 = svdup_f32(0); - for (BLASLONG p = 0; p < padk/4; p++) { - ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0); - mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0); - mb1 = svld1_bf16(pg16, (bfloat16_t *) ptr_b1); + mc00 = svdup_f32(0); + mc01 = svdup_f32(0); + for (BLASLONG p = 0; p < padk / 4; p++) { + ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0); + mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0); + mb1 = svld1_bf16(pg16, (bfloat16_t *)ptr_b1); mc00 = svbfmmla(mc00, ma0, mb0); mc01 = svbfmmla(mc01, ma0, mb1); ptr_a0 += 8; @@ -189,7 +197,6 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, 
IFLOAT *A, IFLOAT *B, } ptr_b += 4 * padk; - } if (padn & 2) { @@ -202,7 +209,7 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, ptr_a = A; - for (BLASLONG i = 0; i < padm/8; i++) { + for (BLASLONG i = 0; i < padm / 8; i++) { ptr_a0 = ptr_a; ptr_a1 = ptr_a0 + 2 * padk; ptr_a2 = ptr_a1 + 2 * padk; @@ -216,12 +223,12 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, mc20 = svdup_f32(0); mc30 = svdup_f32(0); - for (BLASLONG p = 0; p < padk/4; p++) { - ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0); - ma1 = svld1_bf16(pg16, (bfloat16_t *) ptr_a1); - ma2 = svld1_bf16(pg16, (bfloat16_t *) ptr_a2); - ma3 = svld1_bf16(pg16, (bfloat16_t *) ptr_a3); - mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0); + for (BLASLONG p = 0; p < padk / 4; p++) { + ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0); + ma1 = svld1_bf16(pg16, (bfloat16_t *)ptr_a1); + ma2 = svld1_bf16(pg16, (bfloat16_t *)ptr_a2); + ma3 = svld1_bf16(pg16, (bfloat16_t *)ptr_a3); + mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0); mc00 = svbfmmla(mc00, ma0, mb0); mc10 = svbfmmla(mc10, ma1, mb0); mc20 = svbfmmla(mc20, ma2, mb0); @@ -251,10 +258,10 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, mc00 = svdup_f32(0); mc10 = svdup_f32(0); - for (BLASLONG p = 0; p < padk/4; p++) { - ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0); - ma1 = svld1_bf16(pg16, (bfloat16_t *) ptr_a1); - mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0); + for (BLASLONG p = 0; p < padk / 4; p++) { + ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0); + ma1 = svld1_bf16(pg16, (bfloat16_t *)ptr_a1); + mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0); mc00 = svbfmmla(mc00, ma0, mb0); mc10 = svbfmmla(mc10, ma1, mb0); ptr_a0 += 8; @@ -272,9 +279,9 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, ptr_a += 2 * padk; ptr_b0 = ptr_b; mc00 = svdup_f32(0); - for (BLASLONG p = 0; p < padk/4; p++) { - ma0 = svld1_bf16(pg16, (bfloat16_t *) ptr_a0); - 
mb0 = svld1_bf16(pg16, (bfloat16_t *) ptr_b0); + for (BLASLONG p = 0; p < padk / 4; p++) { + ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0); + mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0); mc00 = svbfmmla(mc00, ma0, mb0); ptr_a0 += 8; ptr_b0 += 8; @@ -296,10 +303,11 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, org_c += ldc; raw_c += nldc; BLASLONG i; - for (i = 0; i < m/4; i++) { + for (i = 0; i < m / 4; i++) { org_vc0 = svld1_f32(pg32, org_c0); raw_vc0 = svld1_f32(pg32, raw_c0); - org_vc0 = svmad_z(pg32, svalpha, raw_vc0, org_vc0); // alpha * raw + org, raw -> a * b + org_vc0 = svmad_z(pg32, svalpha, raw_vc0, + org_vc0); // alpha * raw + org, raw -> a * b svst1_f32(pg32, org_c0, org_vc0); org_c0 += 4; raw_c0 += 4; @@ -310,5 +318,6 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, raw_c0++; } } + return 0; } diff --git a/kernel/arm64/sbgemm_ncopy_neoversen2.c b/kernel/arm64/sbgemm_ncopy_neoversen2.c index 183106f7f..977256f34 100644 --- a/kernel/arm64/sbgemm_ncopy_neoversen2.c +++ b/kernel/arm64/sbgemm_ncopy_neoversen2.c @@ -97,7 +97,7 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) { *(b_offset + 6) = 0; *(b_offset + 7) = 0; a_offset += 4; - b_offset += 8; + b_offset += 4; } if (i < m) { *(b_offset + 4) = 0; diff --git a/kernel/arm64/sbgemm_tcopy_neoversen2.c b/kernel/arm64/sbgemm_tcopy_neoversen2.c index 60e6855a6..7beed83cd 100644 --- a/kernel/arm64/sbgemm_tcopy_neoversen2.c +++ b/kernel/arm64/sbgemm_tcopy_neoversen2.c @@ -87,7 +87,7 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) { b_offset += 8; } } - if (j < n) { // padding 2 + if (j < n) { // rest 1 BLASLONG i = 0; for (; i < m4; i += 4) { *(b_offset + 0) = *(a_offset + 0); @@ -98,7 +98,7 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) { *(b_offset + 5) = 0; *(b_offset + 6) = 0; *(b_offset + 7) = 0; - b_offset += 8; + b_offset += 4; a_offset += 4 * lda; } if (i < 
m) { @@ -113,5 +113,6 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) { *(b_offset + 3) = 0; } } + return 0; } From 123e0dfb62b21f2468c19be6c8415331faa56fd5 Mon Sep 17 00:00:00 2001 From: Honglin Zhu Date: Wed, 22 Jun 2022 23:00:40 +0800 Subject: [PATCH 4/5] Neoverse N2 sbgemm: 1. Modify the algorithm to resolve multithreading failures 2. No memory allocation in sbgemm kernel 3. Optimize when alpha == 1.0f --- kernel/arm64/sbgemm_kernel_8x4_neoversen2.c | 298 +------- .../arm64/sbgemm_kernel_8x4_neoversen2_impl.c | 665 ++++++++++++++++++ kernel/arm64/sbgemm_ncopy_neoversen2.c | 84 +-- kernel/arm64/sbgemm_tcopy_neoversen2.c | 87 +-- param.h | 6 +- 5 files changed, 753 insertions(+), 387 deletions(-) create mode 100644 kernel/arm64/sbgemm_kernel_8x4_neoversen2_impl.c diff --git a/kernel/arm64/sbgemm_kernel_8x4_neoversen2.c b/kernel/arm64/sbgemm_kernel_8x4_neoversen2.c index c97ad81a2..66e7dd38a 100644 --- a/kernel/arm64/sbgemm_kernel_8x4_neoversen2.c +++ b/kernel/arm64/sbgemm_kernel_8x4_neoversen2.c @@ -30,294 +30,16 @@ #include "common.h" -int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, FLOAT *C, - BLASLONG ldc) { - // printf("m: %d, n: %d, k: %d\n", m, n, k); - BLASLONG padk = (k + 3) & ~3; - BLASLONG padm = (m + 1) & ~1; - BLASLONG padn = (n + 1) & ~1; - FLOAT *RC = (FLOAT *)calloc(padm * padn, sizeof(float)); - BLASLONG nldc = padm; - - IFLOAT *ptr_a = A; - IFLOAT *ptr_b = B; - FLOAT *ptr_c = RC; - - IFLOAT *ptr_a0, *ptr_a1, *ptr_a2, *ptr_a3; - IFLOAT *ptr_b0, *ptr_b1; - FLOAT *ptr_c00, *ptr_c10, *ptr_c20, *ptr_c30, *ptr_c01, *ptr_c11, *ptr_c21, *ptr_c31; - - svbfloat16_t ma0, ma1, ma2, ma3, mb0, mb1; - svfloat32_t mc00, mc01, mc10, mc11, mc20, mc21, mc30, mc31; - svbool_t pg16 = svptrue_b16(); - svbool_t pg32 = svptrue_b32(); - svfloat32_t svalpha = svdup_f32(alpha); - - uint32_t off_c[] = {0, (uint32_t)nldc, 1, (uint32_t)nldc + 1}; // 00 01 10 11 - svuint32_t off_vc = svld1_u32(pg32, off_c); - - for 
(BLASLONG j = 0; j < padn / 4; j++) { - ptr_c00 = ptr_c; - ptr_c10 = ptr_c00 + 2; - ptr_c20 = ptr_c10 + 2; - ptr_c30 = ptr_c20 + 2; - ptr_c01 = ptr_c + 2 * nldc; - ptr_c11 = ptr_c01 + 2; - ptr_c21 = ptr_c11 + 2; - ptr_c31 = ptr_c21 + 2; - ptr_c += 4 * nldc; - - ptr_a = A; - - for (BLASLONG i = 0; i < padm / 8; i++) { - ptr_a0 = ptr_a; - ptr_a1 = ptr_a0 + 2 * padk; - ptr_a2 = ptr_a1 + 2 * padk; - ptr_a3 = ptr_a2 + 2 * padk; - ptr_a += 8 * padk; - - ptr_b0 = ptr_b; - ptr_b1 = ptr_b0 + 2 * padk; - - mc00 = svdup_f32(0); - mc01 = svdup_f32(0); - mc10 = svdup_f32(0); - mc11 = svdup_f32(0); - mc20 = svdup_f32(0); - mc21 = svdup_f32(0); - mc30 = svdup_f32(0); - mc31 = svdup_f32(0); - - for (BLASLONG p = 0; p < padk / 4; p++) { - ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0); - ma1 = svld1_bf16(pg16, (bfloat16_t *)ptr_a1); - ma2 = svld1_bf16(pg16, (bfloat16_t *)ptr_a2); - ma3 = svld1_bf16(pg16, (bfloat16_t *)ptr_a3); - mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0); - mb1 = svld1_bf16(pg16, (bfloat16_t *)ptr_b1); - - mc00 = svbfmmla(mc00, ma0, mb0); - mc10 = svbfmmla(mc10, ma1, mb0); - mc20 = svbfmmla(mc20, ma2, mb0); - mc30 = svbfmmla(mc30, ma3, mb0); - mc01 = svbfmmla(mc01, ma0, mb1); - mc11 = svbfmmla(mc11, ma1, mb1); - mc21 = svbfmmla(mc21, ma2, mb1); - mc31 = svbfmmla(mc31, ma3, mb1); - - ptr_a0 += 8; - ptr_a1 += 8; - ptr_a2 += 8; - ptr_a3 += 8; - ptr_b0 += 8; - ptr_b1 += 8; - } - svst1_scatter_index(pg32, ptr_c00, off_vc, mc00); - svst1_scatter_index(pg32, ptr_c10, off_vc, mc10); - svst1_scatter_index(pg32, ptr_c20, off_vc, mc20); - svst1_scatter_index(pg32, ptr_c30, off_vc, mc30); - svst1_scatter_index(pg32, ptr_c01, off_vc, mc01); - svst1_scatter_index(pg32, ptr_c11, off_vc, mc11); - svst1_scatter_index(pg32, ptr_c21, off_vc, mc21); - svst1_scatter_index(pg32, ptr_c31, off_vc, mc31); - - ptr_c00 += 8; - ptr_c10 += 8; - ptr_c20 += 8; - ptr_c30 += 8; - ptr_c01 += 8; - ptr_c11 += 8; - ptr_c21 += 8; - ptr_c31 += 8; - } - - if (padm & 4) { - // rest 4 or 6 - ptr_a0 = 
ptr_a; - ptr_a1 = ptr_a0 + 2 * padk; - ptr_a += 4 * padk; - - ptr_b0 = ptr_b; - ptr_b1 = ptr_b0 + 2 * padk; - - mc00 = svdup_f32(0); - mc01 = svdup_f32(0); - mc10 = svdup_f32(0); - mc11 = svdup_f32(0); - for (BLASLONG p = 0; p < padk / 4; p++) { - ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0); - ma1 = svld1_bf16(pg16, (bfloat16_t *)ptr_a1); - mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0); - mb1 = svld1_bf16(pg16, (bfloat16_t *)ptr_b1); - - mc00 = svbfmmla(mc00, ma0, mb0); - mc10 = svbfmmla(mc10, ma1, mb0); - mc01 = svbfmmla(mc01, ma0, mb1); - mc11 = svbfmmla(mc11, ma1, mb1); - - ptr_a0 += 8; - ptr_a1 += 8; - ptr_b0 += 8; - ptr_b1 += 8; - } - svst1_scatter_index(pg32, ptr_c00, off_vc, mc00); - svst1_scatter_index(pg32, ptr_c10, off_vc, mc10); - svst1_scatter_index(pg32, ptr_c01, off_vc, mc01); - svst1_scatter_index(pg32, ptr_c11, off_vc, mc11); - - ptr_c00 += 4; - ptr_c10 += 4; - ptr_c01 += 4; - ptr_c11 += 4; - } - - if (padm & 2) { - // rest 2 - ptr_a0 = ptr_a; - - ptr_b0 = ptr_b; - ptr_b1 = ptr_b0 + 2 * padk; - - mc00 = svdup_f32(0); - mc01 = svdup_f32(0); - for (BLASLONG p = 0; p < padk / 4; p++) { - ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0); - mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0); - mb1 = svld1_bf16(pg16, (bfloat16_t *)ptr_b1); - mc00 = svbfmmla(mc00, ma0, mb0); - mc01 = svbfmmla(mc01, ma0, mb1); - ptr_a0 += 8; - ptr_b0 += 8; - ptr_b1 += 8; - } - svst1_scatter_index(pg32, ptr_c00, off_vc, mc00); - svst1_scatter_index(pg32, ptr_c01, off_vc, mc01); - ptr_c00 += 2; - ptr_c01 += 2; - } - - ptr_b += 4 * padk; - } - - if (padn & 2) { - // rest 2 - ptr_c00 = ptr_c; - ptr_c10 = ptr_c00 + 2; - ptr_c20 = ptr_c10 + 2; - ptr_c30 = ptr_c20 + 2; - ptr_c += 2 * nldc; - - ptr_a = A; - - for (BLASLONG i = 0; i < padm / 8; i++) { - ptr_a0 = ptr_a; - ptr_a1 = ptr_a0 + 2 * padk; - ptr_a2 = ptr_a1 + 2 * padk; - ptr_a3 = ptr_a2 + 2 * padk; - ptr_a += 8 * padk; - - ptr_b0 = ptr_b; - - mc00 = svdup_f32(0); - mc10 = svdup_f32(0); - mc20 = svdup_f32(0); - mc30 = svdup_f32(0); - 
- for (BLASLONG p = 0; p < padk / 4; p++) { - ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0); - ma1 = svld1_bf16(pg16, (bfloat16_t *)ptr_a1); - ma2 = svld1_bf16(pg16, (bfloat16_t *)ptr_a2); - ma3 = svld1_bf16(pg16, (bfloat16_t *)ptr_a3); - mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0); - mc00 = svbfmmla(mc00, ma0, mb0); - mc10 = svbfmmla(mc10, ma1, mb0); - mc20 = svbfmmla(mc20, ma2, mb0); - mc30 = svbfmmla(mc30, ma3, mb0); - ptr_a0 += 8; - ptr_a1 += 8; - ptr_a2 += 8; - ptr_a3 += 8; - ptr_b0 += 8; - } - svst1_scatter_index(pg32, ptr_c00, off_vc, mc00); - svst1_scatter_index(pg32, ptr_c10, off_vc, mc10); - svst1_scatter_index(pg32, ptr_c20, off_vc, mc20); - svst1_scatter_index(pg32, ptr_c30, off_vc, mc30); - ptr_c00 += 8; - ptr_c10 += 8; - ptr_c20 += 8; - ptr_c30 += 8; - } - - if (padm & 4) { - ptr_a0 = ptr_a; - ptr_a1 = ptr_a0 + 2 * padk; - ptr_a += 4 * padk; - - ptr_b0 = ptr_b; - - mc00 = svdup_f32(0); - mc10 = svdup_f32(0); - for (BLASLONG p = 0; p < padk / 4; p++) { - ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0); - ma1 = svld1_bf16(pg16, (bfloat16_t *)ptr_a1); - mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0); - mc00 = svbfmmla(mc00, ma0, mb0); - mc10 = svbfmmla(mc10, ma1, mb0); - ptr_a0 += 8; - ptr_a1 += 8; - ptr_b0 += 8; - } - svst1_scatter_index(pg32, ptr_c00, off_vc, mc00); - svst1_scatter_index(pg32, ptr_c10, off_vc, mc10); - ptr_c00 += 4; - ptr_c10 += 4; - } - - if (padm & 2) { - ptr_a0 = ptr_a; - ptr_a += 2 * padk; - ptr_b0 = ptr_b; - mc00 = svdup_f32(0); - for (BLASLONG p = 0; p < padk / 4; p++) { - ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0); - mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0); - mc00 = svbfmmla(mc00, ma0, mb0); - ptr_a0 += 8; - ptr_b0 += 8; - } - svst1_scatter_index(pg32, ptr_c00, off_vc, mc00); - ptr_c00 += 2; - } - - ptr_b += 2 * padk; - } - - FLOAT *org_c = C; - FLOAT *raw_c = RC; - FLOAT *org_c0, *raw_c0; - svfloat32_t org_vc0, raw_vc0; - for (BLASLONG j = 0; j < n; j++) { - org_c0 = org_c; - raw_c0 = raw_c; - org_c += ldc; - raw_c += nldc; - 
BLASLONG i; - for (i = 0; i < m / 4; i++) { - org_vc0 = svld1_f32(pg32, org_c0); - raw_vc0 = svld1_f32(pg32, raw_c0); - org_vc0 = svmad_z(pg32, svalpha, raw_vc0, - org_vc0); // alpha * raw + org, raw -> a * b - svst1_f32(pg32, org_c0, org_vc0); - org_c0 += 4; - raw_c0 += 4; - } - for (i = 0; i < (m & 3); i++) { - *org_c0 += alpha * (*raw_c0); - org_c0++; - raw_c0++; - } - } +#define ALPHA_ONE +#include "sbgemm_kernel_8x4_neoversen2_impl.c" +#undef ALPHA_ONE +#include "sbgemm_kernel_8x4_neoversen2_impl.c" +int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, + FLOAT *C, BLASLONG ldc) { + if (alpha == 1.0f) + return sbgemm_kernel_neoversen2_alpha_one(m, n, k, alpha, A, B, C, ldc); + else + return sbgemm_kernel_neoversen2_alpha(m, n, k, alpha, A, B, C, ldc); return 0; } diff --git a/kernel/arm64/sbgemm_kernel_8x4_neoversen2_impl.c b/kernel/arm64/sbgemm_kernel_8x4_neoversen2_impl.c new file mode 100644 index 000000000..7d53b1aa0 --- /dev/null +++ b/kernel/arm64/sbgemm_kernel_8x4_neoversen2_impl.c @@ -0,0 +1,665 @@ +/*************************************************************************** + * Copyright (c) 2022, The OpenBLAS Project + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the OpenBLAS project nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * *****************************************************************************/ + +#include + +#include "common.h" + +#ifdef ALPHA_ONE +#define LOAD_C(M, N) \ + mc##M##N = svld1_gather_index(pg32, ptr_c0##N + 2 * M , off_vc); + +#define LOAD_C_LOW(M, N) \ + mc##M##N = svld1_gather_index(pg32_low, ptr_c0##N + 2 * M, off_vc); + +#define LOAD_C_EVEN(M, N) \ + mc##M##N = svld1_gather_index(pg32_even, ptr_c0##N + 2 * M, off_vc); + +#define LOAD_C_FIRST(M, N) \ + mc##M##N = svld1_gather_index(pg32_first, ptr_c0##N + 2 * M, off_vc); + +#define STORE_C(M, N) \ + svst1_scatter_index(pg32, ptr_c0##N + 2 * M, off_vc, mc##M##N); + +#define STORE_C_LOW(M, N) \ + svst1_scatter_index(pg32_low, ptr_c0##N + 2 * M, off_vc, mc##M##N); + +#define STORE_C_EVEN(M, N) \ + svst1_scatter_index(pg32_even, ptr_c0##N + 2 * M, off_vc, mc##M##N); + +#define STORE_C_FIRST(M, N) \ + svst1_scatter_index(pg32_first, ptr_c0##N + 2 * M, off_vc, mc##M##N); + +#else +#define LOAD_C(M, N) \ + mc##M##N = svdup_f32(0); \ + oc##M##N = svld1_gather_index(pg32, ptr_c0##N + 2 * M , off_vc); + +#define LOAD_C_LOW(M, N) \ + mc##M##N = svdup_f32(0); \ + oc##M##N = svld1_gather_index(pg32_low, ptr_c0##N + 2 * M , off_vc); + 
+#define LOAD_C_EVEN(M, N) \ + mc##M##N = svdup_f32(0); \ + oc##M##N = svld1_gather_index(pg32_even, ptr_c0##N + 2 * M , off_vc); + +#define LOAD_C_FIRST(M, N) \ + mc##M##N = svdup_f32(0); \ + oc##M##N = svld1_gather_index(pg32_first, ptr_c0##N + 2 * M , off_vc); + +#define STORE_C(M, N) \ + mc##M##N = svmad_z(pg32, svalpha, mc##M##N, oc##M##N); \ + svst1_scatter_index(pg32, ptr_c0##N + 2 * M, off_vc, mc##M##N); + +#define STORE_C_LOW(M, N) \ + mc##M##N = svmad_z(pg32_low, svalpha, mc##M##N, oc##M##N); \ + svst1_scatter_index(pg32_low, ptr_c0##N + 2 * M, off_vc, mc##M##N); + +#define STORE_C_EVEN(M, N) \ + mc##M##N = svmad_z(pg32_even, svalpha, mc##M##N, oc##M##N); \ + svst1_scatter_index(pg32_even, ptr_c0##N + 2 * M, off_vc, mc##M##N); + +#define STORE_C_FIRST(M, N) \ + mc##M##N = svmad_z(pg32_first, svalpha, mc##M##N, oc##M##N); \ + svst1_scatter_index(pg32_first, ptr_c0##N + 2 * M, off_vc, mc##M##N); + +#endif + +#define LOAD_A(M) ma##M = svld1_bf16(pg16, ptr_a##M); + +#define LOAD_B(N) mb##N = svld1_bf16(pg16, ptr_b##N); + +#define MATMUL(M, N) mc##M##N = svbfmmla(mc##M##N, ma##M, mb##N); + +#define LOAD_KREST_1(NAME, M) \ + m##NAME##M = svdupq_bf16(*(ptr_##NAME##M), zero, zero, zero, \ + *(ptr_##NAME##M + 1), zero, zero, zero); + +#define LOAD_KREST_1_LOW(NAME, M) \ + m##NAME##M = svdupq_bf16(*(ptr_##NAME##M), zero, zero, zero, zero, zero, \ + zero, zero); + +#define LOAD_KREST_2(NAME, M) \ + m##NAME##M = \ + svdupq_bf16(*(ptr_##NAME##M), *(ptr_##NAME##M + 1), zero, zero, \ + *(ptr_##NAME##M + 2), *(ptr_##NAME##M + 3), zero, zero); + +#define LOAD_KREST_2_LOW(NAME, M) \ + m##NAME##M = svdupq_bf16(*(ptr_##NAME##M), *(ptr_##NAME##M + 1), zero, \ + zero, zero, zero, zero, zero); + +#define LOAD_KREST_3(NAME, M) \ + m##NAME##M = \ + svdupq_bf16(*(ptr_##NAME##M), *(ptr_##NAME##M + 1), \ + *(ptr_##NAME##M + 2), zero, *(ptr_##NAME##M + 3), \ + *(ptr_##NAME##M + 4), *(ptr_##NAME##M + 5), zero); + +#define LOAD_KREST_3_LOW(NAME, M) \ + m##NAME##M = \ + 
svdupq_bf16(*(ptr_##NAME##M), *(ptr_##NAME##M + 1), \ + *(ptr_##NAME##M + 2), zero, zero, zero, zero, zero); + + +#ifdef ALPHA_ONE +int sbgemm_kernel_neoversen2_alpha_one(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT * B, FLOAT * C, BLASLONG ldc) +#else +int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT * B, FLOAT * C, BLASLONG ldc) +#endif +{ + bfloat16_t *ptr_a = (bfloat16_t *)A; + bfloat16_t *ptr_b = (bfloat16_t *)B; + FLOAT *ptr_c = C; + + bfloat16_t *ptr_a0, *ptr_a1, *ptr_a2, *ptr_a3; + bfloat16_t *ptr_b0, *ptr_b1; + FLOAT *ptr_c00, *ptr_c01; + + svbfloat16_t ma0, ma1, ma2, ma3, mb0, mb1; + svfloat32_t mc00, mc01, mc10, mc11, mc20, mc21, mc30, mc31; +#ifndef ALPHA_ONE + svfloat32_t oc00, oc01, oc10, oc11, oc20, oc21, oc30, oc31; +#endif + svbool_t pg16 = svptrue_b16(); + svbool_t pg16_low = svdupq_b16(1, 1, 1, 1, 0, 0, 0, 0); + svbool_t pg32 = svptrue_b32(); + svbool_t pg32_low = svdupq_b32(1, 1, 0, 0); + svbool_t pg32_even = svdupq_b32(1, 0, 1, 0); + svbool_t pg32_first = svdupq_b32(1, 0, 0, 0); + svfloat32_t svalpha = svdup_f32(alpha); + bfloat16 tmp = 0; + bfloat16_t zero = *((bfloat16_t *)&tmp); + BLASLONG krest = k & 3; + + // 00 01 10 11 + svuint32_t off_vc = svdupq_u32(0, (uint32_t)ldc, 1, (uint32_t)ldc + 1); + + for (BLASLONG j = 0; j < n / 4; j++) { + ptr_c00 = ptr_c; + ptr_c01 = ptr_c + 2 * ldc; + ptr_c += 4 * ldc; + + ptr_a = (bfloat16_t *)A; + + for (BLASLONG i = 0; i < m / 8; i++) { + ptr_a0 = ptr_a; + ptr_a1 = ptr_a0 + 2 * k; + ptr_a2 = ptr_a1 + 2 * k; + ptr_a3 = ptr_a2 + 2 * k; + ptr_a += 8 * k; + + ptr_b0 = ptr_b; + ptr_b1 = ptr_b0 + 2 * k; + + LOAD_C(0, 0); LOAD_C(0, 1); + LOAD_C(1, 0); LOAD_C(1, 1); + LOAD_C(2, 0); LOAD_C(2, 1); + LOAD_C(3, 0); LOAD_C(3, 1); + + for (BLASLONG p = 0; p < k / 4; p++) { + LOAD_A(0); LOAD_A(1); LOAD_A(2); LOAD_A(3); + LOAD_B(0); LOAD_B(1); + + MATMUL(0, 0); MATMUL(0, 1); + MATMUL(1, 0); MATMUL(1, 1); + MATMUL(2, 0); MATMUL(2, 1); + 
MATMUL(3, 0); MATMUL(3, 1); + + ptr_a0 += 8; ptr_a1 += 8; ptr_a2 += 8; ptr_a3 += 8; + ptr_b0 += 8; ptr_b1 += 8; + } + + if (krest) { + if (krest == 1) { + LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1); + LOAD_KREST_1(a, 2); LOAD_KREST_1(a, 3); + LOAD_KREST_1(b, 0); LOAD_KREST_1(b, 1); + } else if (krest == 2) { + LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1); + LOAD_KREST_2(a, 2); LOAD_KREST_2(a, 3); + LOAD_KREST_2(b, 0); LOAD_KREST_2(b, 1); + } else if (krest == 3) { + LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1); + LOAD_KREST_3(a, 2); LOAD_KREST_3(a, 3); + LOAD_KREST_3(b, 0); LOAD_KREST_3(b, 1); + } + MATMUL(0, 0); MATMUL(0, 1); + MATMUL(1, 0); MATMUL(1, 1); + MATMUL(2, 0); MATMUL(2, 1); + MATMUL(3, 0); MATMUL(3, 1); + } + + STORE_C(0, 0); STORE_C(0, 1); + STORE_C(1, 0); STORE_C(1, 1); + STORE_C(2, 0); STORE_C(2, 1); + STORE_C(3, 0); STORE_C(3, 1); + + ptr_c00 += 8; ptr_c01 += 8; + } + + if (m & 4) { + ptr_a0 = ptr_a; + ptr_a1 = ptr_a0 + 2 * k; + ptr_a += 4 * k; + + ptr_b0 = ptr_b; + ptr_b1 = ptr_b0 + 2 * k; + + LOAD_C(0, 0); LOAD_C(0, 1); + LOAD_C(1, 0); LOAD_C(1, 1); + + for (BLASLONG p = 0; p < k / 4; p++) { + LOAD_A(0); LOAD_A(1); + LOAD_B(0); LOAD_B(1); + + MATMUL(0, 0); MATMUL(0, 1); + MATMUL(1, 0); MATMUL(1, 1); + + ptr_a0 += 8; ptr_a1 += 8; + ptr_b0 += 8; ptr_b1 += 8; + } + + if (krest) { + if (krest == 1) { + LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1); + LOAD_KREST_1(b, 0); LOAD_KREST_1(b, 1); + } else if (krest == 2) { + LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1); + LOAD_KREST_2(b, 0); LOAD_KREST_2(b, 1); + } else if (krest == 3) { + LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1); + LOAD_KREST_3(b, 0); LOAD_KREST_3(b, 1); + } + MATMUL(0, 0); MATMUL(0, 1); + MATMUL(1, 0); MATMUL(1, 1); + } + + STORE_C(0, 0); STORE_C(0, 1); + STORE_C(1, 0); STORE_C(1, 1); + + ptr_c00 += 4; ptr_c01 += 4; + } + + if (m & 2) { + ptr_a0 = ptr_a; + ptr_a += 2 * k; + + ptr_b0 = ptr_b; + ptr_b1 = ptr_b0 + 2 * k; + + LOAD_C(0, 0); LOAD_C(0, 1); + + for (BLASLONG p = 0; p < k / 4; p++) { + LOAD_A(0); + LOAD_B(0); 
LOAD_B(1); + + MATMUL(0, 0); MATMUL(0, 1); + + ptr_a0 += 8; + ptr_b0 += 8; ptr_b1 += 8; + } + + if (krest) { + if (krest == 1) { + LOAD_KREST_1(a, 0); + LOAD_KREST_1(b, 0); LOAD_KREST_1(b, 1); + } else if (krest == 2) { + LOAD_KREST_2(a, 0); + LOAD_KREST_2(b, 0); LOAD_KREST_2(b, 1); + } else if (krest == 3) { + LOAD_KREST_3(a, 0); + LOAD_KREST_3(b, 0); LOAD_KREST_3(b, 1); + } + MATMUL(0, 0); MATMUL(0, 1); + } + STORE_C(0, 0); STORE_C(0, 1); + ptr_c00 += 2; ptr_c01 += 2; + } + + if (m & 1) { + ptr_a0 = ptr_a; + + ptr_b0 = ptr_b; + ptr_b1 = ptr_b0 + 2 * k; + + LOAD_C_LOW(0, 0); LOAD_C_LOW(0, 1); + + for (BLASLONG p = 0; p < k / 4; p++) { + ma0 = svld1_bf16(pg16_low, ptr_a0); + LOAD_B(0); LOAD_B(1); + + MATMUL(0, 0); MATMUL(0, 1); + + ptr_a0 += 4; + ptr_b0 += 8; + ptr_b1 += 8; + } + + if (krest) { + if (krest == 1) { + LOAD_KREST_1_LOW(a, 0); + LOAD_KREST_1(b, 0); LOAD_KREST_1(b, 1); + } else if (krest == 2) { + LOAD_KREST_2_LOW(a, 0); + LOAD_KREST_2(b, 0); LOAD_KREST_2(b, 1); + } else if (krest == 3) { + LOAD_KREST_3_LOW(a, 0); + LOAD_KREST_3(b, 0); LOAD_KREST_3(b, 1); + } + MATMUL(0, 0); MATMUL(0, 1); + } + STORE_C_LOW(0, 0); STORE_C_LOW(0, 1); + } + + ptr_b += 4 * k; + } + + if (n & 2) { + ptr_c00 = ptr_c; + ptr_c += 2 * ldc; + + ptr_a = (bfloat16_t *)A; + + for (BLASLONG i = 0; i < m / 8; i++) { + ptr_a0 = ptr_a; + ptr_a1 = ptr_a0 + 2 * k; + ptr_a2 = ptr_a1 + 2 * k; + ptr_a3 = ptr_a2 + 2 * k; + ptr_a += 8 * k; + + ptr_b0 = ptr_b; + + LOAD_C(0, 0); + LOAD_C(1, 0); + LOAD_C(2, 0); + LOAD_C(3, 0); + + for (BLASLONG p = 0; p < k / 4; p++) { + LOAD_A(0); LOAD_A(1); LOAD_A(2); LOAD_A(3); + LOAD_B(0); + + MATMUL(0, 0); + MATMUL(1, 0); + MATMUL(2, 0); + MATMUL(3, 0); + + ptr_a0 += 8; ptr_a1 += 8; ptr_a2 += 8; ptr_a3 += 8; + ptr_b0 += 8; + } + if (krest) { + if (krest == 1) { + LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1); + LOAD_KREST_1(a, 2); LOAD_KREST_1(a, 3); + LOAD_KREST_1(b, 0); + } else if (krest == 2) { + LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1); + LOAD_KREST_2(a, 2); 
LOAD_KREST_2(a, 3); + LOAD_KREST_2(b, 0); + } else if (krest == 3) { + LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1); + LOAD_KREST_3(a, 2); LOAD_KREST_3(a, 3); + LOAD_KREST_3(b, 0); + } + MATMUL(0, 0); + MATMUL(1, 0); + MATMUL(2, 0); + MATMUL(3, 0); + } + + STORE_C(0, 0); + STORE_C(1, 0); + STORE_C(2, 0); + STORE_C(3, 0); + + ptr_c00 += 8; + } + + if (m & 4) { + ptr_a0 = ptr_a; + ptr_a1 = ptr_a0 + 2 * k; + ptr_a += 4 * k; + + ptr_b0 = ptr_b; + + LOAD_C(0, 0); + LOAD_C(1, 0); + + for (BLASLONG p = 0; p < k / 4; p++) { + LOAD_A(0); LOAD_A(1); + LOAD_B(0); + + MATMUL(0, 0); + MATMUL(1, 0); + + ptr_a0 += 8; ptr_a1 += 8; + ptr_b0 += 8; + } + if (krest) { + if (krest == 1) { + LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1); + LOAD_KREST_1(b, 0); + } else if (krest == 2) { + LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1); + LOAD_KREST_2(b, 0); + } else if (krest == 3) { + LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1); + LOAD_KREST_3(b, 0); + } + MATMUL(0, 0); + MATMUL(1, 0); + } + STORE_C(0, 0) + STORE_C(1, 0) + + ptr_c00 += 4; + } + + if (m & 2) { + ptr_a0 = ptr_a; + ptr_a += 2 * k; + ptr_b0 = ptr_b; + + LOAD_C(0, 0); + for (BLASLONG p = 0; p < k / 4; p++) { + LOAD_A(0); + LOAD_B(0); + MATMUL(0, 0); + ptr_a0 += 8; + ptr_b0 += 8; + } + if (krest) { + if (krest == 1) { + LOAD_KREST_1(a, 0); + LOAD_KREST_1(b, 0); + } else if (krest == 2) { + LOAD_KREST_2(a, 0); + LOAD_KREST_2(b, 0); + } else if (krest == 3) { + LOAD_KREST_3(a, 0); + LOAD_KREST_3(b, 0); + } + MATMUL(0, 0); + } + STORE_C(0, 0); + ptr_c00 += 2; + } + + if (m & 1) { + ptr_a0 = ptr_a; + + ptr_b0 = ptr_b; + + LOAD_C(0, 0); + + for (BLASLONG p = 0; p < k / 4; p++) { + ma0 = svld1_bf16(pg16_low, ptr_a0); + LOAD_B(0); + MATMUL(0, 0); + ptr_a0 += 4; + ptr_b0 += 8; + } + if (krest) { + if (krest == 1) { + LOAD_KREST_1_LOW(a, 0); + LOAD_KREST_1(b, 0); + } else if (krest == 2) { + LOAD_KREST_2_LOW(a, 0); + LOAD_KREST_2(b, 0); + } else if (krest == 3) { + LOAD_KREST_3_LOW(a, 0); + LOAD_KREST_3(b, 0); + } + MATMUL(0, 0); + } + STORE_C_LOW(0, 0); + } + 
+ ptr_b += 2 * k; + } + + if (n & 1) { + ptr_c00 = ptr_c; + ptr_a = (bfloat16_t *) A; + + for (BLASLONG i = 0; i < m / 8; i++) { + ptr_a0 = ptr_a; + ptr_a1 = ptr_a0 + 2 * k; + ptr_a2 = ptr_a1 + 2 * k; + ptr_a3 = ptr_a2 + 2 * k; + ptr_a += 8 * k; + + ptr_b0 = ptr_b; + + LOAD_C_EVEN(0, 0); + LOAD_C_EVEN(1, 0); + LOAD_C_EVEN(2, 0); + LOAD_C_EVEN(3, 0); + + for (BLASLONG p = 0; p < k / 4; p++) { + LOAD_A(0); LOAD_A(1); LOAD_A(2); LOAD_A(3); + mb0 = svld1_bf16(pg16_low, ptr_b0); + + MATMUL(0, 0); + MATMUL(1, 0); + MATMUL(2, 0); + MATMUL(3, 0); + + ptr_a0 += 8; ptr_a1 += 8; ptr_a2 += 8; ptr_a3 += 8; + ptr_b0 += 4; + } + if (krest) { + if (krest == 1) { + LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1); + LOAD_KREST_1(a, 2); LOAD_KREST_1(a, 3); + LOAD_KREST_1_LOW(b, 0); + } else if (krest == 2) { + LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1); + LOAD_KREST_2(a, 2); LOAD_KREST_2(a, 3); + LOAD_KREST_2_LOW(b, 0); + } else if (krest == 3) { + LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1); + LOAD_KREST_3(a, 2); LOAD_KREST_3(a, 3); + LOAD_KREST_3_LOW(b, 0); + } + MATMUL(0, 0); + MATMUL(1, 0); + MATMUL(2, 0); + MATMUL(3, 0); + } + STORE_C_EVEN(0, 0) + STORE_C_EVEN(1, 0); + STORE_C_EVEN(2, 0); + STORE_C_EVEN(3, 0); + + ptr_c00 += 8; + } + + if (m & 4) { + ptr_a0 = ptr_a; + ptr_a1 = ptr_a0 + 2 * k; + ptr_a += 4 * k; + + ptr_b0 = ptr_b; + + LOAD_C_EVEN(0, 0); + LOAD_C_EVEN(1, 0); + + for (BLASLONG p = 0; p < k / 4; p++) { + LOAD_A(0); LOAD_A(1); + mb0 = svld1_bf16(pg16_low, ptr_b0); + + MATMUL(0, 0); + MATMUL(1, 0); + + ptr_a0 += 8; ptr_a1 += 8; + ptr_b0 += 4; + } + if (krest) { + if (krest == 1) { + LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1); + LOAD_KREST_1_LOW(b, 0); + } else if (krest == 2) { + LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1); + LOAD_KREST_2_LOW(b, 0); + } else if (krest == 3) { + LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1); + LOAD_KREST_3_LOW(b, 0); + } + MATMUL(0, 0); + MATMUL(1, 0); + } + STORE_C_EVEN(0, 0) + STORE_C_EVEN(1, 0) + + ptr_c00 += 4; + } + + if (m & 2) { + ptr_a0 = ptr_a; + ptr_a += 2 * 
k; + + ptr_b0 = ptr_b; + + LOAD_C_EVEN(0, 0); + + for (BLASLONG p = 0; p < k / 4; p++) { + LOAD_A(0); + mb0 = svld1_bf16(pg16_low, ptr_b0); + + MATMUL(0, 0); + + ptr_a0 += 8; + ptr_b0 += 4; + } + if (krest) { + if (krest == 1) { + LOAD_KREST_1(a, 0); + LOAD_KREST_1_LOW(b, 0); + } else if (krest == 2) { + LOAD_KREST_2(a, 0); + LOAD_KREST_2_LOW(b, 0); + } else if (krest == 3) { + LOAD_KREST_3(a, 0); + LOAD_KREST_3_LOW(b, 0); + } + MATMUL(0, 0); + } + STORE_C_EVEN(0, 0); + ptr_c00 += 2; + } + if (m & 1) { + ptr_a0 = ptr_a; + ptr_b0 = ptr_b; + LOAD_C_FIRST(0, 0); + for (BLASLONG p = 0; p < k / 4; p++) { + ma0 = svld1_bf16(pg16_low, ptr_a0); + mb0 = svld1_bf16(pg16_low, ptr_b0); + + MATMUL(0, 0); + + ptr_a0 += 4; + ptr_b0 += 4; + } + if (krest) { + if (krest == 1) { + LOAD_KREST_1_LOW(a, 0); + LOAD_KREST_1_LOW(b, 0); + } else if (krest == 2) { + LOAD_KREST_2_LOW(a, 0); + LOAD_KREST_2_LOW(b, 0); + } else if (krest == 3) { + LOAD_KREST_3_LOW(a, 0); + LOAD_KREST_3_LOW(b, 0); + } + MATMUL(0, 0); + } + STORE_C_FIRST(0, 0); + } + } + + return 0; +} \ No newline at end of file diff --git a/kernel/arm64/sbgemm_ncopy_neoversen2.c b/kernel/arm64/sbgemm_ncopy_neoversen2.c index 977256f34..594067ebb 100644 --- a/kernel/arm64/sbgemm_ncopy_neoversen2.c +++ b/kernel/arm64/sbgemm_ncopy_neoversen2.c @@ -35,17 +35,11 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) { a_offset = a; b_offset = b; - BLASLONG m4 = m & ~3; - BLASLONG n2 = n & ~1; - - BLASLONG j = 0; - for (; j < n2; j += 2) { + for (BLASLONG j = 0; j < n / 2; j++) { a_offset1 = a_offset; a_offset2 = a_offset1 + lda; a_offset += 2 * lda; - - BLASLONG i = 0; - for (; i < m4; i += 4) { + for (BLASLONG i = 0; i < m / 4; i++) { *(b_offset + 0) = *(a_offset1 + 0); *(b_offset + 1) = *(a_offset1 + 1); *(b_offset + 2) = *(a_offset1 + 2); @@ -59,57 +53,49 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) { a_offset2 += 4; b_offset += 8; } - if (i < m) { + BLASLONG rest = m & 3; + if (rest 
== 3) { *(b_offset + 0) = *(a_offset1 + 0); - *(b_offset + 4) = *(a_offset2 + 0); - - if (i + 1 < m) { - *(b_offset + 1) = *(a_offset1 + 1); - *(b_offset + 5) = *(a_offset2 + 1); - } else { - *(b_offset + 1) = 0; - *(b_offset + 5) = 0; - } - - if (i + 2 < m) { - *(b_offset + 2) = *(a_offset1 + 2); - *(b_offset + 6) = *(a_offset2 + 2); - } else { - *(b_offset + 2) = 0; - *(b_offset + 6) = 0; - } - - *(b_offset + 3) = 0; - *(b_offset + 7) = 0; - - b_offset += 8; + *(b_offset + 1) = *(a_offset1 + 1); + *(b_offset + 2) = *(a_offset1 + 2); + *(b_offset + 3) = *(a_offset2 + 0); + *(b_offset + 4) = *(a_offset2 + 1); + *(b_offset + 5) = *(a_offset2 + 2); + b_offset += 6; + } else if (rest == 2) { + *(b_offset + 0) = *(a_offset1 + 0); + *(b_offset + 1) = *(a_offset1 + 1); + *(b_offset + 2) = *(a_offset2 + 0); + *(b_offset + 3) = *(a_offset2 + 1); + b_offset += 4; + } else if (rest == 1) { + *(b_offset + 0) = *(a_offset1 + 0); + *(b_offset + 1) = *(a_offset2 + 0); + b_offset += 2; } } - if (j < n) { - BLASLONG i = 0; - for (; i < m4; i += 4) { + if (n & 1) { + for (BLASLONG i = 0; i < m / 4; i++) { *(b_offset + 0) = *(a_offset + 0); *(b_offset + 1) = *(a_offset + 1); *(b_offset + 2) = *(a_offset + 2); *(b_offset + 3) = *(a_offset + 3); - *(b_offset + 4) = 0; - *(b_offset + 5) = 0; - *(b_offset + 6) = 0; - *(b_offset + 7) = 0; - a_offset += 4; - b_offset += 4; - } - if (i < m) { - *(b_offset + 4) = 0; - *(b_offset + 5) = 0; - *(b_offset + 6) = 0; - *(b_offset + 7) = 0; + b_offset += 4; + a_offset += 4; + } + BLASLONG rest = m & 3; + if (rest == 3) { + *(b_offset + 0) = *(a_offset + 0); + *(b_offset + 1) = *(a_offset + 1); + *(b_offset + 2) = *(a_offset + 2); + } else if (rest == 2) { + *(b_offset + 0) = *(a_offset + 0); + *(b_offset + 1) = *(a_offset + 1); + } else if (rest == 1) { *(b_offset + 0) = *(a_offset + 0); - *(b_offset + 1) = (i + 1 < m) ? *(a_offset + 1) : 0; - *(b_offset + 2) = (i + 2 < m) ? 
*(a_offset + 2) : 0; - *(b_offset + 3) = 0; } } + return 0; } diff --git a/kernel/arm64/sbgemm_tcopy_neoversen2.c b/kernel/arm64/sbgemm_tcopy_neoversen2.c index 7beed83cd..2f3313379 100644 --- a/kernel/arm64/sbgemm_tcopy_neoversen2.c +++ b/kernel/arm64/sbgemm_tcopy_neoversen2.c @@ -28,25 +28,21 @@ #include "common.h" + int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) { IFLOAT *a_offset, *a_offset1, *a_offset2, *a_offset3, *a_offset4; IFLOAT *b_offset; a_offset = a; b_offset = b; - BLASLONG m4 = m & ~3; - BLASLONG n2 = n & ~1; - - BLASLONG j = 0; - for (; j < n2; j += 2) { + for (BLASLONG j = 0; j < n / 2; j++) { a_offset1 = a_offset; a_offset2 = a_offset1 + lda; a_offset3 = a_offset2 + lda; a_offset4 = a_offset3 + lda; a_offset += 2; - BLASLONG i = 0; - for (; i < m4; i += 4) { + for (BLASLONG i = 0; i < m / 4; i++) { *(b_offset + 0) = *(a_offset1 + 0); *(b_offset + 1) = *(a_offset2 + 0); *(b_offset + 2) = *(a_offset3 + 0); @@ -62,55 +58,50 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) { a_offset3 += 4 * lda; a_offset4 += 4 * lda; } - if (i < m) { // padding 4 - *(b_offset + 0) = *(a_offset1 + 0); - *(b_offset + 4) = *(a_offset1 + 1); - - if (i + 1 < m) { + + if (m & 3) { + BLASLONG rest = m & 3; + if (rest == 3) { + *(b_offset + 0) = *(a_offset1 + 0); *(b_offset + 1) = *(a_offset2 + 0); - *(b_offset + 5) = *(a_offset2 + 1); - } else { - *(b_offset + 1) = 0; - *(b_offset + 5) = 0; - } - - if (i + 2 < m) { *(b_offset + 2) = *(a_offset3 + 0); - *(b_offset + 6) = *(a_offset3 + 1); - } else { - *(b_offset + 2) = 0; - *(b_offset + 6) = 0; + *(b_offset + 3) = *(a_offset1 + 1); + *(b_offset + 4) = *(a_offset2 + 1); + *(b_offset + 5) = *(a_offset3 + 1); + b_offset += 6; + } else if (rest == 2) { + *(b_offset + 0) = *(a_offset1 + 0); + *(b_offset + 1) = *(a_offset2 + 0); + *(b_offset + 2) = *(a_offset1 + 1); + *(b_offset + 3) = *(a_offset2 + 1); + b_offset += 4; + } else if (rest == 1) { + *(b_offset + 0) = *(a_offset1 + 0); + 
*(b_offset + 1) = *(a_offset1 + 1); + b_offset += 2; } - - *(b_offset + 3) = 0; - *(b_offset + 7) = 0; - b_offset += 8; } } - if (j < n) { // rest 1 - BLASLONG i = 0; - for (; i < m4; i += 4) { - *(b_offset + 0) = *(a_offset + 0); - *(b_offset + 1) = *(a_offset + 1 * lda); - *(b_offset + 2) = *(a_offset + 2 * lda); - *(b_offset + 3) = *(a_offset + 3 * lda); - *(b_offset + 4) = 0; - *(b_offset + 5) = 0; - *(b_offset + 6) = 0; - *(b_offset + 7) = 0; + if (n & 1) { + for (BLASLONG i = 0; i < m / 4; i++) { + *(b_offset + 0) = *(a_offset); + *(b_offset + 1) = *(a_offset + lda); + *(b_offset + 2) = *(a_offset + lda * 2); + *(b_offset + 3) = *(a_offset + lda * 3); + b_offset += 4; a_offset += 4 * lda; } - if (i < m) { - *(b_offset + 4) = 0; - *(b_offset + 5) = 0; - *(b_offset + 6) = 0; - *(b_offset + 7) = 0; - - *(b_offset + 0) = *(a_offset + 0); - *(b_offset + 1) = (i + 1 < m) ? *(a_offset + 1 * lda) : 0; - *(b_offset + 2) = (i + 2 < m) ? *(a_offset + 2 * lda) : 0; - *(b_offset + 3) = 0; + BLASLONG rest = m & 3; + if (rest == 3) { + *(b_offset + 0) = *(a_offset); + *(b_offset + 1) = *(a_offset + lda); + *(b_offset + 2) = *(a_offset + lda * 2); + } else if (rest == 2) { + *(b_offset + 0) = *(a_offset); + *(b_offset + 1) = *(a_offset + lda); + } else if (rest == 1) { + *(b_offset + 0) = *(a_offset); } } diff --git a/param.h b/param.h index 5fa829986..62941dbb4 100644 --- a/param.h +++ b/param.h @@ -3330,8 +3330,10 @@ is a big desktop or server with abundant cache rather than a phone or embedded d #elif defined(NEOVERSEN2) -#define SBGEMM_DEFAULT_UNROOL_M 8 -#define SBGEMM_DEFAULT_UNROOL_N 4 +#undef SBGEMM_DEFAULT_UNROLL_M +#undef SBGEMM_DEFAULT_UNROLL_N +#define SBGEMM_DEFAULT_UNROLL_M 8 +#define SBGEMM_DEFAULT_UNROLL_N 4 #define SGEMM_DEFAULT_UNROLL_M 16 #define SGEMM_DEFAULT_UNROLL_N 4 From ec0d5c7a2a23e01d2a04754c1f21e07612ed8c6e Mon Sep 17 00:00:00 2001 From: Honglin Zhu Date: Wed, 29 Jun 2022 10:08:06 +0800 Subject: [PATCH 5/5] Add gfortran parameters --- 
Makefile.arm64 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.arm64 b/Makefile.arm64 index c88728e8d..7ffe93a0b 100644 --- a/Makefile.arm64 +++ b/Makefile.arm64 @@ -123,7 +123,7 @@ ifeq ($(GCCVERSIONGTEQ9), 1) ifeq (1, $(filter 1,$(GCCMINORVERSIONGTEQ4) $(GCCVERSIONGTEQ10))) CCOMMON_OPT += -march=armv8.5-a+sve+sve2+bf16 -mtune=neoverse-n2 ifneq ($(F_COMPILER), NAG) -FCOMMON_OPT += -march=armv8.5-a -mtune=neoverse-n2 +FCOMMON_OPT += -march=armv8.5-a+sve+sve2+bf16 -mtune=neoverse-n2 endif else CCOMMON_OPT += -march=armv8.5-a -mtune=native