From 123e0dfb62b21f2468c19be6c8415331faa56fd5 Mon Sep 17 00:00:00 2001
From: Honglin Zhu
Date: Wed, 22 Jun 2022 23:00:40 +0800
Subject: [PATCH] Neoverse N2 sbgemm:
 1. Modify the algorithm to resolve multithreading failures
 2. No memory allocation in sbgemm kernel
 3. Optimize when alpha == 1.0f

---
 kernel/arm64/sbgemm_kernel_8x4_neoversen2.c   | 298 +-------
 .../arm64/sbgemm_kernel_8x4_neoversen2_impl.c | 665 ++++++++++++++++++
 kernel/arm64/sbgemm_ncopy_neoversen2.c        |  84 +--
 kernel/arm64/sbgemm_tcopy_neoversen2.c        |  87 +--
 param.h                                       |   6 +-
 5 files changed, 753 insertions(+), 387 deletions(-)
 create mode 100644 kernel/arm64/sbgemm_kernel_8x4_neoversen2_impl.c

diff --git a/kernel/arm64/sbgemm_kernel_8x4_neoversen2.c b/kernel/arm64/sbgemm_kernel_8x4_neoversen2.c
index c97ad81a2..66e7dd38a 100644
--- a/kernel/arm64/sbgemm_kernel_8x4_neoversen2.c
+++ b/kernel/arm64/sbgemm_kernel_8x4_neoversen2.c
@@ -30,294 +30,16 @@
 #include "common.h"
 
-int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B, FLOAT *C,
-          BLASLONG ldc) {
-  // printf("m: %d, n: %d, k: %d\n", m, n, k);
-  BLASLONG padk = (k + 3) & ~3;
-  BLASLONG padm = (m + 1) & ~1;
-  BLASLONG padn = (n + 1) & ~1;
-  FLOAT *RC = (FLOAT *)calloc(padm * padn, sizeof(float));
-  BLASLONG nldc = padm;
-
-  IFLOAT *ptr_a = A;
-  IFLOAT *ptr_b = B;
-  FLOAT *ptr_c = RC;
-
-  IFLOAT *ptr_a0, *ptr_a1, *ptr_a2, *ptr_a3;
-  IFLOAT *ptr_b0, *ptr_b1;
-  FLOAT *ptr_c00, *ptr_c10, *ptr_c20, *ptr_c30, *ptr_c01, *ptr_c11, *ptr_c21, *ptr_c31;
-
-  svbfloat16_t ma0, ma1, ma2, ma3, mb0, mb1;
-  svfloat32_t mc00, mc01, mc10, mc11, mc20, mc21, mc30, mc31;
-  svbool_t pg16 = svptrue_b16();
-  svbool_t pg32 = svptrue_b32();
-  svfloat32_t svalpha = svdup_f32(alpha);
-
-  uint32_t off_c[] = {0, (uint32_t)nldc, 1, (uint32_t)nldc + 1};  // 00 01 10 11
-  svuint32_t off_vc = svld1_u32(pg32, off_c);
-
-  for (BLASLONG j = 0; j < padn / 4; j++) {
-    ptr_c00 = ptr_c;
-    ptr_c10 = ptr_c00 + 2;
-    ptr_c20 = ptr_c10 + 2;
-    ptr_c30 = ptr_c20 + 2;
-    ptr_c01 = ptr_c + 2 * nldc;
-    ptr_c11 = ptr_c01 + 2;
-    ptr_c21 = ptr_c11 + 2;
-    ptr_c31 = ptr_c21 + 2;
-    ptr_c += 4 * nldc;
-
-    ptr_a = A;
-
-    for (BLASLONG i = 0; i < padm / 8; i++) {
-      ptr_a0 = ptr_a;
-      ptr_a1 = ptr_a0 + 2 * padk;
-      ptr_a2 = ptr_a1 + 2 * padk;
-      ptr_a3 = ptr_a2 + 2 * padk;
-      ptr_a += 8 * padk;
-
-      ptr_b0 = ptr_b;
-      ptr_b1 = ptr_b0 + 2 * padk;
-
-      mc00 = svdup_f32(0);
-      mc01 = svdup_f32(0);
-      mc10 = svdup_f32(0);
-      mc11 = svdup_f32(0);
-      mc20 = svdup_f32(0);
-      mc21 = svdup_f32(0);
-      mc30 = svdup_f32(0);
-      mc31 = svdup_f32(0);
-
-      for (BLASLONG p = 0; p < padk / 4; p++) {
-        ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0);
-        ma1 = svld1_bf16(pg16, (bfloat16_t *)ptr_a1);
-        ma2 = svld1_bf16(pg16, (bfloat16_t *)ptr_a2);
-        ma3 = svld1_bf16(pg16, (bfloat16_t *)ptr_a3);
-        mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0);
-        mb1 = svld1_bf16(pg16, (bfloat16_t *)ptr_b1);
-
-        mc00 = svbfmmla(mc00, ma0, mb0);
-        mc10 = svbfmmla(mc10, ma1, mb0);
-        mc20 = svbfmmla(mc20, ma2, mb0);
-        mc30 = svbfmmla(mc30, ma3, mb0);
-        mc01 = svbfmmla(mc01, ma0, mb1);
-        mc11 = svbfmmla(mc11, ma1, mb1);
-        mc21 = svbfmmla(mc21, ma2, mb1);
-        mc31 = svbfmmla(mc31, ma3, mb1);
-
-        ptr_a0 += 8;
-        ptr_a1 += 8;
-        ptr_a2 += 8;
-        ptr_a3 += 8;
-        ptr_b0 += 8;
-        ptr_b1 += 8;
-      }
-      svst1_scatter_index(pg32, ptr_c00, off_vc, mc00);
-      svst1_scatter_index(pg32, ptr_c10, off_vc, mc10);
-      svst1_scatter_index(pg32, ptr_c20, off_vc, mc20);
-      svst1_scatter_index(pg32, ptr_c30, off_vc, mc30);
-      svst1_scatter_index(pg32, ptr_c01, off_vc, mc01);
-      svst1_scatter_index(pg32, ptr_c11, off_vc, mc11);
-      svst1_scatter_index(pg32, ptr_c21, off_vc, mc21);
-      svst1_scatter_index(pg32, ptr_c31, off_vc, mc31);
-
-      ptr_c00 += 8;
-      ptr_c10 += 8;
-      ptr_c20 += 8;
-      ptr_c30 += 8;
-      ptr_c01 += 8;
-      ptr_c11 += 8;
-      ptr_c21 += 8;
-      ptr_c31 += 8;
-    }
-
-    if (padm & 4) {
-      // rest 4 or 6
-      ptr_a0 = ptr_a;
-      ptr_a1 = ptr_a0 + 2 * padk;
-      ptr_a += 4 * padk;
-
-      ptr_b0 = ptr_b;
-      ptr_b1 = ptr_b0 + 2 * padk;
-
-      mc00 = svdup_f32(0);
-      mc01 = svdup_f32(0);
-      mc10 = svdup_f32(0);
-      mc11 = svdup_f32(0);
-      for (BLASLONG p = 0; p < padk / 4; p++) {
-        ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0);
-        ma1 = svld1_bf16(pg16, (bfloat16_t *)ptr_a1);
-        mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0);
-        mb1 = svld1_bf16(pg16, (bfloat16_t *)ptr_b1);
-
-        mc00 = svbfmmla(mc00, ma0, mb0);
-        mc10 = svbfmmla(mc10, ma1, mb0);
-        mc01 = svbfmmla(mc01, ma0, mb1);
-        mc11 = svbfmmla(mc11, ma1, mb1);
-
-        ptr_a0 += 8;
-        ptr_a1 += 8;
-        ptr_b0 += 8;
-        ptr_b1 += 8;
-      }
-      svst1_scatter_index(pg32, ptr_c00, off_vc, mc00);
-      svst1_scatter_index(pg32, ptr_c10, off_vc, mc10);
-      svst1_scatter_index(pg32, ptr_c01, off_vc, mc01);
-      svst1_scatter_index(pg32, ptr_c11, off_vc, mc11);
-
-      ptr_c00 += 4;
-      ptr_c10 += 4;
-      ptr_c01 += 4;
-      ptr_c11 += 4;
-    }
-
-    if (padm & 2) {
-      // rest 2
-      ptr_a0 = ptr_a;
-
-      ptr_b0 = ptr_b;
-      ptr_b1 = ptr_b0 + 2 * padk;
-
-      mc00 = svdup_f32(0);
-      mc01 = svdup_f32(0);
-      for (BLASLONG p = 0; p < padk / 4; p++) {
-        ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0);
-        mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0);
-        mb1 = svld1_bf16(pg16, (bfloat16_t *)ptr_b1);
-        mc00 = svbfmmla(mc00, ma0, mb0);
-        mc01 = svbfmmla(mc01, ma0, mb1);
-        ptr_a0 += 8;
-        ptr_b0 += 8;
-        ptr_b1 += 8;
-      }
-      svst1_scatter_index(pg32, ptr_c00, off_vc, mc00);
-      svst1_scatter_index(pg32, ptr_c01, off_vc, mc01);
-      ptr_c00 += 2;
-      ptr_c01 += 2;
-    }
-
-    ptr_b += 4 * padk;
-  }
-
-  if (padn & 2) {
-    // rest 2
-    ptr_c00 = ptr_c;
-    ptr_c10 = ptr_c00 + 2;
-    ptr_c20 = ptr_c10 + 2;
-    ptr_c30 = ptr_c20 + 2;
-    ptr_c += 2 * nldc;
-
-    ptr_a = A;
-
-    for (BLASLONG i = 0; i < padm / 8; i++) {
-      ptr_a0 = ptr_a;
-      ptr_a1 = ptr_a0 + 2 * padk;
-      ptr_a2 = ptr_a1 + 2 * padk;
-      ptr_a3 = ptr_a2 + 2 * padk;
-      ptr_a += 8 * padk;
-
-      ptr_b0 = ptr_b;
-
-      mc00 = svdup_f32(0);
-      mc10 = svdup_f32(0);
-      mc20 = svdup_f32(0);
-      mc30 = svdup_f32(0);
-
-      for (BLASLONG p = 0; p < padk / 4; p++) {
-        ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0);
-        ma1 = svld1_bf16(pg16, (bfloat16_t *)ptr_a1);
-        ma2 = svld1_bf16(pg16, (bfloat16_t *)ptr_a2);
-        ma3 = svld1_bf16(pg16, (bfloat16_t *)ptr_a3);
-        mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0);
-        mc00 = svbfmmla(mc00, ma0, mb0);
-        mc10 = svbfmmla(mc10, ma1, mb0);
-        mc20 = svbfmmla(mc20, ma2, mb0);
-        mc30 = svbfmmla(mc30, ma3, mb0);
-        ptr_a0 += 8;
-        ptr_a1 += 8;
-        ptr_a2 += 8;
-        ptr_a3 += 8;
-        ptr_b0 += 8;
-      }
-      svst1_scatter_index(pg32, ptr_c00, off_vc, mc00);
-      svst1_scatter_index(pg32, ptr_c10, off_vc, mc10);
-      svst1_scatter_index(pg32, ptr_c20, off_vc, mc20);
-      svst1_scatter_index(pg32, ptr_c30, off_vc, mc30);
-      ptr_c00 += 8;
-      ptr_c10 += 8;
-      ptr_c20 += 8;
-      ptr_c30 += 8;
-    }
-
-    if (padm & 4) {
-      ptr_a0 = ptr_a;
-      ptr_a1 = ptr_a0 + 2 * padk;
-      ptr_a += 4 * padk;
-
-      ptr_b0 = ptr_b;
-
-      mc00 = svdup_f32(0);
-      mc10 = svdup_f32(0);
-      for (BLASLONG p = 0; p < padk / 4; p++) {
-        ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0);
-        ma1 = svld1_bf16(pg16, (bfloat16_t *)ptr_a1);
-        mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0);
-        mc00 = svbfmmla(mc00, ma0, mb0);
-        mc10 = svbfmmla(mc10, ma1, mb0);
-        ptr_a0 += 8;
-        ptr_a1 += 8;
-        ptr_b0 += 8;
-      }
-      svst1_scatter_index(pg32, ptr_c00, off_vc, mc00);
-      svst1_scatter_index(pg32, ptr_c10, off_vc, mc10);
-      ptr_c00 += 4;
-      ptr_c10 += 4;
-    }
-
-    if (padm & 2) {
-      ptr_a0 = ptr_a;
-      ptr_a += 2 * padk;
-      ptr_b0 = ptr_b;
-      mc00 = svdup_f32(0);
-      for (BLASLONG p = 0; p < padk / 4; p++) {
-        ma0 = svld1_bf16(pg16, (bfloat16_t *)ptr_a0);
-        mb0 = svld1_bf16(pg16, (bfloat16_t *)ptr_b0);
-        mc00 = svbfmmla(mc00, ma0, mb0);
-        ptr_a0 += 8;
-        ptr_b0 += 8;
-      }
-      svst1_scatter_index(pg32, ptr_c00, off_vc, mc00);
-      ptr_c00 += 2;
-    }
-
-    ptr_b += 2 * padk;
-  }
-
-  FLOAT *org_c = C;
-  FLOAT *raw_c = RC;
-  FLOAT *org_c0, *raw_c0;
-  svfloat32_t org_vc0, raw_vc0;
-  for (BLASLONG j = 0; j < n; j++) {
-    org_c0 = org_c;
-    raw_c0 = raw_c;
-    org_c += ldc;
-    raw_c += nldc;
-    BLASLONG i;
-    for (i = 0; i < m / 4; i++) {
-      org_vc0 = svld1_f32(pg32, org_c0);
-      raw_vc0 = svld1_f32(pg32, raw_c0);
-      org_vc0 = svmad_z(pg32, svalpha, raw_vc0,
-                        org_vc0);  // alpha * raw + org, raw -> a * b
-      svst1_f32(pg32, org_c0, org_vc0);
-      org_c0 += 4;
-      raw_c0 += 4;
-    }
-    for (i = 0; i < (m & 3); i++) {
-      *org_c0 += alpha * (*raw_c0);
-      org_c0++;
-      raw_c0++;
-    }
-  }
+#define ALPHA_ONE
+#include "sbgemm_kernel_8x4_neoversen2_impl.c"
+#undef ALPHA_ONE
+#include "sbgemm_kernel_8x4_neoversen2_impl.c"
+int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B,
+          FLOAT *C, BLASLONG ldc) {
+  if (alpha == 1.0f)
+    return sbgemm_kernel_neoversen2_alpha_one(m, n, k, alpha, A, B, C, ldc);
+  else
+    return sbgemm_kernel_neoversen2_alpha(m, n, k, alpha, A, B, C, ldc);
   return 0;
 }
diff --git a/kernel/arm64/sbgemm_kernel_8x4_neoversen2_impl.c b/kernel/arm64/sbgemm_kernel_8x4_neoversen2_impl.c
new file mode 100644
index 000000000..7d53b1aa0
--- /dev/null
+++ b/kernel/arm64/sbgemm_kernel_8x4_neoversen2_impl.c
@@ -0,0 +1,665 @@
+/***************************************************************************
+ * Copyright (c) 2022, The OpenBLAS Project
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of the OpenBLAS project nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ * *****************************************************************************/
+
+#include <arm_sve.h>
+
+#include "common.h"
+
+#ifdef ALPHA_ONE
+#define LOAD_C(M, N) \
+  mc##M##N = svld1_gather_index(pg32, ptr_c0##N + 2 * M, off_vc);
+
+#define LOAD_C_LOW(M, N) \
+  mc##M##N = svld1_gather_index(pg32_low, ptr_c0##N + 2 * M, off_vc);
+
+#define LOAD_C_EVEN(M, N) \
+  mc##M##N = svld1_gather_index(pg32_even, ptr_c0##N + 2 * M, off_vc);
+
+#define LOAD_C_FIRST(M, N) \
+  mc##M##N = svld1_gather_index(pg32_first, ptr_c0##N + 2 * M, off_vc);
+
+#define STORE_C(M, N) \
+  svst1_scatter_index(pg32, ptr_c0##N + 2 * M, off_vc, mc##M##N);
+
+#define STORE_C_LOW(M, N) \
+  svst1_scatter_index(pg32_low, ptr_c0##N + 2 * M, off_vc, mc##M##N);
+
+#define STORE_C_EVEN(M, N) \
+  svst1_scatter_index(pg32_even, ptr_c0##N + 2 * M, off_vc, mc##M##N);
+
+#define STORE_C_FIRST(M, N) \
+  svst1_scatter_index(pg32_first, ptr_c0##N + 2 * M, off_vc, mc##M##N);
+
+#else
+#define LOAD_C(M, N)       \
+  mc##M##N = svdup_f32(0); \
+  oc##M##N = svld1_gather_index(pg32, ptr_c0##N + 2 * M, off_vc);
+
+#define LOAD_C_LOW(M, N)   \
+  mc##M##N = svdup_f32(0); \
+  oc##M##N = svld1_gather_index(pg32_low, ptr_c0##N + 2 * M, off_vc);
+
+#define LOAD_C_EVEN(M, N)  \
+  mc##M##N = svdup_f32(0); \
+  oc##M##N = svld1_gather_index(pg32_even, ptr_c0##N + 2 * M, off_vc);
+
+#define LOAD_C_FIRST(M, N) \
+  mc##M##N = svdup_f32(0); \
+  oc##M##N = svld1_gather_index(pg32_first, ptr_c0##N + 2 * M, off_vc);
+
+#define STORE_C(M, N)                                     \
+  mc##M##N = svmad_z(pg32, svalpha, mc##M##N, oc##M##N);  \
+  svst1_scatter_index(pg32, ptr_c0##N + 2 * M, off_vc, mc##M##N);
+
+#define STORE_C_LOW(M, N)                                     \
+  mc##M##N = svmad_z(pg32_low, svalpha, mc##M##N, oc##M##N);  \
+  svst1_scatter_index(pg32_low, ptr_c0##N + 2 * M, off_vc, mc##M##N);
+
+#define STORE_C_EVEN(M, N)                                    \
+  mc##M##N = svmad_z(pg32_even, svalpha, mc##M##N, oc##M##N); \
+  svst1_scatter_index(pg32_even, ptr_c0##N + 2 * M, off_vc, mc##M##N);
+
+#define STORE_C_FIRST(M, N)                                    \
+  mc##M##N = svmad_z(pg32_first, svalpha, mc##M##N, oc##M##N); \
+  svst1_scatter_index(pg32_first, ptr_c0##N + 2 * M, off_vc, mc##M##N);
+
+#endif
+
+#define LOAD_A(M) ma##M = svld1_bf16(pg16, ptr_a##M);
+
+#define LOAD_B(N) mb##N = svld1_bf16(pg16, ptr_b##N);
+
+#define MATMUL(M, N) mc##M##N = svbfmmla(mc##M##N, ma##M, mb##N);
+
+#define LOAD_KREST_1(NAME, M)                                  \
+  m##NAME##M = svdupq_bf16(*(ptr_##NAME##M), zero, zero, zero, \
+                           *(ptr_##NAME##M + 1), zero, zero, zero);
+
+#define LOAD_KREST_1_LOW(NAME, M)                                          \
+  m##NAME##M = svdupq_bf16(*(ptr_##NAME##M), zero, zero, zero, zero, zero, \
+                           zero, zero);
+
+#define LOAD_KREST_2(NAME, M)                                         \
+  m##NAME##M =                                                        \
+      svdupq_bf16(*(ptr_##NAME##M), *(ptr_##NAME##M + 1), zero, zero, \
+                  *(ptr_##NAME##M + 2), *(ptr_##NAME##M + 3), zero, zero);
+
+#define LOAD_KREST_2_LOW(NAME, M)                                        \
+  m##NAME##M = svdupq_bf16(*(ptr_##NAME##M), *(ptr_##NAME##M + 1), zero, \
+                           zero, zero, zero, zero, zero);
+
+#define LOAD_KREST_3(NAME, M)                                       \
+  m##NAME##M =                                                      \
+      svdupq_bf16(*(ptr_##NAME##M), *(ptr_##NAME##M + 1),           \
+                  *(ptr_##NAME##M + 2), zero, *(ptr_##NAME##M + 3), \
+                  *(ptr_##NAME##M + 4), *(ptr_##NAME##M + 5), zero);
+
+#define LOAD_KREST_3_LOW(NAME, M)                         \
+  m##NAME##M =                                            \
+      svdupq_bf16(*(ptr_##NAME##M), *(ptr_##NAME##M + 1), \
+                  *(ptr_##NAME##M + 2), zero, zero, zero, zero, zero);
+
+
+#ifdef ALPHA_ONE
+int sbgemm_kernel_neoversen2_alpha_one(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT * B, FLOAT * C, BLASLONG ldc)
+#else
+int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT * B, FLOAT * C, BLASLONG ldc)
+#endif
+{
+  bfloat16_t *ptr_a = (bfloat16_t *)A;
+  bfloat16_t *ptr_b = (bfloat16_t *)B;
+  FLOAT *ptr_c = C;
+
+  bfloat16_t *ptr_a0, *ptr_a1, *ptr_a2, *ptr_a3;
+  bfloat16_t *ptr_b0, *ptr_b1;
+  FLOAT *ptr_c00, *ptr_c01;
+
+  svbfloat16_t ma0, ma1, ma2, ma3, mb0, mb1;
+  svfloat32_t mc00, mc01, mc10, mc11, mc20, mc21, mc30, mc31;
+#ifndef ALPHA_ONE
+  svfloat32_t oc00, oc01, oc10, oc11, oc20, oc21, oc30, oc31;
+#endif
+  svbool_t pg16 = svptrue_b16();
+  svbool_t pg16_low = svdupq_b16(1, 1, 1, 1, 0, 0, 0, 0);
+  svbool_t pg32 = svptrue_b32();
+  svbool_t pg32_low = svdupq_b32(1, 1, 0, 0);
+  svbool_t pg32_even = svdupq_b32(1, 0, 1, 0);
+  svbool_t pg32_first = svdupq_b32(1, 0, 0, 0);
+  svfloat32_t svalpha = svdup_f32(alpha);
+  bfloat16 tmp = 0;
+  bfloat16_t zero = *((bfloat16_t *)&tmp);
+  BLASLONG krest = k & 3;
+
+  // 00 01 10 11
+  svuint32_t off_vc = svdupq_u32(0, (uint32_t)ldc, 1, (uint32_t)ldc + 1);
+
+  for (BLASLONG j = 0; j < n / 4; j++) {
+    ptr_c00 = ptr_c;
+    ptr_c01 = ptr_c + 2 * ldc;
+    ptr_c += 4 * ldc;
+
+    ptr_a = (bfloat16_t *)A;
+
+    for (BLASLONG i = 0; i < m / 8; i++) {
+      ptr_a0 = ptr_a;
+      ptr_a1 = ptr_a0 + 2 * k;
+      ptr_a2 = ptr_a1 + 2 * k;
+      ptr_a3 = ptr_a2 + 2 * k;
+      ptr_a += 8 * k;
+
+      ptr_b0 = ptr_b;
+      ptr_b1 = ptr_b0 + 2 * k;
+
+      LOAD_C(0, 0); LOAD_C(0, 1);
+      LOAD_C(1, 0); LOAD_C(1, 1);
+      LOAD_C(2, 0); LOAD_C(2, 1);
+      LOAD_C(3, 0); LOAD_C(3, 1);
+
+      for (BLASLONG p = 0; p < k / 4; p++) {
+        LOAD_A(0); LOAD_A(1); LOAD_A(2); LOAD_A(3);
+        LOAD_B(0); LOAD_B(1);
+
+        MATMUL(0, 0); MATMUL(0, 1);
+        MATMUL(1, 0); MATMUL(1, 1);
+        MATMUL(2, 0); MATMUL(2, 1);
+        MATMUL(3, 0); MATMUL(3, 1);
+
+        ptr_a0 += 8; ptr_a1 += 8; ptr_a2 += 8; ptr_a3 += 8;
+        ptr_b0 += 8; ptr_b1 += 8;
+      }
+
+      if (krest) {
+        if (krest == 1) {
+          LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1);
+          LOAD_KREST_1(a, 2); LOAD_KREST_1(a, 3);
+          LOAD_KREST_1(b, 0); LOAD_KREST_1(b, 1);
+        } else if (krest == 2) {
+          LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1);
+          LOAD_KREST_2(a, 2); LOAD_KREST_2(a, 3);
+          LOAD_KREST_2(b, 0); LOAD_KREST_2(b, 1);
+        } else if (krest == 3) {
+          LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1);
+          LOAD_KREST_3(a, 2); LOAD_KREST_3(a, 3);
+          LOAD_KREST_3(b, 0); LOAD_KREST_3(b, 1);
+        }
+        MATMUL(0, 0); MATMUL(0, 1);
+        MATMUL(1, 0); MATMUL(1, 1);
+        MATMUL(2, 0); MATMUL(2, 1);
+        MATMUL(3, 0); MATMUL(3, 1);
+      }
+
+      STORE_C(0, 0); STORE_C(0, 1);
+      STORE_C(1, 0); STORE_C(1, 1);
+      STORE_C(2, 0); STORE_C(2, 1);
+      STORE_C(3, 0); STORE_C(3, 1);
+
+      ptr_c00 += 8; ptr_c01 += 8;
+    }
+
+    if (m & 4) {
+      ptr_a0 = ptr_a;
+      ptr_a1 = ptr_a0 + 2 * k;
+      ptr_a += 4 * k;
+
+      ptr_b0 = ptr_b;
+      ptr_b1 = ptr_b0 + 2 * k;
+
+      LOAD_C(0, 0); LOAD_C(0, 1);
+      LOAD_C(1, 0); LOAD_C(1, 1);
+
+      for (BLASLONG p = 0; p < k / 4; p++) {
+        LOAD_A(0); LOAD_A(1);
+        LOAD_B(0); LOAD_B(1);
+
+        MATMUL(0, 0); MATMUL(0, 1);
+        MATMUL(1, 0); MATMUL(1, 1);
+
+        ptr_a0 += 8; ptr_a1 += 8;
+        ptr_b0 += 8; ptr_b1 += 8;
+      }
+
+      if (krest) {
+        if (krest == 1) {
+          LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1);
+          LOAD_KREST_1(b, 0); LOAD_KREST_1(b, 1);
+        } else if (krest == 2) {
+          LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1);
+          LOAD_KREST_2(b, 0); LOAD_KREST_2(b, 1);
+        } else if (krest == 3) {
+          LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1);
+          LOAD_KREST_3(b, 0); LOAD_KREST_3(b, 1);
+        }
+        MATMUL(0, 0); MATMUL(0, 1);
+        MATMUL(1, 0); MATMUL(1, 1);
+      }
+
+      STORE_C(0, 0); STORE_C(0, 1);
+      STORE_C(1, 0); STORE_C(1, 1);
+
+      ptr_c00 += 4; ptr_c01 += 4;
+    }
+
+    if (m & 2) {
+      ptr_a0 = ptr_a;
+      ptr_a += 2 * k;
+
+      ptr_b0 = ptr_b;
+      ptr_b1 = ptr_b0 + 2 * k;
+
+      LOAD_C(0, 0); LOAD_C(0, 1);
+
+      for (BLASLONG p = 0; p < k / 4; p++) {
+        LOAD_A(0);
+        LOAD_B(0); LOAD_B(1);
+
+        MATMUL(0, 0); MATMUL(0, 1);
+
+        ptr_a0 += 8;
+        ptr_b0 += 8; ptr_b1 += 8;
+      }
+
+      if (krest) {
+        if (krest == 1) {
+          LOAD_KREST_1(a, 0);
+          LOAD_KREST_1(b, 0); LOAD_KREST_1(b, 1);
+        } else if (krest == 2) {
+          LOAD_KREST_2(a, 0);
+          LOAD_KREST_2(b, 0); LOAD_KREST_2(b, 1);
+        } else if (krest == 3) {
+          LOAD_KREST_3(a, 0);
+          LOAD_KREST_3(b, 0); LOAD_KREST_3(b, 1);
+        }
+        MATMUL(0, 0); MATMUL(0, 1);
+      }
+      STORE_C(0, 0); STORE_C(0, 1);
+      ptr_c00 += 2; ptr_c01 += 2;
+    }
+
+    if (m & 1) {
+      ptr_a0 = ptr_a;
+
+      ptr_b0 = ptr_b;
+      ptr_b1 = ptr_b0 + 2 * k;
+
+      LOAD_C_LOW(0, 0); LOAD_C_LOW(0, 1);
+
+      for (BLASLONG p = 0; p < k / 4; p++) {
+        ma0 = svld1_bf16(pg16_low, ptr_a0);
+        LOAD_B(0); LOAD_B(1);
+
+        MATMUL(0, 0); MATMUL(0, 1);
+
+        ptr_a0 += 4;
+        ptr_b0 += 8;
+        ptr_b1 += 8;
+      }
+
+      if (krest) {
+        if (krest == 1) {
+          LOAD_KREST_1_LOW(a, 0);
+          LOAD_KREST_1(b, 0); LOAD_KREST_1(b, 1);
+        } else if (krest == 2) {
+          LOAD_KREST_2_LOW(a, 0);
+          LOAD_KREST_2(b, 0); LOAD_KREST_2(b, 1);
+        } else if (krest == 3) {
+          LOAD_KREST_3_LOW(a, 0);
+          LOAD_KREST_3(b, 0); LOAD_KREST_3(b, 1);
+        }
+        MATMUL(0, 0); MATMUL(0, 1);
+      }
+      STORE_C_LOW(0, 0); STORE_C_LOW(0, 1);
+    }
+
+    ptr_b += 4 * k;
+  }
+
+  if (n & 2) {
+    ptr_c00 = ptr_c;
+    ptr_c += 2 * ldc;
+
+    ptr_a = (bfloat16_t *)A;
+
+    for (BLASLONG i = 0; i < m / 8; i++) {
+      ptr_a0 = ptr_a;
+      ptr_a1 = ptr_a0 + 2 * k;
+      ptr_a2 = ptr_a1 + 2 * k;
+      ptr_a3 = ptr_a2 + 2 * k;
+      ptr_a += 8 * k;
+
+      ptr_b0 = ptr_b;
+
+      LOAD_C(0, 0);
+      LOAD_C(1, 0);
+      LOAD_C(2, 0);
+      LOAD_C(3, 0);
+
+      for (BLASLONG p = 0; p < k / 4; p++) {
+        LOAD_A(0); LOAD_A(1); LOAD_A(2); LOAD_A(3);
+        LOAD_B(0);
+
+        MATMUL(0, 0);
+        MATMUL(1, 0);
+        MATMUL(2, 0);
+        MATMUL(3, 0);
+
+        ptr_a0 += 8; ptr_a1 += 8; ptr_a2 += 8; ptr_a3 += 8;
+        ptr_b0 += 8;
+      }
+      if (krest) {
+        if (krest == 1) {
+          LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1);
+          LOAD_KREST_1(a, 2); LOAD_KREST_1(a, 3);
+          LOAD_KREST_1(b, 0);
+        } else if (krest == 2) {
+          LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1);
+          LOAD_KREST_2(a, 2); LOAD_KREST_2(a, 3);
+          LOAD_KREST_2(b, 0);
+        } else if (krest == 3) {
+          LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1);
+          LOAD_KREST_3(a, 2); LOAD_KREST_3(a, 3);
+          LOAD_KREST_3(b, 0);
+        }
+        MATMUL(0, 0);
+        MATMUL(1, 0);
+        MATMUL(2, 0);
+        MATMUL(3, 0);
+      }
+
+      STORE_C(0, 0);
+      STORE_C(1, 0);
+      STORE_C(2, 0);
+      STORE_C(3, 0);
+
+      ptr_c00 += 8;
+    }
+
+    if (m & 4) {
+      ptr_a0 = ptr_a;
+      ptr_a1 = ptr_a0 + 2 * k;
+      ptr_a += 4 * k;
+
+      ptr_b0 = ptr_b;
+
+      LOAD_C(0, 0);
+      LOAD_C(1, 0);
+
+      for (BLASLONG p = 0; p < k / 4; p++) {
+        LOAD_A(0); LOAD_A(1);
+        LOAD_B(0);
+
+        MATMUL(0, 0);
+        MATMUL(1, 0);
+
+        ptr_a0 += 8; ptr_a1 += 8;
+        ptr_b0 += 8;
+      }
+      if (krest) {
+        if (krest == 1) {
+          LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1);
+          LOAD_KREST_1(b, 0);
+        } else if (krest == 2) {
+          LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1);
+          LOAD_KREST_2(b, 0);
+        } else if (krest == 3) {
+          LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1);
+          LOAD_KREST_3(b, 0);
+        }
+        MATMUL(0, 0);
+        MATMUL(1, 0);
+      }
+      STORE_C(0, 0);
+      STORE_C(1, 0);
+
+      ptr_c00 += 4;
+    }
+
+    if (m & 2) {
+      ptr_a0 = ptr_a;
+      ptr_a += 2 * k;
+      ptr_b0 = ptr_b;
+
+      LOAD_C(0, 0);
+      for (BLASLONG p = 0; p < k / 4; p++) {
+        LOAD_A(0);
+        LOAD_B(0);
+        MATMUL(0, 0);
+        ptr_a0 += 8;
+        ptr_b0 += 8;
+      }
+      if (krest) {
+        if (krest == 1) {
+          LOAD_KREST_1(a, 0);
+          LOAD_KREST_1(b, 0);
+        } else if (krest == 2) {
+          LOAD_KREST_2(a, 0);
+          LOAD_KREST_2(b, 0);
+        } else if (krest == 3) {
+          LOAD_KREST_3(a, 0);
+          LOAD_KREST_3(b, 0);
+        }
+        MATMUL(0, 0);
+      }
+      STORE_C(0, 0);
+      ptr_c00 += 2;
+    }
+
+    if (m & 1) {
+      ptr_a0 = ptr_a;
+
+      ptr_b0 = ptr_b;
+
+      LOAD_C_LOW(0, 0);
+
+      for (BLASLONG p = 0; p < k / 4; p++) {
+        ma0 = svld1_bf16(pg16_low, ptr_a0);
+        LOAD_B(0);
+        MATMUL(0, 0);
+        ptr_a0 += 4;
+        ptr_b0 += 8;
+      }
+      if (krest) {
+        if (krest == 1) {
+          LOAD_KREST_1_LOW(a, 0);
+          LOAD_KREST_1(b, 0);
+        } else if (krest == 2) {
+          LOAD_KREST_2_LOW(a, 0);
+          LOAD_KREST_2(b, 0);
+        } else if (krest == 3) {
+          LOAD_KREST_3_LOW(a, 0);
+          LOAD_KREST_3(b, 0);
+        }
+        MATMUL(0, 0);
+      }
+      STORE_C_LOW(0, 0);
+    }
+
+    ptr_b += 2 * k;
+  }
+
+  if (n & 1) {
+    ptr_c00 = ptr_c;
+    ptr_a = (bfloat16_t *)A;
+
+    for (BLASLONG i = 0; i < m / 8; i++) {
+      ptr_a0 = ptr_a;
+      ptr_a1 = ptr_a0 + 2 * k;
+      ptr_a2 = ptr_a1 + 2 * k;
+      ptr_a3 = ptr_a2 + 2 * k;
+      ptr_a += 8 * k;
+
+      ptr_b0 = ptr_b;
+
+      LOAD_C_EVEN(0, 0);
+      LOAD_C_EVEN(1, 0);
+      LOAD_C_EVEN(2, 0);
+      LOAD_C_EVEN(3, 0);
+
+      for (BLASLONG p = 0; p < k / 4; p++) {
+        LOAD_A(0); LOAD_A(1); LOAD_A(2); LOAD_A(3);
+        mb0 = svld1_bf16(pg16_low, ptr_b0);
+
+        MATMUL(0, 0);
+        MATMUL(1, 0);
+        MATMUL(2, 0);
+        MATMUL(3, 0);
+
+        ptr_a0 += 8; ptr_a1 += 8; ptr_a2 += 8; ptr_a3 += 8;
+        ptr_b0 += 4;
+      }
+      if (krest) {
+        if (krest == 1) {
+          LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1);
+          LOAD_KREST_1(a, 2); LOAD_KREST_1(a, 3);
+          LOAD_KREST_1_LOW(b, 0);
+        } else if (krest == 2) {
+          LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1);
+          LOAD_KREST_2(a, 2); LOAD_KREST_2(a, 3);
+          LOAD_KREST_2_LOW(b, 0);
+        } else if (krest == 3) {
+          LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1);
+          LOAD_KREST_3(a, 2); LOAD_KREST_3(a, 3);
+          LOAD_KREST_3_LOW(b, 0);
+        }
+        MATMUL(0, 0);
+        MATMUL(1, 0);
+        MATMUL(2, 0);
+        MATMUL(3, 0);
+      }
+      STORE_C_EVEN(0, 0);
+      STORE_C_EVEN(1, 0);
+      STORE_C_EVEN(2, 0);
+      STORE_C_EVEN(3, 0);
+
+      ptr_c00 += 8;
+    }
+
+    if (m & 4) {
+      ptr_a0 = ptr_a;
+      ptr_a1 = ptr_a0 + 2 * k;
+      ptr_a += 4 * k;
+
+      ptr_b0 = ptr_b;
+
+      LOAD_C_EVEN(0, 0);
+      LOAD_C_EVEN(1, 0);
+
+      for (BLASLONG p = 0; p < k / 4; p++) {
+        LOAD_A(0); LOAD_A(1);
+        mb0 = svld1_bf16(pg16_low, ptr_b0);
+
+        MATMUL(0, 0);
+        MATMUL(1, 0);
+
+        ptr_a0 += 8; ptr_a1 += 8;
+        ptr_b0 += 4;
+      }
+      if (krest) {
+        if (krest == 1) {
+          LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1);
+          LOAD_KREST_1_LOW(b, 0);
+        } else if (krest == 2) {
+          LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1);
+          LOAD_KREST_2_LOW(b, 0);
+        } else if (krest == 3) {
+          LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1);
+          LOAD_KREST_3_LOW(b, 0);
+        }
+        MATMUL(0, 0);
+        MATMUL(1, 0);
+      }
+      STORE_C_EVEN(0, 0);
+      STORE_C_EVEN(1, 0);
+
+      ptr_c00 += 4;
+    }
+
+    if (m & 2) {
+      ptr_a0 = ptr_a;
+      ptr_a += 2 * k;
+
+      ptr_b0 = ptr_b;
+
+      LOAD_C_EVEN(0, 0);
+
+      for (BLASLONG p = 0; p < k / 4; p++) {
+        LOAD_A(0);
+        mb0 = svld1_bf16(pg16_low, ptr_b0);
+
+        MATMUL(0, 0);
+
+        ptr_a0 += 8;
+        ptr_b0 += 4;
+      }
+      if (krest) {
+        if (krest == 1) {
+          LOAD_KREST_1(a, 0);
+          LOAD_KREST_1_LOW(b, 0);
+        } else if (krest == 2) {
+          LOAD_KREST_2(a, 0);
+          LOAD_KREST_2_LOW(b, 0);
+        } else if (krest == 3) {
+          LOAD_KREST_3(a, 0);
+          LOAD_KREST_3_LOW(b, 0);
+        }
+        MATMUL(0, 0);
+      }
+      STORE_C_EVEN(0, 0);
+      ptr_c00 += 2;
+    }
+    if (m & 1) {
+      ptr_a0 = ptr_a;
+      ptr_b0 = ptr_b;
+      LOAD_C_FIRST(0, 0);
+      for (BLASLONG p = 0; p < k / 4; p++) {
+        ma0 = svld1_bf16(pg16_low, ptr_a0);
+        mb0 = svld1_bf16(pg16_low, ptr_b0);
+
+        MATMUL(0, 0);
+
+        ptr_a0 += 4;
+        ptr_b0 += 4;
+      }
+      if (krest) {
+        if (krest == 1) {
+          LOAD_KREST_1_LOW(a, 0);
+          LOAD_KREST_1_LOW(b, 0);
+        } else if (krest == 2) {
+          LOAD_KREST_2_LOW(a, 0);
+          LOAD_KREST_2_LOW(b, 0);
+        } else if (krest == 3) {
+          LOAD_KREST_3_LOW(a, 0);
+          LOAD_KREST_3_LOW(b, 0);
+        }
+        MATMUL(0, 0);
+      }
+      STORE_C_FIRST(0, 0);
+    }
+  }
+
+  return 0;
+}
\ No newline at end of file
diff --git a/kernel/arm64/sbgemm_ncopy_neoversen2.c b/kernel/arm64/sbgemm_ncopy_neoversen2.c
index 977256f34..594067ebb 100644
--- a/kernel/arm64/sbgemm_ncopy_neoversen2.c
+++ b/kernel/arm64/sbgemm_ncopy_neoversen2.c
@@ -35,17 +35,11 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) {
   a_offset = a;
   b_offset = b;
 
-  BLASLONG m4 = m & ~3;
-  BLASLONG n2 = n & ~1;
-
-  BLASLONG j = 0;
-  for (; j < n2; j += 2) {
+  for (BLASLONG j = 0; j < n / 2; j++) {
     a_offset1 = a_offset;
     a_offset2 = a_offset1 + lda;
     a_offset += 2 * lda;
-
-    BLASLONG i = 0;
-    for (; i < m4; i += 4) {
+    for (BLASLONG i = 0; i < m / 4; i++) {
       *(b_offset + 0) = *(a_offset1 + 0);
       *(b_offset + 1) = *(a_offset1 + 1);
       *(b_offset + 2) = *(a_offset1 + 2);
@@ -59,57 +53,49 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) {
       a_offset2 += 4;
       b_offset += 8;
     }
-    if (i < m) {
+    BLASLONG rest = m & 3;
+    if (rest == 3) {
       *(b_offset + 0) = *(a_offset1 + 0);
-      *(b_offset + 4) = *(a_offset2 + 0);
-
-      if (i + 1 < m) {
-        *(b_offset + 1) = *(a_offset1 + 1);
-        *(b_offset + 5) = *(a_offset2 + 1);
-      } else {
-        *(b_offset + 1) = 0;
-        *(b_offset + 5) = 0;
-      }
-
-      if (i + 2 < m) {
-        *(b_offset + 2) = *(a_offset1 + 2);
-        *(b_offset + 6) = *(a_offset2 + 2);
-      } else {
-        *(b_offset + 2) = 0;
-        *(b_offset + 6) = 0;
-      }
-
-      *(b_offset + 3) = 0;
-      *(b_offset + 7) = 0;
-
-      b_offset += 8;
+      *(b_offset + 1) = *(a_offset1 + 1);
+      *(b_offset + 2) = *(a_offset1 + 2);
+      *(b_offset + 3) = *(a_offset2 + 0);
+      *(b_offset + 4) = *(a_offset2 + 1);
+      *(b_offset + 5) = *(a_offset2 + 2);
+      b_offset += 6;
+    } else if (rest == 2) {
+      *(b_offset + 0) = *(a_offset1 + 0);
+      *(b_offset + 1) = *(a_offset1 + 1);
+      *(b_offset + 2) = *(a_offset2 + 0);
+      *(b_offset + 3) = *(a_offset2 + 1);
+      b_offset += 4;
+    } else if (rest == 1) {
+      *(b_offset + 0) = *(a_offset1 + 0);
+      *(b_offset + 1) = *(a_offset2 + 0);
+      b_offset += 2;
     }
   }
-  if (j < n) {
-    BLASLONG i = 0;
-    for (; i < m4; i += 4) {
+  if (n & 1) {
+    for (BLASLONG i = 0; i < m / 4; i++) {
       *(b_offset + 0) = *(a_offset + 0);
       *(b_offset + 1) = *(a_offset + 1);
       *(b_offset + 2) = *(a_offset + 2);
       *(b_offset + 3) = *(a_offset + 3);
-      *(b_offset + 4) = 0;
-      *(b_offset + 5) = 0;
-      *(b_offset + 6) = 0;
-      *(b_offset + 7) = 0;
-      a_offset += 4;
-      b_offset += 4;
-    }
-    if (i < m) {
-      *(b_offset + 4) = 0;
-      *(b_offset + 5) = 0;
-      *(b_offset + 6) = 0;
-      *(b_offset + 7) = 0;
+      b_offset += 4;
+      a_offset += 4;
+    }
+    BLASLONG rest = m & 3;
+    if (rest == 3) {
+      *(b_offset + 0) = *(a_offset + 0);
+      *(b_offset + 1) = *(a_offset + 1);
+      *(b_offset + 2) = *(a_offset + 2);
+    } else if (rest == 2) {
+      *(b_offset + 0) = *(a_offset + 0);
+      *(b_offset + 1) = *(a_offset + 1);
+    } else if (rest == 1) {
       *(b_offset + 0) = *(a_offset + 0);
-      *(b_offset + 1) = (i + 1 < m) ? *(a_offset + 1) : 0;
-      *(b_offset + 2) = (i + 2 < m) ? *(a_offset + 2) : 0;
-      *(b_offset + 3) = 0;
     }
   }
+
   return 0;
 }
diff --git a/kernel/arm64/sbgemm_tcopy_neoversen2.c b/kernel/arm64/sbgemm_tcopy_neoversen2.c
index 7beed83cd..2f3313379 100644
--- a/kernel/arm64/sbgemm_tcopy_neoversen2.c
+++ b/kernel/arm64/sbgemm_tcopy_neoversen2.c
@@ -28,25 +28,21 @@
 #include "common.h"
+
 int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) {
   IFLOAT *a_offset, *a_offset1, *a_offset2, *a_offset3, *a_offset4;
   IFLOAT *b_offset;
   a_offset = a;
   b_offset = b;
 
-  BLASLONG m4 = m & ~3;
-  BLASLONG n2 = n & ~1;
-
-  BLASLONG j = 0;
-  for (; j < n2; j += 2) {
+  for (BLASLONG j = 0; j < n / 2; j++) {
     a_offset1 = a_offset;
     a_offset2 = a_offset1 + lda;
     a_offset3 = a_offset2 + lda;
     a_offset4 = a_offset3 + lda;
     a_offset += 2;
 
-    BLASLONG i = 0;
-    for (; i < m4; i += 4) {
+    for (BLASLONG i = 0; i < m / 4; i++) {
       *(b_offset + 0) = *(a_offset1 + 0);
       *(b_offset + 1) = *(a_offset2 + 0);
       *(b_offset + 2) = *(a_offset3 + 0);
@@ -62,55 +58,50 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) {
       a_offset3 += 4 * lda;
       a_offset4 += 4 * lda;
     }
-    if (i < m) {  // padding 4
-      *(b_offset + 0) = *(a_offset1 + 0);
-      *(b_offset + 4) = *(a_offset1 + 1);
-
-      if (i + 1 < m) {
+
+    if (m & 3) {
+      BLASLONG rest = m & 3;
+      if (rest == 3) {
+        *(b_offset + 0) = *(a_offset1 + 0);
         *(b_offset + 1) = *(a_offset2 + 0);
-        *(b_offset + 5) = *(a_offset2 + 1);
-      } else {
-        *(b_offset + 1) = 0;
-        *(b_offset + 5) = 0;
-      }
-
-      if (i + 2 < m) {
         *(b_offset + 2) = *(a_offset3 + 0);
-        *(b_offset + 6) = *(a_offset3 + 1);
-      } else {
-        *(b_offset + 2) = 0;
-        *(b_offset + 6) = 0;
+        *(b_offset + 3) = *(a_offset1 + 1);
+        *(b_offset + 4) = *(a_offset2 + 1);
+        *(b_offset + 5) = *(a_offset3 + 1);
+        b_offset += 6;
+      } else if (rest == 2) {
+        *(b_offset + 0) = *(a_offset1 + 0);
+        *(b_offset + 1) = *(a_offset2 + 0);
+        *(b_offset + 2) = *(a_offset1 + 1);
+        *(b_offset + 3) = *(a_offset2 + 1);
+        b_offset += 4;
+      } else if (rest == 1) {
+        *(b_offset + 0) = *(a_offset1 + 0);
+        *(b_offset + 1) = *(a_offset1 + 1);
+        b_offset += 2;
       }
-
-      *(b_offset + 3) = 0;
-      *(b_offset + 7) = 0;
-      b_offset += 8;
     }
   }
-  if (j < n) {  // rest 1
-    BLASLONG i = 0;
-    for (; i < m4; i += 4) {
-      *(b_offset + 0) = *(a_offset + 0);
-      *(b_offset + 1) = *(a_offset + 1 * lda);
-      *(b_offset + 2) = *(a_offset + 2 * lda);
-      *(b_offset + 3) = *(a_offset + 3 * lda);
-      *(b_offset + 4) = 0;
-      *(b_offset + 5) = 0;
-      *(b_offset + 6) = 0;
-      *(b_offset + 7) = 0;
+  if (n & 1) {
+    for (BLASLONG i = 0; i < m / 4; i++) {
+      *(b_offset + 0) = *(a_offset);
+      *(b_offset + 1) = *(a_offset + lda);
+      *(b_offset + 2) = *(a_offset + lda * 2);
+      *(b_offset + 3) = *(a_offset + lda * 3);
+      b_offset += 4;
       a_offset += 4 * lda;
     }
-    if (i < m) {
-      *(b_offset + 4) = 0;
-      *(b_offset + 5) = 0;
-      *(b_offset + 6) = 0;
-      *(b_offset + 7) = 0;
-
-      *(b_offset + 0) = *(a_offset + 0);
-      *(b_offset + 1) = (i + 1 < m) ? *(a_offset + 1 * lda) : 0;
-      *(b_offset + 2) = (i + 2 < m) ? *(a_offset + 2 * lda) : 0;
-      *(b_offset + 3) = 0;
+    BLASLONG rest = m & 3;
+    if (rest == 3) {
+      *(b_offset + 0) = *(a_offset);
+      *(b_offset + 1) = *(a_offset + lda);
+      *(b_offset + 2) = *(a_offset + lda * 2);
+    } else if (rest == 2) {
+      *(b_offset + 0) = *(a_offset);
+      *(b_offset + 1) = *(a_offset + lda);
+    } else if (rest == 1) {
+      *(b_offset + 0) = *(a_offset);
     }
   }
diff --git a/param.h b/param.h
index 5fa829986..62941dbb4 100644
--- a/param.h
+++ b/param.h
@@ -3330,8 +3330,10 @@ is a big desktop or server with abundant cache rather than a phone or embedded d
 
 #elif defined(NEOVERSEN2)
 
-#define SBGEMM_DEFAULT_UNROOL_M 8
-#define SBGEMM_DEFAULT_UNROOL_N 4
+#undef SBGEMM_DEFAULT_UNROLL_M
+#undef SBGEMM_DEFAULT_UNROLL_N
+#define SBGEMM_DEFAULT_UNROLL_M 8
+#define SBGEMM_DEFAULT_UNROLL_N 4
 
 #define SGEMM_DEFAULT_UNROLL_M 16
 #define SGEMM_DEFAULT_UNROLL_N 4
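
---

Notes on the techniques in this patch, with small standalone C models. These are illustrative sketches under stated assumptions, not OpenBLAS code.

1) The alpha == 1.0f specialization. The rewritten sbgemm_kernel_8x4_neoversen2.c compiles the implementation file twice, once with ALPHA_ONE defined and once without, producing sbgemm_kernel_neoversen2_alpha_one() and sbgemm_kernel_neoversen2_alpha(); CNAME() then dispatches on alpha == 1.0f, so the common alpha == 1 case never pays for the svmad_z multiply in STORE_C. A minimal sketch of the same pattern, using a macro in place of the re-included file (all names here are hypothetical):

    #include <stdio.h>

    /* Instantiate the same update loop twice, once per alpha policy,
       mirroring the double #include of sbgemm_kernel_8x4_neoversen2_impl.c. */
    #define DEFINE_UPDATE(NAME, ALPHA_EXPR)                               \
      static void NAME(float *c, const float *acc, int n, float alpha) { \
        (void)alpha; /* unused in the alpha == 1 instance */             \
        for (int i = 0; i < n; i++) c[i] += ALPHA_EXPR;                  \
      }

    DEFINE_UPDATE(update_alpha_one, acc[i])     /* alpha == 1: no multiply */
    DEFINE_UPDATE(update_alpha, alpha * acc[i]) /* general alpha           */

    int main(void) {
      float c[2] = {1, 1}, acc[2] = {2, 3};
      update_alpha_one(c, acc, 2, 1.0f); /* c = {3, 4}   */
      update_alpha(c, acc, 2, 0.5f);     /* c = {4, 5.5} */
      printf("%g %g\n", c[0], c[1]);
      return 0;
    }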
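2) The BFMMLA tile shape. Each 128-bit SVE segment holds a 2x4 bf16 tile (two rows of A, or two columns of B, by four k values), and svbfmmla accumulates their product as a 2x2 f32 tile laid out {r0c0, r0c1, r1c0, r1c1} within the segment. That layout is exactly why the kernel addresses C with gather/scatter and off_vc = {0, ldc, 1, ldc + 1}. A scalar model of one segment (float standing in for bf16):

    #include <stdio.h>

    /* Scalar model of one 128-bit BFMMLA segment: a = 2x4 tile of A,
       b = 2x4 tile of B, acc = 2x2 f32 tile in {r0c0, r0c1, r1c0, r1c1}
       order -- matching the off_vc = {0, ldc, 1, ldc+1} scatter pattern. */
    static void bfmmla_model(float acc[4], const float a[8], const float b[8]) {
      for (int r = 0; r < 2; r++)
        for (int c = 0; c < 2; c++)
          for (int p = 0; p < 4; p++)
            acc[2 * r + c] += a[4 * r + p] * b[4 * c + p];
    }

    int main(void) {
      float a[8] = {1, 2, 3, 4, 5, 6, 7, 8}; /* rows 0-1 of A, 4 k values each */
      float b[8] = {1, 0, 0, 0, 0, 1, 0, 0}; /* cols 0-1 of B, 4 k values each */
      float acc[4] = {0};
      bfmmla_model(acc, a, b);
      printf("%g %g %g %g\n", acc[0], acc[1], acc[2], acc[3]); /* 1 2 5 6 */
      return 0;
    }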
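3) Direct accumulation into C. This is the structural change behind commit-message items 1 and 2: the old kernel computed into a calloc'ed, padded scratch matrix RC and then merged C += alpha * RC in a second pass, whereas the new kernel gathers the live C tile, accumulates, and scatters it back, masking edge lanes with the narrower predicates (pg32_low, pg32_even, pg32_first) instead of padding. A scalar model of one 2x2 tile update under that predication scheme, assuming column-major C:

    #include <stdio.h>

    /* One 2x2 tile update; the rows/cols arguments play the role of the
       pg32_low / pg32_even / pg32_first predicates at the m and n edges. */
    static void tile_update(float *c, long ldc, const float acc[4], float alpha,
                            int rows, int cols) {
      long off[4] = {0, ldc, 1, ldc + 1};  /* off_vc: lane -> offset into C */
      for (int lane = 0; lane < 4; lane++) {
        int r = lane / 2, col = lane % 2;  /* lane order: r0c0 r0c1 r1c0 r1c1 */
        if (r < rows && col < cols)        /* inactive lanes are not stored */
          c[off[lane]] += alpha * acc[lane];
      }
    }

    int main(void) {
      float c[4] = {1, 1, 1, 1}; /* 2x2 column-major block, ldc = 2 */
      float acc[4] = {10, 20, 30, 40};
      tile_update(c, 2, acc, 1.0f, 1, 1); /* pg32_first: only C[0] changes */
      printf("%g %g %g %g\n", c[0], c[1], c[2], c[3]); /* 11 1 1 1 */
      return 0;
    }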
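4) The k-remainder without padded buffers. The LOAD_KREST_* macros handle k % 4 in registers: the leftover k values of each row or column are placed into the leading k slots of the 2x4 segment with svdupq_bf16, and the unused slots get an explicit bf16 zero, so they contribute nothing to the accumulation and no zero-padded packing is needed. Scalar model of the idea:

    #include <stdio.h>

    /* Model of LOAD_KREST_2 for one row: with k % 4 == 2, the two leftover
       values go into k slots 0..1 and slots 2..3 are zeroed, so a full
       4-slot multiply-accumulate still gives the exact short dot product. */
    static float dot4_padded(const float *a, const float *b, int krest) {
      float seg_a[4] = {0}, seg_b[4] = {0};
      for (int t = 0; t < krest; t++) { seg_a[t] = a[t]; seg_b[t] = b[t]; }
      float acc = 0;
      for (int t = 0; t < 4; t++) acc += seg_a[t] * seg_b[t]; /* zeros add 0 */
      return acc;
    }

    int main(void) {
      float a[2] = {2, 3}, b[2] = {4, 5};
      printf("%g\n", dot4_padded(a, b, 2)); /* 23, same as the 2-element dot */
      return 0;
    }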
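5) The packing layout. As I read the rewritten copy routines, they emit dense 2-wide panels matching the BFMMLA segment layout: four k values of row/column j followed by four of j + 1, and remainders are now packed densely (6, 4, or 2 values) rather than zero-filled, since the kernel's krest path supplies the zeros itself. A sketch of the layout for one pair of columns (hypothetical helper, float instead of bf16):

    #include <stdio.h>

    /* Pack two adjacent columns of a column-major panel into consecutive
       2x4 segments: 4 k values of column j, then 4 of column j+1, with a
       dense (unpadded) remainder for each column. */
    static void pack_2col(const float *a, int lda, int k, float *b) {
      const float *c0 = a, *c1 = a + lda;
      int p = 0;
      for (; p + 4 <= k; p += 4) {
        for (int t = 0; t < 4; t++) *b++ = c0[p + t];
        for (int t = 0; t < 4; t++) *b++ = c1[p + t];
      }
      for (int t = p; t < k; t++) *b++ = c0[t]; /* dense remainder, col j   */
      for (int t = p; t < k; t++) *b++ = c1[t]; /* dense remainder, col j+1 */
    }

    int main(void) {
      float a[12], b[12]; /* lda = 6, two columns, k = 6 */
      for (int i = 0; i < 12; i++) a[i] = (float)i;
      pack_2col(a, 6, 6, b);
      for (int i = 0; i < 12; i++) printf("%g ", b[i]);
      printf("\n"); /* 0 1 2 3 6 7 8 9 4 5 10 11 */
      return 0;
    }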