From 7a6fa699f2aad57a5b4772fc97b2fc60abc8526e Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Tue, 20 Feb 2024 10:50:43 +0000 Subject: [PATCH 1/7] Small GEMM for AArch64 This is a fairly conservative addition of small matrix kernels using SVE. --- Makefile.system | 2 + kernel/arm64/KERNEL.ARMV8SVE | 20 + kernel/arm64/dgemm_small_kernel_nn_sve.c | 657 ++++++++++ kernel/arm64/dgemm_small_kernel_nt_sve.c | 414 ++++++ kernel/arm64/dgemm_small_kernel_tn_sve.c | 709 +++++++++++ kernel/arm64/dgemm_small_kernel_tt_sve.c | 482 +++++++ kernel/arm64/gemm_small_kernel_permit_sve.c | 47 + kernel/arm64/sgemm_small_kernel_nn_sve.c | 1046 ++++++++++++++++ kernel/arm64/sgemm_small_kernel_nt_sve.c | 647 ++++++++++ kernel/arm64/sgemm_small_kernel_tn_sve.c | 1247 +++++++++++++++++++ kernel/arm64/sgemm_small_kernel_tt_sve.c | 574 +++++++++ 11 files changed, 5845 insertions(+) create mode 100644 kernel/arm64/dgemm_small_kernel_nn_sve.c create mode 100644 kernel/arm64/dgemm_small_kernel_nt_sve.c create mode 100644 kernel/arm64/dgemm_small_kernel_tn_sve.c create mode 100644 kernel/arm64/dgemm_small_kernel_tt_sve.c create mode 100644 kernel/arm64/gemm_small_kernel_permit_sve.c create mode 100644 kernel/arm64/sgemm_small_kernel_nn_sve.c create mode 100644 kernel/arm64/sgemm_small_kernel_nt_sve.c create mode 100644 kernel/arm64/sgemm_small_kernel_tn_sve.c create mode 100644 kernel/arm64/sgemm_small_kernel_tt_sve.c diff --git a/Makefile.system b/Makefile.system index aadf3459a..e95d1529a 100644 --- a/Makefile.system +++ b/Makefile.system @@ -268,6 +268,8 @@ SMALL_MATRIX_OPT = 1 else ifeq ($(ARCH), power) SMALL_MATRIX_OPT = 1 BUILD_BFLOAT16 = 1 +else ifeq ($(ARCH), arm64) +SMALL_MATRIX_OPT = 1 endif ifeq ($(SMALL_MATRIX_OPT), 1) CCOMMON_OPT += -DSMALL_MATRIX_OPT diff --git a/kernel/arm64/KERNEL.ARMV8SVE b/kernel/arm64/KERNEL.ARMV8SVE index eeb4844bf..bfadf5cba 100644 --- a/kernel/arm64/KERNEL.ARMV8SVE +++ b/kernel/arm64/KERNEL.ARMV8SVE @@ -131,6 +131,16 @@ SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) +SGEMM_SMALL_M_PERMIT = gemm_small_kernel_permit_sve.c +SGEMM_SMALL_K_NT = sgemm_small_kernel_nt_sve.c +SGEMM_SMALL_K_B0_NT = sgemm_small_kernel_nt_sve.c +SGEMM_SMALL_K_NN = sgemm_small_kernel_nn_sve.c +SGEMM_SMALL_K_B0_NN = sgemm_small_kernel_nn_sve.c +SGEMM_SMALL_K_TT = sgemm_small_kernel_tt_sve.c +SGEMM_SMALL_K_B0_TT = sgemm_small_kernel_tt_sve.c +SGEMM_SMALL_K_TN = sgemm_small_kernel_tn_sve.c +SGEMM_SMALL_K_B0_TN = sgemm_small_kernel_tn_sve.c + STRMMUNCOPY_M = trmm_uncopy_sve_v1.c STRMMLNCOPY_M = trmm_lncopy_sve_v1.c STRMMUTCOPY_M = trmm_utcopy_sve_v1.c @@ -152,6 +162,16 @@ DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) +DGEMM_SMALL_M_PERMIT = gemm_small_kernel_permit_sve.c +DGEMM_SMALL_K_NT = dgemm_small_kernel_nt_sve.c +DGEMM_SMALL_K_B0_NT = dgemm_small_kernel_nt_sve.c +DGEMM_SMALL_K_NN = dgemm_small_kernel_nn_sve.c +DGEMM_SMALL_K_B0_NN = dgemm_small_kernel_nn_sve.c +DGEMM_SMALL_K_TT = dgemm_small_kernel_tt_sve.c +DGEMM_SMALL_K_B0_TT = dgemm_small_kernel_tt_sve.c +DGEMM_SMALL_K_TN = dgemm_small_kernel_tn_sve.c +DGEMM_SMALL_K_B0_TN = dgemm_small_kernel_tn_sve.c + DTRMMUNCOPY_M = trmm_uncopy_sve_v1.c DTRMMLNCOPY_M = trmm_lncopy_sve_v1.c DTRMMUTCOPY_M = trmm_utcopy_sve_v1.c diff --git a/kernel/arm64/dgemm_small_kernel_nn_sve.c b/kernel/arm64/dgemm_small_kernel_nn_sve.c new file mode 100644 index 
000000000..8baef8277 --- /dev/null +++ b/kernel/arm64/dgemm_small_kernel_nn_sve.c @@ -0,0 +1,657 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" + +#include <arm_neon.h> +#include <arm_sve.h> +#ifdef __ARM_NEON_SVE_BRIDGE +#include <arm_neon_sve_bridge.h> +#else +#define svdup_neonq_f32(fixed_reg) \ + ({ \ + svfloat32_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#define svdup_neonq_f64(fixed_reg) \ + ({ \ + svfloat64_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#endif + +#define A_ELEMENT_K(m, offset_k) A[(i + (m)) + (k + offset_k) * lda] +#define A_ELEMENT(m) A_ELEMENT_K(m, 0) + +#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) + (j + (n)) * ldb] +#define B_ELEMENT(n) B_ELEMENT_K(n, 0) + +#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] + +#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 4 + n] +#define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0) + +// ASIMD +#define DECLARE_RESULT_VECTOR2(m, n) \ + float64x2_t result##m##n = vdupq_n_f64(0.0); +#define DECLARE_RESULT(m, n) float64_t result##m##n = 0.0; +#define BROADCAST_LOAD_A2(m, offset_k) \ + float64x2_t a##m##_k##offset_k = vld1q_dup_f64(&A_ELEMENT_K(m, offset_k)); +#define LOAD_A1(m, offset_k) \ + float64_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k); +#define VECTOR_LOAD_B_K2(n, offset_k) \ + float64x2_t b##k##n##_k##offset_k = vld1q_f64(&B_ELEMENT_K(n, offset_k)); +#define TRANSPOSE_B2_K2(n0, n1, offset_k0, offset_k1) \ + float64x2_t b##n0##_k##offset_k0 = \ + vzip1q_f64(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \ + float64x2_t b##n0##_k##offset_k1 = \ + vzip2q_f64(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); + +#define SCALE_B2_K2(n0, offset_k0, offset_k1) \ + svfloat64_t b##s##n0##_k##offset_k0 = svdup_neonq_f64(b##n0##_k##offset_k0); \ + svfloat64_t b##s##n0##_k##offset_k1 = 
svdup_neonq_f64(b##n0##_k##offset_k1); +#define GATHER_LOAD_B2(n, offset_k) \ + float64x2_t b##n##_k##offset_k = vdupq_n_f64(B_ELEMENT_K(n, offset_k)); \ + b##n##_k##offset_k = \ + vsetq_lane_f64(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1); +#define VECTOR_UNPACK_B2(n, offset_k) \ + float64x2_t b##n##_k##offset_k = vld1q_f64(&PACK_ELEMENT_K(n, offset_k)); +#define VECTOR_PACK_B2(n, offset_k) \ + vst1q_f64(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k); +#define PACK_B0(n, offset_k) \ + PACK_ELEMENT_K(n, offset_k) = vget_lane_f64(b##n##_k##offset_k, 0); +#define UPDATE_RESULT_VECTOR2(m, n, offset_k) \ + result##m##n = \ + vfmaq_f64(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k); +#define UPDATE_RESULT(m, n, offset_k) \ + result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k; +#ifdef B0 +#define SCATTER_STORE2(m, n) \ + result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \ + C_ELEMENT(m, n + 0) = vgetq_lane_f64(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = vgetq_lane_f64(result##m##n, 1); +#else +#define SCATTER_STORE2(m, n) \ + result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \ + C_ELEMENT(m, n + 0) = \ + C_ELEMENT(m, n + 0) * beta + vgetq_lane_f64(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = \ + C_ELEMENT(m, n + 1) * beta + vgetq_lane_f64(result##m##n, 1); +#endif + +// SVE +#define DECLARE_RESULT_VECTOR(m, n) svfloat64_t result##m##n = svdup_f64(0.0); +#define BROADCAST_LOAD_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = svdup_f64(A_ELEMENT_K(m, offset_k)); +#define BROADCAST_LOAD_B(n, offset_k) \ + svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k)); +#define VECTOR_LOAD_A(pg, m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); +#define QUADWORD_LOAD_B(n, offset_k) \ + svfloat64_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); +#define PACK_B(n, offset_k) \ + svst1(pg_first, &PACK_ELEMENT_K(n, offset_k), b##s##n##_k##offset_k); +#define VECTOR_PACK_B(n, offset_k) \ + svst1(pg_true, &PACK_ELEMENT_K(n* v_size, offset_k), b##s##n##_k##offset_k); +#define QUADWORD_PACK_B(n, offset_k) \ + svst1(pg_quad, &PACK_ELEMENT_K(n, offset_k), b##s##n##_k##offset_k); +#define UNPACK_VECTOR_B(n, offset_k) \ + svfloat64_t b##s##n##_k##offset_k = \ + svld1(pg_true, &PACK_ELEMENT_K(n * v_size, offset_k)); +#define UNPACK_BROADCAST_B(n, offset_k) \ + svfloat64_t b##s##n##_k##offset_k = svdup_f64(PACK_ELEMENT_K(n, offset_k)); +#define UNPACK_QUADWORD_B(n, offset_k) \ + svfloat64_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &PACK_ELEMENT_K(n, offset_k)); +#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \ + result##m##n = \ + svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k); +#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \ + result##m##n = svmla_lane( \ + result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); +#ifdef B0 +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); +#else +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = \ + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); 
+#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = svmla_m( \ + pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc)), \ + beta_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); +#endif + +#ifndef LIKELY +#ifdef __GNUC__ +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif +#ifndef UNLIKELY +#ifdef __GNUC__ +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + +#ifdef B0 +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT* C, + BLASLONG ldc) +#else +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT beta, + FLOAT* C, + BLASLONG ldc) +#endif +{ + const uint64_t v_size = svcntd(); + const uint64_t v_size2 = v_size * 2; + const svbool_t pg_true = svptrue_b64(); + const svbool_t pg_quad = svwhilelt_b64(0, 2); + const svbool_t pg_first = svwhilelt_b64(0, 1); + const svfloat64_t alpha_vec = svdup_f64(alpha); +#ifndef B0 + const svfloat64_t beta_vec = svdup_f64(beta); +#endif + const BLASLONG n4 = N & -4; + const BLASLONG n2 = N & -2; + const BLASLONG v_m2 = M & -v_size2; + const BLASLONG v_m1 = M & -v_size; + const BLASLONG k2 = K & -2; + + const int pack_b = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + FLOAT* packed_b = + (pack_b) ? packed_b = (FLOAT*)malloc(K * 4 * sizeof(FLOAT)) : NULL; + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + if (LIKELY(packed_b != NULL)) { + if (i == 0) { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + VECTOR_PACK_B2(0, 0); + VECTOR_PACK_B2(0, 1); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + VECTOR_PACK_B2(2, 0); + VECTOR_PACK_B2(2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + PACK_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + PACK_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 
1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + PACK_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + PACK_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } else { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UNPACK_QUADWORD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + } + } + } else { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + if (LIKELY(packed_b != NULL)) { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UNPACK_QUADWORD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + } + } else { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + 
UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + if (LIKELY(packed_b != NULL)) { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UNPACK_QUADWORD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + } + } else { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_tail, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); + } + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + } + } + for (; j < n2; j += 2) { + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + 
UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_tail, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + } + } + for (; j < N; j++) { + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + } + } + + if (pack_b) + free(packed_b); + + return 0; +} diff --git a/kernel/arm64/dgemm_small_kernel_nt_sve.c b/kernel/arm64/dgemm_small_kernel_nt_sve.c new file mode 100644 index 000000000..982388287 --- /dev/null +++ b/kernel/arm64/dgemm_small_kernel_nt_sve.c @@ -0,0 +1,414 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. 
+Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" + +#include <arm_neon.h> +#include <arm_sve.h> +#ifdef __ARM_NEON_SVE_BRIDGE +#include <arm_neon_sve_bridge.h> +#else +#define svdup_neonq_f32(fixed_reg) \ + ({ \ + svfloat32_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#define svdup_neonq_f64(fixed_reg) \ + ({ \ + svfloat64_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#endif + +#define A_ELEMENT_K(m, offset_k) A[(i + (m)) + (k + offset_k) * lda] +#define A_ELEMENT(m) A_ELEMENT_K(m, 0) + +#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) * ldb + (j + (n))] +#define B_ELEMENT(n) B_ELEMENT_K(n, 0) + +#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] + +#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 4 + n] +#define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0) + +// ASIMD +#define DECLARE_RESULT_VECTOR2(m, n) \ + float64x2_t result##m##n = vdupq_n_f64(0.0); +#define DECLARE_RESULT(m, n) float64_t result##m##n = 0.0; +#define BROADCAST_LOAD_A2(m, offset_k) \ + float64x2_t a##m##_k##offset_k = vld1q_dup_f64(&A_ELEMENT_K(m, offset_k)); +#define LOAD_A1(m, offset_k) \ + float64_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k); +#define VECTOR_LOAD_B2(n, offset_k) \ + float64x2_t b##n##_k##offset_k = vld1q_f64(&B_ELEMENT_K(n, offset_k)); +#define GATHER_LOAD_B2(n, offset_k) \ + float64x2_t b##n##_k##offset_k = vdupq_n_f64(B_ELEMENT_K(n, offset_k)); \ + b##n##_k##offset_k = \ + vsetq_lane_f64(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1); +#define UPDATE_RESULT_VECTOR2(m, n, offset_k) \ + result##m##n = \ + vfmaq_f64(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k); +#define UPDATE_RESULT(m, n, offset_k) \ + result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k; +#ifdef B0 +#define SCATTER_STORE2(m, n) \ + result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \ + C_ELEMENT(m, n + 0) = vgetq_lane_f64(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = vgetq_lane_f64(result##m##n, 1); +#else +#define SCATTER_STORE2(m, n) 
\ + result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \ + C_ELEMENT(m, n + 0) = \ + C_ELEMENT(m, n + 0) * beta + vgetq_lane_f64(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = \ + C_ELEMENT(m, n + 1) * beta + vgetq_lane_f64(result##m##n, 1); +#endif + +// SVE +#define DECLARE_RESULT_VECTOR(m, n) svfloat64_t result##m##n = svdup_f64(0.0); +#define BROADCAST_LOAD_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = svdup_f64(A_ELEMENT_K(m, offset_k)); +#define BROADCAST_LOAD_B(n, offset_k) \ + svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k)); +#define VECTOR_LOAD_A(pg, m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); +#define QUADWORD_LOAD_B(n, offset_k) \ + svfloat64_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); +#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \ + result##m##n = \ + svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k); +#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \ + result##m##n = svmla_lane( \ + result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); +#ifdef B0 +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); +#else +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = \ + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = svmla_m( \ + pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc)), \ + beta_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); +#endif + +#ifndef LIKELY +#ifdef __GNUC__ +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif +#ifndef UNLIKELY +#ifdef __GNUC__ +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + +#ifdef B0 +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT* C, + BLASLONG ldc) +#else +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT beta, + FLOAT* C, + BLASLONG ldc) +#endif +{ + const uint64_t v_size = svcntd(); + const uint64_t v_size32 = v_size * 32; + const uint64_t v_size3 = v_size * 3; + const svbool_t pg_true = svptrue_b64(); + const svbool_t pg_quad = svwhilelt_b64(0, 2); + const svfloat64_t alpha_vec = svdup_f64(alpha); +#ifndef B0 + const svfloat64_t beta_vec = svdup_f64(beta); +#endif + const BLASLONG n4 = N & -4; + const BLASLONG n2 = N & -2; + const BLASLONG v_m3 = M - (M % v_size3); + const BLASLONG v_m1 = M & -v_size; + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + BLASLONG i = 0; + for (; i < v_m3; i += v_size3) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + 
DECLARE_RESULT_VECTOR(2, 0); + DECLARE_RESULT_VECTOR(2, 1); + DECLARE_RESULT_VECTOR(2, 2); + DECLARE_RESULT_VECTOR(2, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + QUADWORD_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + VECTOR_LOAD_A(pg_true, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 2, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + VECTOR_STORE(pg_true, 2, 0); + VECTOR_STORE(pg_true, 2, 1); + VECTOR_STORE(pg_true, 2, 2); + VECTOR_STORE(pg_true, 2, 3); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + QUADWORD_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + QUADWORD_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + } + } + for (; j < n2; j += 2) { + + BLASLONG i = 0; + for (; i < v_m3; i += v_size3) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(2, 0); + DECLARE_RESULT_VECTOR(2, 1); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 2, 0); + VECTOR_STORE(pg_true, 2, 1); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + 
DECLARE_RESULT_VECTOR(0, 1); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + } + } + for (; j < N; j++) { + + BLASLONG i = 0; + for (; i < v_m3; i += v_size3) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(2, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + VECTOR_LOAD_A(pg_true, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 2, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 2, 0); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + } + } + + return 0; +} diff --git a/kernel/arm64/dgemm_small_kernel_tn_sve.c b/kernel/arm64/dgemm_small_kernel_tn_sve.c new file mode 100644 index 000000000..7158851da --- /dev/null +++ b/kernel/arm64/dgemm_small_kernel_tn_sve.c @@ -0,0 +1,709 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" + +#include <arm_neon.h> +#include <arm_sve.h> +#ifdef __ARM_NEON_SVE_BRIDGE +#include <arm_neon_sve_bridge.h> +#else +#define svdup_neonq_f32(fixed_reg) \ + ({ \ + svfloat32_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#define svdup_neonq_f64(fixed_reg) \ + ({ \ + svfloat64_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#endif + +#define A_ELEMENT_K(m, offset_k) A[(i + (m)) * lda + (k + offset_k)] +#define A_ELEMENT(m) A_ELEMENT_K(m, 0) + +#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) + (j + (n)) * ldb] +#define B_ELEMENT(n) B_ELEMENT_K(n, 0) + +#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] + +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * 2 * v_size + m] +#define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) + +// ASIMD +#define DECLARE_RESULT_VECTOR2(m, n) \ + float64x2_t result##m##n = vdupq_n_f64(0.0); +#define DECLARE_RESULT(m, n) float64_t result##m##n = 0.0; +#define BROADCAST_LOAD_A2(m, offset_k) \ + float64x2_t a##m##_k##offset_k = vld1q_dup_f64(&A_ELEMENT_K(m, offset_k)); +#define LOAD_A1(m, offset_k) \ + float64_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k); +#define VECTOR_LOAD_B_K2(n, offset_k) \ + float64x2_t b##k##n##_k##offset_k = vld1q_f64(&B_ELEMENT_K(n, offset_k)); +#define TRANSPOSE_B2_K2(n0, n1, offset_k0, offset_k1) \ + float64x2_t b##n0##_k##offset_k0 = \ + vzip1q_f64(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \ + float64x2_t b##n0##_k##offset_k1 = \ + vzip2q_f64(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); + +#define SCALE_B2_K2(n0, offset_k0, offset_k1) \ + svfloat64_t b##s##n0##_k##offset_k0 = svdup_neonq_f64(b##n0##_k##offset_k0); \ + svfloat64_t b##s##n0##_k##offset_k1 = svdup_neonq_f64(b##n0##_k##offset_k1); +#define GATHER_LOAD_B2(n, offset_k) \ + float64x2_t b##n##_k##offset_k = vdupq_n_f64(B_ELEMENT_K(n, offset_k)); \ + b##n##_k##offset_k = \ + vsetq_lane_f64(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1); +#define VECTOR_UNPACK_B2(n, offset_k) \ + float64x2_t b##n##_k##offset_k = vld1q_f64(&PACK_ELEMENT_K(n, offset_k)); +#define VECTOR_PACK_B2(n, offset_k) \ + vst1q_f64(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k); +#define PACK_B0(n, offset_k) \ + PACK_ELEMENT_K(n, offset_k) = vget_lane_f64(b##n##_k##offset_k, 0); +#define UPDATE_RESULT_VECTOR2(m, n, offset_k) \ + result##m##n = \ + vfmaq_f64(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k); +#define UPDATE_RESULT(m, n, offset_k) \ + result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k; +#ifdef B0 +#define SCATTER_STORE2(m, n) \ + result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \ + C_ELEMENT(m, n + 0) = vgetq_lane_f64(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = vgetq_lane_f64(result##m##n, 1); +#else +#define SCATTER_STORE2(m, n) \ + result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \ + C_ELEMENT(m, n + 0) = \ + C_ELEMENT(m, n + 0) 
* beta + vgetq_lane_f64(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = \ + C_ELEMENT(m, n + 1) * beta + vgetq_lane_f64(result##m##n, 1); +#endif + +// SVE +#define DECLARE_RESULT_VECTOR(m, n) svfloat64_t result##m##n = svdup_f64(0.0); +#define BROADCAST_LOAD_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = svdup_f64(A_ELEMENT_K(m, offset_k)); +#define BROADCAST_LOAD_B(n, offset_k) \ + svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k)); +#define VECTOR_LOAD_A(pg, m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); +#define QUADWORD_LOAD_B(n, offset_k) \ + svfloat64_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); +#define GATHER_LOAD_A(pg, m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1_gather_index(pg, &A_ELEMENT_K(v_size * m, offset_k), lda_vec); +#define PACK_A(m, offset_k) \ + svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); +#define VECTOR_PACK_A(m, offset_k) \ + svst1(pg_true, &PACK_ELEMENT_K(m* v_size, offset_k), a##s##m##_k##offset_k); +#define QUADWORD_PACK_A(m, offset_k) \ + svst1(pg_quad, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); +#define UNPACK_VECTOR_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1(pg_true, &PACK_ELEMENT_K(m * v_size, offset_k)); +#define UNPACK_BROADCAST_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = svdup_f64(PACK_ELEMENT_K(m, offset_k)); +#define UNPACK_QUADWORD_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1rq(pg_true, &PACK_ELEMENT_K(m, offset_k)); +#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \ + result##m##n = \ + svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k); +#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \ + result##m##n = svmla_lane( \ + result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); +#ifdef B0 +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); +#else +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = \ + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = svmla_m( \ + pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc)), \ + beta_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); +#endif + +#ifndef LIKELY +#ifdef __GNUC__ +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif +#ifndef UNLIKELY +#ifdef __GNUC__ +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + +#ifdef B0 +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT* C, + BLASLONG ldc) +#else +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT beta, + FLOAT* C, + BLASLONG ldc) +#endif +{ + const uint64_t v_size = svcntd(); + const uint64_t v_size2 = v_size * 2; + const svbool_t 
pg_true = svptrue_b64(); + const svbool_t pg_quad = svwhilelt_b64(0, 2); + const svbool_t pg_first = svwhilelt_b64(0, 1); + const svfloat64_t alpha_vec = svdup_f64(alpha); +#ifndef B0 + const svfloat64_t beta_vec = svdup_f64(beta); +#endif + const svuint64_t lda_vec = svindex_u64(0LL, lda); + + const BLASLONG v_m2 = M & -v_size2; + const BLASLONG v_m1 = M & -v_size; + const BLASLONG n4 = N & -4; + const BLASLONG n2 = N & -2; + const BLASLONG k2 = K & -2; + + const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + FLOAT* packed_a = + (pack_a) ? packed_a = (FLOAT*)malloc(K * 2 * v_size * sizeof(FLOAT)) : NULL; + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + if (LIKELY(packed_a != NULL)) { + if (j == 0) { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + GATHER_LOAD_A(pg_true, 0, 0); + VECTOR_PACK_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + GATHER_LOAD_A(pg_true, 0, 1); + VECTOR_PACK_A(0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + GATHER_LOAD_A(pg_true, 1, 0); + VECTOR_PACK_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + GATHER_LOAD_A(pg_true, 1, 1); + VECTOR_PACK_A(1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + VECTOR_PACK_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + VECTOR_PACK_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } else { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UNPACK_VECTOR_A(0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + 
UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + UNPACK_VECTOR_A(1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } + } else { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + GATHER_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + GATHER_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + } + for (; j < n2; j += 2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + + if (LIKELY(packed_a != NULL)) { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UNPACK_VECTOR_A(0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 
0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UNPACK_VECTOR_A(1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + } + } else { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + GATHER_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } else { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + } + } + for (; i < v_m1; i += v_size) { + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + GATHER_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + 
VECTOR_STORE(pg_true, 0, 3); + } + for (; j < n2; j += 2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + GATHER_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + } + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + GATHER_LOAD_A(pg_tail, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + } + for (; j < n2; j += 2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + GATHER_LOAD_A(pg_tail, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + } + } + + if (pack_a) + free(packed_a); + + 
return 0; +} diff --git a/kernel/arm64/dgemm_small_kernel_tt_sve.c b/kernel/arm64/dgemm_small_kernel_tt_sve.c new file mode 100644 index 000000000..12fc0b59e --- /dev/null +++ b/kernel/arm64/dgemm_small_kernel_tt_sve.c @@ -0,0 +1,482 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#include "common.h" + +#include <arm_neon.h> +#include <arm_sve.h> +#ifdef __ARM_NEON_SVE_BRIDGE +#include <arm_neon_sve_bridge.h> +#else +#define svdup_neonq_f32(fixed_reg) \ + ({ \ + svfloat32_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#define svdup_neonq_f64(fixed_reg) \ + ({ \ + svfloat64_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#endif + +#define A_ELEMENT_K(m, offset_k) A[(i + (m)) * lda + (k + offset_k)] +#define A_ELEMENT(m) A_ELEMENT_K(m, 0) + +#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) * ldb + (j + (n))] +#define B_ELEMENT(n) B_ELEMENT_K(n, 0) + +#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] + +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * 2 * v_size + m] +#define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) + +// ASIMD +#define DECLARE_RESULT_VECTOR2(m, n) \ + float64x2_t result##m##n = vdupq_n_f64(0.0); +#define DECLARE_RESULT(m, n) float64_t result##m##n = 0.0; +#define BROADCAST_LOAD_A2(m, offset_k) \ + float64x2_t a##m##_k##offset_k = vld1q_dup_f64(&A_ELEMENT_K(m, offset_k)); +#define LOAD_A1(m, offset_k) \ + float64_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k); +#define VECTOR_LOAD_B2(n, offset_k) \ + float64x2_t b##n##_k##offset_k = vld1q_f64(&B_ELEMENT_K(n, offset_k)); +#define GATHER_LOAD_B2(n, offset_k) \ + float64x2_t b##n##_k##offset_k = vdupq_n_f64(B_ELEMENT_K(n, offset_k)); \ + b##n##_k##offset_k = \ + vsetq_lane_f64(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1); +#define VECTOR_UNPACK_B2(n, offset_k) \ + float64x2_t b##n##_k##offset_k = vld1q_f64(&PACK_ELEMENT_K(n, offset_k)); +#define VECTOR_PACK_B2(n, offset_k) \ + vst1q_f64(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k); +#define PACK_B0(n, offset_k) \ + PACK_ELEMENT_K(n, offset_k) = vget_lane_f64(b##n##_k##offset_k, 0); +#define UPDATE_RESULT_VECTOR2(m, n, offset_k) \ + result##m##n = \ + vfmaq_f64(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k); +#define UPDATE_RESULT(m, n, offset_k) \ + result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k; +#ifdef B0 +#define VECTOR_STORE2(m, n) \ + vst1q_f64(&C_ELEMENT(m, n), vmulq_f64(result##m##n, vdupq_n_f64(alpha))); +#define STORE(m, n) C_ELEMENT(m, n) = alpha * result##m##n; +#else +#define VECTOR_STORE2(m, n) \ + result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \ + result##m##n = \ + vfmaq_f64(result##m##n, vld1q_f64(&C_ELEMENT(m, n)), vdupq_n_f64(beta)); \ + vst1q_f64(&C_ELEMENT(m, n), result##m##n); +#define STORE(m, n) \ + C_ELEMENT(m, n) = C_ELEMENT(m, n) * beta + alpha * result##m##n; +#endif + +// SVE +#define DECLARE_RESULT_VECTOR(m, n) svfloat64_t result##m##n = svdup_f64(0.0); +#define BROADCAST_LOAD_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = svdup_f64(A_ELEMENT_K(m, offset_k)); +#define BROADCAST_LOAD_B(n, offset_k) \ + svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k)); +#define VECTOR_LOAD_A(pg, m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); +#define QUADWORD_LOAD_B(n, offset_k) \ + svfloat64_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); +#define GATHER_LOAD_A(pg, m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1_gather_index(pg, &A_ELEMENT_K(v_size * m, offset_k), lda_vec); +#define PACK_A(m, offset_k) \ + svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); +#define 
VECTOR_PACK_A(m, offset_k) \ + svst1(pg_true, &PACK_ELEMENT_K(m* v_size, offset_k), a##s##m##_k##offset_k); +#define QUADWORD_PACK_A(m, offset_k) \ + svst1(pg_quad, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); +#define UNPACK_VECTOR_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1(pg_true, &PACK_ELEMENT_K(m * v_size, offset_k)); +#define UNPACK_BROADCAST_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = svdup_f64(PACK_ELEMENT_K(m, offset_k)); +#define UNPACK_QUADWORD_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1rq(pg_true, &PACK_ELEMENT_K(m, offset_k)); +#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \ + result##m##n = \ + svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k); +#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \ + result##m##n = svmla_lane( \ + result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); +#ifdef B0 +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); +#else +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = \ + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = svmla_m( \ + pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc)), \ + beta_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); +#endif + +#ifndef LIKELY +#ifdef __GNUC__ +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif +#ifndef UNLIKELY +#ifdef __GNUC__ +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + +#ifdef B0 +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT* C, + BLASLONG ldc) +#else +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT beta, + FLOAT* C, + BLASLONG ldc) +#endif +{ + const uint64_t v_size = svcntd(); + const uint64_t v_size2 = v_size * 2; + const svbool_t pg_true = svptrue_b64(); + const svbool_t pg_quad = svwhilelt_b64(0, 2); + const svbool_t pg_first = svwhilelt_b64(0, 1); + const svfloat64_t alpha_vec = svdup_f64(alpha); +#ifndef B0 + const svfloat64_t beta_vec = svdup_f64(beta); +#endif + const svuint64_t lda_vec = svindex_u64(0LL, lda); + + const BLASLONG v_m2 = M & -v_size2; + const BLASLONG v_m1 = M & -v_size; + const BLASLONG n4 = N & -4; + const BLASLONG n2 = N & -2; + + const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + FLOAT* packed_a = + (pack_a) ? 
(FLOAT*)malloc(K * 2 * v_size * sizeof(FLOAT)) : NULL; + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + if (LIKELY(packed_a != NULL)) { + if (j == 0) { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + VECTOR_PACK_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + QUADWORD_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + VECTOR_PACK_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + QUADWORD_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + } + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + QUADWORD_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + } + for (; j < n2; j += 2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 
0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } else { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + } + } + for (; i < v_m1; i += v_size) { + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + QUADWORD_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + } + for (; j < n2; j += 2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + } + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + QUADWORD_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + } + for (; j < n2; j += 2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + } + } + + if (pack_a) + free(packed_a); + + return 0; +} diff --git a/kernel/arm64/gemm_small_kernel_permit_sve.c b/kernel/arm64/gemm_small_kernel_permit_sve.c new file mode 100644 index 000000000..9526dbbe2 --- /dev/null +++ b/kernel/arm64/gemm_small_kernel_permit_sve.c @@ -0,0 +1,47 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. 
+Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" + +int CNAME(int transa, int transb, BLASLONG M, BLASLONG N, BLASLONG K, FLOAT alpha, FLOAT beta) +{ + BLASLONG MNK = M * N * K; + +#if defined(DOUBLE) // dgemm + // TN prefers full copies much earlier + if (transa && !transb && MNK > 16*16*16) { + return 0; + } +#else // sgemm + // TODO! +#endif + + if (MNK <= 64*64*64) + return 1; + + return 0; +} diff --git a/kernel/arm64/sgemm_small_kernel_nn_sve.c b/kernel/arm64/sgemm_small_kernel_nn_sve.c new file mode 100644 index 000000000..85c7cfa86 --- /dev/null +++ b/kernel/arm64/sgemm_small_kernel_nn_sve.c @@ -0,0 +1,1046 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" + +#include <arm_neon.h> +#include <arm_sve.h> +#ifdef __ARM_NEON_SVE_BRIDGE +#include <arm_neon_sve_bridge.h> +#else +#define svdup_neonq_f32(fixed_reg) \ + ({ \ + svfloat32_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#define svdup_neonq_f64(fixed_reg) \ + ({ \ + svfloat64_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#endif + +#define A_ELEMENT_K(m, offset_k) A[(i + (m)) + (k + offset_k) * lda] +#define A_ELEMENT(m) A_ELEMENT_K(m, 0) + +#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) + (j + (n)) * ldb] +#define B_ELEMENT(n) B_ELEMENT_K(n, 0) + +#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] + +#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 8 + n] +#define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0) + +// ASIMD +#define DECLARE_RESULT_VECTOR4(m, n) \ + float32x4_t result##m##n = vdupq_n_f32(0.0); +#define DECLARE_RESULT(m, n) float32_t result##m##n = 0.0; +#define BROADCAST_LOAD_A4(m, offset_k) \ + float32x4_t a##m##_k##offset_k = vld1q_dup_f32(&A_ELEMENT_K(m, offset_k)); +#define LOAD_A1(m, offset_k) \ + float32_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k); +#define VECTOR_LOAD_B_K4(n, offset_k) \ + float32x4_t b##k##n##_k##offset_k = vld1q_f32(&B_ELEMENT_K(n, offset_k)); +#define TRANSPOSE_B4_K4( \ + n0, n1, n2, n3, offset_k0, offset_k1, offset_k2, offset_k3) \ + float32x4_t b##t##n0##_k##offset_k0 = \ + vzip1q_f32(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \ + float32x4_t b##t##n0##_k##offset_k1 = \ + vzip2q_f32(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \ + float32x4_t b##t##n0##_k##offset_k2 = \ + vzip1q_f32(b##k##n2##_k##offset_k0, b##k##n3##_k##offset_k0); \ + float32x4_t b##t##n0##_k##offset_k3 = \ + vzip2q_f32(b##k##n2##_k##offset_k0, b##k##n3##_k##offset_k0); \ + float32x4_t b##n0##_k##offset_k0 = vreinterpretq_f32_f64( \ + vzip1q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k0), \ + vreinterpretq_f64_f32(b##t##n0##_k##offset_k2))); \ + float32x4_t b##n0##_k##offset_k1 = vreinterpretq_f32_f64( \ + vzip2q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k0), \ + vreinterpretq_f64_f32(b##t##n0##_k##offset_k2))); \ + float32x4_t b##n0##_k##offset_k2 = vreinterpretq_f32_f64( \ + vzip1q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k1), \ + vreinterpretq_f64_f32(b##t##n0##_k##offset_k3))); \ + float32x4_t b##n0##_k##offset_k3 = vreinterpretq_f32_f64( \ + vzip2q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k1), \ + vreinterpretq_f64_f32(b##t##n0##_k##offset_k3))); + +#define SCALE_B4_K4(n0, offset_k0, offset_k1, offset_k2, offset_k3) \ + svfloat32_t b##s##n0##_k##offset_k0 = svdup_neonq_f32(b##n0##_k##offset_k0); \ + svfloat32_t b##s##n0##_k##offset_k1 = svdup_neonq_f32(b##n0##_k##offset_k1); \ + svfloat32_t b##s##n0##_k##offset_k2 = svdup_neonq_f32(b##n0##_k##offset_k2); \ + svfloat32_t b##s##n0##_k##offset_k3 = svdup_neonq_f32(b##n0##_k##offset_k3); 
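+// NOTE: TRANSPOSE_B4_K4 transposes a 4x4 block of B in NEON registers with zip1/zip2, and SCALE_B4_K4 then broadcasts each transposed 128-bit row across an SVE register via svdup_neonq_f32, so the loops below can multiply by individual B lanes with svmla_lane.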
+#define GATHER_LOAD_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vdupq_n_f32(B_ELEMENT_K(n, offset_k)); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 2, offset_k), b##n##_k##offset_k, 2); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 3, offset_k), b##n##_k##offset_k, 3); +#define VECTOR_UNPACK_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vld1q_f32(&PACK_ELEMENT_K(n, offset_k)); +#define VECTOR_PACK_B4(n, offset_k) \ + vst1q_f32(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k); +#define PACK_B0(n, offset_k) \ + PACK_ELEMENT_K(n, offset_k) = vget_lane_f32(b##n##_k##offset_k, 0); +#define UPDATE_RESULT_VECTOR4(m, n, offset_k) \ + result##m##n = \ + vfmaq_f32(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k); +#define UPDATE_RESULT(m, n, offset_k) \ + result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k; +#ifdef B0 +#define SCATTER_STORE4(m, n) \ + result##m##n = vmulq_f32(result##m##n, vdupq_n_f32(alpha)); \ + C_ELEMENT(m, n + 0) = vgetq_lane_f32(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = vgetq_lane_f32(result##m##n, 1); \ + C_ELEMENT(m, n + 2) = vgetq_lane_f32(result##m##n, 2); \ + C_ELEMENT(m, n + 3) = vgetq_lane_f32(result##m##n, 3); +#else +#define SCATTER_STORE4(m, n) \ + result##m##n = vmulq_f32(result##m##n, vdupq_n_f32(alpha)); \ + C_ELEMENT(m, n + 0) = \ + C_ELEMENT(m, n + 0) * beta + vgetq_lane_f32(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = \ + C_ELEMENT(m, n + 1) * beta + vgetq_lane_f32(result##m##n, 1); \ + C_ELEMENT(m, n + 2) = \ + C_ELEMENT(m, n + 2) * beta + vgetq_lane_f32(result##m##n, 2); \ + C_ELEMENT(m, n + 3) = \ + C_ELEMENT(m, n + 3) * beta + vgetq_lane_f32(result##m##n, 3); +#endif + +// SVE +#define DECLARE_RESULT_VECTOR(m, n) svfloat32_t result##m##n = svdup_f32(0.0); +#define BROADCAST_LOAD_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = svdup_f32(A_ELEMENT_K(m, offset_k)); +#define BROADCAST_LOAD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = svdup_f32(B_ELEMENT_K(n, offset_k)); +#define VECTOR_LOAD_A(pg, m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); +#define QUADWORD_LOAD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); +#define PACK_B(n, offset_k) \ + svst1(pg_first, &PACK_ELEMENT_K(n, offset_k), b##s##n##_k##offset_k); +#define VECTOR_PACK_B(n, offset_k) \ + svst1(pg_true, &PACK_ELEMENT_K(n* v_size, offset_k), b##s##n##_k##offset_k); +#define QUADWORD_PACK_B(n, offset_k) \ + svst1(pg_quad, &PACK_ELEMENT_K(n, offset_k), b##s##n##_k##offset_k); +#define UNPACK_VECTOR_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = \ + svld1(pg_true, &PACK_ELEMENT_K(n * v_size, offset_k)); +#define UNPACK_BROADCAST_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = svdup_f32(PACK_ELEMENT_K(n, offset_k)); +#define UNPACK_QUADWORD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &PACK_ELEMENT_K(n, offset_k)); +#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \ + result##m##n = \ + svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k); +#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \ + result##m##n = svmla_lane( \ + result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); +#ifdef B0 +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, 
n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); +#else +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = \ + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = svmla_m( \ + pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u32(0LL, ldc)), \ + beta_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); +#endif + +#ifndef LIKELY +#ifdef __GNUC__ +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif +#ifndef UNLIKELY +#ifdef __GNUC__ +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + +#ifdef B0 +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT* C, + BLASLONG ldc) +#else +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT beta, + FLOAT* C, + BLASLONG ldc) +#endif +{ + const uint64_t v_size = svcntw(); + const uint64_t v_size2 = v_size * 2; + const svbool_t pg_true = svptrue_b32(); + const svbool_t pg_quad = svwhilelt_b32(0, 4); + const svbool_t pg_first = svwhilelt_b32(0, 1); + const svfloat32_t alpha_vec = svdup_f32(alpha); +#ifndef B0 + const svfloat32_t beta_vec = svdup_f32(beta); +#endif + const BLASLONG n8 = N & -8; + const BLASLONG n4 = N & -4; + const BLASLONG v_m2 = M & -v_size2; + const BLASLONG v_m1 = M & -v_size; + const BLASLONG k4 = K & -4; + + const int pack_b = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + FLOAT* packed_b = + (pack_b) ? 
(FLOAT*)malloc(K * 8 * sizeof(FLOAT)) : NULL; + + BLASLONG j = 0; + for (; j < n8; j += 8) { + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + DECLARE_RESULT_VECTOR(1, 4); + DECLARE_RESULT_VECTOR(1, 5); + DECLARE_RESULT_VECTOR(1, 6); + DECLARE_RESULT_VECTOR(1, 7); + + if (LIKELY(packed_b != NULL)) { + if (i == 0) { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + VECTOR_PACK_B4(0, 0); + VECTOR_PACK_B4(0, 1); + VECTOR_PACK_B4(0, 2); + VECTOR_PACK_B4(0, 3); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + VECTOR_PACK_B4(4, 0); + VECTOR_PACK_B4(4, 1); + VECTOR_PACK_B4(4, 2); + VECTOR_PACK_B4(4, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 
1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); + VECTOR_LOAD_A(pg_true, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); + VECTOR_LOAD_A(pg_true, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + PACK_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + PACK_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + PACK_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + PACK_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + BROADCAST_LOAD_B(4, 0); + PACK_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); + BROADCAST_LOAD_B(5, 0); + PACK_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); + BROADCAST_LOAD_B(6, 0); + PACK_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); + BROADCAST_LOAD_B(7, 0); + PACK_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); + } + } else { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_QUADWORD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + } + } + } else { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + 
VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); + VECTOR_LOAD_A(pg_true, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); + VECTOR_LOAD_A(pg_true, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + 
UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + VECTOR_STORE(pg_true, 1, 4); + VECTOR_STORE(pg_true, 1, 5); + VECTOR_STORE(pg_true, 1, 6); + VECTOR_STORE(pg_true, 1, 7); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + if (LIKELY(packed_b != NULL)) { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_QUADWORD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + } else { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + 
UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + if (LIKELY(packed_b != NULL)) { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_QUADWORD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + } else { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_tail, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_tail, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_tail, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + 
VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 7, 0); + } + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + VECTOR_STORE(pg_tail, 0, 4); + VECTOR_STORE(pg_tail, 0, 5); + VECTOR_STORE(pg_tail, 0, 6); + VECTOR_STORE(pg_tail, 0, 7); + } + } + for (; j < n4; j += 4) { + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + 
UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + 
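+ // pg_tail (set up with svwhilelt above) masks off lanes beyond M, so the final partial vector of rows is loaded and stored safely below.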
VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_tail, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_tail, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_tail, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + } + } + for (; j < N; j++) { + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + } + } + + if (pack_b) + free(packed_b); + + return 0; +} diff --git a/kernel/arm64/sgemm_small_kernel_nt_sve.c b/kernel/arm64/sgemm_small_kernel_nt_sve.c new file mode 100644 index 000000000..1c3d324d0 --- /dev/null +++ b/kernel/arm64/sgemm_small_kernel_nt_sve.c @@ -0,0 +1,647 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" + +#include <arm_neon.h> +#include <arm_sve.h> +#ifdef __ARM_NEON_SVE_BRIDGE +#include <arm_neon_sve_bridge.h> +#else +#define svdup_neonq_f32(fixed_reg) \ + ({ \ + svfloat32_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#define svdup_neonq_f64(fixed_reg) \ + ({ \ + svfloat64_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#endif + +#define A_ELEMENT_K(m, offset_k) A[(i + (m)) + (k + offset_k) * lda] +#define A_ELEMENT(m) A_ELEMENT_K(m, 0) + +#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) * ldb + (j + (n))] +#define B_ELEMENT(n) B_ELEMENT_K(n, 0) + +#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] + +#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 8 + n] +#define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0) + +// ASIMD +#define DECLARE_RESULT_VECTOR4(m, n) \ + float32x4_t result##m##n = vdupq_n_f32(0.0); +#define DECLARE_RESULT(m, n) float32_t result##m##n = 0.0; +#define BROADCAST_LOAD_A4(m, offset_k) \ + float32x4_t a##m##_k##offset_k = vld1q_dup_f32(&A_ELEMENT_K(m, offset_k)); +#define LOAD_A1(m, offset_k) \ + float32_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k); +#define VECTOR_LOAD_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vld1q_f32(&B_ELEMENT_K(n, offset_k)); +#define GATHER_LOAD_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vdupq_n_f32(B_ELEMENT_K(n, offset_k)); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 2, offset_k), b##n##_k##offset_k, 2); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 3, offset_k), b##n##_k##offset_k, 3); +#define VECTOR_UNPACK_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vld1q_f32(&PACK_ELEMENT_K(n, offset_k)); +#define VECTOR_PACK_B4(n, offset_k) \ + vst1q_f32(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k); +#define PACK_B0(n, offset_k) \ + PACK_ELEMENT_K(n, offset_k) = vget_lane_f32(b##n##_k##offset_k, 0); +#define UPDATE_RESULT_VECTOR4(m, n, offset_k) \ + result##m##n = \ + vfmaq_f32(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k); +#define UPDATE_RESULT(m, n, offset_k) \ + result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k; +#ifdef B0 +#define SCATTER_STORE4(m, n) \ + result##m##n = vmulq_f32(result##m##n, vdupq_n_f32(alpha)); \ + C_ELEMENT(m, n + 0) = vgetq_lane_f32(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = vgetq_lane_f32(result##m##n, 1); \ + C_ELEMENT(m, n + 2) = vgetq_lane_f32(result##m##n, 2); \ + C_ELEMENT(m, n + 3) = vgetq_lane_f32(result##m##n, 3); +#else +#define SCATTER_STORE4(m, n) \ + result##m##n = vmulq_f32(result##m##n, 
vdupq_n_f32(alpha)); \ + C_ELEMENT(m, n + 0) = \ + C_ELEMENT(m, n + 0) * beta + vgetq_lane_f32(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = \ + C_ELEMENT(m, n + 1) * beta + vgetq_lane_f32(result##m##n, 1); \ + C_ELEMENT(m, n + 2) = \ + C_ELEMENT(m, n + 2) * beta + vgetq_lane_f32(result##m##n, 2); \ + C_ELEMENT(m, n + 3) = \ + C_ELEMENT(m, n + 3) * beta + vgetq_lane_f32(result##m##n, 3); +#endif + +// SVE +#define DECLARE_RESULT_VECTOR(m, n) svfloat32_t result##m##n = svdup_f32(0.0); +#define BROADCAST_LOAD_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = svdup_f32(A_ELEMENT_K(m, offset_k)); +#define BROADCAST_LOAD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = svdup_f32(B_ELEMENT_K(n, offset_k)); +#define VECTOR_LOAD_A(pg, m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); +#define QUADWORD_LOAD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); +#define PACK_B(n, offset_k) \ + svst1(pg_first, &PACK_ELEMENT_K(n, offset_k), b##s##n##_k##offset_k); +#define VECTOR_PACK_B(n, offset_k) \ + svst1(pg_true, &PACK_ELEMENT_K(n* v_size, offset_k), b##s##n##_k##offset_k); +#define QUADWORD_PACK_B(n, offset_k) \ + svst1(pg_quad, &PACK_ELEMENT_K(n, offset_k), b##s##n##_k##offset_k); +#define UNPACK_VECTOR_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = \ + svld1(pg_true, &PACK_ELEMENT_K(n * v_size, offset_k)); +#define UNPACK_BROADCAST_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = svdup_f32(PACK_ELEMENT_K(n, offset_k)); +#define UNPACK_QUADWORD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &PACK_ELEMENT_K(n, offset_k)); +#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \ + result##m##n = \ + svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k); +#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \ + result##m##n = svmla_lane( \ + result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); +#ifdef B0 +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); +#else +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = \ + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = svmla_m( \ + pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u32(0LL, ldc)), \ + beta_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); +#endif + +#ifndef LIKELY +#ifdef __GNUC__ +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif +#ifndef UNLIKELY +#ifdef __GNUC__ +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + +#ifdef B0 +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT* C, + BLASLONG ldc) +#else +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT beta, + FLOAT* C, + BLASLONG ldc) 
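+// Note: this file is compiled twice; with B0 defined the beta parameter is absent and C is simply overwritten with alpha*A*B, otherwise beta*C is accumulated as well (see the VECTOR_STORE definitions above).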
+#endif +{ + const uint64_t v_size = svcntw(); + const uint64_t v_size3 = v_size * 3; + const svbool_t pg_true = svptrue_b32(); + const svbool_t pg_quad = svwhilelt_b32(0, 4); + const svbool_t pg_first = svwhilelt_b32(0, 1); + const svfloat32_t alpha_vec = svdup_f32(alpha); +#ifndef B0 + const svfloat32_t beta_vec = svdup_f32(beta); +#endif + const BLASLONG n8 = N & -8; + const BLASLONG n4 = N & -4; + const BLASLONG v_m3 = M - (M % v_size3); + const BLASLONG v_m1 = M & -v_size; + + const int pack_b = M >= v_size3 && N >= 8 && K >= 8 ? 1 : 0; + FLOAT* packed_b = + (pack_b) ? packed_b = (FLOAT*)malloc(K * 8 * sizeof(FLOAT)) : NULL; + + BLASLONG j = 0; + for (; j < n8; j += 8) { + + BLASLONG i = 0; + for (; i < v_m3; i += v_size3) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + DECLARE_RESULT_VECTOR(1, 4); + DECLARE_RESULT_VECTOR(1, 5); + DECLARE_RESULT_VECTOR(1, 6); + DECLARE_RESULT_VECTOR(1, 7); + DECLARE_RESULT_VECTOR(2, 0); + DECLARE_RESULT_VECTOR(2, 1); + DECLARE_RESULT_VECTOR(2, 2); + DECLARE_RESULT_VECTOR(2, 3); + DECLARE_RESULT_VECTOR(2, 4); + DECLARE_RESULT_VECTOR(2, 5); + DECLARE_RESULT_VECTOR(2, 6); + DECLARE_RESULT_VECTOR(2, 7); + + if (LIKELY(packed_b != NULL)) { + if (i == 0) { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + QUADWORD_PACK_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + QUADWORD_PACK_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 7, 4, 3, 0); + } + } else { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_QUADWORD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 
1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 7, 4, 3, 0); + } + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 7, 4, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + VECTOR_STORE(pg_true, 1, 4); + VECTOR_STORE(pg_true, 1, 5); + VECTOR_STORE(pg_true, 1, 6); + VECTOR_STORE(pg_true, 1, 7); + VECTOR_STORE(pg_true, 2, 0); + VECTOR_STORE(pg_true, 2, 1); + VECTOR_STORE(pg_true, 2, 2); + VECTOR_STORE(pg_true, 2, 3); + VECTOR_STORE(pg_true, 2, 4); + VECTOR_STORE(pg_true, 2, 5); + VECTOR_STORE(pg_true, 2, 6); + VECTOR_STORE(pg_true, 2, 7); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + if (LIKELY(packed_b != NULL)) { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_QUADWORD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + 
UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + if (LIKELY(packed_b != NULL)) { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_QUADWORD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + VECTOR_STORE(pg_tail, 0, 4); + VECTOR_STORE(pg_tail, 0, 5); + VECTOR_STORE(pg_tail, 0, 6); + VECTOR_STORE(pg_tail, 0, 7); + } + } + for (; j < n4; j += 4) { + + BLASLONG i = 0; + for (; i < v_m3; i += v_size3) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + DECLARE_RESULT_VECTOR(2, 0); + DECLARE_RESULT_VECTOR(2, 1); + DECLARE_RESULT_VECTOR(2, 2); + DECLARE_RESULT_VECTOR(2, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 2, 0); + 
UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 0, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + VECTOR_STORE(pg_true, 2, 0); + VECTOR_STORE(pg_true, 2, 1); + VECTOR_STORE(pg_true, 2, 2); + VECTOR_STORE(pg_true, 2, 3); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + } + } + for (; j < N; j++) { + + BLASLONG i = 0; + for (; i < v_m3; i += v_size3) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(2, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + VECTOR_LOAD_A(pg_true, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 2, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 2, 0); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + } + } + + if (pack_b) + free(packed_b); + + return 0; +} diff --git a/kernel/arm64/sgemm_small_kernel_tn_sve.c b/kernel/arm64/sgemm_small_kernel_tn_sve.c new file mode 100644 index 000000000..6fd3b12a6 --- /dev/null +++ b/kernel/arm64/sgemm_small_kernel_tn_sve.c @@ -0,0 +1,1247 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. 
Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" + +#include <arm_neon.h> +#include <arm_sve.h> +#ifdef __ARM_NEON_SVE_BRIDGE +#include <arm_neon_sve_bridge.h> +#else +#define svdup_neonq_f32(fixed_reg) \ + ({ \ + svfloat32_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#define svdup_neonq_f64(fixed_reg) \ + ({ \ + svfloat64_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#endif + +#define A_ELEMENT_K(m, offset_k) A[(i + (m)) * lda + (k + offset_k)] +#define A_ELEMENT(m) A_ELEMENT_K(m, 0) + +#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) + (j + (n)) * ldb] +#define B_ELEMENT(n) B_ELEMENT_K(n, 0) + +#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] + +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * 2 * v_size + m] +#define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) + +// ASIMD +#define DECLARE_RESULT_VECTOR4(m, n) \ + float32x4_t result##m##n = vdupq_n_f32(0.0); +#define DECLARE_RESULT(m, n) float32_t result##m##n = 0.0; +#define BROADCAST_LOAD_A4(m, offset_k) \ + float32x4_t a##m##_k##offset_k = vld1q_dup_f32(&A_ELEMENT_K(m, offset_k)); +#define LOAD_A1(m, offset_k) \ + float32_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k); +#define VECTOR_LOAD_B_K4(n, offset_k) \ + float32x4_t b##k##n##_k##offset_k = vld1q_f32(&B_ELEMENT_K(n, offset_k)); +#define TRANSPOSE_B4_K4( \ + n0, n1, n2, n3, offset_k0, offset_k1, offset_k2, offset_k3) \ + float32x4_t b##t##n0##_k##offset_k0 = \ + vzip1q_f32(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \ + float32x4_t b##t##n0##_k##offset_k1 = \ + vzip2q_f32(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \ + float32x4_t b##t##n0##_k##offset_k2 = \ + vzip1q_f32(b##k##n2##_k##offset_k0, b##k##n3##_k##offset_k0); \ + float32x4_t b##t##n0##_k##offset_k3 = \ + vzip2q_f32(b##k##n2##_k##offset_k0, b##k##n3##_k##offset_k0); \ + float32x4_t b##n0##_k##offset_k0 = vreinterpretq_f32_f64( \ + vzip1q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k0), \ + vreinterpretq_f64_f32(b##t##n0##_k##offset_k2))); \ + float32x4_t b##n0##_k##offset_k1 = vreinterpretq_f32_f64( \ + vzip2q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k0), \ +
vreinterpretq_f64_f32(b##t##n0##_k##offset_k2))); \ + float32x4_t b##n0##_k##offset_k2 = vreinterpretq_f32_f64( \ + vzip1q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k1), \ + vreinterpretq_f64_f32(b##t##n0##_k##offset_k3))); \ + float32x4_t b##n0##_k##offset_k3 = vreinterpretq_f32_f64( \ + vzip2q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k1), \ + vreinterpretq_f64_f32(b##t##n0##_k##offset_k3))); + +#define SCALE_B4_K4(n0, offset_k0, offset_k1, offset_k2, offset_k3) \ + svfloat32_t b##s##n0##_k##offset_k0 = svdup_neonq_f32(b##n0##_k##offset_k0); \ + svfloat32_t b##s##n0##_k##offset_k1 = svdup_neonq_f32(b##n0##_k##offset_k1); \ + svfloat32_t b##s##n0##_k##offset_k2 = svdup_neonq_f32(b##n0##_k##offset_k2); \ + svfloat32_t b##s##n0##_k##offset_k3 = svdup_neonq_f32(b##n0##_k##offset_k3); +#define GATHER_LOAD_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vdupq_n_f32(B_ELEMENT_K(n, offset_k)); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 2, offset_k), b##n##_k##offset_k, 2); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 3, offset_k), b##n##_k##offset_k, 3); +#define VECTOR_UNPACK_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vld1q_f32(&PACK_ELEMENT_K(n, offset_k)); +#define VECTOR_PACK_B4(n, offset_k) \ + vst1q_f32(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k); +#define PACK_B0(n, offset_k) \ + PACK_ELEMENT_K(n, offset_k) = vget_lane_f32(b##n##_k##offset_k, 0); +#define UPDATE_RESULT_VECTOR4(m, n, offset_k) \ + result##m##n = \ + vfmaq_f32(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k); +#define UPDATE_RESULT(m, n, offset_k) \ + result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k; +#ifdef B0 +#define SCATTER_STORE4(m, n) \ + result##m##n = vmulq_f32(result##m##n, vdupq_n_f32(alpha)); \ + C_ELEMENT(m, n + 0) = vgetq_lane_f32(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = vgetq_lane_f32(result##m##n, 1); \ + C_ELEMENT(m, n + 2) = vgetq_lane_f32(result##m##n, 2); \ + C_ELEMENT(m, n + 3) = vgetq_lane_f32(result##m##n, 3); +#else +#define SCATTER_STORE4(m, n) \ + result##m##n = vmulq_f32(result##m##n, vdupq_n_f32(alpha)); \ + C_ELEMENT(m, n + 0) = \ + C_ELEMENT(m, n + 0) * beta + vgetq_lane_f32(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = \ + C_ELEMENT(m, n + 1) * beta + vgetq_lane_f32(result##m##n, 1); \ + C_ELEMENT(m, n + 2) = \ + C_ELEMENT(m, n + 2) * beta + vgetq_lane_f32(result##m##n, 2); \ + C_ELEMENT(m, n + 3) = \ + C_ELEMENT(m, n + 3) * beta + vgetq_lane_f32(result##m##n, 3); +#endif + +// SVE +#define DECLARE_RESULT_VECTOR(m, n) svfloat32_t result##m##n = svdup_f32(0.0); +#define BROADCAST_LOAD_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = svdup_f32(A_ELEMENT_K(m, offset_k)); +#define BROADCAST_LOAD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = svdup_f32(B_ELEMENT_K(n, offset_k)); +#define VECTOR_LOAD_A(pg, m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); +#define QUADWORD_LOAD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); +#define GATHER_LOAD_A(pg, m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1_gather_index(pg, &A_ELEMENT_K(v_size * m, offset_k), lda_vec); +#define PACK_A(m, offset_k) \ + svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); +#define VECTOR_PACK_A(m, offset_k) \ + svst1(pg_true, &PACK_ELEMENT_K(m* v_size, offset_k), a##s##m##_k##offset_k); 
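+// A-packing helpers: the first j-iteration of each i-block stores the gathered (strided) columns of A into packed_a, so later j-iterations can re-load them with contiguous svld1 via UNPACK_VECTOR_A instead of repeating the gather loads.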
+#define QUADWORD_PACK_A(m, offset_k) \ + svst1(pg_quad, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); +#define UNPACK_VECTOR_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1(pg_true, &PACK_ELEMENT_K(m * v_size, offset_k)); +#define UNPACK_BROADCAST_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = svdup_f32(PACK_ELEMENT_K(m, offset_k)); +#define UNPACK_QUADWORD_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1rq(pg_true, &PACK_ELEMENT_K(m, offset_k)); +#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \ + result##m##n = \ + svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k); +#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \ + result##m##n = svmla_lane( \ + result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); +#ifdef B0 +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); +#else +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = \ + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = svmla_m( \ + pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u32(0LL, ldc)), \ + beta_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); +#endif + +#ifndef LIKELY +#ifdef __GNUC__ +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif +#ifndef UNLIKELY +#ifdef __GNUC__ +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + + +#define GATHER_LOAD_A64(pg, m, offset_k) \ + svfloat64_t a##t##m##_k##offset_k = \ + svld1_gather_offset(pg, (double *)&A_ELEMENT_K(v64_size * m, offset_k), lda_vec64); + +#ifdef B0 +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT* C, + BLASLONG ldc) +#else +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT beta, + FLOAT* C, + BLASLONG ldc) +#endif +{ + const uint64_t v_size = svcntw(); + const uint64_t v_size2 = v_size * 2; + const uint64_t v64_size = v_size / 2; + const svbool_t pg_true = svptrue_b32(); + const svbool_t pg_quad = svwhilelt_b32(0, 4); + const svbool_t pg_first = svwhilelt_b32(0, 1); + const svfloat32_t alpha_vec = svdup_f32(alpha); +#ifndef B0 + const svfloat32_t beta_vec = svdup_f32(beta); +#endif + const svuint32_t lda_vec = svindex_u32(0LL, lda); + const svuint64_t lda_vec64 = svmul_m(pg_true, svindex_u64(0,sizeof(FLOAT)), lda); + + const BLASLONG v_m2 = M & -v_size2; + const BLASLONG v_m1 = M & -v_size; + const BLASLONG n8 = N & -8; + const BLASLONG n4 = N & -4; + const BLASLONG k4 = K & -4; + + const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + FLOAT* packed_a = + (pack_a) ? 
packed_a = (FLOAT*)malloc(K * 2 * v_size * sizeof(FLOAT)) : NULL; + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG j = 0; + for (; j < n8; j += 8) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + DECLARE_RESULT_VECTOR(1, 4); + DECLARE_RESULT_VECTOR(1, 5); + DECLARE_RESULT_VECTOR(1, 6); + DECLARE_RESULT_VECTOR(1, 7); + + if (LIKELY(packed_a != NULL)) { + if (j == 0) { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + + GATHER_LOAD_A64(pg_true, 0, 0); + GATHER_LOAD_A64(pg_true, 1, 0); + svfloat32_t as0_k0 = svuzp1(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); + svfloat32_t as0_k1 = svuzp2(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); + VECTOR_PACK_A(0, 0); + VECTOR_PACK_A(0, 1); + + // GATHER_LOAD_A(pg_true, 0, 0); + // VECTOR_PACK_A(0, 0); + // GATHER_LOAD_A(pg_true, 0, 1); + // VECTOR_PACK_A(0, 1); + + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + + GATHER_LOAD_A64(pg_true, 0, 2); + GATHER_LOAD_A64(pg_true, 1, 2); + svfloat32_t as0_k2 = svuzp1(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); + svfloat32_t as0_k3 = svuzp2(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); + VECTOR_PACK_A(0, 2); + VECTOR_PACK_A(0, 3); + + // GATHER_LOAD_A(pg_true, 0, 2); + // VECTOR_PACK_A(0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + // GATHER_LOAD_A(pg_true, 0, 3); + // VECTOR_PACK_A(0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + + GATHER_LOAD_A64(pg_true, 2, 0); + GATHER_LOAD_A64(pg_true, 3, 0); + 
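// 64-bit gather x2, then unzip back into the 32-bit columns for k0/k1 +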
svfloat32_t as1_k0 = svuzp1(svreinterpret_f32(at2_k0), svreinterpret_f32(at3_k0)); + svfloat32_t as1_k1 = svuzp2(svreinterpret_f32(at2_k0), svreinterpret_f32(at3_k0)); + VECTOR_PACK_A(1, 0); + VECTOR_PACK_A(1, 1); + + // GATHER_LOAD_A(pg_true, 1, 0); + // VECTOR_PACK_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + // GATHER_LOAD_A(pg_true, 1, 1); + // VECTOR_PACK_A(1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); + + // 64-bit load x2 then unzip into 32-bit + GATHER_LOAD_A64(pg_true, 2, 2); + GATHER_LOAD_A64(pg_true, 3, 2); + svfloat32_t as1_k2 = svuzp1(svreinterpret_f32(at2_k2), svreinterpret_f32(at3_k2)); + svfloat32_t as1_k3 = svuzp2(svreinterpret_f32(at2_k2), svreinterpret_f32(at3_k2)); + VECTOR_PACK_A(1, 2); + VECTOR_PACK_A(1, 3); + + // GATHER_LOAD_A(pg_true, 1, 2); + // VECTOR_PACK_A(1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); + // GATHER_LOAD_A(pg_true, 1, 3); + // VECTOR_PACK_A(1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + VECTOR_PACK_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + VECTOR_PACK_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); + } + } else { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 
0, 1, 2, 3); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_VECTOR_A(0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + UNPACK_VECTOR_A(0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + UNPACK_VECTOR_A(0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + UNPACK_VECTOR_A(1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); + UNPACK_VECTOR_A(1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); + UNPACK_VECTOR_A(1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + 
UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); + } + } + } else { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + GATHER_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + GATHER_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + GATHER_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + GATHER_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + 
UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); + GATHER_LOAD_A(pg_true, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); + GATHER_LOAD_A(pg_true, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + VECTOR_STORE(pg_true, 1, 4); + VECTOR_STORE(pg_true, 1, 5); + VECTOR_STORE(pg_true, 1, 6); + VECTOR_STORE(pg_true, 1, 7); + } + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + if (LIKELY(packed_a != NULL)) { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_VECTOR_A(0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + UNPACK_VECTOR_A(0, 2); + 
UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + UNPACK_VECTOR_A(0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UNPACK_VECTOR_A(1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + UNPACK_VECTOR_A(1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + UNPACK_VECTOR_A(1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } else { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + GATHER_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + GATHER_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + GATHER_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + GATHER_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + GATHER_LOAD_A(pg_true, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + 
UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + GATHER_LOAD_A(pg_true, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } else { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + } + } + for (; i < v_m1; i += v_size) { + + BLASLONG j = 0; + for (; j < n8; j += 8) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + + GATHER_LOAD_A64(pg_true, 0, 0); + GATHER_LOAD_A64(pg_true, 1, 0); + svfloat32_t as0_k0 = svuzp1(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); + svfloat32_t as0_k1 = svuzp2(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); + + // GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + // GATHER_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + + GATHER_LOAD_A64(pg_true, 0, 2); + GATHER_LOAD_A64(pg_true, 1, 2); + svfloat32_t as0_k2 = svuzp1(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); + svfloat32_t as0_k3 = svuzp2(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); + + // GATHER_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + // GATHER_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + 
UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + } + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + + GATHER_LOAD_A64(pg_true, 0, 0); + GATHER_LOAD_A64(pg_true, 1, 0); + svfloat32_t as0_k0 = svuzp1(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); + svfloat32_t as0_k1 = svuzp2(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); + + // GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + // GATHER_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + + GATHER_LOAD_A64(pg_true, 0, 2); + GATHER_LOAD_A64(pg_true, 1, 2); + svfloat32_t as0_k2 = svuzp1(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); + svfloat32_t as0_k3 = svuzp2(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); + + // GATHER_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + // GATHER_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + 
UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + } + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + + BLASLONG j = 0; + for (; j < n8; j += 8) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + GATHER_LOAD_A(pg_tail, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + GATHER_LOAD_A(pg_tail, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + GATHER_LOAD_A(pg_tail, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + 
UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 7, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + VECTOR_STORE(pg_tail, 0, 4); + VECTOR_STORE(pg_tail, 0, 5); + VECTOR_STORE(pg_tail, 0, 6); + VECTOR_STORE(pg_tail, 0, 7); + } + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + GATHER_LOAD_A(pg_tail, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + GATHER_LOAD_A(pg_tail, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + GATHER_LOAD_A(pg_tail, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + } + } + + if (pack_a) + free(packed_a); + + return 0; +} diff --git a/kernel/arm64/sgemm_small_kernel_tt_sve.c b/kernel/arm64/sgemm_small_kernel_tt_sve.c new file mode 100644 index 000000000..894e7fd46 --- /dev/null +++ b/kernel/arm64/sgemm_small_kernel_tt_sve.c @@ -0,0 +1,574 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. 
Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" + +#include <arm_neon.h> +#include <arm_sve.h> +#ifdef __ARM_NEON_SVE_BRIDGE +#include <arm_neon_sve_bridge.h> +#else +#define svdup_neonq_f32(fixed_reg) \ + ({ \ + svfloat32_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#define svdup_neonq_f64(fixed_reg) \ + ({ \ + svfloat64_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#endif + +#define A_ELEMENT_K(m, offset_k) A[(i + (m)) * lda + (k + offset_k)] +#define A_ELEMENT(m) A_ELEMENT_K(m, 0) + +#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) * ldb + (j + (n))] +#define B_ELEMENT(n) B_ELEMENT_K(n, 0) + +#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] + +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size2 + m] +#define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) + +// ASIMD +#define DECLARE_RESULT_VECTOR4(m, n) \ + float32x4_t result##m##n = vdupq_n_f32(0.0); +#define DECLARE_RESULT(m, n) float32_t result##m##n = 0.0; +#define BROADCAST_LOAD_A4(m, offset_k) \ + float32x4_t a##m##_k##offset_k = vld1q_dup_f32(&A_ELEMENT_K(m, offset_k)); +#define LOAD_A1(m, offset_k) \ + float32_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k); +#define VECTOR_LOAD_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vld1q_f32(&B_ELEMENT_K(n, offset_k)); +#define GATHER_LOAD_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vdupq_n_f32(B_ELEMENT_K(n, offset_k)); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 2, offset_k), b##n##_k##offset_k, 2); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 3, offset_k), b##n##_k##offset_k, 3); +#define VECTOR_UNPACK_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vld1q_f32(&PACK_ELEMENT_K(n, offset_k)); +#define VECTOR_PACK_B4(n, offset_k) \ + vst1q_f32(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k); +#define PACK_B0(n, offset_k) \ + PACK_ELEMENT_K(n, offset_k) = vgetq_lane_f32(b##n##_k##offset_k, 0); +#define UPDATE_RESULT_VECTOR4(m, n, offset_k) \ + result##m##n = \ + vfmaq_f32(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k); +#define UPDATE_RESULT(m, n, offset_k) \ + result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k; +#ifdef B0 +#define VECTOR_STORE4(m, n) \ + vst1q_f32(&C_ELEMENT(m, n), vmulq_f32(result##m##n, vdupq_n_f32(alpha))); +#define STORE(m, n) C_ELEMENT(m, n) = alpha * result##m##n; +#else +#define VECTOR_STORE4(m, n) \ + 
result##m##n = vmulq_f32(result##m##n, vdupq_n_f32(alpha)); \ + result##m##n = \ + vfmaq_f32(result##m##n, vld1q_f32(&C_ELEMENT(m, n)), vdupq_n_f32(beta)); \ + vst1q_f32(&C_ELEMENT(m, n), result##m##n); +#define STORE(m, n) \ + C_ELEMENT(m, n) = C_ELEMENT(m, n) * beta + alpha * result##m##n; +#endif + +// SVE +#define DECLARE_RESULT_VECTOR(m, n) svfloat32_t result##m##n = svdup_f32(0.0); +#define BROADCAST_LOAD_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = svdup_f32(A_ELEMENT_K(m, offset_k)); +#define BROADCAST_LOAD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = svdup_f32(B_ELEMENT_K(n, offset_k)); + +#define QUADWORD_LOAD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); +#define GATHER_LOAD_A(pg, offset_m, m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1_gather_index(pg, &A_ELEMENT_K(offset_m, offset_k), lda_vec); +#define PACK_A(m, offset_k) \ + svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); +#define VECTOR_PACK_A(offset_m, m, offset_k) \ + svst1(pg_true, &PACK_ELEMENT_K(m* v_size, offset_k), a##s##m##_k##offset_k); +#define QUADWORD_PACK_A(m, offset_k) \ + svst1(pg_quad, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); +#define UNPACK_VECTOR_A(offset_m, m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1(pg_true, &PACK_ELEMENT_K(offset_m, offset_k)); +#define UNPACK_BROADCAST_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = svdup_f32(PACK_ELEMENT_K(m, offset_k)); +#define UNPACK_QUADWORD_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1rq(pg_true, &PACK_ELEMENT_K(m, offset_k)); +#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \ + result##m##n = \ + svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k); +#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \ + result##m##n = svmla_lane( \ + result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); +#ifdef B0 +#define VECTOR_STORE(pg, offset_m, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1(pg, &C_ELEMENT(offset_m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); +#else +#define VECTOR_STORE(pg, offset_m, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = \ + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(offset_m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(offset_m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = svmla_m( \ + pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u32(0LL, ldc)), \ + beta_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); +#endif + +#ifndef LIKELY +#ifdef __GNUC__ +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif +#ifndef UNLIKELY +#ifdef __GNUC__ +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + +#ifdef B0 +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT* C, + BLASLONG ldc) +#else +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT beta, + FLOAT* C, + BLASLONG ldc) +#endif +{ + const uint64_t v_size 
= svcntw(); + const uint64_t v_size2 = v_size * 2; + const svbool_t pg_true = svptrue_b32(); + const svfloat32_t alpha_vec = svdup_f32(alpha); +#ifndef B0 + const svfloat32_t beta_vec = svdup_f32(beta); +#endif + const svuint32_t lda_vec = svindex_u32(0LL, lda); + + const BLASLONG v_m2 = M & -v_size2; + const BLASLONG v_m1 = M & -v_size; + const BLASLONG n8 = N & -8; + const BLASLONG n4 = N & -4; + + const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + FLOAT* packed_a = + (pack_a) ? packed_a = (FLOAT*)malloc(K * 2 * v_size * sizeof(FLOAT)) : NULL; + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG j = 0; + for (; j < n8; j += 8) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + DECLARE_RESULT_VECTOR(1, 4); + DECLARE_RESULT_VECTOR(1, 5); + DECLARE_RESULT_VECTOR(1, 6); + DECLARE_RESULT_VECTOR(1, 7); + + if (LIKELY(packed_a != NULL)) { + if (j == 0) { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0, 0); + VECTOR_PACK_A(0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + GATHER_LOAD_A(pg_true, v_size, 1, 0); + VECTOR_PACK_A(v_size, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UNPACK_VECTOR_A(v_size, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + } + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + 
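[Editor's note] This first j-block both computes and packs: GATHER_LOAD_A pulls a strided row block of the transposed A and VECTOR_PACK_A stores it contiguously, so every later j-block can reload it with a cheap unit-stride UNPACK_VECTOR_A. A simplified sketch of that pack step, one vector per k instead of the kernel's v_size2, with assumed names (pack_column is not part of the patch; `a` stands for A + i*lda):

#include <arm_sve.h>

void pack_column(svbool_t pg, const float* a, uint32_t lda,
                 float* packed /* K * v_size floats */, uint64_t k)
{
  svuint32_t idx = svindex_u32(0, lda);                  /* 0, lda, 2*lda, ... */
  svfloat32_t col = svld1_gather_u32index_f32(pg, a + k, idx); /* strided load */
  svst1_f32(pg, packed + k * svcntw(), col);             /* contiguous store */
}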
UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + GATHER_LOAD_A(pg_true, v_size, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0, 0); + VECTOR_STORE(pg_true, 0, 0, 1); + VECTOR_STORE(pg_true, 0, 0, 2); + VECTOR_STORE(pg_true, 0, 0, 3); + VECTOR_STORE(pg_true, 0, 0, 4); + VECTOR_STORE(pg_true, 0, 0, 5); + VECTOR_STORE(pg_true, 0, 0, 6); + VECTOR_STORE(pg_true, 0, 0, 7); + VECTOR_STORE(pg_true, v_size, 1, 0); + VECTOR_STORE(pg_true, v_size, 1, 1); + VECTOR_STORE(pg_true, v_size, 1, 2); + VECTOR_STORE(pg_true, v_size, 1, 3); + VECTOR_STORE(pg_true, v_size, 1, 4); + VECTOR_STORE(pg_true, v_size, 1, 5); + VECTOR_STORE(pg_true, v_size, 1, 6); + VECTOR_STORE(pg_true, v_size, 1, 7); + } + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_VECTOR_A(v_size, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + GATHER_LOAD_A(pg_true, v_size, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0, 0); + VECTOR_STORE(pg_true, 0, 0, 1); + VECTOR_STORE(pg_true, 0, 0, 2); + VECTOR_STORE(pg_true, 0, 0, 3); + VECTOR_STORE(pg_true, v_size, 1, 0); + VECTOR_STORE(pg_true, v_size, 1, 1); + VECTOR_STORE(pg_true, v_size, 1, 2); + VECTOR_STORE(pg_true, v_size, 1, 3); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + UNPACK_VECTOR_A(v_size, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } else { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + GATHER_LOAD_A(pg_true, v_size, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } + VECTOR_STORE(pg_true, 0, 0, 0); + VECTOR_STORE(pg_true, v_size, 1, 0); + } + } + for (; i < v_m1; i += v_size) { + + BLASLONG j = 0; + for (; j < n8; j += 8) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + 
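[Editor's note] All of the VECTOR_STORE calls above share one epilogue: scale the accumulator by alpha and, in the non-B0 build, fold in beta times the existing contents of C. Restated as a small hedged helper (store_result is an assumed standalone name, not the macro itself):

#include <arm_sve.h>

void store_result(svbool_t pg, float* c, svfloat32_t acc,
                  float alpha, float beta)
{
  svfloat32_t r = svmul_f32_m(pg, acc, svdup_f32(alpha));      /* alpha * acc */
  r = svmla_f32_m(pg, r, svld1_f32(pg, c), svdup_f32(beta));   /* + beta * C  */
  svst1_f32(pg, c, r);
}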
DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0, 0); + VECTOR_STORE(pg_true, 0, 0, 1); + VECTOR_STORE(pg_true, 0, 0, 2); + VECTOR_STORE(pg_true, 0, 0, 3); + VECTOR_STORE(pg_true, 0, 0, 4); + VECTOR_STORE(pg_true, 0, 0, 5); + VECTOR_STORE(pg_true, 0, 0, 6); + VECTOR_STORE(pg_true, 0, 0, 7); + } + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0, 0); + VECTOR_STORE(pg_true, 0, 0, 1); + VECTOR_STORE(pg_true, 0, 0, 2); + VECTOR_STORE(pg_true, 0, 0, 3); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0, 0); + } + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + + BLASLONG j = 0; + for (; j < n8; j += 8) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + VECTOR_STORE(pg_tail, 0, 0, 0); + VECTOR_STORE(pg_tail, 0, 0, 1); + VECTOR_STORE(pg_tail, 0, 0, 2); + VECTOR_STORE(pg_tail, 0, 0, 3); + VECTOR_STORE(pg_tail, 0, 0, 4); + VECTOR_STORE(pg_tail, 0, 0, 5); + VECTOR_STORE(pg_tail, 0, 0, 6); + VECTOR_STORE(pg_tail, 0, 0, 7); + } + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + } + VECTOR_STORE(pg_tail, 0, 0, 0); + VECTOR_STORE(pg_tail, 0, 0, 1); + 
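[Editor's note] The final i loop relies on pg_tail = svwhilelt_b32(i, M), so the last partial vector of rows is handled by predication rather than a scalar cleanup loop: inactive lanes are neither loaded nor stored. A self-contained sketch of the pattern (scale_rows is illustrative only):

#include <arm_sve.h>

void scale_rows(float* x, uint32_t M, float alpha)
{
  for (uint32_t i = 0; i < M; i += (uint32_t)svcntw()) {
    svbool_t pg = svwhilelt_b32(i, M);   /* all-true until the last chunk */
    svfloat32_t v = svld1_f32(pg, x + i);
    svst1_f32(pg, x + i, svmul_f32_m(pg, v, svdup_f32(alpha)));
  }
}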
VECTOR_STORE(pg_tail, 0, 0, 2); + VECTOR_STORE(pg_tail, 0, 0, 3); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + } + VECTOR_STORE(pg_tail, 0, 0, 0); + } + } + + if (pack_a) + free(packed_a); + + return 0; +} From 8c472ef7e30d019776dde92b96b02f4c998f7487 Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Mon, 24 Jun 2024 10:47:47 +0100 Subject: [PATCH 2/7] Further tweak small GEMM for AArch64 --- kernel/arm64/dgemm_small_kernel_nn_sve.c | 134 ++- kernel/arm64/dgemm_small_kernel_nt_sve.c | 169 ++-- kernel/arm64/dgemm_small_kernel_tn_sve.c | 137 ++- kernel/arm64/dgemm_small_kernel_tt_sve.c | 137 ++- kernel/arm64/gemm_small_kernel_permit_sve.c | 14 +- kernel/arm64/sgemm_small_kernel_nn_sve.c | 673 ++----------- kernel/arm64/sgemm_small_kernel_nt_sve.c | 406 ++------ kernel/arm64/sgemm_small_kernel_tn_sve.c | 998 ++------------------ kernel/arm64/sgemm_small_kernel_tt_sve.c | 390 ++------ 9 files changed, 772 insertions(+), 2286 deletions(-) diff --git a/kernel/arm64/dgemm_small_kernel_nn_sve.c b/kernel/arm64/dgemm_small_kernel_nn_sve.c index 8baef8277..417633471 100644 --- a/kernel/arm64/dgemm_small_kernel_nn_sve.c +++ b/kernel/arm64/dgemm_small_kernel_nn_sve.c @@ -46,13 +46,27 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. }) #endif -#define A_ELEMENT_K(m, offset_k) A[(i + (m)) + (k + offset_k) * lda] +#define RESET_A_POINTER() a_offset = A; + +#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale; +#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale; +#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k) * lda) #define A_ELEMENT(m) A_ELEMENT_K(m, 0) -#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) + (j + (n)) * ldb] +#define RESET_B_POINTER() b_offset = B; + +#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale * ldb; +#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale * ldb; +#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k)) #define B_ELEMENT(n) B_ELEMENT_K(n, 0) -#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] +#define CREATE_C_POINTER(n, scale) FLOAT* c_offset##n = c_offset + scale * ldc; +#define INCR_C_POINTER(m, incr) // c_offset ## m += incr; +#define UPDATE_C_POINTER(scale) c_offset = c_offset + scale * ldc; +#define C_ELEMENT(m, n) *(c_offset##n + ((m * v_size) + i)) + +// #undef C_ELEMENT +// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] #define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 4 + n] #define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0) @@ -112,8 +126,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define BROADCAST_LOAD_B(n, offset_k) \ svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k)); #define VECTOR_LOAD_A(pg, m, offset_k) \ - svfloat64_t a##s##m##_k##offset_k = \ - svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); + svfloat64_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k)); #define QUADWORD_LOAD_B(n, offset_k) \ svfloat64_t b##s##n##_k##offset_k = \ svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); @@ -140,26 +153,23 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
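[Editor's note] The hunk above is the heart of PATCH 2/7: the element macros stop re-deriving addresses from (i, j, k) on every access and instead dereference per-block base pointers (a_offset0, b_offset0, ...) that the loops bump as they advance. A scalar illustration of the equivalence, with assumed function names:

#include <stddef.h>

float a_element_indexed(const float* A, size_t lda,
                        size_t i, size_t m, size_t k)
{
  return A[(i + m) + k * lda];         /* old style: full index arithmetic */
}

float a_element_pointer(const float* A, size_t lda,
                        size_t i, size_t m, size_t k)
{
  const float* a_offset_m = A + i + m; /* CREATE_A_POINTER(m, ...) */
  return *(a_offset_m + k * lda);      /* A_ELEMENT_K(m, k) */
}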
#ifdef B0 #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #else #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ result##m##n = \ - svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - result##m##n = svmla_m( \ - pg, \ - result##m##n, \ - svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc)), \ - beta_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); + result##m##n = svmla_m(pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \ + beta_vec); \ + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #endif #ifndef LIKELY @@ -169,13 +179,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define LIKELY(x) (x) #endif #endif -#ifndef UNLIKELY -#ifdef __GNUC__ -#define UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define UNLIKELY(x) (x) -#endif -#endif #ifdef B0 int @@ -223,12 +226,29 @@ CNAME(BLASLONG M, FLOAT* packed_b = (pack_b) ? packed_b = (FLOAT*)malloc(K * 4 * sizeof(FLOAT)) : NULL; + FLOAT* b_offset = B; + FLOAT* a_offset = A; + FLOAT* c_offset = C; + BLASLONG j = 0; for (; j < n4; j += 4) { + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, 1); + CREATE_C_POINTER(2, 2); + CREATE_C_POINTER(3, 3); + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + BLASLONG i = 0; for (; i < v_m2; i += v_size2) { + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -372,9 +392,16 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 1, 1); VECTOR_STORE(pg_true, 1, 2); VECTOR_STORE(pg_true, 1, 3); + INCR_C_POINTER(0, v_size2); + INCR_C_POINTER(1, v_size2); + INCR_C_POINTER(2, v_size2); + INCR_C_POINTER(3, v_size2); } for (; i < v_m1; i += v_size) { + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -431,9 +458,15 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 0, 2); VECTOR_STORE(pg_true, 0, 3); + INCR_C_POINTER(0, v_size); + INCR_C_POINTER(1, v_size); + INCR_C_POINTER(2, v_size); + INCR_C_POINTER(3, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -491,13 +524,30 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_tail, 0, 1); VECTOR_STORE(pg_tail, 0, 2); VECTOR_STORE(pg_tail, 0, 3); + INCR_C_POINTER(0, 0); + INCR_C_POINTER(1, 0); + INCR_C_POINTER(2, 0); + INCR_C_POINTER(3, 0); } + + UPDATE_B_POINTER(4); + RESET_A_POINTER(); + UPDATE_C_POINTER(4); } for (; j < n2; j += 2) { + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, 1); + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); 
+ BLASLONG i = 0; for (; i < v_m2; i += v_size2) { + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -538,9 +588,14 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 1, 0); VECTOR_STORE(pg_true, 1, 1); + INCR_C_POINTER(0, v_size2); + INCR_C_POINTER(1, v_size2); } for (; i < v_m1; i += v_size) { + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -568,9 +623,13 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); + INCR_C_POINTER(0, v_size); + INCR_C_POINTER(1, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -599,13 +658,26 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_tail, 0, 0); VECTOR_STORE(pg_tail, 0, 1); + INCR_C_POINTER(0, 0); + INCR_C_POINTER(1, 0); } + + UPDATE_B_POINTER(2); + RESET_A_POINTER(); + UPDATE_C_POINTER(2); } for (; j < N; j++) { + CREATE_C_POINTER(0, 0); + CREATE_B_POINTER(0, 0); + BLASLONG i = 0; for (; i < v_m2; i += v_size2) { + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(1, 0); @@ -620,9 +692,13 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 1, 0); + INCR_C_POINTER(0, v_size2); } for (; i < v_m1; i += v_size) { + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -633,9 +709,12 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); } VECTOR_STORE(pg_true, 0, 0); + INCR_C_POINTER(0, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -647,11 +726,16 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); } VECTOR_STORE(pg_tail, 0, 0); + INCR_C_POINTER(0, 0); } + + UPDATE_B_POINTER(1); + RESET_A_POINTER(); + UPDATE_C_POINTER(1); } if (pack_b) free(packed_b); return 0; -} +} \ No newline at end of file diff --git a/kernel/arm64/dgemm_small_kernel_nt_sve.c b/kernel/arm64/dgemm_small_kernel_nt_sve.c index 982388287..241d96a6c 100644 --- a/kernel/arm64/dgemm_small_kernel_nt_sve.c +++ b/kernel/arm64/dgemm_small_kernel_nt_sve.c @@ -46,13 +46,27 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
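[Editor's note] Seen end to end, the CREATE_*/UPDATE_*/RESET_* calls trace a simple pointer walk: B and C advance by one column block per j iteration while A is rewound for each pass. A much-simplified scalar skeleton of that walk (one accumulator, B0-style store with alpha assumed 1; all names illustrative):

void gemm_skeleton(const float* A, const float* B, float* C,
                   long M, long N, long K, long lda, long ldb, long ldc)
{
  const float* b_offset = B;           /* RESET_B_POINTER() */
  float* c_offset = C;
  for (long j = 0; j < N; j++) {
    const float* b0 = b_offset;        /* CREATE_B_POINTER(0, 0) */
    float* c0 = c_offset;              /* CREATE_C_POINTER(0, 0) */
    for (long i = 0; i < M; i++) {
      float acc = 0.0f;
      for (long k = 0; k < K; k++)
        acc += A[i + k * lda] * b0[k];
      c0[i] = acc;
    }
    b_offset += ldb;                   /* UPDATE_B_POINTER(1) */
    c_offset += ldc;                   /* UPDATE_C_POINTER(1) */
  }
}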
}) #endif -#define A_ELEMENT_K(m, offset_k) A[(i + (m)) + (k + offset_k) * lda] +#define RESET_A_POINTER() a_offset = A; + +#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale; +#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale; +#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k) * lda) #define A_ELEMENT(m) A_ELEMENT_K(m, 0) -#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) * ldb + (j + (n))] +#define RESET_B_POINTER() b_offset = B; + +#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale; +#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale; +#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k) * ldb) #define B_ELEMENT(n) B_ELEMENT_K(n, 0) -#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] +#define CREATE_C_POINTER(n, scale) FLOAT* c_offset##n = c_offset + scale * ldc; +#define INCR_C_POINTER(m, incr) // c_offset ## m += incr; +#define UPDATE_C_POINTER(scale) c_offset = c_offset + scale * ldc; +#define C_ELEMENT(m, n) *(c_offset##n + ((m * v_size) + i)) + +// #undef C_ELEMENT +// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] #define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 4 + n] #define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0) @@ -97,8 +111,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define BROADCAST_LOAD_B(n, offset_k) \ svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k)); #define VECTOR_LOAD_A(pg, m, offset_k) \ - svfloat64_t a##s##m##_k##offset_k = \ - svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); + svfloat64_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k)); #define QUADWORD_LOAD_B(n, offset_k) \ svfloat64_t b##s##n##_k##offset_k = \ svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); @@ -111,26 +124,23 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifdef B0 #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #else #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ result##m##n = \ - svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - result##m##n = svmla_m( \ - pg, \ - result##m##n, \ - svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc)), \ - beta_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); + result##m##n = svmla_m(pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \ + beta_vec); \ + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #endif #ifndef LIKELY @@ -140,13 +150,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
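[Editor's note] For the NT double kernel, a 128-bit quadword holds just two doubles, so QUADWORD_LOAD_B fetches B(k, j) and B(k, j+1) together and the two column accumulators pick them out with lane 0/1 FMAs. A minimal hedged sketch (two_col_update is illustrative; b_row points at B(k, j)):

#include <arm_sve.h>

void two_col_update(svfloat64_t a, const double* b_row,
                    svfloat64_t* acc0, svfloat64_t* acc1)
{
  /* {B(k,j), B(k,j+1)} replicated into every 128-bit segment. */
  svfloat64_t b = svld1rq_f64(svptrue_b64(), b_row);
  *acc0 = svmla_lane_f64(*acc0, a, b, 0);
  *acc1 = svmla_lane_f64(*acc1, a, b, 1);
}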
#define LIKELY(x) (x) #endif #endif -#ifndef UNLIKELY -#ifdef __GNUC__ -#define UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define UNLIKELY(x) (x) -#endif -#endif #ifdef B0 int @@ -176,8 +179,7 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntd(); - const uint64_t v_size32 = v_size * 32; - const uint64_t v_size3 = v_size * 3; + const uint64_t v_size2 = v_size * 2; const svbool_t pg_true = svptrue_b64(); const svbool_t pg_quad = svwhilelt_b64(0, 2); const svfloat64_t alpha_vec = svdup_f64(alpha); @@ -186,14 +188,31 @@ CNAME(BLASLONG M, #endif const BLASLONG n4 = N & -4; const BLASLONG n2 = N & -2; - const BLASLONG v_m3 = M - (M % v_size3); + const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; + FLOAT* b_offset = B; + FLOAT* a_offset = A; + FLOAT* c_offset = C; + BLASLONG j = 0; for (; j < n4; j += 4) { + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, 1); + CREATE_C_POINTER(2, 2); + CREATE_C_POINTER(3, 3); + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + BLASLONG i = 0; - for (; i < v_m3; i += v_size3) { + for (; i < v_m2; i += v_size2) { + + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -204,10 +223,6 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(1, 1); DECLARE_RESULT_VECTOR(1, 2); DECLARE_RESULT_VECTOR(1, 3); - DECLARE_RESULT_VECTOR(2, 0); - DECLARE_RESULT_VECTOR(2, 1); - DECLARE_RESULT_VECTOR(2, 2); - DECLARE_RESULT_VECTOR(2, 3); for (; k < K; k++) { @@ -223,11 +238,6 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); - VECTOR_LOAD_A(pg_true, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 2, 1, 0); } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); @@ -237,13 +247,16 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 1, 1); VECTOR_STORE(pg_true, 1, 2); VECTOR_STORE(pg_true, 1, 3); - VECTOR_STORE(pg_true, 2, 0); - VECTOR_STORE(pg_true, 2, 1); - VECTOR_STORE(pg_true, 2, 2); - VECTOR_STORE(pg_true, 2, 3); + INCR_C_POINTER(0, v_size2); + INCR_C_POINTER(1, v_size2); + INCR_C_POINTER(2, v_size2); + INCR_C_POINTER(3, v_size2); } for (; i < v_m1; i += v_size) { + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -264,9 +277,15 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 0, 2); VECTOR_STORE(pg_true, 0, 3); + INCR_C_POINTER(0, v_size); + INCR_C_POINTER(1, v_size); + INCR_C_POINTER(2, v_size); + INCR_C_POINTER(3, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -288,20 +307,35 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_tail, 0, 1); VECTOR_STORE(pg_tail, 0, 2); VECTOR_STORE(pg_tail, 0, 3); + INCR_C_POINTER(0, 0); + INCR_C_POINTER(1, 0); + INCR_C_POINTER(2, 0); + INCR_C_POINTER(3, 0); } + + UPDATE_B_POINTER(4); + RESET_A_POINTER(); + UPDATE_C_POINTER(4); } for (; j < n2; j += 2) { + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, 1); + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + BLASLONG i = 0; - for (; i < v_m3; i += v_size3) { + for (; i < v_m2; i += v_size2) { + + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + 
UPDATE_A_POINTER(v_size2); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(1, 0); DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(2, 0); - DECLARE_RESULT_VECTOR(2, 1); for (; k < K; k++) { @@ -312,19 +346,19 @@ CNAME(BLASLONG M, VECTOR_LOAD_A(pg_true, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - VECTOR_LOAD_A(pg_true, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 1, 0); VECTOR_STORE(pg_true, 1, 1); - VECTOR_STORE(pg_true, 2, 0); - VECTOR_STORE(pg_true, 2, 1); + INCR_C_POINTER(0, v_size2); + INCR_C_POINTER(1, v_size2); } for (; i < v_m1; i += v_size) { + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -338,9 +372,13 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); + INCR_C_POINTER(0, v_size); + INCR_C_POINTER(1, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -355,17 +393,29 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_tail, 0, 0); VECTOR_STORE(pg_tail, 0, 1); + INCR_C_POINTER(0, 0); + INCR_C_POINTER(1, 0); } + + UPDATE_B_POINTER(2); + RESET_A_POINTER(); + UPDATE_C_POINTER(2); } for (; j < N; j++) { + CREATE_C_POINTER(0, 0); + CREATE_B_POINTER(0, 0); + BLASLONG i = 0; - for (; i < v_m3; i += v_size3) { + for (; i < v_m2; i += v_size2) { + + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(2, 0); for (; k < K; k++) { @@ -374,15 +424,16 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); VECTOR_LOAD_A(pg_true, 1, 0); UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - VECTOR_LOAD_A(pg_true, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 2, 0, 0); } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 1, 0); - VECTOR_STORE(pg_true, 2, 0); + INCR_C_POINTER(0, v_size2); } for (; i < v_m1; i += v_size) { + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -393,9 +444,12 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); } VECTOR_STORE(pg_true, 0, 0); + INCR_C_POINTER(0, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -407,8 +461,13 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); } VECTOR_STORE(pg_tail, 0, 0); + INCR_C_POINTER(0, 0); } + + UPDATE_B_POINTER(1); + RESET_A_POINTER(); + UPDATE_C_POINTER(1); } return 0; -} +} \ No newline at end of file diff --git a/kernel/arm64/dgemm_small_kernel_tn_sve.c b/kernel/arm64/dgemm_small_kernel_tn_sve.c index 7158851da..b8783c1d5 100644 --- a/kernel/arm64/dgemm_small_kernel_tn_sve.c +++ b/kernel/arm64/dgemm_small_kernel_tn_sve.c @@ -46,15 +46,30 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
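[Editor's note] These hunks drop the three-vector M blocking (v_size3) in favor of two vectors, trading a little unroll depth for lower register pressure in the 4-column micro-kernel. The bound arithmetic the new code uses, restated as a standalone sketch:

#include <arm_sve.h>

void m_blocking(long M)
{
  const long v_size  = (long)svcntd();  /* doubles per SVE vector */
  const long v_size2 = v_size * 2;
  const long v_m2 = M & -v_size2;       /* prefix divisible by 2 vectors */
  const long v_m1 = M & -v_size;        /* prefix divisible by 1 vector  */
  long i = 0;
  for (; i < v_m2; i += v_size2) { /* 2-vector micro-kernel */ }
  for (; i < v_m1; i += v_size)  { /* 1-vector micro-kernel */ }
  for (; i < M;    i += v_size)  { /* predicated tail via svwhilelt */ }
}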
}) #endif -#define A_ELEMENT_K(m, offset_k) A[(i + (m)) * lda + (k + offset_k)] +#define RESET_A_POINTER() a_offset = A; + +#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale * lda; +#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale * lda; +#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k)) #define A_ELEMENT(m) A_ELEMENT_K(m, 0) -#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) + (j + (n)) * ldb] +#define RESET_B_POINTER() b_offset = B; + +#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale * ldb; +#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale * ldb; +#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k)) #define B_ELEMENT(n) B_ELEMENT_K(n, 0) -#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] +#define CREATE_C_POINTER(m, scale) FLOAT* c_offset##m = c_offset + scale; +#define INCR_C_POINTER(m, incr) // c_offset ## m += incr * ldc; +#define UPDATE_C_POINTER(scale) c_offset += scale; +#define C_ELEMENT(m, n) \ + *(c_offset##m + ((j + n) * ldc)) // C[(i+(m))+(j+(n))*ldc] -#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * 2 * v_size + m] +// #undef C_ELEMENT +// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] + +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size2 + m] #define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) // ASIMD @@ -112,14 +127,13 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define BROADCAST_LOAD_B(n, offset_k) \ svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k)); #define VECTOR_LOAD_A(pg, m, offset_k) \ - svfloat64_t a##s##m##_k##offset_k = \ - svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); + svfloat64_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k)); #define QUADWORD_LOAD_B(n, offset_k) \ svfloat64_t b##s##n##_k##offset_k = \ svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); #define GATHER_LOAD_A(pg, m, offset_k) \ svfloat64_t a##s##m##_k##offset_k = \ - svld1_gather_index(pg, &A_ELEMENT_K(v_size * m, offset_k), lda_vec); + svld1_gather_index(pg, &A_ELEMENT_K(m, offset_k), lda_vec); #define PACK_A(m, offset_k) \ svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); #define VECTOR_PACK_A(m, offset_k) \ @@ -143,26 +157,23 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
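[Editor's note] PACK_ELEMENT_K now lays packed A out as K consecutive blocks of v_size2 elements, so UNPACK_VECTOR_A becomes a plain contiguous load. A hedged sketch of the layout and the unpack step (unpack_a is illustrative):

#include <arm_sve.h>

/* packed_a holds K blocks of v_size2 doubles:
 *   packed_a[k*v_size2 + 0      .. v_size-1 ]  -> rows i        .. i+v_size-1
 *   packed_a[k*v_size2 + v_size .. v_size2-1]  -> rows i+v_size .. i+v_size2-1 */
svfloat64_t unpack_a(const double* packed_a, uint64_t v_size2,
                     uint64_t k, uint64_t offset_m)
{
  return svld1_f64(svptrue_b64(), packed_a + k * v_size2 + offset_m);
}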
#ifdef B0 #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #else #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ result##m##n = \ - svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - result##m##n = svmla_m( \ - pg, \ - result##m##n, \ - svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc)), \ - beta_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); + result##m##n = svmla_m(pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \ + beta_vec); \ + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #endif #ifndef LIKELY @@ -172,13 +183,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define LIKELY(x) (x) #endif #endif -#ifndef UNLIKELY -#ifdef __GNUC__ -#define UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define UNLIKELY(x) (x) -#endif -#endif #ifdef B0 int @@ -226,14 +230,29 @@ CNAME(BLASLONG M, const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_a = - (pack_a) ? packed_a = (FLOAT*)malloc(K * 2 * v_size * sizeof(FLOAT)) : NULL; + (pack_a) ? 
packed_a = (FLOAT*)malloc(K * v_size2 * sizeof(FLOAT)) : NULL; + + FLOAT* a_offset = A; + FLOAT* b_offset = B; + FLOAT* c_offset = C; BLASLONG i = 0; for (; i < v_m2; i += v_size2) { + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, v_size); + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + BLASLONG j = 0; for (; j < n4; j += 4) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -408,9 +427,15 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 1, 1); VECTOR_STORE(pg_true, 1, 2); VECTOR_STORE(pg_true, 1, 3); + INCR_C_POINTER(0, 4); + INCR_C_POINTER(1, 4); } for (; j < n2; j += 2) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + UPDATE_B_POINTER(2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -484,9 +509,14 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 1, 0); VECTOR_STORE(pg_true, 1, 1); + INCR_C_POINTER(0, 2); + INCR_C_POINTER(1, 2); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(1, 0); @@ -512,13 +542,28 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 1, 0); + INCR_C_POINTER(0, 1); + INCR_C_POINTER(1, 1); } + + UPDATE_A_POINTER(v_size2); + RESET_B_POINTER(); + UPDATE_C_POINTER(v_size2); } for (; i < v_m1; i += v_size) { + CREATE_C_POINTER(0, 0); + CREATE_A_POINTER(0, 0); + BLASLONG j = 0; for (; j < n4; j += 4) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -562,9 +607,14 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 0, 2); VECTOR_STORE(pg_true, 0, 3); + INCR_C_POINTER(0, 4); } for (; j < n2; j += 2) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + UPDATE_B_POINTER(2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -592,9 +642,13 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); + INCR_C_POINTER(0, 2); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -605,14 +659,27 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); } VECTOR_STORE(pg_true, 0, 0); + INCR_C_POINTER(0, 1); } + + UPDATE_A_POINTER(v_size); + RESET_B_POINTER(); + UPDATE_C_POINTER(v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + CREATE_C_POINTER(0, 0); + CREATE_A_POINTER(0, 0); BLASLONG j = 0; for (; j < n4; j += 4) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -656,9 +723,14 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_tail, 0, 1); VECTOR_STORE(pg_tail, 0, 2); VECTOR_STORE(pg_tail, 0, 3); + INCR_C_POINTER(0, 4); } for (; j < n2; j += 2) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + UPDATE_B_POINTER(2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -686,9 +758,13 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_tail, 0, 0); VECTOR_STORE(pg_tail, 0, 1); + INCR_C_POINTER(0, 2); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -699,11 +775,16 @@ 
CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); } VECTOR_STORE(pg_tail, 0, 0); + INCR_C_POINTER(0, 1); } + + UPDATE_A_POINTER(0); + RESET_B_POINTER(); + UPDATE_C_POINTER(0); } if (pack_a) free(packed_a); return 0; -} +} \ No newline at end of file diff --git a/kernel/arm64/dgemm_small_kernel_tt_sve.c b/kernel/arm64/dgemm_small_kernel_tt_sve.c index 12fc0b59e..aa5bf2751 100644 --- a/kernel/arm64/dgemm_small_kernel_tt_sve.c +++ b/kernel/arm64/dgemm_small_kernel_tt_sve.c @@ -46,15 +46,30 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. }) #endif -#define A_ELEMENT_K(m, offset_k) A[(i + (m)) * lda + (k + offset_k)] +#define RESET_A_POINTER() a_offset = A; + +#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale * lda; +#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale * lda; +#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k)) #define A_ELEMENT(m) A_ELEMENT_K(m, 0) -#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) * ldb + (j + (n))] +#define RESET_B_POINTER() b_offset = B; + +#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale; +#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale; +#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k) * ldb) #define B_ELEMENT(n) B_ELEMENT_K(n, 0) -#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] +#define CREATE_C_POINTER(m, scale) FLOAT* c_offset##m = c_offset + scale; +#define INCR_C_POINTER(m, incr) // c_offset ## m += incr * ldc; +#define UPDATE_C_POINTER(scale) c_offset += scale; +#define C_ELEMENT(m, n) \ + *(c_offset##m + ((j + n) * ldc)) // C[(i+(m))+(j+(n))*ldc] -#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * 2 * v_size + m] +// #undef C_ELEMENT +// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] + +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size2 + m] #define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) // ASIMD @@ -103,14 +118,13 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define BROADCAST_LOAD_B(n, offset_k) \ svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k)); #define VECTOR_LOAD_A(pg, m, offset_k) \ - svfloat64_t a##s##m##_k##offset_k = \ - svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); + svfloat64_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k)); #define QUADWORD_LOAD_B(n, offset_k) \ svfloat64_t b##s##n##_k##offset_k = \ svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); #define GATHER_LOAD_A(pg, m, offset_k) \ svfloat64_t a##s##m##_k##offset_k = \ - svld1_gather_index(pg, &A_ELEMENT_K(v_size * m, offset_k), lda_vec); + svld1_gather_index(pg, &A_ELEMENT_K(m, offset_k), lda_vec); #define PACK_A(m, offset_k) \ svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); #define VECTOR_PACK_A(m, offset_k) \ @@ -134,26 +148,23 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
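[Editor's note] The store macros in these TN/TT hunks index C with a precomputed ldc_vec instead of rebuilding svindex_u64(0, ldc) at every use; the scatter form writes one row of C whose elements sit ldc apart in memory. A standalone sketch of that scatter (scatter_row is illustrative):

#include <arm_sve.h>

void scatter_row(svbool_t pg, double* c, uint64_t ldc, svfloat64_t r)
{
  svuint64_t ldc_vec = svindex_u64(0, ldc);       /* 0, ldc, 2*ldc, ... */
  svst1_scatter_u64index_f64(pg, c, ldc_vec, r);  /* c[n*ldc] = r[n]    */
}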
#ifdef B0 #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #else #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ result##m##n = \ - svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - result##m##n = svmla_m( \ - pg, \ - result##m##n, \ - svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc)), \ - beta_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); + result##m##n = svmla_m(pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \ + beta_vec); \ + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #endif #ifndef LIKELY @@ -163,13 +174,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define LIKELY(x) (x) #endif #endif -#ifndef UNLIKELY -#ifdef __GNUC__ -#define UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define UNLIKELY(x) (x) -#endif -#endif #ifdef B0 int @@ -216,14 +220,29 @@ CNAME(BLASLONG M, const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_a = - (pack_a) ? packed_a = (FLOAT*)malloc(K * 2 * v_size * sizeof(FLOAT)) : NULL; + (pack_a) ? 
packed_a = (FLOAT*)malloc(K * v_size2 * sizeof(FLOAT)) : NULL; + + FLOAT* a_offset = A; + FLOAT* b_offset = B; + FLOAT* c_offset = C; BLASLONG i = 0; for (; i < v_m2; i += v_size2) { + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, v_size); + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + BLASLONG j = 0; for (; j < n4; j += 4) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -295,9 +314,15 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 1, 1); VECTOR_STORE(pg_true, 1, 2); VECTOR_STORE(pg_true, 1, 3); + INCR_C_POINTER(0, 4); + INCR_C_POINTER(1, 4); } for (; j < n2; j += 2) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + UPDATE_B_POINTER(2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -331,9 +356,14 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 1, 0); VECTOR_STORE(pg_true, 1, 1); + INCR_C_POINTER(0, 2); + INCR_C_POINTER(1, 2); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(1, 0); @@ -359,13 +389,28 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 1, 0); + INCR_C_POINTER(0, 1); + INCR_C_POINTER(1, 1); } + + UPDATE_A_POINTER(v_size2); + RESET_B_POINTER(); + UPDATE_C_POINTER(v_size2); } for (; i < v_m1; i += v_size) { + CREATE_C_POINTER(0, 0); + CREATE_A_POINTER(0, 0); + BLASLONG j = 0; for (; j < n4; j += 4) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -386,9 +431,14 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 0, 2); VECTOR_STORE(pg_true, 0, 3); + INCR_C_POINTER(0, 4); } for (; j < n2; j += 2) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + UPDATE_B_POINTER(2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -402,9 +452,13 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); + INCR_C_POINTER(0, 2); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -415,14 +469,27 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); } VECTOR_STORE(pg_true, 0, 0); + INCR_C_POINTER(0, 1); } + + UPDATE_A_POINTER(v_size); + RESET_B_POINTER(); + UPDATE_C_POINTER(v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + CREATE_C_POINTER(0, 0); + CREATE_A_POINTER(0, 0); BLASLONG j = 0; for (; j < n4; j += 4) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -443,9 +510,14 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_tail, 0, 1); VECTOR_STORE(pg_tail, 0, 2); VECTOR_STORE(pg_tail, 0, 3); + INCR_C_POINTER(0, 4); } for (; j < n2; j += 2) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + UPDATE_B_POINTER(2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -459,9 +531,13 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_tail, 0, 0); VECTOR_STORE(pg_tail, 0, 1); + INCR_C_POINTER(0, 2); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -472,11 +548,16 @@ 
CNAME(BLASLONG M,
         UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0);
       }
       VECTOR_STORE(pg_tail, 0, 0);
+      INCR_C_POINTER(0, 1);
     }
+
+    UPDATE_A_POINTER(0);
+    RESET_B_POINTER();
+    UPDATE_C_POINTER(0);
   }
 
   if (pack_a)
     free(packed_a);
   return 0;
-}
+}
\ No newline at end of file
diff --git a/kernel/arm64/gemm_small_kernel_permit_sve.c b/kernel/arm64/gemm_small_kernel_permit_sve.c
index 9526dbbe2..c1275129d 100644
--- a/kernel/arm64/gemm_small_kernel_permit_sve.c
+++ b/kernel/arm64/gemm_small_kernel_permit_sve.c
@@ -32,16 +32,14 @@ int CNAME(int transa, int transb, BLASLONG M, BLASLONG N, BLASLONG K, FLOAT alph
   BLASLONG MNK = M * N * K;
 
 #if defined(DOUBLE) // dgemm
-  // TN prefers full copies much earlier
-  if (transa && !transb && MNK > 16*16*16) {
-    return 0;
-  }
-#else // sgemm
-  // TODO!
-#endif
-
   if (MNK <= 64*64*64)
     return 1;
+#else // sgemm
+  if (MNK <= 256*256*256)
+    return 1;
+#endif
+
+  return 0;
 }
diff --git a/kernel/arm64/sgemm_small_kernel_nn_sve.c b/kernel/arm64/sgemm_small_kernel_nn_sve.c
index 85c7cfa86..2e65e61ff 100644
--- a/kernel/arm64/sgemm_small_kernel_nn_sve.c
+++ b/kernel/arm64/sgemm_small_kernel_nn_sve.c
@@ -46,15 +46,29 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 })
 #endif
 
-#define A_ELEMENT_K(m, offset_k) A[(i + (m)) + (k + offset_k) * lda]
+#define RESET_A_POINTER() a_offset = A;
+
+#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale;
+#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale;
+#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k) * lda)
 #define A_ELEMENT(m) A_ELEMENT_K(m, 0)
 
-#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) + (j + (n)) * ldb]
+#define RESET_B_POINTER() b_offset = B;
+
+#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale * ldb;
+#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale * ldb;
+#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k))
 #define B_ELEMENT(n) B_ELEMENT_K(n, 0)
 
-#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc]
+#define CREATE_C_POINTER(n, scale) FLOAT* c_offset##n = c_offset + scale * ldc;
+#define INCR_C_POINTER(m, incr) // c_offset ## m += incr;
+#define UPDATE_C_POINTER(scale) c_offset = c_offset + scale * ldc;
+#define C_ELEMENT(m, n) *(c_offset##n + ((m * v_size) + i))
-#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 8 + n]
+// #undef C_ELEMENT
+// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc]
+
+#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 4 + n]
 #define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0)
 
 // ASIMD
@@ -141,8 +155,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define BROADCAST_LOAD_B(n, offset_k)                                         \
   svfloat32_t b##s##n##_k##offset_k = svdup_f32(B_ELEMENT_K(n, offset_k));
 #define VECTOR_LOAD_A(pg, m, offset_k)                                        \
-  svfloat32_t a##s##m##_k##offset_k =                                         \
-    svld1(pg, &A_ELEMENT_K(v_size * m, offset_k));
+  svfloat32_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k));
 #define QUADWORD_LOAD_B(n, offset_k)                                          \
   svfloat32_t b##s##n##_k##offset_k =                                         \
     svld1rq(pg_true, &B_ELEMENT_K(n, offset_k));
@@ -169,26 +182,23 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
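/*
 * Aside (not part of the patch): the permit kernel changed above is the only
 * gate deciding whether these SVE small kernels run at all. A minimal
 * standalone sketch of the same size check; the function and parameter names
 * are assumptions for illustration, not OpenBLAS identifiers.
 */
#include <stdint.h>

static int small_gemm_permit(int is_double, int64_t M, int64_t N, int64_t K)
{
  const int64_t MNK = M * N * K;
  /* dgemm takes the small path up to 64^3 work; sgemm up to 256^3. */
  const int64_t limit = is_double ? 64LL * 64 * 64 : 256LL * 256 * 256;
  return MNK <= limit ? 1 : 0;
}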
#ifdef B0 #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #else #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ result##m##n = \ - svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - result##m##n = svmla_m( \ - pg, \ - result##m##n, \ - svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u32(0LL, ldc)), \ - beta_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); + result##m##n = svmla_m(pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \ + beta_vec); \ + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #endif #ifndef LIKELY @@ -198,13 +208,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define LIKELY(x) (x) #endif #endif -#ifndef UNLIKELY -#ifdef __GNUC__ -#define UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define UNLIKELY(x) (x) -#endif -#endif #ifdef B0 int @@ -234,7 +237,6 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntw(); - const uint64_t v_size2 = v_size * 2; const svbool_t pg_true = svptrue_b32(); const svbool_t pg_quad = svwhilelt_b32(0, 4); const svbool_t pg_first = svwhilelt_b32(0, 1); @@ -242,39 +244,41 @@ CNAME(BLASLONG M, #ifndef B0 const svfloat32_t beta_vec = svdup_f32(beta); #endif - const BLASLONG n8 = N & -8; const BLASLONG n4 = N & -4; - const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; const BLASLONG k4 = K & -4; - const int pack_b = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + const int pack_b = M >= v_size && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_b = - (pack_b) ? packed_b = (FLOAT*)malloc(K * 8 * sizeof(FLOAT)) : NULL; + (pack_b) ? 
packed_b = (FLOAT*)malloc(K * 4 * sizeof(FLOAT)) : NULL; + + FLOAT* b_offset = B; + FLOAT* a_offset = A; + FLOAT* c_offset = C; BLASLONG j = 0; - for (; j < n8; j += 8) { + for (; j < n4; j += 4) { + + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, 1); + CREATE_C_POINTER(2, 2); + CREATE_C_POINTER(3, 3); + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); BLASLONG i = 0; - for (; i < v_m2; i += v_size2) { + for (; i < v_m1; i += v_size) { + + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(1, 2); - DECLARE_RESULT_VECTOR(1, 3); - DECLARE_RESULT_VECTOR(1, 4); - DECLARE_RESULT_VECTOR(1, 5); - DECLARE_RESULT_VECTOR(1, 6); - DECLARE_RESULT_VECTOR(1, 7); if (LIKELY(packed_b != NULL)) { if (i == 0) { @@ -310,68 +314,6 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - VECTOR_PACK_B4(4, 0); - VECTOR_PACK_B4(4, 1); - VECTOR_PACK_B4(4, 2); - VECTOR_PACK_B4(4, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); - VECTOR_LOAD_A(pg_true, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 
4, 3, 2); - VECTOR_LOAD_A(pg_true, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); } for (; k < K; k++) { @@ -382,33 +324,12 @@ CNAME(BLASLONG M, BROADCAST_LOAD_B(1, 0); PACK_B(1, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); BROADCAST_LOAD_B(2, 0); PACK_B(2, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); BROADCAST_LOAD_B(3, 0); PACK_B(3, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); - BROADCAST_LOAD_B(4, 0); - PACK_B(4, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); - BROADCAST_LOAD_B(5, 0); - PACK_B(5, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); - BROADCAST_LOAD_B(6, 0); - PACK_B(6, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); - BROADCAST_LOAD_B(7, 0); - PACK_B(7, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); } } else { for (; k < K; k++) { @@ -419,20 +340,6 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_QUADWORD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); } } } else { @@ -464,190 +371,6 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - 
UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); - VECTOR_LOAD_A(pg_true, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); - VECTOR_LOAD_A(pg_true, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); - BROADCAST_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); - BROADCAST_LOAD_B(5, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); - BROADCAST_LOAD_B(6, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); - BROADCAST_LOAD_B(7, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); - } - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 0, 4); - VECTOR_STORE(pg_true, 0, 5); - VECTOR_STORE(pg_true, 0, 6); - VECTOR_STORE(pg_true, 0, 7); - VECTOR_STORE(pg_true, 1, 0); - VECTOR_STORE(pg_true, 1, 1); - VECTOR_STORE(pg_true, 1, 2); - VECTOR_STORE(pg_true, 1, 3); - VECTOR_STORE(pg_true, 1, 4); - VECTOR_STORE(pg_true, 1, 5); - VECTOR_STORE(pg_true, 1, 6); - VECTOR_STORE(pg_true, 1, 7); - } - for (; i < v_m1; i += v_size) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - - if (LIKELY(packed_b != NULL)) { - for (; k < K; k++) { - - UNPACK_QUADWORD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - 
UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_QUADWORD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - } - } else { - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - VECTOR_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - VECTOR_LOAD_A(pg_true, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - VECTOR_LOAD_A(pg_true, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); } for (; k < K; k++) { @@ -660,37 +383,27 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); BROADCAST_LOAD_B(3, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - BROADCAST_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); - BROADCAST_LOAD_B(5, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); - BROADCAST_LOAD_B(6, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); - BROADCAST_LOAD_B(7, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); } } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 0, 2); VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 0, 4); - VECTOR_STORE(pg_true, 0, 5); - VECTOR_STORE(pg_true, 0, 6); - VECTOR_STORE(pg_true, 0, 7); + INCR_C_POINTER(0, v_size); + INCR_C_POINTER(1, v_size); + INCR_C_POINTER(2, v_size); + INCR_C_POINTER(3, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - 
DECLARE_RESULT_VECTOR(0, 7); if (LIKELY(packed_b != NULL)) { for (; k < K; k++) { @@ -701,11 +414,6 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_QUADWORD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); } } else { for (; k < k4; k += 4) { @@ -736,28 +444,6 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); } for (; k < K; k++) { @@ -770,248 +456,33 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); BROADCAST_LOAD_B(3, 0); UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); - BROADCAST_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 4, 0); - BROADCAST_LOAD_B(5, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 5, 0); - BROADCAST_LOAD_B(6, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 6, 0); - BROADCAST_LOAD_B(7, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 7, 0); } } VECTOR_STORE(pg_tail, 0, 0); VECTOR_STORE(pg_tail, 0, 1); VECTOR_STORE(pg_tail, 0, 2); VECTOR_STORE(pg_tail, 0, 3); - VECTOR_STORE(pg_tail, 0, 4); - VECTOR_STORE(pg_tail, 0, 5); - VECTOR_STORE(pg_tail, 0, 6); - VECTOR_STORE(pg_tail, 0, 7); + INCR_C_POINTER(0, 0); + INCR_C_POINTER(1, 0); + INCR_C_POINTER(2, 0); + INCR_C_POINTER(3, 0); } - } - for (; j < n4; j += 4) { - BLASLONG i = 0; - for (; i < v_m2; i += v_size2) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(1, 2); - DECLARE_RESULT_VECTOR(1, 3); - - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - VECTOR_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - VECTOR_LOAD_A(pg_true, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); 
- UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - VECTOR_LOAD_A(pg_true, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - VECTOR_LOAD_A(pg_true, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); - VECTOR_LOAD_A(pg_true, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); - VECTOR_LOAD_A(pg_true, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 1, 0); - VECTOR_STORE(pg_true, 1, 1); - VECTOR_STORE(pg_true, 1, 2); - VECTOR_STORE(pg_true, 1, 3); - } - for (; i < v_m1; i += v_size) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - VECTOR_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - VECTOR_LOAD_A(pg_true, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - VECTOR_LOAD_A(pg_true, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - 
BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - } - for (; i < M; i += v_size) { - const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - VECTOR_LOAD_A(pg_tail, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - VECTOR_LOAD_A(pg_tail, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - VECTOR_LOAD_A(pg_tail, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - VECTOR_LOAD_A(pg_tail, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_tail, 0, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); - } - VECTOR_STORE(pg_tail, 0, 0); - VECTOR_STORE(pg_tail, 0, 1); - VECTOR_STORE(pg_tail, 0, 2); - VECTOR_STORE(pg_tail, 0, 3); - } + UPDATE_B_POINTER(4); + RESET_A_POINTER(); + UPDATE_C_POINTER(4); } for (; j < N; j++) { + CREATE_C_POINTER(0, 0); + CREATE_B_POINTER(0, 0); + BLASLONG i = 0; - for (; i < v_m2; i += v_size2) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(1, 0); - - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 1, 0); - } for (; i < v_m1; i += v_size) { + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -1022,9 +493,12 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); } VECTOR_STORE(pg_true, 0, 0); + INCR_C_POINTER(0, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -1036,11 +510,16 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); } VECTOR_STORE(pg_tail, 0, 0); + INCR_C_POINTER(0, 0); } + + UPDATE_B_POINTER(1); + RESET_A_POINTER(); + UPDATE_C_POINTER(1); } if (pack_b) free(packed_b); return 0; -} +} \ No newline at end of file diff --git a/kernel/arm64/sgemm_small_kernel_nt_sve.c b/kernel/arm64/sgemm_small_kernel_nt_sve.c index 1c3d324d0..9f99c2422 100644 --- a/kernel/arm64/sgemm_small_kernel_nt_sve.c +++ b/kernel/arm64/sgemm_small_kernel_nt_sve.c @@ -46,15 +46,29 @@ 
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. }) #endif -#define A_ELEMENT_K(m, offset_k) A[(i + (m)) + (k + offset_k) * lda] +#define RESET_A_POINTER() a_offset = A; + +#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale; +#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale; +#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k) * lda) #define A_ELEMENT(m) A_ELEMENT_K(m, 0) -#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) * ldb + (j + (n))] +#define RESET_B_POINTER() b_offset = B; + +#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale; +#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale; +#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k) * ldb) #define B_ELEMENT(n) B_ELEMENT_K(n, 0) -#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] +#define CREATE_C_POINTER(n, scale) FLOAT* c_offset##n = c_offset + scale * ldc; +#define INCR_C_POINTER(m, incr) // c_offset ## m += incr; +#define UPDATE_C_POINTER(scale) c_offset = c_offset + scale * ldc; +#define C_ELEMENT(m, n) *(c_offset##n + ((m * v_size) + i)) -#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 8 + n] +// #undef C_ELEMENT +// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] + +#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 4 + n] #define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0) // ASIMD @@ -113,8 +127,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define BROADCAST_LOAD_B(n, offset_k) \ svfloat32_t b##s##n##_k##offset_k = svdup_f32(B_ELEMENT_K(n, offset_k)); #define VECTOR_LOAD_A(pg, m, offset_k) \ - svfloat32_t a##s##m##_k##offset_k = \ - svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); + svfloat32_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k)); #define QUADWORD_LOAD_B(n, offset_k) \ svfloat32_t b##s##n##_k##offset_k = \ svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); @@ -141,26 +154,23 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifdef B0 #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #else #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ result##m##n = \ - svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - result##m##n = svmla_m( \ - pg, \ - result##m##n, \ - svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u32(0LL, ldc)), \ - beta_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); + result##m##n = svmla_m(pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \ + beta_vec); \ + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #endif #ifndef LIKELY @@ -170,13 +180,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
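/*
 * Aside (illustration only): roughly what the reworked VECTOR_STORE macros
 * amount to once C_ELEMENT stops folding in v_size * m and reads through the
 * per-column c_offset pointers. The tail panel reuses the same code with a
 * narrower predicate, so no scalar cleanup loop is needed. This helper is a
 * sketch under assumed names, not patch code.
 */
#include <stdint.h>
#include <arm_sve.h>

static inline void store_c_column_f32(float* c, svfloat32_t acc,
                                      float alpha, float beta,
                                      uint64_t row, uint64_t M)
{
  const svbool_t pg = svwhilelt_b32_u64(row, M); /* active rows of this panel */
  acc = svmul_n_f32_m(pg, acc, alpha);           /* acc *= alpha              */
  svfloat32_t cv = svld1_f32(pg, c);             /* masked load of C column   */
  acc = svmla_n_f32_m(pg, acc, cv, beta);        /* acc += beta * C           */
  svst1_f32(pg, c, acc);                         /* masked store back         */
}
/* The B0 kernels simply drop the svld1/svmla pair, since beta == 0 there. */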
#define LIKELY(x) (x) #endif #endif -#ifndef UNLIKELY -#ifdef __GNUC__ -#define UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define UNLIKELY(x) (x) -#endif -#endif #ifdef B0 int @@ -206,7 +209,6 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntw(); - const uint64_t v_size3 = v_size * 3; const svbool_t pg_true = svptrue_b32(); const svbool_t pg_quad = svwhilelt_b32(0, 4); const svbool_t pg_first = svwhilelt_b32(0, 1); @@ -214,46 +216,40 @@ CNAME(BLASLONG M, #ifndef B0 const svfloat32_t beta_vec = svdup_f32(beta); #endif - const BLASLONG n8 = N & -8; const BLASLONG n4 = N & -4; - const BLASLONG v_m3 = M - (M % v_size3); const BLASLONG v_m1 = M & -v_size; - const int pack_b = M >= v_size3 && N >= 8 && K >= 8 ? 1 : 0; + const int pack_b = M >= v_size && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_b = - (pack_b) ? packed_b = (FLOAT*)malloc(K * 8 * sizeof(FLOAT)) : NULL; + (pack_b) ? packed_b = (FLOAT*)malloc(K * 4 * sizeof(FLOAT)) : NULL; + + FLOAT* b_offset = B; + FLOAT* a_offset = A; + FLOAT* c_offset = C; BLASLONG j = 0; - for (; j < n8; j += 8) { + for (; j < n4; j += 4) { + + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, 1); + CREATE_C_POINTER(2, 2); + CREATE_C_POINTER(3, 3); + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); BLASLONG i = 0; - for (; i < v_m3; i += v_size3) { + for (; i < v_m1; i += v_size) { + + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(1, 2); - DECLARE_RESULT_VECTOR(1, 3); - DECLARE_RESULT_VECTOR(1, 4); - DECLARE_RESULT_VECTOR(1, 5); - DECLARE_RESULT_VECTOR(1, 6); - DECLARE_RESULT_VECTOR(1, 7); - DECLARE_RESULT_VECTOR(2, 0); - DECLARE_RESULT_VECTOR(2, 1); - DECLARE_RESULT_VECTOR(2, 2); - DECLARE_RESULT_VECTOR(2, 3); - DECLARE_RESULT_VECTOR(2, 4); - DECLARE_RESULT_VECTOR(2, 5); - DECLARE_RESULT_VECTOR(2, 6); - DECLARE_RESULT_VECTOR(2, 7); if (LIKELY(packed_b != NULL)) { if (i == 0) { @@ -266,30 +262,6 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - QUADWORD_PACK_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 7, 4, 3, 0); } } else { for (; k < K; k++) { @@ -300,29 +272,6 @@ 
CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_QUADWORD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 7, 4, 3, 0); } } } else { @@ -334,120 +283,27 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 7, 4, 3, 0); } } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 0, 2); VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 0, 4); - VECTOR_STORE(pg_true, 0, 5); - VECTOR_STORE(pg_true, 0, 6); - VECTOR_STORE(pg_true, 0, 7); - VECTOR_STORE(pg_true, 1, 0); - VECTOR_STORE(pg_true, 1, 1); - VECTOR_STORE(pg_true, 1, 2); - VECTOR_STORE(pg_true, 1, 3); - VECTOR_STORE(pg_true, 1, 4); - VECTOR_STORE(pg_true, 1, 5); - VECTOR_STORE(pg_true, 1, 6); - VECTOR_STORE(pg_true, 1, 7); - VECTOR_STORE(pg_true, 2, 0); - VECTOR_STORE(pg_true, 2, 1); - VECTOR_STORE(pg_true, 2, 2); - VECTOR_STORE(pg_true, 2, 3); - VECTOR_STORE(pg_true, 2, 4); - VECTOR_STORE(pg_true, 2, 5); - VECTOR_STORE(pg_true, 2, 6); - VECTOR_STORE(pg_true, 2, 7); - } - for (; i < v_m1; i += v_size) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - - if (LIKELY(packed_b != NULL)) { - for (; k < K; k++) { - 
- UNPACK_QUADWORD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_QUADWORD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - } - } else { - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - } - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 0, 4); - VECTOR_STORE(pg_true, 0, 5); - VECTOR_STORE(pg_true, 0, 6); - VECTOR_STORE(pg_true, 0, 7); + INCR_C_POINTER(0, v_size); + INCR_C_POINTER(1, v_size); + INCR_C_POINTER(2, v_size); + INCR_C_POINTER(3, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); if (LIKELY(packed_b != NULL)) { for (; k < K; k++) { @@ -458,11 +314,6 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_QUADWORD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); } } else { for (; k < K; k++) { @@ -473,146 +324,33 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); } } VECTOR_STORE(pg_tail, 0, 0); VECTOR_STORE(pg_tail, 0, 1); VECTOR_STORE(pg_tail, 0, 2); VECTOR_STORE(pg_tail, 0, 3); - VECTOR_STORE(pg_tail, 0, 4); - VECTOR_STORE(pg_tail, 0, 5); - VECTOR_STORE(pg_tail, 0, 6); - VECTOR_STORE(pg_tail, 0, 7); + INCR_C_POINTER(0, 0); + INCR_C_POINTER(1, 0); + INCR_C_POINTER(2, 0); + INCR_C_POINTER(3, 0); } - } - for (; j < n4; j += 4) { - BLASLONG i = 0; - for (; i < v_m3; i += v_size3) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(1, 2); - DECLARE_RESULT_VECTOR(1, 3); - DECLARE_RESULT_VECTOR(2, 0); - DECLARE_RESULT_VECTOR(2, 1); - DECLARE_RESULT_VECTOR(2, 2); - DECLARE_RESULT_VECTOR(2, 3); - - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - 
UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - VECTOR_LOAD_A(pg_true, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 0, 3, 0); - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 1, 0); - VECTOR_STORE(pg_true, 1, 1); - VECTOR_STORE(pg_true, 1, 2); - VECTOR_STORE(pg_true, 1, 3); - VECTOR_STORE(pg_true, 2, 0); - VECTOR_STORE(pg_true, 2, 1); - VECTOR_STORE(pg_true, 2, 2); - VECTOR_STORE(pg_true, 2, 3); - } - for (; i < v_m1; i += v_size) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - } - for (; i < M; i += v_size) { - const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_tail, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - } - VECTOR_STORE(pg_tail, 0, 0); - VECTOR_STORE(pg_tail, 0, 1); - VECTOR_STORE(pg_tail, 0, 2); - VECTOR_STORE(pg_tail, 0, 3); - } + UPDATE_B_POINTER(4); + RESET_A_POINTER(); + UPDATE_C_POINTER(4); } for (; j < N; j++) { + CREATE_C_POINTER(0, 0); + CREATE_B_POINTER(0, 0); + BLASLONG i = 0; - for (; i < v_m3; i += v_size3) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(2, 0); - - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - VECTOR_LOAD_A(pg_true, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 2, 0, 0); - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 1, 0); - VECTOR_STORE(pg_true, 2, 0); - } for (; i < v_m1; i += v_size) { + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -623,9 +361,12 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); } VECTOR_STORE(pg_true, 0, 0); + INCR_C_POINTER(0, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -637,11 +378,16 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); } VECTOR_STORE(pg_tail, 0, 0); + INCR_C_POINTER(0, 0); } + + 
UPDATE_B_POINTER(1); + RESET_A_POINTER(); + UPDATE_C_POINTER(1); } if (pack_b) free(packed_b); return 0; -} +} \ No newline at end of file diff --git a/kernel/arm64/sgemm_small_kernel_tn_sve.c b/kernel/arm64/sgemm_small_kernel_tn_sve.c index 6fd3b12a6..9cbb60d40 100644 --- a/kernel/arm64/sgemm_small_kernel_tn_sve.c +++ b/kernel/arm64/sgemm_small_kernel_tn_sve.c @@ -46,15 +46,30 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. }) #endif -#define A_ELEMENT_K(m, offset_k) A[(i + (m)) * lda + (k + offset_k)] +#define RESET_A_POINTER() a_offset = A; + +#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale * lda; +#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale * lda; +#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k)) #define A_ELEMENT(m) A_ELEMENT_K(m, 0) -#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) + (j + (n)) * ldb] +#define RESET_B_POINTER() b_offset = B; + +#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale * ldb; +#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale * ldb; +#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k)) #define B_ELEMENT(n) B_ELEMENT_K(n, 0) -#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] +#define CREATE_C_POINTER(m, scale) FLOAT* c_offset##m = c_offset + scale; +#define INCR_C_POINTER(m, incr) // c_offset ## m += incr * ldc; +#define UPDATE_C_POINTER(scale) c_offset += scale; +#define C_ELEMENT(m, n) \ + *(c_offset##m + ((j + n) * ldc)) // C[(i+(m))+(j+(n))*ldc] -#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * 2 * v_size + m] +// #undef C_ELEMENT +// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] + +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size + m] #define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) // ASIMD @@ -65,36 +80,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
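/*
 * Aside (illustrative sketch): in the TN kernel A is traversed transposed,
 * so the v_size rows of one panel sit lda floats apart in memory and
 * GATHER_LOAD_A pulls them in with one gather per k step. The helper below
 * is an assumed-name standalone equivalent, not patch code.
 */
#include <stdint.h>
#include <arm_sve.h>

static inline svfloat32_t gather_a_panel_f32(const float* a, uint32_t lda,
                                             svbool_t pg)
{
  const svuint32_t lda_vec = svindex_u32(0, lda);   /* 0, lda, 2*lda, ...  */
  return svld1_gather_u32index_f32(pg, a, lda_vec); /* one float per row   */
}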
float32x4_t a##m##_k##offset_k = vld1q_dup_f32(&A_ELEMENT_K(m, offset_k)); #define LOAD_A1(m, offset_k) \ float32_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k); -#define VECTOR_LOAD_B_K4(n, offset_k) \ - float32x4_t b##k##n##_k##offset_k = vld1q_f32(&B_ELEMENT_K(n, offset_k)); -#define TRANSPOSE_B4_K4( \ - n0, n1, n2, n3, offset_k0, offset_k1, offset_k2, offset_k3) \ - float32x4_t b##t##n0##_k##offset_k0 = \ - vzip1q_f32(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \ - float32x4_t b##t##n0##_k##offset_k1 = \ - vzip2q_f32(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \ - float32x4_t b##t##n0##_k##offset_k2 = \ - vzip1q_f32(b##k##n2##_k##offset_k0, b##k##n3##_k##offset_k0); \ - float32x4_t b##t##n0##_k##offset_k3 = \ - vzip2q_f32(b##k##n2##_k##offset_k0, b##k##n3##_k##offset_k0); \ - float32x4_t b##n0##_k##offset_k0 = vreinterpretq_f32_f64( \ - vzip1q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k0), \ - vreinterpretq_f64_f32(b##t##n0##_k##offset_k2))); \ - float32x4_t b##n0##_k##offset_k1 = vreinterpretq_f32_f64( \ - vzip2q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k0), \ - vreinterpretq_f64_f32(b##t##n0##_k##offset_k2))); \ - float32x4_t b##n0##_k##offset_k2 = vreinterpretq_f32_f64( \ - vzip1q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k1), \ - vreinterpretq_f64_f32(b##t##n0##_k##offset_k3))); \ - float32x4_t b##n0##_k##offset_k3 = vreinterpretq_f32_f64( \ - vzip2q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k1), \ - vreinterpretq_f64_f32(b##t##n0##_k##offset_k3))); - -#define SCALE_B4_K4(n0, offset_k0, offset_k1, offset_k2, offset_k3) \ - svfloat32_t b##s##n0##_k##offset_k0 = svdup_neonq_f32(b##n0##_k##offset_k0); \ - svfloat32_t b##s##n0##_k##offset_k1 = svdup_neonq_f32(b##n0##_k##offset_k1); \ - svfloat32_t b##s##n0##_k##offset_k2 = svdup_neonq_f32(b##n0##_k##offset_k2); \ - svfloat32_t b##s##n0##_k##offset_k3 = svdup_neonq_f32(b##n0##_k##offset_k3); #define GATHER_LOAD_B4(n, offset_k) \ float32x4_t b##n##_k##offset_k = vdupq_n_f32(B_ELEMENT_K(n, offset_k)); \ b##n##_k##offset_k = \ @@ -105,8 +90,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vsetq_lane_f32(B_ELEMENT_K(n + 3, offset_k), b##n##_k##offset_k, 3); #define VECTOR_UNPACK_B4(n, offset_k) \ float32x4_t b##n##_k##offset_k = vld1q_f32(&PACK_ELEMENT_K(n, offset_k)); -#define VECTOR_PACK_B4(n, offset_k) \ - vst1q_f32(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k); #define PACK_B0(n, offset_k) \ PACK_ELEMENT_K(n, offset_k) = vget_lane_f32(b##n##_k##offset_k, 0); #define UPDATE_RESULT_VECTOR4(m, n, offset_k) \ @@ -141,14 +124,10 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
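/*
 * Aside (illustration only): the UPDATE_RESULT_VECTOR_QUADWORD idiom these
 * kernels keep after dropping the NEON transpose macros. Four B scalars are
 * replicated across every 128-bit quadword of an SVE register, then one
 * lane-indexed FMA per output column folds them into the accumulators.
 * Names below are assumptions for the sketch.
 */
#include <arm_sve.h>

static inline void fma_four_columns_f32(svfloat32_t a_vec,
                                        const float* b4, /* 4 consecutive B values */
                                        svfloat32_t acc[4])
{
  /* Replicate b4[0..3] into every quadword (matches QUADWORD_LOAD_B). */
  svfloat32_t b_quad = svld1rq_f32(svptrue_b32(), b4);
  acc[0] = svmla_lane_f32(acc[0], a_vec, b_quad, 0);
  acc[1] = svmla_lane_f32(acc[1], a_vec, b_quad, 1);
  acc[2] = svmla_lane_f32(acc[2], a_vec, b_quad, 2);
  acc[3] = svmla_lane_f32(acc[3], a_vec, b_quad, 3);
}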
#define BROADCAST_LOAD_B(n, offset_k) \ svfloat32_t b##s##n##_k##offset_k = svdup_f32(B_ELEMENT_K(n, offset_k)); #define VECTOR_LOAD_A(pg, m, offset_k) \ - svfloat32_t a##s##m##_k##offset_k = \ - svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); -#define QUADWORD_LOAD_B(n, offset_k) \ - svfloat32_t b##s##n##_k##offset_k = \ - svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); + svfloat32_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k)); #define GATHER_LOAD_A(pg, m, offset_k) \ svfloat32_t a##s##m##_k##offset_k = \ - svld1_gather_index(pg, &A_ELEMENT_K(v_size * m, offset_k), lda_vec); + svld1_gather_index(pg, &A_ELEMENT_K(m, offset_k), lda_vec); #define PACK_A(m, offset_k) \ svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); #define VECTOR_PACK_A(m, offset_k) \ @@ -172,26 +151,23 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifdef B0 #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #else #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ result##m##n = \ - svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - result##m##n = svmla_m( \ - pg, \ - result##m##n, \ - svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u32(0LL, ldc)), \ - beta_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); + result##m##n = svmla_m(pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \ + beta_vec); \ + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #endif #ifndef LIKELY @@ -201,18 +177,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define LIKELY(x) (x) #endif #endif -#ifndef UNLIKELY -#ifdef __GNUC__ -#define UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define UNLIKELY(x) (x) -#endif -#endif - - -#define GATHER_LOAD_A64(pg, m, offset_k) \ - svfloat64_t a##t##m##_k##offset_k = \ - svld1_gather_offset(pg, (double *)&A_ELEMENT_K(v64_size * m, offset_k), lda_vec64); #ifdef B0 int @@ -242,8 +206,6 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntw(); - const uint64_t v_size2 = v_size * 2; - const uint64_t v64_size = v_size / 2; const svbool_t pg_true = svptrue_b32(); const svbool_t pg_quad = svwhilelt_b32(0, 4); const svbool_t pg_first = svwhilelt_b32(0, 1); @@ -252,173 +214,41 @@ CNAME(BLASLONG M, const svfloat32_t beta_vec = svdup_f32(beta); #endif const svuint32_t lda_vec = svindex_u32(0LL, lda); - const svuint64_t lda_vec64 = svmul_m(pg_true, svindex_u64(0,sizeof(FLOAT)), lda); - const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; - const BLASLONG n8 = N & -8; const BLASLONG n4 = N & -4; - const BLASLONG k4 = K & -4; - const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + const int pack_a = M >= v_size && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_a = - (pack_a) ? 
packed_a = (FLOAT*)malloc(K * 2 * v_size * sizeof(FLOAT)) : NULL; + (pack_a) ? packed_a = (FLOAT*)malloc(K * v_size * sizeof(FLOAT)) : NULL; + + FLOAT* a_offset = A; + FLOAT* b_offset = B; + FLOAT* c_offset = C; BLASLONG i = 0; - for (; i < v_m2; i += v_size2) { + for (; i < v_m1; i += v_size) { + + CREATE_C_POINTER(0, 0); + CREATE_A_POINTER(0, 0); BLASLONG j = 0; - for (; j < n8; j += 8) { + for (; j < n4; j += 4) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(1, 2); - DECLARE_RESULT_VECTOR(1, 3); - DECLARE_RESULT_VECTOR(1, 4); - DECLARE_RESULT_VECTOR(1, 5); - DECLARE_RESULT_VECTOR(1, 6); - DECLARE_RESULT_VECTOR(1, 7); if (LIKELY(packed_a != NULL)) { if (j == 0) { - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - - GATHER_LOAD_A64(pg_true, 0, 0); - GATHER_LOAD_A64(pg_true, 1, 0); - svfloat32_t as0_k0 = svuzp1(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); - svfloat32_t as0_k1 = svuzp2(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); - VECTOR_PACK_A(0, 0); - VECTOR_PACK_A(0, 1); - - // GATHER_LOAD_A(pg_true, 0, 0); - // VECTOR_PACK_A(0, 0); - // GATHER_LOAD_A(pg_true, 0, 1); - // VECTOR_PACK_A(0, 1); - - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - - GATHER_LOAD_A64(pg_true, 0, 2); - GATHER_LOAD_A64(pg_true, 1, 2); - svfloat32_t as0_k2 = svuzp1(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); - svfloat32_t as0_k3 = svuzp2(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); - VECTOR_PACK_A(0, 2); - VECTOR_PACK_A(0, 3); - - // GATHER_LOAD_A(pg_true, 0, 2); - // VECTOR_PACK_A(0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - // GATHER_LOAD_A(pg_true, 0, 3); - // VECTOR_PACK_A(0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - 
UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); - - GATHER_LOAD_A64(pg_true, 2, 0); - GATHER_LOAD_A64(pg_true, 3, 0); - svfloat32_t as1_k0 = svuzp1(svreinterpret_f32(at2_k0), svreinterpret_f32(at3_k0)); - svfloat32_t as1_k1 = svuzp2(svreinterpret_f32(at2_k0), svreinterpret_f32(at3_k0)); - VECTOR_PACK_A(1, 0); - VECTOR_PACK_A(1, 1); - - // GATHER_LOAD_A(pg_true, 1, 0); - // VECTOR_PACK_A(1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); - // GATHER_LOAD_A(pg_true, 1, 1); - // VECTOR_PACK_A(1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); - - // 64-bit load x2 then unzip into 32-bit - GATHER_LOAD_A64(pg_true, 2, 2); - GATHER_LOAD_A64(pg_true, 3, 2); - svfloat32_t as1_k2 = svuzp1(svreinterpret_f32(at2_k2), svreinterpret_f32(at3_k2)); - svfloat32_t as1_k3 = svuzp2(svreinterpret_f32(at2_k2), svreinterpret_f32(at3_k2)); - VECTOR_PACK_A(1, 2); - VECTOR_PACK_A(1, 3); - - // GATHER_LOAD_A(pg_true, 1, 2); - // VECTOR_PACK_A(1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); - // GATHER_LOAD_A(pg_true, 1, 3); - // VECTOR_PACK_A(1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -427,117 +257,12 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); BROADCAST_LOAD_B(1, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - GATHER_LOAD_A(pg_true, 1, 0); - VECTOR_PACK_A(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); BROADCAST_LOAD_B(2, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); BROADCAST_LOAD_B(3, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); - BROADCAST_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); - BROADCAST_LOAD_B(5, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); - BROADCAST_LOAD_B(6, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 
6, 0); - BROADCAST_LOAD_B(7, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); } } else { - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_VECTOR_A(0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - UNPACK_VECTOR_A(0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - UNPACK_VECTOR_A(0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); - UNPACK_VECTOR_A(1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); - UNPACK_VECTOR_A(1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); - UNPACK_VECTOR_A(1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); - UNPACK_VECTOR_A(1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); - 
UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -545,117 +270,13 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); BROADCAST_LOAD_B(1, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - UNPACK_VECTOR_A(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); BROADCAST_LOAD_B(2, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); BROADCAST_LOAD_B(3, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); - BROADCAST_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); - BROADCAST_LOAD_B(5, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); - BROADCAST_LOAD_B(6, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); - BROADCAST_LOAD_B(7, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); } } } else { - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - GATHER_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - GATHER_LOAD_A(pg_true, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - GATHER_LOAD_A(pg_true, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); - GATHER_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - 
UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); - GATHER_LOAD_A(pg_true, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); - GATHER_LOAD_A(pg_true, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); - GATHER_LOAD_A(pg_true, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -663,207 +284,25 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); BROADCAST_LOAD_B(1, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - GATHER_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); BROADCAST_LOAD_B(2, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); BROADCAST_LOAD_B(3, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); - BROADCAST_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); - BROADCAST_LOAD_B(5, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); - BROADCAST_LOAD_B(6, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); - BROADCAST_LOAD_B(7, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); } } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 0, 2); VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 0, 4); - VECTOR_STORE(pg_true, 0, 5); - VECTOR_STORE(pg_true, 0, 6); - VECTOR_STORE(pg_true, 0, 7); - VECTOR_STORE(pg_true, 1, 0); - VECTOR_STORE(pg_true, 1, 1); - VECTOR_STORE(pg_true, 1, 2); - VECTOR_STORE(pg_true, 1, 3); - VECTOR_STORE(pg_true, 1, 4); - VECTOR_STORE(pg_true, 1, 5); - VECTOR_STORE(pg_true, 1, 6); - VECTOR_STORE(pg_true, 1, 7); - } - for (; j < n4; j += 4) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(1, 2); - DECLARE_RESULT_VECTOR(1, 3); - - if (LIKELY(packed_a != NULL)) { - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); 
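/*
 * NOTE on the GATHER_LOAD_A64 / svuzp1 / svuzp2 sequences above: two
 * 64-bit gathers fetch the adjacent float32 pair (k, k+1) for each row of
 * the A tile, and the unzips then split those pairs into one vector per
 * k column. Each 64-bit lane carries two float32 values, so this halves
 * the number of gathered elements versus issuing one 32-bit gather per k
 * (the "// 64-bit load x2 then unzip into 32-bit" comment above).
 * Illustrative sketch only; at0_k0/at1_k0 follow the names produced by
 * GATHER_LOAD_A64 and this block is not compiled:
 */
#if 0
svfloat32_t as0_k0 =
  svuzp1(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); /* column k   */
svfloat32_t as0_k1 =
  svuzp2(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); /* column k+1 */
#endif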
- UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_VECTOR_A(0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - UNPACK_VECTOR_A(0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - UNPACK_VECTOR_A(0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - UNPACK_VECTOR_A(1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UNPACK_VECTOR_A(1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); - UNPACK_VECTOR_A(1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); - UNPACK_VECTOR_A(1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - UNPACK_VECTOR_A(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); - } - } else { - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - GATHER_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - GATHER_LOAD_A(pg_true, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - GATHER_LOAD_A(pg_true, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - GATHER_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - GATHER_LOAD_A(pg_true, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); 
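/*
 * NOTE on the packed-A path above: on the first column block (j == 0)
 * each strided gather of the A tile is also stored contiguously into
 * packed_a (VECTOR_PACK_A); every later column block reloads the same
 * tile with a cheap unit-stride load (UNPACK_VECTOR_A) instead of
 * repeating the gather. Minimal sketch for a single vector of rows,
 * assuming the k-major scratch layout packed_a[k * v_size + m] used by
 * PACK_ELEMENT_K in these kernels (not compiled):
 */
#if 0
/* j == 0: gather once, pack as a side effect */
svfloat32_t a0 = svld1_gather_index(pg_true, &A_ELEMENT_K(0, 0), lda_vec);
svst1(pg_true, &packed_a[k * v_size], a0);
/* j > 0: unit-stride reload of the packed tile */
svfloat32_t a0_packed = svld1(pg_true, &packed_a[k * v_size]);
#endif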
- UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); - GATHER_LOAD_A(pg_true, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); - GATHER_LOAD_A(pg_true, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - GATHER_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); - } - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 1, 0); - VECTOR_STORE(pg_true, 1, 1); - VECTOR_STORE(pg_true, 1, 2); - VECTOR_STORE(pg_true, 1, 3); + INCR_C_POINTER(0, 4); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(1, 0); if (LIKELY(packed_a != NULL)) { for (; k < K; k++) { @@ -871,8 +310,6 @@ CNAME(BLASLONG M, BROADCAST_LOAD_B(0, 0); UNPACK_VECTOR_A(0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - UNPACK_VECTOR_A(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); } } else { for (; k < K; k++) { @@ -880,334 +317,36 @@ CNAME(BLASLONG M, BROADCAST_LOAD_B(0, 0); GATHER_LOAD_A(pg_true, 0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - GATHER_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); } } VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 1, 0); + INCR_C_POINTER(0, 1); } - } - for (; i < v_m1; i += v_size) { - BLASLONG j = 0; - for (; j < n8; j += 8) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - - GATHER_LOAD_A64(pg_true, 0, 0); - GATHER_LOAD_A64(pg_true, 1, 0); - svfloat32_t as0_k0 = svuzp1(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); - svfloat32_t as0_k1 = svuzp2(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); - - // GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - // GATHER_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - - GATHER_LOAD_A64(pg_true, 0, 2); - GATHER_LOAD_A64(pg_true, 1, 2); - svfloat32_t as0_k2 = svuzp1(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); - svfloat32_t as0_k3 = svuzp2(svreinterpret_f32(at0_k2), 
svreinterpret_f32(at1_k2)); - - // GATHER_LOAD_A(pg_true, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - // GATHER_LOAD_A(pg_true, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - BROADCAST_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); - BROADCAST_LOAD_B(5, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); - BROADCAST_LOAD_B(6, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); - BROADCAST_LOAD_B(7, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 0, 4); - VECTOR_STORE(pg_true, 0, 5); - VECTOR_STORE(pg_true, 0, 6); - VECTOR_STORE(pg_true, 0, 7); - } - for (; j < n4; j += 4) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - - GATHER_LOAD_A64(pg_true, 0, 0); - GATHER_LOAD_A64(pg_true, 1, 0); - svfloat32_t as0_k0 = svuzp1(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); - svfloat32_t as0_k1 = svuzp2(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); - - // GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - // GATHER_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - - GATHER_LOAD_A64(pg_true, 0, 2); - GATHER_LOAD_A64(pg_true, 1, 2); - svfloat32_t as0_k2 = svuzp1(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); - svfloat32_t as0_k3 = 
svuzp2(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); - - // GATHER_LOAD_A(pg_true, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - // GATHER_LOAD_A(pg_true, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - } - for (; j < N; j++) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - } - VECTOR_STORE(pg_true, 0, 0); - } + UPDATE_A_POINTER(v_size); + RESET_B_POINTER(); + UPDATE_C_POINTER(v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + CREATE_C_POINTER(0, 0); + CREATE_A_POINTER(0, 0); BLASLONG j = 0; - for (; j < n8; j += 8) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - GATHER_LOAD_A(pg_tail, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - GATHER_LOAD_A(pg_tail, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - GATHER_LOAD_A(pg_tail, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - GATHER_LOAD_A(pg_tail, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - 
UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_tail, 0, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); - BROADCAST_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 4, 0); - BROADCAST_LOAD_B(5, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 5, 0); - BROADCAST_LOAD_B(6, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 6, 0); - BROADCAST_LOAD_B(7, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 7, 0); - } - VECTOR_STORE(pg_tail, 0, 0); - VECTOR_STORE(pg_tail, 0, 1); - VECTOR_STORE(pg_tail, 0, 2); - VECTOR_STORE(pg_tail, 0, 3); - VECTOR_STORE(pg_tail, 0, 4); - VECTOR_STORE(pg_tail, 0, 5); - VECTOR_STORE(pg_tail, 0, 6); - VECTOR_STORE(pg_tail, 0, 7); - } for (; j < n4; j += 4) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - GATHER_LOAD_A(pg_tail, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - GATHER_LOAD_A(pg_tail, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - GATHER_LOAD_A(pg_tail, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - GATHER_LOAD_A(pg_tail, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -1224,9 +363,13 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_tail, 0, 1); VECTOR_STORE(pg_tail, 0, 2); VECTOR_STORE(pg_tail, 0, 3); + INCR_C_POINTER(0, 4); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -1237,11 +380,16 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); } VECTOR_STORE(pg_tail, 0, 0); + INCR_C_POINTER(0, 1); } + + UPDATE_A_POINTER(0); + RESET_B_POINTER(); + UPDATE_C_POINTER(0); } if (pack_a) free(packed_a); return 0; -} +} \ No newline at end of file diff --git a/kernel/arm64/sgemm_small_kernel_tt_sve.c b/kernel/arm64/sgemm_small_kernel_tt_sve.c index 894e7fd46..dd9840c37 100644 --- a/kernel/arm64/sgemm_small_kernel_tt_sve.c +++ b/kernel/arm64/sgemm_small_kernel_tt_sve.c @@ -46,15 +46,30 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
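/*
 * NOTE on the macro rework below: absolute indexing of the form
 * A[(i + m) * lda + (k + offset_k)] is replaced by per-tile offset
 * pointers. CREATE_A_POINTER / CREATE_B_POINTER / CREATE_C_POINTER
 * materialise a_offset0, b_offset0..3 and c_offset0 for the current tile,
 * UPDATE_*_POINTER bumps the running base between tiles, and
 * RESET_B_POINTER rewinds B for the next row block, keeping i/j index
 * arithmetic out of the hot loops. Equivalent sketch (not compiled):
 */
#if 0
FLOAT* a_offset0 = a_offset + 0 * lda;     /* CREATE_A_POINTER(0, 0) */
FLOAT* b_offset1 = b_offset + 1;           /* CREATE_B_POINTER(1, 1) */
FLOAT a_mk = *(a_offset0 + (k + 0));       /* A_ELEMENT_K(0, 0)      */
FLOAT b_nk = *(b_offset1 + (k + 0) * ldb); /* B_ELEMENT_K(1, 0)      */
#endif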
}) #endif -#define A_ELEMENT_K(m, offset_k) A[(i + (m)) * lda + (k + offset_k)] +#define RESET_A_POINTER() a_offset = A; + +#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale * lda; +#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale * lda; +#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k)) #define A_ELEMENT(m) A_ELEMENT_K(m, 0) -#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) * ldb + (j + (n))] +#define RESET_B_POINTER() b_offset = B; + +#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale; +#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale; +#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k) * ldb) #define B_ELEMENT(n) B_ELEMENT_K(n, 0) -#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] +#define CREATE_C_POINTER(m, scale) FLOAT* c_offset##m = c_offset + scale; +#define INCR_C_POINTER(m, incr) // c_offset ## m += incr * ldc; +#define UPDATE_C_POINTER(scale) c_offset += scale; +#define C_ELEMENT(m, n) \ + *(c_offset##m + ((j + n) * ldc)) // C[(i+(m))+(j+(n))*ldc] -#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size2 + m] +// #undef C_ELEMENT +// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] + +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size + m] #define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) // ASIMD @@ -106,22 +121,23 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. svfloat32_t a##s##m##_k##offset_k = svdup_f32(A_ELEMENT_K(m, offset_k)); #define BROADCAST_LOAD_B(n, offset_k) \ svfloat32_t b##s##n##_k##offset_k = svdup_f32(B_ELEMENT_K(n, offset_k)); - +#define VECTOR_LOAD_A(pg, m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k)); #define QUADWORD_LOAD_B(n, offset_k) \ svfloat32_t b##s##n##_k##offset_k = \ svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); -#define GATHER_LOAD_A(pg, offset_m, m, offset_k) \ +#define GATHER_LOAD_A(pg, m, offset_k) \ svfloat32_t a##s##m##_k##offset_k = \ - svld1_gather_index(pg, &A_ELEMENT_K(offset_m, offset_k), lda_vec); + svld1_gather_index(pg, &A_ELEMENT_K(m, offset_k), lda_vec); #define PACK_A(m, offset_k) \ svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); -#define VECTOR_PACK_A(offset_m, m, offset_k) \ +#define VECTOR_PACK_A(m, offset_k) \ svst1(pg_true, &PACK_ELEMENT_K(m* v_size, offset_k), a##s##m##_k##offset_k); #define QUADWORD_PACK_A(m, offset_k) \ svst1(pg_quad, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); -#define UNPACK_VECTOR_A(offset_m, m, offset_k) \ +#define UNPACK_VECTOR_A(m, offset_k) \ svfloat32_t a##s##m##_k##offset_k = \ - svld1(pg_true, &PACK_ELEMENT_K(offset_m, offset_k)); + svld1(pg_true, &PACK_ELEMENT_K(m * v_size, offset_k)); #define UNPACK_BROADCAST_A(m, offset_k) \ svfloat32_t a##s##m##_k##offset_k = svdup_f32(PACK_ELEMENT_K(m, offset_k)); #define UNPACK_QUADWORD_A(m, offset_k) \ @@ -134,28 +150,25 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
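/*
 * NOTE on the C-update macros below: VECTOR_STORE computes
 * C = alpha * acc in the B0 kernels and C = alpha * acc + beta * C
 * otherwise, while SCATTER_STORE is the strided variant driven by
 * ldc_vec. Column movement is folded into C_ELEMENT itself through
 * (j + n) * ldc, which is why INCR_C_POINTER deliberately expands to
 * nothing; only UPDATE_C_POINTER advances c_offset between row blocks.
 * Sketch of the non-B0 store for one accumulator, with alpha_vec and
 * beta_vec as defined further down (not compiled):
 */
#if 0
result00 = svmul_m(pg, result00, alpha_vec);               /* alpha * acc */
result00 = svmla_m(pg, result00,
                   svld1(pg, &C_ELEMENT(0, 0)), beta_vec); /* + beta * C  */
svst1(pg, &C_ELEMENT(0, 0), result00);                     /* write back  */
#endif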
result##m##n = svmla_lane( \ result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); #ifdef B0 -#define VECTOR_STORE(pg, offset_m, m, n) \ +#define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1(pg, &C_ELEMENT(offset_m, n), result##m##n); + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #else -#define VECTOR_STORE(pg, offset_m, m, n) \ +#define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ result##m##n = \ - svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(offset_m, n)), beta_vec); \ - svst1(pg, &C_ELEMENT(offset_m, n), result##m##n); + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - result##m##n = svmla_m( \ - pg, \ - result##m##n, \ - svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u32(0LL, ldc)), \ - beta_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); + result##m##n = svmla_m(pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \ + beta_vec); \ + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #endif #ifndef LIKELY @@ -165,13 +178,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define LIKELY(x) (x) #endif #endif -#ifndef UNLIKELY -#ifdef __GNUC__ -#define UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define UNLIKELY(x) (x) -#endif -#endif #ifdef B0 int @@ -201,337 +207,132 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntw(); - const uint64_t v_size2 = v_size * 2; const svbool_t pg_true = svptrue_b32(); + const svbool_t pg_quad = svwhilelt_b32(0, 4); + const svbool_t pg_first = svwhilelt_b32(0, 1); const svfloat32_t alpha_vec = svdup_f32(alpha); #ifndef B0 const svfloat32_t beta_vec = svdup_f32(beta); #endif const svuint32_t lda_vec = svindex_u32(0LL, lda); - const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; - const BLASLONG n8 = N & -8; const BLASLONG n4 = N & -4; - const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + const int pack_a = M >= v_size && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_a = - (pack_a) ? packed_a = (FLOAT*)malloc(K * 2 * v_size * sizeof(FLOAT)) : NULL; + (pack_a) ? 
packed_a = (FLOAT*)malloc(K * v_size * sizeof(FLOAT)) : NULL; + + FLOAT* a_offset = A; + FLOAT* b_offset = B; + FLOAT* c_offset = C; BLASLONG i = 0; - for (; i < v_m2; i += v_size2) { + for (; i < v_m1; i += v_size) { + + CREATE_C_POINTER(0, 0); + CREATE_A_POINTER(0, 0); BLASLONG j = 0; - for (; j < n8; j += 8) { + for (; j < n4; j += 4) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(1, 2); - DECLARE_RESULT_VECTOR(1, 3); - DECLARE_RESULT_VECTOR(1, 4); - DECLARE_RESULT_VECTOR(1, 5); - DECLARE_RESULT_VECTOR(1, 6); - DECLARE_RESULT_VECTOR(1, 7); if (LIKELY(packed_a != NULL)) { if (j == 0) { for (; k < K; k++) { QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0, 0); - VECTOR_PACK_A(0, 0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + VECTOR_PACK_A(0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - GATHER_LOAD_A(pg_true, v_size, 1, 0); - VECTOR_PACK_A(v_size, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); } } else { for (; k < K; k++) { QUADWORD_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0, 0); + UNPACK_VECTOR_A(0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UNPACK_VECTOR_A(v_size, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); } } } else { for (; k < K; k++) { QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0, 0); + GATHER_LOAD_A(pg_true, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - GATHER_LOAD_A(pg_true, v_size, 1, 0); - 
UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); } } - VECTOR_STORE(pg_true, 0, 0, 0); - VECTOR_STORE(pg_true, 0, 0, 1); - VECTOR_STORE(pg_true, 0, 0, 2); - VECTOR_STORE(pg_true, 0, 0, 3); - VECTOR_STORE(pg_true, 0, 0, 4); - VECTOR_STORE(pg_true, 0, 0, 5); - VECTOR_STORE(pg_true, 0, 0, 6); - VECTOR_STORE(pg_true, 0, 0, 7); - VECTOR_STORE(pg_true, v_size, 1, 0); - VECTOR_STORE(pg_true, v_size, 1, 1); - VECTOR_STORE(pg_true, v_size, 1, 2); - VECTOR_STORE(pg_true, v_size, 1, 3); - VECTOR_STORE(pg_true, v_size, 1, 4); - VECTOR_STORE(pg_true, v_size, 1, 5); - VECTOR_STORE(pg_true, v_size, 1, 6); - VECTOR_STORE(pg_true, v_size, 1, 7); - } - for (; j < n4; j += 4) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(1, 2); - DECLARE_RESULT_VECTOR(1, 3); - - if (LIKELY(packed_a != NULL)) { - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_VECTOR_A(v_size, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - } - } else { - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - GATHER_LOAD_A(pg_true, v_size, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - } - } - VECTOR_STORE(pg_true, 0, 0, 0); - VECTOR_STORE(pg_true, 0, 0, 1); - VECTOR_STORE(pg_true, 0, 0, 2); - VECTOR_STORE(pg_true, 0, 0, 3); - VECTOR_STORE(pg_true, v_size, 1, 0); - VECTOR_STORE(pg_true, v_size, 1, 1); - VECTOR_STORE(pg_true, v_size, 1, 2); - VECTOR_STORE(pg_true, v_size, 1, 3); + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + INCR_C_POINTER(0, 4); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(1, 0); if (LIKELY(packed_a != NULL)) { for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0, 0); + UNPACK_VECTOR_A(0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - UNPACK_VECTOR_A(v_size, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); } } else { for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0, 0); + GATHER_LOAD_A(pg_true, 0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - GATHER_LOAD_A(pg_true, v_size, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); } } - VECTOR_STORE(pg_true, 0, 0, 0); - VECTOR_STORE(pg_true, v_size, 1, 0); + VECTOR_STORE(pg_true, 0, 0); + INCR_C_POINTER(0, 1); } - } - 
for (; i < v_m1; i += v_size) { - BLASLONG j = 0; - for (; j < n8; j += 8) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - } - VECTOR_STORE(pg_true, 0, 0, 0); - VECTOR_STORE(pg_true, 0, 0, 1); - VECTOR_STORE(pg_true, 0, 0, 2); - VECTOR_STORE(pg_true, 0, 0, 3); - VECTOR_STORE(pg_true, 0, 0, 4); - VECTOR_STORE(pg_true, 0, 0, 5); - VECTOR_STORE(pg_true, 0, 0, 6); - VECTOR_STORE(pg_true, 0, 0, 7); - } - for (; j < n4; j += 4) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - } - VECTOR_STORE(pg_true, 0, 0, 0); - VECTOR_STORE(pg_true, 0, 0, 1); - VECTOR_STORE(pg_true, 0, 0, 2); - VECTOR_STORE(pg_true, 0, 0, 3); - } - for (; j < N; j++) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - } - VECTOR_STORE(pg_true, 0, 0, 0); - } + UPDATE_A_POINTER(v_size); + RESET_B_POINTER(); + UPDATE_C_POINTER(v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + CREATE_C_POINTER(0, 0); + CREATE_A_POINTER(0, 0); BLASLONG j = 0; - for (; j < n8; j += 8) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_tail, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - } - VECTOR_STORE(pg_tail, 0, 0, 0); - VECTOR_STORE(pg_tail, 0, 0, 1); - VECTOR_STORE(pg_tail, 0, 0, 2); - VECTOR_STORE(pg_tail, 0, 0, 3); - VECTOR_STORE(pg_tail, 0, 0, 4); - VECTOR_STORE(pg_tail, 0, 0, 5); - VECTOR_STORE(pg_tail, 0, 0, 6); - VECTOR_STORE(pg_tail, 0, 0, 7); - } for (; j < n4; j += 4) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -541,34 +342,43 @@ CNAME(BLASLONG M, for (; k < 
K; k++) { QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_tail, 0, 0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); } - VECTOR_STORE(pg_tail, 0, 0, 0); - VECTOR_STORE(pg_tail, 0, 0, 1); - VECTOR_STORE(pg_tail, 0, 0, 2); - VECTOR_STORE(pg_tail, 0, 0, 3); + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + INCR_C_POINTER(0, 4); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_tail, 0, 0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); } - VECTOR_STORE(pg_tail, 0, 0, 0); + VECTOR_STORE(pg_tail, 0, 0); + INCR_C_POINTER(0, 1); } + + UPDATE_A_POINTER(0); + RESET_B_POINTER(); + UPDATE_C_POINTER(0); } if (pack_a) free(packed_a); return 0; -} +} \ No newline at end of file From b1c9fafabb028c73f57d90be991356ea0760a56a Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Thu, 18 Jul 2024 17:37:18 +0100 Subject: [PATCH 3/7] Remove k2 loop from DGEMM TN and use a more conservative heuristic for SGEMM --- kernel/arm64/dgemm_small_kernel_tn_sve.c | 209 +------------------- kernel/arm64/gemm_small_kernel_permit_sve.c | 4 +- 2 files changed, 2 insertions(+), 211 deletions(-) diff --git a/kernel/arm64/dgemm_small_kernel_tn_sve.c b/kernel/arm64/dgemm_small_kernel_tn_sve.c index b8783c1d5..6d3f4dd28 100644 --- a/kernel/arm64/dgemm_small_kernel_tn_sve.c +++ b/kernel/arm64/dgemm_small_kernel_tn_sve.c @@ -265,43 +265,7 @@ CNAME(BLASLONG M, if (LIKELY(packed_a != NULL)) { if (j == 0) { - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - GATHER_LOAD_A(pg_true, 0, 0); - VECTOR_PACK_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - GATHER_LOAD_A(pg_true, 0, 1); - VECTOR_PACK_A(0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - VECTOR_LOAD_B_K2(2, 0); - VECTOR_LOAD_B_K2(3, 0); - TRANSPOSE_B2_K2(2, 3, 0, 1); - SCALE_B2_K2(2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); - GATHER_LOAD_A(pg_true, 1, 0); - VECTOR_PACK_A(1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); - GATHER_LOAD_A(pg_true, 1, 1); - VECTOR_PACK_A(1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1); - } for (; k < K; k++) { - BROADCAST_LOAD_B(0, 0); GATHER_LOAD_A(pg_true, 0, 0); VECTOR_PACK_A(0, 0); @@ -320,39 +284,7 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); } } else { - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UNPACK_VECTOR_A(0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - 
UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - VECTOR_LOAD_B_K2(2, 0); - VECTOR_LOAD_B_K2(3, 0); - TRANSPOSE_B2_K2(2, 3, 0, 1); - SCALE_B2_K2(2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); - UNPACK_VECTOR_A(1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); - UNPACK_VECTOR_A(1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1); - } for (; k < K; k++) { - BROADCAST_LOAD_B(0, 0); UNPACK_VECTOR_A(0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); @@ -370,37 +302,6 @@ CNAME(BLASLONG M, } } } else { - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - GATHER_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - VECTOR_LOAD_B_K2(2, 0); - VECTOR_LOAD_B_K2(3, 0); - TRANSPOSE_B2_K2(2, 3, 0, 1); - SCALE_B2_K2(2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); - GATHER_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); - GATHER_LOAD_A(pg_true, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -443,27 +344,7 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(1, 1); if (LIKELY(packed_a != NULL)) { - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UNPACK_VECTOR_A(0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UNPACK_VECTOR_A(1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UNPACK_VECTOR_A(1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - } for (; k < K; k++) { - BROADCAST_LOAD_B(0, 0); UNPACK_VECTOR_A(0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); @@ -474,27 +355,7 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); } } else { - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - GATHER_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - GATHER_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - 
UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - GATHER_LOAD_A(pg_true, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - } for (; k < K; k++) { - BROADCAST_LOAD_B(0, 0); GATHER_LOAD_A(pg_true, 0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); @@ -570,27 +431,6 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - GATHER_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - VECTOR_LOAD_B_K2(2, 0); - VECTOR_LOAD_B_K2(3, 0); - TRANSPOSE_B2_K2(2, 3, 0, 1); - SCALE_B2_K2(2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -619,19 +459,6 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - GATHER_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -686,27 +513,6 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - GATHER_LOAD_A(pg_tail, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - GATHER_LOAD_A(pg_tail, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - VECTOR_LOAD_B_K2(2, 0); - VECTOR_LOAD_B_K2(3, 0); - TRANSPOSE_B2_K2(2, 3, 0, 1); - SCALE_B2_K2(2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -735,19 +541,6 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - GATHER_LOAD_A(pg_tail, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - GATHER_LOAD_A(pg_tail, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -787,4 +580,4 @@ CNAME(BLASLONG M, free(packed_a); return 0; -} \ No newline at end of file +} diff --git a/kernel/arm64/gemm_small_kernel_permit_sve.c b/kernel/arm64/gemm_small_kernel_permit_sve.c index c1275129d..3d425624a 100644 --- a/kernel/arm64/gemm_small_kernel_permit_sve.c +++ b/kernel/arm64/gemm_small_kernel_permit_sve.c @@ -35,11 +35,9 @@ int CNAME(int transa, int transb, BLASLONG M, BLASLONG N, BLASLONG K, FLOAT alph if (MNK <= 64*64*64) return 1; #else // sgemm - if (MNK <= 256*256*256) + 
if (MNK <= 64*64*64) return 1; #endif - - return 0; } From 9984c5ce9dae74305b5bbfed0b1fa5602849ac35 Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Thu, 18 Jul 2024 17:34:43 +0000 Subject: [PATCH 4/7] Clean up k2 removal more and unroll SGEMM more --- kernel/arm64/dgemm_small_kernel_tn_sve.c | 23 +-- kernel/arm64/sgemm_small_kernel_nn_sve.c | 167 +++++++++++++++++- kernel/arm64/sgemm_small_kernel_nt_sve.c | 95 +++++++++- kernel/arm64/sgemm_small_kernel_tn_sve.c | 210 +++++++++++++++++------ kernel/arm64/sgemm_small_kernel_tt_sve.c | 189 ++++++++++++++------ 5 files changed, 555 insertions(+), 129 deletions(-) diff --git a/kernel/arm64/dgemm_small_kernel_tn_sve.c b/kernel/arm64/dgemm_small_kernel_tn_sve.c index 6d3f4dd28..1b0fada2a 100644 --- a/kernel/arm64/dgemm_small_kernel_tn_sve.c +++ b/kernel/arm64/dgemm_small_kernel_tn_sve.c @@ -80,25 +80,12 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. float64x2_t a##m##_k##offset_k = vld1q_dup_f64(&A_ELEMENT_K(m, offset_k)); #define LOAD_A1(m, offset_k) \ float64_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k); -#define VECTOR_LOAD_B_K2(n, offset_k) \ - float64x2_t b##k##n##_k##offset_k = vld1q_f64(&B_ELEMENT_K(n, offset_k)); -#define TRANSPOSE_B2_K2(n0, n1, offset_k0, offset_k1) \ - float64x2_t b##n0##_k##offset_k0 = \ - vzip1q_f64(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \ - float64x2_t b##n0##_k##offset_k1 = \ - vzip2q_f64(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); - -#define SCALE_B2_K2(n0, offset_k0, offset_k1) \ - svfloat64_t b##s##n0##_k##offset_k0 = svdup_neonq_f64(b##n0##_k##offset_k0); \ - svfloat64_t b##s##n0##_k##offset_k1 = svdup_neonq_f64(b##n0##_k##offset_k1); #define GATHER_LOAD_B2(n, offset_k) \ float64x2_t b##n##_k##offset_k = vdupq_n_f64(B_ELEMENT_K(n, offset_k)); \ b##n##_k##offset_k = \ vsetq_lane_f64(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1); #define VECTOR_UNPACK_B2(n, offset_k) \ float64x2_t b##n##_k##offset_k = vld1q_f64(&PACK_ELEMENT_K(n, offset_k)); -#define VECTOR_PACK_B2(n, offset_k) \ - vst1q_f64(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k); #define PACK_B0(n, offset_k) \ PACK_ELEMENT_K(n, offset_k) = vget_lane_f64(b##n##_k##offset_k, 0); #define UPDATE_RESULT_VECTOR2(m, n, offset_k) \ @@ -128,9 +115,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k)); #define VECTOR_LOAD_A(pg, m, offset_k) \ svfloat64_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k)); -#define QUADWORD_LOAD_B(n, offset_k) \ - svfloat64_t b##s##n##_k##offset_k = \ - svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); #define GATHER_LOAD_A(pg, m, offset_k) \ svfloat64_t a##s##m##_k##offset_k = \ svld1_gather_index(pg, &A_ELEMENT_K(m, offset_k), lda_vec); @@ -226,7 +210,6 @@ CNAME(BLASLONG M, const BLASLONG v_m1 = M & -v_size; const BLASLONG n4 = N & -4; const BLASLONG n2 = N & -2; - const BLASLONG k2 = K & -2; const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 
1 : 0; FLOAT* packed_a = @@ -266,6 +249,7 @@ CNAME(BLASLONG M, if (LIKELY(packed_a != NULL)) { if (j == 0) { for (; k < K; k++) { + BROADCAST_LOAD_B(0, 0); GATHER_LOAD_A(pg_true, 0, 0); VECTOR_PACK_A(0, 0); @@ -285,6 +269,7 @@ CNAME(BLASLONG M, } } else { for (; k < K; k++) { + BROADCAST_LOAD_B(0, 0); UNPACK_VECTOR_A(0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); @@ -345,6 +330,7 @@ CNAME(BLASLONG M, if (LIKELY(packed_a != NULL)) { for (; k < K; k++) { + BROADCAST_LOAD_B(0, 0); UNPACK_VECTOR_A(0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); @@ -356,6 +342,7 @@ CNAME(BLASLONG M, } } else { for (; k < K; k++) { + BROADCAST_LOAD_B(0, 0); GATHER_LOAD_A(pg_true, 0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); @@ -580,4 +567,4 @@ CNAME(BLASLONG M, free(packed_a); return 0; -} +} \ No newline at end of file diff --git a/kernel/arm64/sgemm_small_kernel_nn_sve.c b/kernel/arm64/sgemm_small_kernel_nn_sve.c index 2e65e61ff..0af073a14 100644 --- a/kernel/arm64/sgemm_small_kernel_nn_sve.c +++ b/kernel/arm64/sgemm_small_kernel_nn_sve.c @@ -237,6 +237,7 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntw(); + const uint64_t v_size2 = v_size * 2; const svbool_t pg_true = svptrue_b32(); const svbool_t pg_quad = svwhilelt_b32(0, 4); const svbool_t pg_first = svwhilelt_b32(0, 1); @@ -245,10 +246,11 @@ CNAME(BLASLONG M, const svfloat32_t beta_vec = svdup_f32(beta); #endif const BLASLONG n4 = N & -4; + const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; const BLASLONG k4 = K & -4; - const int pack_b = M >= v_size && N >= 8 && K >= 8 ? 1 : 0; + const int pack_b = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_b = (pack_b) ? packed_b = (FLOAT*)malloc(K * 4 * sizeof(FLOAT)) : NULL; @@ -269,16 +271,21 @@ CNAME(BLASLONG M, CREATE_B_POINTER(3, 3); BLASLONG i = 0; - for (; i < v_m1; i += v_size) { + for (; i < v_m2; i += v_size2) { CREATE_A_POINTER(0, 0); - UPDATE_A_POINTER(v_size); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); if (LIKELY(packed_b != NULL)) { if (i == 0) { @@ -314,6 +321,26 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); } for (; k < K; k++) { @@ -324,12 +351,17 @@ CNAME(BLASLONG M, BROADCAST_LOAD_B(1, 0); PACK_B(1, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + 
UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); BROADCAST_LOAD_B(2, 0); PACK_B(2, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); BROADCAST_LOAD_B(3, 0); PACK_B(3, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); } } else { for (; k < K; k++) { @@ -340,11 +372,118 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); } } } else { for (; k < k4; k += 4) { + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + 
INCR_C_POINTER(0, v_size2); + INCR_C_POINTER(1, v_size2); + INCR_C_POINTER(2, v_size2); + INCR_C_POINTER(3, v_size2); + } + for (; i < v_m1; i += v_size) { + + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + if (LIKELY(packed_b != NULL)) { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + } + } else { + for (; k < k4; k += 4) { + VECTOR_LOAD_B_K4(0, 0); VECTOR_LOAD_B_K4(1, 0); VECTOR_LOAD_B_K4(2, 0); @@ -478,6 +617,28 @@ CNAME(BLASLONG M, CREATE_B_POINTER(0, 0); BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + INCR_C_POINTER(0, v_size2); + } for (; i < v_m1; i += v_size) { CREATE_A_POINTER(0, 0); diff --git a/kernel/arm64/sgemm_small_kernel_nt_sve.c b/kernel/arm64/sgemm_small_kernel_nt_sve.c index 9f99c2422..ed7ee6bd6 100644 --- a/kernel/arm64/sgemm_small_kernel_nt_sve.c +++ b/kernel/arm64/sgemm_small_kernel_nt_sve.c @@ -209,6 +209,7 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntw(); + const uint64_t v_size2 = v_size * 2; const svbool_t pg_true = svptrue_b32(); const svbool_t pg_quad = svwhilelt_b32(0, 4); const svbool_t pg_first = svwhilelt_b32(0, 1); @@ -217,9 +218,10 @@ CNAME(BLASLONG M, const svfloat32_t beta_vec = svdup_f32(beta); #endif const BLASLONG n4 = N & -4; + const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; - const int pack_b = M >= v_size && N >= 8 && K >= 8 ? 1 : 0; + const int pack_b = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_b = (pack_b) ? 
packed_b = (FLOAT*)malloc(K * 4 * sizeof(FLOAT)) : NULL; @@ -240,16 +242,21 @@ CNAME(BLASLONG M, CREATE_B_POINTER(3, 3); BLASLONG i = 0; - for (; i < v_m1; i += v_size) { + for (; i < v_m2; i += v_size2) { CREATE_A_POINTER(0, 0); - UPDATE_A_POINTER(v_size); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); if (LIKELY(packed_b != NULL)) { if (i == 0) { @@ -262,6 +269,11 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); } } else { for (; k < K; k++) { @@ -272,11 +284,66 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); } } } else { for (; k < K; k++) { + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + INCR_C_POINTER(0, v_size2); + INCR_C_POINTER(1, v_size2); + INCR_C_POINTER(2, v_size2); + INCR_C_POINTER(3, v_size2); + } + for (; i < v_m1; i += v_size) { + + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + if (LIKELY(packed_b != NULL)) { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + } + } else { + for (; k < K; k++) { + QUADWORD_LOAD_B(0, 0); VECTOR_LOAD_A(pg_true, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); @@ -346,6 +413,28 @@ CNAME(BLASLONG M, CREATE_B_POINTER(0, 0); BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + 
VECTOR_STORE(pg_true, 1, 0); + INCR_C_POINTER(0, v_size2); + } for (; i < v_m1; i += v_size) { CREATE_A_POINTER(0, 0); diff --git a/kernel/arm64/sgemm_small_kernel_tn_sve.c b/kernel/arm64/sgemm_small_kernel_tn_sve.c index 9cbb60d40..54608a47b 100644 --- a/kernel/arm64/sgemm_small_kernel_tn_sve.c +++ b/kernel/arm64/sgemm_small_kernel_tn_sve.c @@ -69,7 +69,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #undef C_ELEMENT // #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] -#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size + m] +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size2 + m] #define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) // ASIMD @@ -206,6 +206,7 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntw(); + const uint64_t v_size2 = v_size * 2; const svbool_t pg_true = svptrue_b32(); const svbool_t pg_quad = svwhilelt_b32(0, 4); const svbool_t pg_first = svwhilelt_b32(0, 1); @@ -215,18 +216,153 @@ CNAME(BLASLONG M, #endif const svuint32_t lda_vec = svindex_u32(0LL, lda); + const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; const BLASLONG n4 = N & -4; - const int pack_a = M >= v_size && N >= 8 && K >= 8 ? 1 : 0; + const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_a = - (pack_a) ? packed_a = (FLOAT*)malloc(K * v_size * sizeof(FLOAT)) : NULL; + (pack_a) ? packed_a = (FLOAT*)malloc(K * v_size2 * sizeof(FLOAT)) : NULL; FLOAT* a_offset = A; FLOAT* b_offset = B; FLOAT* c_offset = C; BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, v_size); + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + if (LIKELY(packed_a != NULL)) { + if (j == 0) { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + VECTOR_PACK_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + VECTOR_PACK_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } else { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } + } else { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + 
UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + INCR_C_POINTER(0, 4); + INCR_C_POINTER(1, 4); + } + for (; j < N; j++) { + + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } else { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + INCR_C_POINTER(0, 1); + INCR_C_POINTER(1, 1); + } + + UPDATE_A_POINTER(v_size2); + RESET_B_POINTER(); + UPDATE_C_POINTER(v_size2); + } for (; i < v_m1; i += v_size) { CREATE_C_POINTER(0, 0); @@ -247,48 +383,17 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - if (LIKELY(packed_a != NULL)) { - if (j == 0) { - for (; k < K; k++) { + for (; k < K; k++) { - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - VECTOR_PACK_A(0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - } - } else { - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - } - } - } else { - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - } + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); @@ -304,20 +409,11 @@ CNAME(BLASLONG M, BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); - if (LIKELY(packed_a != NULL)) { - for (; k < K; k++) { + for (; k < K; k++) { - BROADCAST_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - } - } else { - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - } + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); } 
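/*
 * Editorial note, not part of the patch: a minimal standalone sketch of what
 * one v_size x 1 tile of the transposed-A (TN) path computes once the macros
 * above are expanded. The helper name and its parameters are illustrative
 * assumptions; only the intrinsics mirror the macros as defined earlier in
 * these files (GATHER_LOAD_A -> svld1_gather_index, BROADCAST_LOAD_B ->
 * svdup_f32, UPDATE_RESULT_VECTOR -> predicated svmla_m). Alpha/beta handling
 * and the packed-A path are omitted.
 */
#include <arm_sve.h>

static void sgemm_tn_tile_sketch(const float* A, const float* B, float* C,
                                 long M, long K, long lda, long ldb, long ldc,
                                 long i, long j)
{
  /* Predicate covers the (possibly partial) row tile starting at row i. */
  svbool_t pg = svwhilelt_b32((uint64_t)i, (uint64_t)M);
  /* Lane m reads A[(i + m) * lda + k]: one element from each transposed row. */
  svuint32_t lda_vec = svindex_u32(0, (uint32_t)lda);
  svfloat32_t acc = svdup_f32(0.0f);
  for (long k = 0; k < K; k++) {
    /* GATHER_LOAD_A: strided column of the transposed A panel */
    svfloat32_t a = svld1_gather_index(pg, &A[i * lda + k], lda_vec);
    /* BROADCAST_LOAD_B: one B element replicated across all lanes */
    svfloat32_t b = svdup_f32(B[k + j * ldb]);
    /* UPDATE_RESULT_VECTOR: acc += a * b, per active lane */
    acc = svmla_m(pg, acc, a, b);
  }
  svst1(pg, &C[i + j * ldc], acc);
}
/* The gather is the expensive step here, which is why the kernels try to pack
   A once (VECTOR_PACK_A) and reload it contiguously (UNPACK_VECTOR_A) on
   later j iterations. */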
VECTOR_STORE(pg_true, 0, 0); INCR_C_POINTER(0, 1); diff --git a/kernel/arm64/sgemm_small_kernel_tt_sve.c b/kernel/arm64/sgemm_small_kernel_tt_sve.c index dd9840c37..50dbd7399 100644 --- a/kernel/arm64/sgemm_small_kernel_tt_sve.c +++ b/kernel/arm64/sgemm_small_kernel_tt_sve.c @@ -69,7 +69,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #undef C_ELEMENT // #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] -#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size + m] +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size2 + m] #define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) // ASIMD @@ -207,6 +207,7 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntw(); + const uint64_t v_size2 = v_size * 2; const svbool_t pg_true = svptrue_b32(); const svbool_t pg_quad = svwhilelt_b32(0, 4); const svbool_t pg_first = svwhilelt_b32(0, 1); @@ -216,18 +217,144 @@ CNAME(BLASLONG M, #endif const svuint32_t lda_vec = svindex_u32(0LL, lda); + const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; const BLASLONG n4 = N & -4; - const int pack_a = M >= v_size && N >= 8 && K >= 8 ? 1 : 0; + const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_a = - (pack_a) ? packed_a = (FLOAT*)malloc(K * v_size * sizeof(FLOAT)) : NULL; + (pack_a) ? packed_a = (FLOAT*)malloc(K * v_size2 * sizeof(FLOAT)) : NULL; FLOAT* a_offset = A; FLOAT* b_offset = B; FLOAT* c_offset = C; BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, v_size); + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + if (LIKELY(packed_a != NULL)) { + if (j == 0) { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + VECTOR_PACK_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + GATHER_LOAD_A(pg_true, 1, 0); + VECTOR_PACK_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + } + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + 
UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + INCR_C_POINTER(0, 4); + INCR_C_POINTER(1, 4); + } + for (; j < N; j++) { + + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } else { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + INCR_C_POINTER(0, 1); + INCR_C_POINTER(1, 1); + } + + UPDATE_A_POINTER(v_size2); + RESET_B_POINTER(); + UPDATE_C_POINTER(v_size2); + } for (; i < v_m1; i += v_size) { CREATE_C_POINTER(0, 0); @@ -248,39 +375,14 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - if (LIKELY(packed_a != NULL)) { - if (j == 0) { - for (; k < K; k++) { + for (; k < K; k++) { - QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - VECTOR_PACK_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - } - } else { - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - } - } - } else { - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - } + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); @@ -296,20 +398,11 @@ CNAME(BLASLONG M, BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); - if (LIKELY(packed_a != NULL)) { - for (; k < K; k++) { + for (; k < K; k++) { - BROADCAST_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - } - } else { - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - } + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); } VECTOR_STORE(pg_true, 0, 0); INCR_C_POINTER(0, 1); From a9edddb6953cf70020f81e0d97f37b892325b459 Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Thu, 18 Jul 2024 19:03:34 +0000 Subject: [PATCH 5/7] Unroll TN further --- kernel/arm64/sgemm_small_kernel_tn_sve.c | 231 ++++++++++++++++++++++- 1 file changed, 229 insertions(+), 2 deletions(-) diff --git 
a/kernel/arm64/sgemm_small_kernel_tn_sve.c b/kernel/arm64/sgemm_small_kernel_tn_sve.c index 54608a47b..03406daa6 100644 --- a/kernel/arm64/sgemm_small_kernel_tn_sve.c +++ b/kernel/arm64/sgemm_small_kernel_tn_sve.c @@ -218,6 +218,7 @@ CNAME(BLASLONG M, const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; + const BLASLONG n8 = N & -8; const BLASLONG n4 = N & -4; const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; @@ -237,23 +238,35 @@ CNAME(BLASLONG M, CREATE_A_POINTER(1, v_size); BLASLONG j = 0; - for (; j < n4; j += 4) { + for (; j < n8; j += 8) { CREATE_B_POINTER(0, 0); CREATE_B_POINTER(1, 1); CREATE_B_POINTER(2, 2); CREATE_B_POINTER(3, 3); - UPDATE_B_POINTER(4); + CREATE_B_POINTER(4, 4); + CREATE_B_POINTER(5, 5); + CREATE_B_POINTER(6, 6); + CREATE_B_POINTER(7, 7); + UPDATE_B_POINTER(8); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); DECLARE_RESULT_VECTOR(1, 0); DECLARE_RESULT_VECTOR(1, 1); DECLARE_RESULT_VECTOR(1, 2); DECLARE_RESULT_VECTOR(1, 3); + DECLARE_RESULT_VECTOR(1, 4); + DECLARE_RESULT_VECTOR(1, 5); + DECLARE_RESULT_VECTOR(1, 6); + DECLARE_RESULT_VECTOR(1, 7); if (LIKELY(packed_a != NULL)) { if (j == 0) { @@ -275,6 +288,18 @@ CNAME(BLASLONG M, BROADCAST_LOAD_B(3, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); } } else { for (; k < K; k++) { @@ -293,11 +318,109 @@ CNAME(BLASLONG M, BROADCAST_LOAD_B(3, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); } } } else { for (; k < K; k++) { + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); + } + } 
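/*
 * Editorial note, not part of the patch: the n8 block above keeps
 * 2 (row tiles) x 8 (columns) = 16 SVE accumulators live, plus the two
 * gathered or unpacked A vectors and one broadcast B value per k step.
 * AArch64 provides 32 SVE vector registers (z0-z31), so this unroll roughly
 * doubles the reuse of each costly gathered A load relative to the n4 block
 * while still leaving register headroom; unrolling much further would start
 * spilling accumulators to the stack.
 */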
+ VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + VECTOR_STORE(pg_true, 1, 4); + VECTOR_STORE(pg_true, 1, 5); + VECTOR_STORE(pg_true, 1, 6); + VECTOR_STORE(pg_true, 1, 7); + INCR_C_POINTER(0, 8); + INCR_C_POINTER(1, 8); + } + for (; j < n4; j += 4) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } else { + for (; k < K; k++) { + BROADCAST_LOAD_B(0, 0); GATHER_LOAD_A(pg_true, 0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); @@ -369,6 +492,58 @@ CNAME(BLASLONG M, CREATE_A_POINTER(0, 0); BLASLONG j = 0; + for (; j < n8; j += 8) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + CREATE_B_POINTER(4, 4); + CREATE_B_POINTER(5, 5); + CREATE_B_POINTER(6, 6); + CREATE_B_POINTER(7, 7); + UPDATE_B_POINTER(8); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + INCR_C_POINTER(0, 8); + } for (; j < n4; j += 4) { CREATE_B_POINTER(0, 0); @@ -429,6 +604,58 @@ CNAME(BLASLONG M, CREATE_A_POINTER(0, 0); BLASLONG j = 0; + for (; j < n8; j += 8) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + CREATE_B_POINTER(4, 4); + CREATE_B_POINTER(5, 5); + CREATE_B_POINTER(6, 6); + CREATE_B_POINTER(7, 7); + UPDATE_B_POINTER(8); + + BLASLONG k = 0; 
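/* Editorial note, not part of the patch: eight accumulators (one per B
   column) are declared next; each k step then performs a single gathered A
   load that is reused by all eight broadcast multiply-accumulates, which is
   what makes the 8-wide j unroll pay for the gather. */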
+ DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 7, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + VECTOR_STORE(pg_tail, 0, 4); + VECTOR_STORE(pg_tail, 0, 5); + VECTOR_STORE(pg_tail, 0, 6); + VECTOR_STORE(pg_tail, 0, 7); + INCR_C_POINTER(0, 8); + } for (; j < n4; j += 4) { CREATE_B_POINTER(0, 0); From 7311d9301650adc0672c2990e1997702630b9d47 Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Fri, 19 Jul 2024 16:50:50 +0000 Subject: [PATCH 6/7] Unroll TT further --- kernel/arm64/sgemm_small_kernel_tt_sve.c | 262 ++++++++++++++++++++--- 1 file changed, 231 insertions(+), 31 deletions(-) diff --git a/kernel/arm64/sgemm_small_kernel_tt_sve.c b/kernel/arm64/sgemm_small_kernel_tt_sve.c index 50dbd7399..c66330fb5 100644 --- a/kernel/arm64/sgemm_small_kernel_tt_sve.c +++ b/kernel/arm64/sgemm_small_kernel_tt_sve.c @@ -219,6 +219,7 @@ CNAME(BLASLONG M, const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; + const BLASLONG n8 = N & -8; const BLASLONG n4 = N & -4; const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 
1 : 0; @@ -238,6 +239,132 @@ CNAME(BLASLONG M, CREATE_A_POINTER(1, v_size); BLASLONG j = 0; + for (; j < n8; j += 8) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + CREATE_B_POINTER(4, 4); + CREATE_B_POINTER(5, 5); + CREATE_B_POINTER(6, 6); + CREATE_B_POINTER(7, 7); + UPDATE_B_POINTER(8); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + DECLARE_RESULT_VECTOR(1, 4); + DECLARE_RESULT_VECTOR(1, 5); + DECLARE_RESULT_VECTOR(1, 6); + DECLARE_RESULT_VECTOR(1, 7); + + if (LIKELY(packed_a != NULL)) { + if (j == 0) { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + VECTOR_PACK_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + GATHER_LOAD_A(pg_true, 1, 0); + VECTOR_PACK_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + } + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 
4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + VECTOR_STORE(pg_true, 1, 4); + VECTOR_STORE(pg_true, 1, 5); + VECTOR_STORE(pg_true, 1, 6); + VECTOR_STORE(pg_true, 1, 7); + INCR_C_POINTER(0, 8); + INCR_C_POINTER(1, 8); + } for (; j < n4; j += 4) { CREATE_B_POINTER(0, 0); @@ -257,38 +384,19 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(1, 3); if (LIKELY(packed_a != NULL)) { - if (j == 0) { - for (; k < K; k++) { + for (; k < K; k++) { - QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - VECTOR_PACK_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - GATHER_LOAD_A(pg_true, 1, 0); - VECTOR_PACK_A(1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - } - } else { - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_VECTOR_A(1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - } + QUADWORD_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); } } else { for (; k < K; k++) { @@ -361,6 +469,52 @@ CNAME(BLASLONG M, CREATE_A_POINTER(0, 0); BLASLONG j = 0; + for (; j < n8; j += 8) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + CREATE_B_POINTER(4, 4); + CREATE_B_POINTER(5, 5); + CREATE_B_POINTER(6, 6); + CREATE_B_POINTER(7, 7); + UPDATE_B_POINTER(8); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + 
VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + INCR_C_POINTER(0, 8); + } for (; j < n4; j += 4) { CREATE_B_POINTER(0, 0); @@ -418,6 +572,52 @@ CNAME(BLASLONG M, CREATE_A_POINTER(0, 0); BLASLONG j = 0; + for (; j < n8; j += 8) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + CREATE_B_POINTER(4, 4); + CREATE_B_POINTER(5, 5); + CREATE_B_POINTER(6, 6); + CREATE_B_POINTER(7, 7); + UPDATE_B_POINTER(8); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + VECTOR_STORE(pg_tail, 0, 4); + VECTOR_STORE(pg_tail, 0, 5); + VECTOR_STORE(pg_tail, 0, 6); + VECTOR_STORE(pg_tail, 0, 7); + INCR_C_POINTER(0, 8); + } for (; j < n4; j += 4) { CREATE_B_POINTER(0, 0);
From ea4ab3b31081a4344905929a9c911b0a53eeca45 Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Sat, 20 Jul 2024 13:39:22 +0000 Subject: [PATCH 7/7] Better header guard around bridge --- kernel/arm64/dgemm_small_kernel_nn_sve.c | 3 ++- kernel/arm64/dgemm_small_kernel_nt_sve.c | 3 ++- kernel/arm64/dgemm_small_kernel_tn_sve.c | 3 ++- kernel/arm64/dgemm_small_kernel_tt_sve.c | 3 ++- kernel/arm64/sgemm_small_kernel_nn_sve.c | 3 ++- kernel/arm64/sgemm_small_kernel_nt_sve.c | 3 ++- kernel/arm64/sgemm_small_kernel_tn_sve.c | 3 ++- kernel/arm64/sgemm_small_kernel_tt_sve.c | 3 ++- 8 files changed, 16 insertions(+), 8 deletions(-)
diff --git a/kernel/arm64/dgemm_small_kernel_nn_sve.c b/kernel/arm64/dgemm_small_kernel_nn_sve.c index 417633471..fa39103d0 100644 --- a/kernel/arm64/dgemm_small_kernel_nn_sve.c +++ b/kernel/arm64/dgemm_small_kernel_nn_sve.c @@ -29,7 +29,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include <arm_sve.h> #include <arm_neon.h> -#ifdef __ARM_NEON_SVE_BRIDGE +#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \ + __has_include(<arm_neon_sve_bridge.h>) #include <arm_neon_sve_bridge.h> #else #define svdup_neonq_f32(fixed_reg) \
diff --git a/kernel/arm64/dgemm_small_kernel_nt_sve.c b/kernel/arm64/dgemm_small_kernel_nt_sve.c index 241d96a6c..0b306e754 100644 --- a/kernel/arm64/dgemm_small_kernel_nt_sve.c +++ b/kernel/arm64/dgemm_small_kernel_nt_sve.c @@ -29,7 +29,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <arm_sve.h> #include <arm_neon.h> -#ifdef __ARM_NEON_SVE_BRIDGE +#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \ + __has_include(<arm_neon_sve_bridge.h>) #include <arm_neon_sve_bridge.h> #else #define svdup_neonq_f32(fixed_reg) \
diff --git a/kernel/arm64/dgemm_small_kernel_tn_sve.c b/kernel/arm64/dgemm_small_kernel_tn_sve.c index 1b0fada2a..daca8e1be 100644 --- a/kernel/arm64/dgemm_small_kernel_tn_sve.c +++ b/kernel/arm64/dgemm_small_kernel_tn_sve.c @@ -29,7 +29,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include <arm_sve.h> #include <arm_neon.h> -#ifdef __ARM_NEON_SVE_BRIDGE +#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \ + __has_include(<arm_neon_sve_bridge.h>) #include <arm_neon_sve_bridge.h> #else #define svdup_neonq_f32(fixed_reg) \
diff --git a/kernel/arm64/dgemm_small_kernel_tt_sve.c b/kernel/arm64/dgemm_small_kernel_tt_sve.c index aa5bf2751..efe11a9f9 100644 --- a/kernel/arm64/dgemm_small_kernel_tt_sve.c +++ b/kernel/arm64/dgemm_small_kernel_tt_sve.c @@ -29,7 +29,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include <arm_sve.h> #include <arm_neon.h> -#ifdef __ARM_NEON_SVE_BRIDGE +#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \ + __has_include(<arm_neon_sve_bridge.h>) #include <arm_neon_sve_bridge.h> #else #define svdup_neonq_f32(fixed_reg) \
diff --git a/kernel/arm64/sgemm_small_kernel_nn_sve.c b/kernel/arm64/sgemm_small_kernel_nn_sve.c index 0af073a14..8ea9cf5a7 100644 --- a/kernel/arm64/sgemm_small_kernel_nn_sve.c +++ b/kernel/arm64/sgemm_small_kernel_nn_sve.c @@ -29,7 +29,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include <arm_sve.h> #include <arm_neon.h> -#ifdef __ARM_NEON_SVE_BRIDGE +#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \ + __has_include(<arm_neon_sve_bridge.h>) #include <arm_neon_sve_bridge.h> #else #define svdup_neonq_f32(fixed_reg) \
diff --git a/kernel/arm64/sgemm_small_kernel_nt_sve.c b/kernel/arm64/sgemm_small_kernel_nt_sve.c index ed7ee6bd6..ac7e067cd 100644 --- a/kernel/arm64/sgemm_small_kernel_nt_sve.c +++ b/kernel/arm64/sgemm_small_kernel_nt_sve.c @@ -29,7 +29,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include <arm_sve.h> #include <arm_neon.h> -#ifdef __ARM_NEON_SVE_BRIDGE +#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \ + __has_include(<arm_neon_sve_bridge.h>) #include <arm_neon_sve_bridge.h> #else #define svdup_neonq_f32(fixed_reg) \
diff --git a/kernel/arm64/sgemm_small_kernel_tn_sve.c b/kernel/arm64/sgemm_small_kernel_tn_sve.c index 03406daa6..114640950 100644 --- a/kernel/arm64/sgemm_small_kernel_tn_sve.c +++ b/kernel/arm64/sgemm_small_kernel_tn_sve.c @@ -29,7 +29,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include <arm_sve.h> #include <arm_neon.h> -#ifdef __ARM_NEON_SVE_BRIDGE +#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \ + __has_include(<arm_neon_sve_bridge.h>) #include <arm_neon_sve_bridge.h> #else #define svdup_neonq_f32(fixed_reg) \
diff --git a/kernel/arm64/sgemm_small_kernel_tt_sve.c b/kernel/arm64/sgemm_small_kernel_tt_sve.c index c66330fb5..731c9861b 100644 --- a/kernel/arm64/sgemm_small_kernel_tt_sve.c +++ b/kernel/arm64/sgemm_small_kernel_tt_sve.c @@ -29,7 +29,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include <arm_sve.h> #include <arm_neon.h> -#ifdef __ARM_NEON_SVE_BRIDGE +#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \ + __has_include(<arm_neon_sve_bridge.h>) #include <arm_neon_sve_bridge.h> #else #define svdup_neonq_f32(fixed_reg) \