From 7a6fa699f2aad57a5b4772fc97b2fc60abc8526e Mon Sep 17 00:00:00 2001
From: Chris Sidebottom
Date: Tue, 20 Feb 2024 10:50:43 +0000
Subject: [PATCH 01/52] Small GEMM for AArch64

This is a fairly conservative addition of small matrix kernels using SVE.

---
 Makefile.system                             |    2 +
 kernel/arm64/KERNEL.ARMV8SVE                |   20 +
 kernel/arm64/dgemm_small_kernel_nn_sve.c    |  657 ++++++++++
 kernel/arm64/dgemm_small_kernel_nt_sve.c    |  414 ++++++
 kernel/arm64/dgemm_small_kernel_tn_sve.c    |  709 +++++++++++
 kernel/arm64/dgemm_small_kernel_tt_sve.c    |  482 +++++++
 kernel/arm64/gemm_small_kernel_permit_sve.c |   47 +
 kernel/arm64/sgemm_small_kernel_nn_sve.c    | 1046 ++++++++++++++++
 kernel/arm64/sgemm_small_kernel_nt_sve.c    |  647 ++++++++++
 kernel/arm64/sgemm_small_kernel_tn_sve.c    | 1247 +++++++++++++++++++
 kernel/arm64/sgemm_small_kernel_tt_sve.c    |  574 +++++++
 11 files changed, 5845 insertions(+)
 create mode 100644 kernel/arm64/dgemm_small_kernel_nn_sve.c
 create mode 100644 kernel/arm64/dgemm_small_kernel_nt_sve.c
 create mode 100644 kernel/arm64/dgemm_small_kernel_tn_sve.c
 create mode 100644 kernel/arm64/dgemm_small_kernel_tt_sve.c
 create mode 100644 kernel/arm64/gemm_small_kernel_permit_sve.c
 create mode 100644 kernel/arm64/sgemm_small_kernel_nn_sve.c
 create mode 100644 kernel/arm64/sgemm_small_kernel_nt_sve.c
 create mode 100644 kernel/arm64/sgemm_small_kernel_tn_sve.c
 create mode 100644 kernel/arm64/sgemm_small_kernel_tt_sve.c

diff --git a/Makefile.system b/Makefile.system
index aadf3459a..e95d1529a 100644
--- a/Makefile.system
+++ b/Makefile.system
@@ -268,6 +268,8 @@ SMALL_MATRIX_OPT = 1
 else ifeq ($(ARCH), power)
 SMALL_MATRIX_OPT = 1
 BUILD_BFLOAT16 = 1
+else ifeq ($(ARCH), arm64)
+SMALL_MATRIX_OPT = 1
 endif
 ifeq ($(SMALL_MATRIX_OPT), 1)
 CCOMMON_OPT += -DSMALL_MATRIX_OPT
diff --git a/kernel/arm64/KERNEL.ARMV8SVE b/kernel/arm64/KERNEL.ARMV8SVE
index eeb4844bf..bfadf5cba 100644
--- a/kernel/arm64/KERNEL.ARMV8SVE
+++ b/kernel/arm64/KERNEL.ARMV8SVE
@@ -131,6 +131,16 @@ SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX)
 SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX)
 SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX)
 
+SGEMM_SMALL_M_PERMIT = gemm_small_kernel_permit_sve.c
+SGEMM_SMALL_K_NT = sgemm_small_kernel_nt_sve.c
+SGEMM_SMALL_K_B0_NT = sgemm_small_kernel_nt_sve.c
+SGEMM_SMALL_K_NN = sgemm_small_kernel_nn_sve.c
+SGEMM_SMALL_K_B0_NN = sgemm_small_kernel_nn_sve.c
+SGEMM_SMALL_K_TT = sgemm_small_kernel_tt_sve.c
+SGEMM_SMALL_K_B0_TT = sgemm_small_kernel_tt_sve.c
+SGEMM_SMALL_K_TN = sgemm_small_kernel_tn_sve.c
+SGEMM_SMALL_K_B0_TN = sgemm_small_kernel_tn_sve.c
+
 STRMMUNCOPY_M = trmm_uncopy_sve_v1.c
 STRMMLNCOPY_M = trmm_lncopy_sve_v1.c
 STRMMUTCOPY_M = trmm_utcopy_sve_v1.c
@@ -152,6 +162,16 @@ DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX)
 DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX)
 DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX)
 
+DGEMM_SMALL_M_PERMIT = gemm_small_kernel_permit_sve.c
+DGEMM_SMALL_K_NT = dgemm_small_kernel_nt_sve.c
+DGEMM_SMALL_K_B0_NT = dgemm_small_kernel_nt_sve.c
+DGEMM_SMALL_K_NN = dgemm_small_kernel_nn_sve.c
+DGEMM_SMALL_K_B0_NN = dgemm_small_kernel_nn_sve.c
+DGEMM_SMALL_K_TT = dgemm_small_kernel_tt_sve.c
+DGEMM_SMALL_K_B0_TT = dgemm_small_kernel_tt_sve.c
+DGEMM_SMALL_K_TN = dgemm_small_kernel_tn_sve.c
+DGEMM_SMALL_K_B0_TN = dgemm_small_kernel_tn_sve.c
+
 DTRMMUNCOPY_M = trmm_uncopy_sve_v1.c
 DTRMMLNCOPY_M = trmm_lncopy_sve_v1.c
 DTRMMUTCOPY_M = trmm_utcopy_sve_v1.c
diff --git a/kernel/arm64/dgemm_small_kernel_nn_sve.c b/kernel/arm64/dgemm_small_kernel_nn_sve.c
new file mode 100644
index 000000000..8baef8277
--- /dev/null
+++ b/kernel/arm64/dgemm_small_kernel_nn_sve.c
@@ -0,0 +1,657 @@
+/***************************************************************************
+Copyright (c) 2024, The OpenBLAS Project
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+
+#include <arm_neon.h>
+#include <arm_sve.h>
+#ifdef __ARM_NEON_SVE_BRIDGE
+#include <arm_neon_sve_bridge.h>
+#else
+#define svdup_neonq_f32(fixed_reg) \
+  ({ \
+    svfloat32_t scalable_reg; \
+    asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \
+    scalable_reg; \
+  })
+#define svdup_neonq_f64(fixed_reg) \
+  ({ \
+    svfloat64_t scalable_reg; \
+    asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \
+    scalable_reg; \
+  })
+#endif
+
+#define A_ELEMENT_K(m, offset_k) A[(i + (m)) + (k + offset_k) * lda]
+#define A_ELEMENT(m) A_ELEMENT_K(m, 0)
+
+#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) + (j + (n)) * ldb]
+#define B_ELEMENT(n) B_ELEMENT_K(n, 0)
+
+#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc]
+
+#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 4 + n]
+#define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0)
+
+// ASIMD
+#define DECLARE_RESULT_VECTOR2(m, n) \
+  float64x2_t result##m##n = vdupq_n_f64(0.0);
+#define DECLARE_RESULT(m, n) float64_t result##m##n = 0.0;
+#define BROADCAST_LOAD_A2(m, offset_k) \
+  float64x2_t a##m##_k##offset_k = vld1q_dup_f64(&A_ELEMENT_K(m, offset_k));
+#define LOAD_A1(m, offset_k) \
+  float64_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k);
+#define VECTOR_LOAD_B_K2(n, offset_k) \
+  float64x2_t b##k##n##_k##offset_k = vld1q_f64(&B_ELEMENT_K(n, offset_k));
+#define TRANSPOSE_B2_K2(n0, n1, offset_k0, offset_k1) \
+  float64x2_t b##n0##_k##offset_k0 = \
+    vzip1q_f64(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \
+  float64x2_t b##n0##_k##offset_k1 = \
+    vzip2q_f64(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0);
+
+#define SCALE_B2_K2(n0, offset_k0, offset_k1) \
+  svfloat64_t b##s##n0##_k##offset_k0 = svdup_neonq_f64(b##n0##_k##offset_k0); \
+  svfloat64_t b##s##n0##_k##offset_k1 = svdup_neonq_f64(b##n0##_k##offset_k1);
+#define GATHER_LOAD_B2(n, offset_k) \
+  float64x2_t b##n##_k##offset_k = vdupq_n_f64(B_ELEMENT_K(n, offset_k)); \
+  b##n##_k##offset_k = \
+    vsetq_lane_f64(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1);
+#define VECTOR_UNPACK_B2(n, offset_k) \
+  float64x2_t b##n##_k##offset_k = vld1q_f64(&PACK_ELEMENT_K(n, offset_k));
+#define VECTOR_PACK_B2(n, offset_k) \
+  vst1q_f64(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k);
+#define PACK_B0(n, offset_k) \
+  PACK_ELEMENT_K(n, offset_k) = vgetq_lane_f64(b##n##_k##offset_k, 0);
+#define UPDATE_RESULT_VECTOR2(m, n, offset_k) \
+  result##m##n = \
+    vfmaq_f64(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k);
+#define UPDATE_RESULT(m, n, offset_k) \
+  result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k;
+#ifdef B0
+#define SCATTER_STORE2(m, n) \
+  result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \
+  C_ELEMENT(m, n + 0) = vgetq_lane_f64(result##m##n, 0); \
+  C_ELEMENT(m, n + 1) = vgetq_lane_f64(result##m##n, 1);
+#else
+#define SCATTER_STORE2(m, n) \
+  result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \
+  C_ELEMENT(m, n + 0) = \
+    C_ELEMENT(m, n + 0) * beta + vgetq_lane_f64(result##m##n, 0); \
+  C_ELEMENT(m, n + 1) = \
+    C_ELEMENT(m, n + 1) * beta + vgetq_lane_f64(result##m##n, 1);
+#endif
+
+// SVE
+#define DECLARE_RESULT_VECTOR(m, n) svfloat64_t result##m##n = svdup_f64(0.0);
+#define BROADCAST_LOAD_A(m, offset_k) \
+  svfloat64_t a##s##m##_k##offset_k = svdup_f64(A_ELEMENT_K(m, offset_k));
+#define BROADCAST_LOAD_B(n, offset_k) \
+  svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k));
+#define VECTOR_LOAD_A(pg, m, offset_k) \
+  svfloat64_t a##s##m##_k##offset_k = \
+    svld1(pg, &A_ELEMENT_K(v_size * m, offset_k));
+#define QUADWORD_LOAD_B(n, offset_k) \
+  svfloat64_t b##s##n##_k##offset_k = \
+    svld1rq(pg_true, &B_ELEMENT_K(n, offset_k));
+#define PACK_B(n, offset_k) \
+  svst1(pg_first, &PACK_ELEMENT_K(n, offset_k), b##s##n##_k##offset_k);
+#define VECTOR_PACK_B(n, offset_k) \
+  svst1(pg_true, &PACK_ELEMENT_K(n * v_size, offset_k), b##s##n##_k##offset_k);
+#define QUADWORD_PACK_B(n, offset_k) \
+  svst1(pg_quad, &PACK_ELEMENT_K(n, offset_k), b##s##n##_k##offset_k);
+#define UNPACK_VECTOR_B(n, offset_k) \
+  svfloat64_t b##s##n##_k##offset_k = \
+    svld1(pg_true, &PACK_ELEMENT_K(n * v_size, offset_k));
+#define UNPACK_BROADCAST_B(n, offset_k) \
+  svfloat64_t b##s##n##_k##offset_k = svdup_f64(PACK_ELEMENT_K(n, offset_k));
+#define UNPACK_QUADWORD_B(n, offset_k) \
+  svfloat64_t b##s##n##_k##offset_k = \
+    svld1rq(pg_true, &PACK_ELEMENT_K(n, offset_k));
+#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \
+  result##m##n = \
+    svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k);
+#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \
+  result##m##n = svmla_lane( \
+    result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane);
+#ifdef B0
+#define VECTOR_STORE(pg, m, n) \
+  result##m##n = svmul_m(pg, result##m##n, alpha_vec); \
+  svst1(pg, &C_ELEMENT(v_size * m, n), result##m##n);
+#define SCATTER_STORE(pg, m, n) \
+  result##m##n = svmul_m(pg, result##m##n, alpha_vec); \
+  svst1_scatter_index( \
+    pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc), result##m##n);
+#else
+#define VECTOR_STORE(pg, m, n) \
+  result##m##n = svmul_m(pg, result##m##n, alpha_vec); \
+  result##m##n = \
+    svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \
+  svst1(pg, &C_ELEMENT(v_size * m, n), result##m##n);
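+// SCATTER_STORE (defined next) performs the same alpha/beta update as
+// VECTOR_STORE above, but for C slices that are not contiguous in memory:
+// svindex_u64(0, ldc) builds an ldc-strided offset vector, so each SVE
+// lane reads and writes one element per column of C (a row segment),
+// using gather loads and scatter stores in place of svld1/svst1.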
+#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = svmla_m( \ + pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc)), \ + beta_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); +#endif + +#ifndef LIKELY +#ifdef __GNUC__ +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif +#ifndef UNLIKELY +#ifdef __GNUC__ +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + +#ifdef B0 +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT* C, + BLASLONG ldc) +#else +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT beta, + FLOAT* C, + BLASLONG ldc) +#endif +{ + const uint64_t v_size = svcntd(); + const uint64_t v_size2 = v_size * 2; + const svbool_t pg_true = svptrue_b64(); + const svbool_t pg_quad = svwhilelt_b64(0, 2); + const svbool_t pg_first = svwhilelt_b64(0, 1); + const svfloat64_t alpha_vec = svdup_f64(alpha); +#ifndef B0 + const svfloat64_t beta_vec = svdup_f64(beta); +#endif + const BLASLONG n4 = N & -4; + const BLASLONG n2 = N & -2; + const BLASLONG v_m2 = M & -v_size2; + const BLASLONG v_m1 = M & -v_size; + const BLASLONG k2 = K & -2; + + const int pack_b = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + FLOAT* packed_b = + (pack_b) ? packed_b = (FLOAT*)malloc(K * 4 * sizeof(FLOAT)) : NULL; + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + if (LIKELY(packed_b != NULL)) { + if (i == 0) { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + VECTOR_PACK_B2(0, 0); + VECTOR_PACK_B2(0, 1); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + VECTOR_PACK_B2(2, 0); + VECTOR_PACK_B2(2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + PACK_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + PACK_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 
1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + PACK_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + PACK_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } else { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UNPACK_QUADWORD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + } + } + } else { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + if (LIKELY(packed_b != NULL)) { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UNPACK_QUADWORD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + } + } else { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + 
UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + if (LIKELY(packed_b != NULL)) { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UNPACK_QUADWORD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + } + } else { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_tail, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); + } + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + } + } + for (; j < n2; j += 2) { + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + 
UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_tail, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + } + } + for (; j < N; j++) { + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + } + } + + if (pack_b) + free(packed_b); + + return 0; +} diff --git a/kernel/arm64/dgemm_small_kernel_nt_sve.c b/kernel/arm64/dgemm_small_kernel_nt_sve.c new file mode 100644 index 000000000..982388287 --- /dev/null +++ b/kernel/arm64/dgemm_small_kernel_nt_sve.c @@ -0,0 +1,414 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. 
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in
+the documentation and/or other materials provided with the
+distribution.
+3. Neither the name of the OpenBLAS project nor the names of
+its contributors may be used to endorse or promote products
+derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+
+#include <arm_neon.h>
+#include <arm_sve.h>
+#ifdef __ARM_NEON_SVE_BRIDGE
+#include <arm_neon_sve_bridge.h>
+#else
+#define svdup_neonq_f32(fixed_reg) \
+  ({ \
+    svfloat32_t scalable_reg; \
+    asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \
+    scalable_reg; \
+  })
+#define svdup_neonq_f64(fixed_reg) \
+  ({ \
+    svfloat64_t scalable_reg; \
+    asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \
+    scalable_reg; \
+  })
+#endif
+
+#define A_ELEMENT_K(m, offset_k) A[(i + (m)) + (k + offset_k) * lda]
+#define A_ELEMENT(m) A_ELEMENT_K(m, 0)
+
+#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) * ldb + (j + (n))]
+#define B_ELEMENT(n) B_ELEMENT_K(n, 0)
+
+#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc]
+
+#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 4 + n]
+#define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0)
+
+// ASIMD
+#define DECLARE_RESULT_VECTOR2(m, n) \
+  float64x2_t result##m##n = vdupq_n_f64(0.0);
+#define DECLARE_RESULT(m, n) float64_t result##m##n = 0.0;
+#define BROADCAST_LOAD_A2(m, offset_k) \
+  float64x2_t a##m##_k##offset_k = vld1q_dup_f64(&A_ELEMENT_K(m, offset_k));
+#define LOAD_A1(m, offset_k) \
+  float64_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k);
+#define VECTOR_LOAD_B2(n, offset_k) \
+  float64x2_t b##n##_k##offset_k = vld1q_f64(&B_ELEMENT_K(n, offset_k));
+#define GATHER_LOAD_B2(n, offset_k) \
+  float64x2_t b##n##_k##offset_k = vdupq_n_f64(B_ELEMENT_K(n, offset_k)); \
+  b##n##_k##offset_k = \
+    vsetq_lane_f64(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1);
+#define UPDATE_RESULT_VECTOR2(m, n, offset_k) \
+  result##m##n = \
+    vfmaq_f64(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k);
+#define UPDATE_RESULT(m, n, offset_k) \
+  result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k;
+#ifdef B0
+#define SCATTER_STORE2(m, n) \
+  result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \
+  C_ELEMENT(m, n + 0) = vgetq_lane_f64(result##m##n, 0); \
+  C_ELEMENT(m, n + 1) = vgetq_lane_f64(result##m##n, 1);
+#else
+#define SCATTER_STORE2(m, n)
\ + result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \ + C_ELEMENT(m, n + 0) = \ + C_ELEMENT(m, n + 0) * beta + vgetq_lane_f64(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = \ + C_ELEMENT(m, n + 1) * beta + vgetq_lane_f64(result##m##n, 1); +#endif + +// SVE +#define DECLARE_RESULT_VECTOR(m, n) svfloat64_t result##m##n = svdup_f64(0.0); +#define BROADCAST_LOAD_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = svdup_f64(A_ELEMENT_K(m, offset_k)); +#define BROADCAST_LOAD_B(n, offset_k) \ + svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k)); +#define VECTOR_LOAD_A(pg, m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); +#define QUADWORD_LOAD_B(n, offset_k) \ + svfloat64_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); +#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \ + result##m##n = \ + svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k); +#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \ + result##m##n = svmla_lane( \ + result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); +#ifdef B0 +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); +#else +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = \ + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = svmla_m( \ + pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc)), \ + beta_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); +#endif + +#ifndef LIKELY +#ifdef __GNUC__ +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif +#ifndef UNLIKELY +#ifdef __GNUC__ +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + +#ifdef B0 +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT* C, + BLASLONG ldc) +#else +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT beta, + FLOAT* C, + BLASLONG ldc) +#endif +{ + const uint64_t v_size = svcntd(); + const uint64_t v_size32 = v_size * 32; + const uint64_t v_size3 = v_size * 3; + const svbool_t pg_true = svptrue_b64(); + const svbool_t pg_quad = svwhilelt_b64(0, 2); + const svfloat64_t alpha_vec = svdup_f64(alpha); +#ifndef B0 + const svfloat64_t beta_vec = svdup_f64(beta); +#endif + const BLASLONG n4 = N & -4; + const BLASLONG n2 = N & -2; + const BLASLONG v_m3 = M - (M % v_size3); + const BLASLONG v_m1 = M & -v_size; + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + BLASLONG i = 0; + for (; i < v_m3; i += v_size3) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + 
DECLARE_RESULT_VECTOR(2, 0); + DECLARE_RESULT_VECTOR(2, 1); + DECLARE_RESULT_VECTOR(2, 2); + DECLARE_RESULT_VECTOR(2, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + QUADWORD_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + VECTOR_LOAD_A(pg_true, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 2, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + VECTOR_STORE(pg_true, 2, 0); + VECTOR_STORE(pg_true, 2, 1); + VECTOR_STORE(pg_true, 2, 2); + VECTOR_STORE(pg_true, 2, 3); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + QUADWORD_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + QUADWORD_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + } + } + for (; j < n2; j += 2) { + + BLASLONG i = 0; + for (; i < v_m3; i += v_size3) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(2, 0); + DECLARE_RESULT_VECTOR(2, 1); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 2, 0); + VECTOR_STORE(pg_true, 2, 1); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + 
DECLARE_RESULT_VECTOR(0, 1); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + } + } + for (; j < N; j++) { + + BLASLONG i = 0; + for (; i < v_m3; i += v_size3) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(2, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + VECTOR_LOAD_A(pg_true, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 2, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 2, 0); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + } + } + + return 0; +} diff --git a/kernel/arm64/dgemm_small_kernel_tn_sve.c b/kernel/arm64/dgemm_small_kernel_tn_sve.c new file mode 100644 index 000000000..7158851da --- /dev/null +++ b/kernel/arm64/dgemm_small_kernel_tn_sve.c @@ -0,0 +1,709 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+
+#include <arm_neon.h>
+#include <arm_sve.h>
+#ifdef __ARM_NEON_SVE_BRIDGE
+#include <arm_neon_sve_bridge.h>
+#else
+#define svdup_neonq_f32(fixed_reg) \
+  ({ \
+    svfloat32_t scalable_reg; \
+    asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \
+    scalable_reg; \
+  })
+#define svdup_neonq_f64(fixed_reg) \
+  ({ \
+    svfloat64_t scalable_reg; \
+    asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \
+    scalable_reg; \
+  })
+#endif
+
+#define A_ELEMENT_K(m, offset_k) A[(i + (m)) * lda + (k + offset_k)]
+#define A_ELEMENT(m) A_ELEMENT_K(m, 0)
+
+#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) + (j + (n)) * ldb]
+#define B_ELEMENT(n) B_ELEMENT_K(n, 0)
+
+#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc]
+
+#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * 2 * v_size + m]
+#define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0)
+
+// ASIMD
+#define DECLARE_RESULT_VECTOR2(m, n) \
+  float64x2_t result##m##n = vdupq_n_f64(0.0);
+#define DECLARE_RESULT(m, n) float64_t result##m##n = 0.0;
+#define BROADCAST_LOAD_A2(m, offset_k) \
+  float64x2_t a##m##_k##offset_k = vld1q_dup_f64(&A_ELEMENT_K(m, offset_k));
+#define LOAD_A1(m, offset_k) \
+  float64_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k);
+#define VECTOR_LOAD_B_K2(n, offset_k) \
+  float64x2_t b##k##n##_k##offset_k = vld1q_f64(&B_ELEMENT_K(n, offset_k));
+#define TRANSPOSE_B2_K2(n0, n1, offset_k0, offset_k1) \
+  float64x2_t b##n0##_k##offset_k0 = \
+    vzip1q_f64(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \
+  float64x2_t b##n0##_k##offset_k1 = \
+    vzip2q_f64(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0);
+
+#define SCALE_B2_K2(n0, offset_k0, offset_k1) \
+  svfloat64_t b##s##n0##_k##offset_k0 = svdup_neonq_f64(b##n0##_k##offset_k0); \
+  svfloat64_t b##s##n0##_k##offset_k1 = svdup_neonq_f64(b##n0##_k##offset_k1);
+#define GATHER_LOAD_B2(n, offset_k) \
+  float64x2_t b##n##_k##offset_k = vdupq_n_f64(B_ELEMENT_K(n, offset_k)); \
+  b##n##_k##offset_k = \
+    vsetq_lane_f64(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1);
+#define VECTOR_UNPACK_B2(n, offset_k) \
+  float64x2_t b##n##_k##offset_k = vld1q_f64(&PACK_ELEMENT_K(n, offset_k));
+#define VECTOR_PACK_B2(n, offset_k) \
+  vst1q_f64(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k);
+#define PACK_B0(n, offset_k) \
+  PACK_ELEMENT_K(n, offset_k) = vgetq_lane_f64(b##n##_k##offset_k, 0);
+#define UPDATE_RESULT_VECTOR2(m, n, offset_k) \
+  result##m##n = \
+    vfmaq_f64(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k);
+#define UPDATE_RESULT(m, n, offset_k) \
+  result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k;
+#ifdef B0
+#define SCATTER_STORE2(m, n) \
+  result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \
+  C_ELEMENT(m, n + 0) = vgetq_lane_f64(result##m##n, 0); \
+  C_ELEMENT(m, n + 1) = vgetq_lane_f64(result##m##n, 1);
+#else
+#define SCATTER_STORE2(m, n) \
+  result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \
+  C_ELEMENT(m, n + 0) = \
+    C_ELEMENT(m, n + 0)
* beta + vgetq_lane_f64(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = \ + C_ELEMENT(m, n + 1) * beta + vgetq_lane_f64(result##m##n, 1); +#endif + +// SVE +#define DECLARE_RESULT_VECTOR(m, n) svfloat64_t result##m##n = svdup_f64(0.0); +#define BROADCAST_LOAD_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = svdup_f64(A_ELEMENT_K(m, offset_k)); +#define BROADCAST_LOAD_B(n, offset_k) \ + svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k)); +#define VECTOR_LOAD_A(pg, m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); +#define QUADWORD_LOAD_B(n, offset_k) \ + svfloat64_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); +#define GATHER_LOAD_A(pg, m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1_gather_index(pg, &A_ELEMENT_K(v_size * m, offset_k), lda_vec); +#define PACK_A(m, offset_k) \ + svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); +#define VECTOR_PACK_A(m, offset_k) \ + svst1(pg_true, &PACK_ELEMENT_K(m* v_size, offset_k), a##s##m##_k##offset_k); +#define QUADWORD_PACK_A(m, offset_k) \ + svst1(pg_quad, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); +#define UNPACK_VECTOR_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1(pg_true, &PACK_ELEMENT_K(m * v_size, offset_k)); +#define UNPACK_BROADCAST_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = svdup_f64(PACK_ELEMENT_K(m, offset_k)); +#define UNPACK_QUADWORD_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1rq(pg_true, &PACK_ELEMENT_K(m, offset_k)); +#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \ + result##m##n = \ + svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k); +#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \ + result##m##n = svmla_lane( \ + result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); +#ifdef B0 +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); +#else +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = \ + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = svmla_m( \ + pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc)), \ + beta_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); +#endif + +#ifndef LIKELY +#ifdef __GNUC__ +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif +#ifndef UNLIKELY +#ifdef __GNUC__ +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + +#ifdef B0 +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT* C, + BLASLONG ldc) +#else +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT beta, + FLOAT* C, + BLASLONG ldc) +#endif +{ + const uint64_t v_size = svcntd(); + const uint64_t v_size2 = v_size * 2; + const svbool_t 
pg_true = svptrue_b64(); + const svbool_t pg_quad = svwhilelt_b64(0, 2); + const svbool_t pg_first = svwhilelt_b64(0, 1); + const svfloat64_t alpha_vec = svdup_f64(alpha); +#ifndef B0 + const svfloat64_t beta_vec = svdup_f64(beta); +#endif + const svuint64_t lda_vec = svindex_u64(0LL, lda); + + const BLASLONG v_m2 = M & -v_size2; + const BLASLONG v_m1 = M & -v_size; + const BLASLONG n4 = N & -4; + const BLASLONG n2 = N & -2; + const BLASLONG k2 = K & -2; + + const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + FLOAT* packed_a = + (pack_a) ? packed_a = (FLOAT*)malloc(K * 2 * v_size * sizeof(FLOAT)) : NULL; + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + if (LIKELY(packed_a != NULL)) { + if (j == 0) { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + GATHER_LOAD_A(pg_true, 0, 0); + VECTOR_PACK_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + GATHER_LOAD_A(pg_true, 0, 1); + VECTOR_PACK_A(0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + GATHER_LOAD_A(pg_true, 1, 0); + VECTOR_PACK_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + GATHER_LOAD_A(pg_true, 1, 1); + VECTOR_PACK_A(1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + VECTOR_PACK_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + VECTOR_PACK_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } else { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UNPACK_VECTOR_A(0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + 
UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + UNPACK_VECTOR_A(1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } + } else { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + GATHER_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + GATHER_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + } + for (; j < n2; j += 2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + + if (LIKELY(packed_a != NULL)) { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UNPACK_VECTOR_A(0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 
0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UNPACK_VECTOR_A(1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + } + } else { + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + GATHER_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } else { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + } + } + for (; i < v_m1; i += v_size) { + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + GATHER_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + 
VECTOR_STORE(pg_true, 0, 3); + } + for (; j < n2; j += 2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + GATHER_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + } + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + GATHER_LOAD_A(pg_tail, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + VECTOR_LOAD_B_K2(2, 0); + VECTOR_LOAD_B_K2(3, 0); + TRANSPOSE_B2_K2(2, 3, 0, 1); + SCALE_B2_K2(2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + } + for (; j < n2; j += 2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + + for (; k < k2; k += 2) { + + VECTOR_LOAD_B_K2(0, 0); + VECTOR_LOAD_B_K2(1, 0); + TRANSPOSE_B2_K2(0, 1, 0, 1); + SCALE_B2_K2(0, 0, 1); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + GATHER_LOAD_A(pg_tail, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + } + } + + if (pack_a) + free(packed_a); + + 
return 0; +} diff --git a/kernel/arm64/dgemm_small_kernel_tt_sve.c b/kernel/arm64/dgemm_small_kernel_tt_sve.c new file mode 100644 index 000000000..12fc0b59e --- /dev/null +++ b/kernel/arm64/dgemm_small_kernel_tt_sve.c @@ -0,0 +1,482 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/
+
+#include "common.h"
+
+#include <arm_neon.h>
+#include <arm_sve.h>
+#ifdef __ARM_NEON_SVE_BRIDGE
+#include <arm_neon_sve_bridge.h>
+#else
+#define svdup_neonq_f32(fixed_reg) \
+  ({ \
+    svfloat32_t scalable_reg; \
+    asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \
+    scalable_reg; \
+  })
+#define svdup_neonq_f64(fixed_reg) \
+  ({ \
+    svfloat64_t scalable_reg; \
+    asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \
+    scalable_reg; \
+  })
+#endif
+
+#define A_ELEMENT_K(m, offset_k) A[(i + (m)) * lda + (k + offset_k)]
+#define A_ELEMENT(m) A_ELEMENT_K(m, 0)
+
+#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) * ldb + (j + (n))]
+#define B_ELEMENT(n) B_ELEMENT_K(n, 0)
+
+#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc]
+
+#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * 2 * v_size + m]
+#define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0)
+
+// ASIMD
+#define DECLARE_RESULT_VECTOR2(m, n) \
+  float64x2_t result##m##n = vdupq_n_f64(0.0);
+#define DECLARE_RESULT(m, n) float64_t result##m##n = 0.0;
+#define BROADCAST_LOAD_A2(m, offset_k) \
+  float64x2_t a##m##_k##offset_k = vld1q_dup_f64(&A_ELEMENT_K(m, offset_k));
+#define LOAD_A1(m, offset_k) \
+  float64_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k);
+#define VECTOR_LOAD_B2(n, offset_k) \
+  float64x2_t b##n##_k##offset_k = vld1q_f64(&B_ELEMENT_K(n, offset_k));
+#define GATHER_LOAD_B2(n, offset_k) \
+  float64x2_t b##n##_k##offset_k = vdupq_n_f64(B_ELEMENT_K(n, offset_k)); \
+  b##n##_k##offset_k = \
+    vsetq_lane_f64(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1);
+#define VECTOR_UNPACK_B2(n, offset_k) \
+  float64x2_t b##n##_k##offset_k = vld1q_f64(&PACK_ELEMENT_K(n, offset_k));
+#define VECTOR_PACK_B2(n, offset_k) \
+  vst1q_f64(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k);
+#define PACK_B0(n, offset_k) \
+  PACK_ELEMENT_K(n, offset_k) = vget_lane_f64(b##n##_k##offset_k, 0);
+#define UPDATE_RESULT_VECTOR2(m, n, offset_k) \
+  result##m##n = \
+    vfmaq_f64(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k);
+#define UPDATE_RESULT(m, n, offset_k) \
+  result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k;
+#ifdef B0
+#define VECTOR_STORE2(m, n) \
+  vst1q_f64(&C_ELEMENT(m, n), vmulq_f64(result##m##n, vdupq_n_f64(alpha)));
+#define STORE(m, n) C_ELEMENT(m, n) = alpha * result##m##n;
+#else
+#define VECTOR_STORE2(m, n) \
+  result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \
+  result##m##n = \
+    vfmaq_f64(result##m##n, vld1q_f64(&C_ELEMENT(m, n)), vdupq_n_f64(beta)); \
+  vst1q_f64(&C_ELEMENT(m, n), result##m##n);
+#define STORE(m, n) \
+  C_ELEMENT(m, n) = C_ELEMENT(m, n) * beta + alpha * result##m##n;
+#endif
+
+// SVE
+#define DECLARE_RESULT_VECTOR(m, n) svfloat64_t result##m##n = svdup_f64(0.0);
+#define BROADCAST_LOAD_A(m, offset_k) \
+  svfloat64_t a##s##m##_k##offset_k = svdup_f64(A_ELEMENT_K(m, offset_k));
+#define BROADCAST_LOAD_B(n, offset_k) \
+  svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k));
+#define VECTOR_LOAD_A(pg, m, offset_k) \
+  svfloat64_t a##s##m##_k##offset_k = \
+    svld1(pg, &A_ELEMENT_K(v_size * m, offset_k));
+#define QUADWORD_LOAD_B(n, offset_k) \
+  svfloat64_t b##s##n##_k##offset_k = \
+    svld1rq(pg_true, &B_ELEMENT_K(n, offset_k));
+#define GATHER_LOAD_A(pg, m, offset_k) \
+  svfloat64_t a##s##m##_k##offset_k = \
+    svld1_gather_index(pg, &A_ELEMENT_K(v_size * m, offset_k), lda_vec);
+#define PACK_A(m, offset_k) \
+  svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k);
+#define 
VECTOR_PACK_A(m, offset_k) \ + svst1(pg_true, &PACK_ELEMENT_K(m* v_size, offset_k), a##s##m##_k##offset_k); +#define QUADWORD_PACK_A(m, offset_k) \ + svst1(pg_quad, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); +#define UNPACK_VECTOR_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1(pg_true, &PACK_ELEMENT_K(m * v_size, offset_k)); +#define UNPACK_BROADCAST_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = svdup_f64(PACK_ELEMENT_K(m, offset_k)); +#define UNPACK_QUADWORD_A(m, offset_k) \ + svfloat64_t a##s##m##_k##offset_k = \ + svld1rq(pg_true, &PACK_ELEMENT_K(m, offset_k)); +#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \ + result##m##n = \ + svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k); +#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \ + result##m##n = svmla_lane( \ + result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); +#ifdef B0 +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); +#else +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = \ + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = svmla_m( \ + pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc)), \ + beta_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); +#endif + +#ifndef LIKELY +#ifdef __GNUC__ +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif +#ifndef UNLIKELY +#ifdef __GNUC__ +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + +#ifdef B0 +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT* C, + BLASLONG ldc) +#else +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT beta, + FLOAT* C, + BLASLONG ldc) +#endif +{ + const uint64_t v_size = svcntd(); + const uint64_t v_size2 = v_size * 2; + const svbool_t pg_true = svptrue_b64(); + const svbool_t pg_quad = svwhilelt_b64(0, 2); + const svbool_t pg_first = svwhilelt_b64(0, 1); + const svfloat64_t alpha_vec = svdup_f64(alpha); +#ifndef B0 + const svfloat64_t beta_vec = svdup_f64(beta); +#endif + const svuint64_t lda_vec = svindex_u64(0LL, lda); + + const BLASLONG v_m2 = M & -v_size2; + const BLASLONG v_m1 = M & -v_size; + const BLASLONG n4 = N & -4; + const BLASLONG n2 = N & -2; + + const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + FLOAT* packed_a = + (pack_a) ? 
packed_a = (FLOAT*)malloc(K * 2 * v_size * sizeof(FLOAT)) : NULL; + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + if (LIKELY(packed_a != NULL)) { + if (j == 0) { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + VECTOR_PACK_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + QUADWORD_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + VECTOR_PACK_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + QUADWORD_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + } + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + QUADWORD_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + } + for (; j < n2; j += 2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 
0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } else { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + } + } + for (; i < v_m1; i += v_size) { + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + QUADWORD_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + } + for (; j < n2; j += 2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + } + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + QUADWORD_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + } + for (; j < n2; j += 2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + } + } + + if (pack_a) + free(packed_a); + + return 0; +} diff --git a/kernel/arm64/gemm_small_kernel_permit_sve.c b/kernel/arm64/gemm_small_kernel_permit_sve.c new file mode 100644 index 000000000..9526dbbe2 --- /dev/null +++ b/kernel/arm64/gemm_small_kernel_permit_sve.c @@ -0,0 +1,47 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. 
+Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" + +int CNAME(int transa, int transb, BLASLONG M, BLASLONG N, BLASLONG K, FLOAT alpha, FLOAT beta) +{ + BLASLONG MNK = M * N * K; + +#if defined(DOUBLE) // dgemm + // TN prefers full copies much earlier + if (transa && !transb && MNK > 16*16*16) { + return 0; + } +#else // sgemm + // TODO! +#endif + + if (MNK <= 64*64*64) + return 1; + + return 0; +} diff --git a/kernel/arm64/sgemm_small_kernel_nn_sve.c b/kernel/arm64/sgemm_small_kernel_nn_sve.c new file mode 100644 index 000000000..85c7cfa86 --- /dev/null +++ b/kernel/arm64/sgemm_small_kernel_nn_sve.c @@ -0,0 +1,1046 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+
+#include <arm_neon.h>
+#include <arm_sve.h>
+#ifdef __ARM_NEON_SVE_BRIDGE
+#include <arm_neon_sve_bridge.h>
+#else
+#define svdup_neonq_f32(fixed_reg) \
+  ({ \
+    svfloat32_t scalable_reg; \
+    asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \
+    scalable_reg; \
+  })
+#define svdup_neonq_f64(fixed_reg) \
+  ({ \
+    svfloat64_t scalable_reg; \
+    asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \
+    scalable_reg; \
+  })
+#endif
+
+#define A_ELEMENT_K(m, offset_k) A[(i + (m)) + (k + offset_k) * lda]
+#define A_ELEMENT(m) A_ELEMENT_K(m, 0)
+
+#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) + (j + (n)) * ldb]
+#define B_ELEMENT(n) B_ELEMENT_K(n, 0)
+
+#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc]
+
+#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 8 + n]
+#define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0)
+
+// ASIMD
+#define DECLARE_RESULT_VECTOR4(m, n) \
+  float32x4_t result##m##n = vdupq_n_f32(0.0);
+#define DECLARE_RESULT(m, n) float32_t result##m##n = 0.0;
+#define BROADCAST_LOAD_A4(m, offset_k) \
+  float32x4_t a##m##_k##offset_k = vld1q_dup_f32(&A_ELEMENT_K(m, offset_k));
+#define LOAD_A1(m, offset_k) \
+  float32_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k);
+#define VECTOR_LOAD_B_K4(n, offset_k) \
+  float32x4_t b##k##n##_k##offset_k = vld1q_f32(&B_ELEMENT_K(n, offset_k));
+#define TRANSPOSE_B4_K4( \
+  n0, n1, n2, n3, offset_k0, offset_k1, offset_k2, offset_k3) \
+  float32x4_t b##t##n0##_k##offset_k0 = \
+    vzip1q_f32(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \
+  float32x4_t b##t##n0##_k##offset_k1 = \
+    vzip2q_f32(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \
+  float32x4_t b##t##n0##_k##offset_k2 = \
+    vzip1q_f32(b##k##n2##_k##offset_k0, b##k##n3##_k##offset_k0); \
+  float32x4_t b##t##n0##_k##offset_k3 = \
+    vzip2q_f32(b##k##n2##_k##offset_k0, b##k##n3##_k##offset_k0); \
+  float32x4_t b##n0##_k##offset_k0 = vreinterpretq_f32_f64( \
+    vzip1q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k0), \
+               vreinterpretq_f64_f32(b##t##n0##_k##offset_k2))); \
+  float32x4_t b##n0##_k##offset_k1 = vreinterpretq_f32_f64( \
+    vzip2q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k0), \
+               vreinterpretq_f64_f32(b##t##n0##_k##offset_k2))); \
+  float32x4_t b##n0##_k##offset_k2 = vreinterpretq_f32_f64( \
+    vzip1q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k1), \
+               vreinterpretq_f64_f32(b##t##n0##_k##offset_k3))); \
+  float32x4_t b##n0##_k##offset_k3 = vreinterpretq_f32_f64( \
+    vzip2q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k1), \
+               vreinterpretq_f64_f32(b##t##n0##_k##offset_k3)));
+
+#define SCALE_B4_K4(n0, offset_k0, offset_k1, offset_k2, offset_k3) \
+  svfloat32_t b##s##n0##_k##offset_k0 = svdup_neonq_f32(b##n0##_k##offset_k0); \
+  svfloat32_t b##s##n0##_k##offset_k1 = svdup_neonq_f32(b##n0##_k##offset_k1); \
+  svfloat32_t b##s##n0##_k##offset_k2 = svdup_neonq_f32(b##n0##_k##offset_k2); \
+  svfloat32_t b##s##n0##_k##offset_k3 = svdup_neonq_f32(b##n0##_k##offset_k3);
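+// How the B panel reaches the SVE FMAs (a sketch of the macro plumbing
+// above): in this NN kernel B is column-major, so VECTOR_LOAD_B_K4 pulls four
+// consecutive k values of column j+n into one NEON quadword.  TRANSPOSE_B4_K4
+// zips four such columns so that each register afterwards holds
+// B(k, j+0..j+3) for a single k, and SCALE_B4_K4 widens each 128-bit register
+// into an SVE register with svdup_neonq_f32, which replicates the quadword
+// across the whole vector.  Since svmla_lane indexes within each 128-bit
+// segment, e.g.
+//
+//   result01 = svmla_lane(result01, as0_k0, bs0_k0, 1);
+//
+// multiplies the whole A vector by B(k, j+1), four columns of B are consumed
+// per transposed register without any further broadcast loads.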
+#define GATHER_LOAD_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vdupq_n_f32(B_ELEMENT_K(n, offset_k)); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 2, offset_k), b##n##_k##offset_k, 2); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 3, offset_k), b##n##_k##offset_k, 3); +#define VECTOR_UNPACK_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vld1q_f32(&PACK_ELEMENT_K(n, offset_k)); +#define VECTOR_PACK_B4(n, offset_k) \ + vst1q_f32(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k); +#define PACK_B0(n, offset_k) \ + PACK_ELEMENT_K(n, offset_k) = vget_lane_f32(b##n##_k##offset_k, 0); +#define UPDATE_RESULT_VECTOR4(m, n, offset_k) \ + result##m##n = \ + vfmaq_f32(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k); +#define UPDATE_RESULT(m, n, offset_k) \ + result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k; +#ifdef B0 +#define SCATTER_STORE4(m, n) \ + result##m##n = vmulq_f32(result##m##n, vdupq_n_f32(alpha)); \ + C_ELEMENT(m, n + 0) = vgetq_lane_f32(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = vgetq_lane_f32(result##m##n, 1); \ + C_ELEMENT(m, n + 2) = vgetq_lane_f32(result##m##n, 2); \ + C_ELEMENT(m, n + 3) = vgetq_lane_f32(result##m##n, 3); +#else +#define SCATTER_STORE4(m, n) \ + result##m##n = vmulq_f32(result##m##n, vdupq_n_f32(alpha)); \ + C_ELEMENT(m, n + 0) = \ + C_ELEMENT(m, n + 0) * beta + vgetq_lane_f32(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = \ + C_ELEMENT(m, n + 1) * beta + vgetq_lane_f32(result##m##n, 1); \ + C_ELEMENT(m, n + 2) = \ + C_ELEMENT(m, n + 2) * beta + vgetq_lane_f32(result##m##n, 2); \ + C_ELEMENT(m, n + 3) = \ + C_ELEMENT(m, n + 3) * beta + vgetq_lane_f32(result##m##n, 3); +#endif + +// SVE +#define DECLARE_RESULT_VECTOR(m, n) svfloat32_t result##m##n = svdup_f32(0.0); +#define BROADCAST_LOAD_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = svdup_f32(A_ELEMENT_K(m, offset_k)); +#define BROADCAST_LOAD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = svdup_f32(B_ELEMENT_K(n, offset_k)); +#define VECTOR_LOAD_A(pg, m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); +#define QUADWORD_LOAD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); +#define PACK_B(n, offset_k) \ + svst1(pg_first, &PACK_ELEMENT_K(n, offset_k), b##s##n##_k##offset_k); +#define VECTOR_PACK_B(n, offset_k) \ + svst1(pg_true, &PACK_ELEMENT_K(n* v_size, offset_k), b##s##n##_k##offset_k); +#define QUADWORD_PACK_B(n, offset_k) \ + svst1(pg_quad, &PACK_ELEMENT_K(n, offset_k), b##s##n##_k##offset_k); +#define UNPACK_VECTOR_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = \ + svld1(pg_true, &PACK_ELEMENT_K(n * v_size, offset_k)); +#define UNPACK_BROADCAST_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = svdup_f32(PACK_ELEMENT_K(n, offset_k)); +#define UNPACK_QUADWORD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &PACK_ELEMENT_K(n, offset_k)); +#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \ + result##m##n = \ + svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k); +#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \ + result##m##n = svmla_lane( \ + result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); +#ifdef B0 +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, 
n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); +#else +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = \ + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = svmla_m( \ + pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u32(0LL, ldc)), \ + beta_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); +#endif + +#ifndef LIKELY +#ifdef __GNUC__ +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif +#ifndef UNLIKELY +#ifdef __GNUC__ +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + +#ifdef B0 +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT* C, + BLASLONG ldc) +#else +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT beta, + FLOAT* C, + BLASLONG ldc) +#endif +{ + const uint64_t v_size = svcntw(); + const uint64_t v_size2 = v_size * 2; + const svbool_t pg_true = svptrue_b32(); + const svbool_t pg_quad = svwhilelt_b32(0, 4); + const svbool_t pg_first = svwhilelt_b32(0, 1); + const svfloat32_t alpha_vec = svdup_f32(alpha); +#ifndef B0 + const svfloat32_t beta_vec = svdup_f32(beta); +#endif + const BLASLONG n8 = N & -8; + const BLASLONG n4 = N & -4; + const BLASLONG v_m2 = M & -v_size2; + const BLASLONG v_m1 = M & -v_size; + const BLASLONG k4 = K & -4; + + const int pack_b = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + FLOAT* packed_b = + (pack_b) ? 
packed_b = (FLOAT*)malloc(K * 8 * sizeof(FLOAT)) : NULL; + + BLASLONG j = 0; + for (; j < n8; j += 8) { + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + DECLARE_RESULT_VECTOR(1, 4); + DECLARE_RESULT_VECTOR(1, 5); + DECLARE_RESULT_VECTOR(1, 6); + DECLARE_RESULT_VECTOR(1, 7); + + if (LIKELY(packed_b != NULL)) { + if (i == 0) { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + VECTOR_PACK_B4(0, 0); + VECTOR_PACK_B4(0, 1); + VECTOR_PACK_B4(0, 2); + VECTOR_PACK_B4(0, 3); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + VECTOR_PACK_B4(4, 0); + VECTOR_PACK_B4(4, 1); + VECTOR_PACK_B4(4, 2); + VECTOR_PACK_B4(4, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 
1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); + VECTOR_LOAD_A(pg_true, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); + VECTOR_LOAD_A(pg_true, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + PACK_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + PACK_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + PACK_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + PACK_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + BROADCAST_LOAD_B(4, 0); + PACK_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); + BROADCAST_LOAD_B(5, 0); + PACK_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); + BROADCAST_LOAD_B(6, 0); + PACK_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); + BROADCAST_LOAD_B(7, 0); + PACK_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); + } + } else { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_QUADWORD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + } + } + } else { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + 
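+          // The same VECTOR_LOAD_A / svmla_lane group repeats below for k
+          // offsets 1..3, reusing the B registers produced by the single
+          // TRANSPOSE_B4_K4 + SCALE_B4_K4 of this 4-wide k step.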
VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); + VECTOR_LOAD_A(pg_true, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); + VECTOR_LOAD_A(pg_true, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + 
UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + VECTOR_STORE(pg_true, 1, 4); + VECTOR_STORE(pg_true, 1, 5); + VECTOR_STORE(pg_true, 1, 6); + VECTOR_STORE(pg_true, 1, 7); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + if (LIKELY(packed_b != NULL)) { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_QUADWORD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + } else { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + 
UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + if (LIKELY(packed_b != NULL)) { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_QUADWORD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + } else { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_tail, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_tail, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_tail, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + 
VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 7, 0); + } + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + VECTOR_STORE(pg_tail, 0, 4); + VECTOR_STORE(pg_tail, 0, 5); + VECTOR_STORE(pg_tail, 0, 6); + VECTOR_STORE(pg_tail, 0, 7); + } + } + for (; j < n4; j += 4) { + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + 
UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + 
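+      // M-tail block: pg_tail (from svwhilelt) masks off the rows beyond M.
+      // svmla_lane itself is unpredicated, but inactive lanes load as zero
+      // and the predicated VECTOR_STORE never writes them back, so no scalar
+      // remainder loop is needed.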
VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_tail, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_tail, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_tail, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + } + } + for (; j < N; j++) { + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + } + } + + if (pack_b) + free(packed_b); + + return 0; +} diff --git a/kernel/arm64/sgemm_small_kernel_nt_sve.c b/kernel/arm64/sgemm_small_kernel_nt_sve.c new file mode 100644 index 000000000..1c3d324d0 --- /dev/null +++ b/kernel/arm64/sgemm_small_kernel_nt_sve.c @@ -0,0 +1,647 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*****************************************************************************/
+
+#include "common.h"
+
+#include <arm_neon.h>
+#include <arm_sve.h>
+#ifdef __ARM_NEON_SVE_BRIDGE
+#include <arm_neon_sve_bridge.h>
+#else
+#define svdup_neonq_f32(fixed_reg) \
+  ({ \
+    svfloat32_t scalable_reg; \
+    asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \
+    scalable_reg; \
+  })
+#define svdup_neonq_f64(fixed_reg) \
+  ({ \
+    svfloat64_t scalable_reg; \
+    asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \
+    scalable_reg; \
+  })
+#endif
+
+#define A_ELEMENT_K(m, offset_k) A[(i + (m)) + (k + offset_k) * lda]
+#define A_ELEMENT(m) A_ELEMENT_K(m, 0)
+
+#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) * ldb + (j + (n))]
+#define B_ELEMENT(n) B_ELEMENT_K(n, 0)
+
+#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc]
+
+#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 8 + n]
+#define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0)
+
+// ASIMD
+#define DECLARE_RESULT_VECTOR4(m, n) \
+  float32x4_t result##m##n = vdupq_n_f32(0.0);
+#define DECLARE_RESULT(m, n) float32_t result##m##n = 0.0;
+#define BROADCAST_LOAD_A4(m, offset_k) \
+  float32x4_t a##m##_k##offset_k = vld1q_dup_f32(&A_ELEMENT_K(m, offset_k));
+#define LOAD_A1(m, offset_k) \
+  float32_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k);
+#define VECTOR_LOAD_B4(n, offset_k) \
+  float32x4_t b##n##_k##offset_k = vld1q_f32(&B_ELEMENT_K(n, offset_k));
+#define GATHER_LOAD_B4(n, offset_k) \
+  float32x4_t b##n##_k##offset_k = vdupq_n_f32(B_ELEMENT_K(n, offset_k)); \
+  b##n##_k##offset_k = \
+    vsetq_lane_f32(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1); \
+  b##n##_k##offset_k = \
+    vsetq_lane_f32(B_ELEMENT_K(n + 2, offset_k), b##n##_k##offset_k, 2); \
+  b##n##_k##offset_k = \
+    vsetq_lane_f32(B_ELEMENT_K(n + 3, offset_k), b##n##_k##offset_k, 3);
+#define VECTOR_UNPACK_B4(n, offset_k) \
+  float32x4_t b##n##_k##offset_k = vld1q_f32(&PACK_ELEMENT_K(n, offset_k));
+#define VECTOR_PACK_B4(n, offset_k) \
+  vst1q_f32(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k);
+#define PACK_B0(n, offset_k) \
+  PACK_ELEMENT_K(n, offset_k) = vget_lane_f32(b##n##_k##offset_k, 0);
+#define UPDATE_RESULT_VECTOR4(m, n, offset_k) \
+  result##m##n = \
+    vfmaq_f32(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k);
+#define UPDATE_RESULT(m, n, offset_k) \
+  result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k;
+#ifdef B0
+#define SCATTER_STORE4(m, n) \
+  result##m##n = vmulq_f32(result##m##n, vdupq_n_f32(alpha)); \
+  C_ELEMENT(m, n + 0) = vgetq_lane_f32(result##m##n, 0); \
+  C_ELEMENT(m, n + 1) = vgetq_lane_f32(result##m##n, 1); \
+  C_ELEMENT(m, n + 2) = vgetq_lane_f32(result##m##n, 2); \
+  C_ELEMENT(m, n + 3) = vgetq_lane_f32(result##m##n, 3);
+#else
+#define SCATTER_STORE4(m, n) \
+  result##m##n = vmulq_f32(result##m##n, 
vdupq_n_f32(alpha)); \ + C_ELEMENT(m, n + 0) = \ + C_ELEMENT(m, n + 0) * beta + vgetq_lane_f32(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = \ + C_ELEMENT(m, n + 1) * beta + vgetq_lane_f32(result##m##n, 1); \ + C_ELEMENT(m, n + 2) = \ + C_ELEMENT(m, n + 2) * beta + vgetq_lane_f32(result##m##n, 2); \ + C_ELEMENT(m, n + 3) = \ + C_ELEMENT(m, n + 3) * beta + vgetq_lane_f32(result##m##n, 3); +#endif + +// SVE +#define DECLARE_RESULT_VECTOR(m, n) svfloat32_t result##m##n = svdup_f32(0.0); +#define BROADCAST_LOAD_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = svdup_f32(A_ELEMENT_K(m, offset_k)); +#define BROADCAST_LOAD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = svdup_f32(B_ELEMENT_K(n, offset_k)); +#define VECTOR_LOAD_A(pg, m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); +#define QUADWORD_LOAD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); +#define PACK_B(n, offset_k) \ + svst1(pg_first, &PACK_ELEMENT_K(n, offset_k), b##s##n##_k##offset_k); +#define VECTOR_PACK_B(n, offset_k) \ + svst1(pg_true, &PACK_ELEMENT_K(n* v_size, offset_k), b##s##n##_k##offset_k); +#define QUADWORD_PACK_B(n, offset_k) \ + svst1(pg_quad, &PACK_ELEMENT_K(n, offset_k), b##s##n##_k##offset_k); +#define UNPACK_VECTOR_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = \ + svld1(pg_true, &PACK_ELEMENT_K(n * v_size, offset_k)); +#define UNPACK_BROADCAST_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = svdup_f32(PACK_ELEMENT_K(n, offset_k)); +#define UNPACK_QUADWORD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &PACK_ELEMENT_K(n, offset_k)); +#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \ + result##m##n = \ + svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k); +#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \ + result##m##n = svmla_lane( \ + result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); +#ifdef B0 +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); +#else +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = \ + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = svmla_m( \ + pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u32(0LL, ldc)), \ + beta_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); +#endif + +#ifndef LIKELY +#ifdef __GNUC__ +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif +#ifndef UNLIKELY +#ifdef __GNUC__ +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + +#ifdef B0 +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT* C, + BLASLONG ldc) +#else +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT beta, + FLOAT* C, + BLASLONG ldc) 
+#endif +{ + const uint64_t v_size = svcntw(); + const uint64_t v_size3 = v_size * 3; + const svbool_t pg_true = svptrue_b32(); + const svbool_t pg_quad = svwhilelt_b32(0, 4); + const svbool_t pg_first = svwhilelt_b32(0, 1); + const svfloat32_t alpha_vec = svdup_f32(alpha); +#ifndef B0 + const svfloat32_t beta_vec = svdup_f32(beta); +#endif + const BLASLONG n8 = N & -8; + const BLASLONG n4 = N & -4; + const BLASLONG v_m3 = M - (M % v_size3); + const BLASLONG v_m1 = M & -v_size; + + const int pack_b = M >= v_size3 && N >= 8 && K >= 8 ? 1 : 0; + FLOAT* packed_b = + (pack_b) ? packed_b = (FLOAT*)malloc(K * 8 * sizeof(FLOAT)) : NULL; + + BLASLONG j = 0; + for (; j < n8; j += 8) { + + BLASLONG i = 0; + for (; i < v_m3; i += v_size3) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + DECLARE_RESULT_VECTOR(1, 4); + DECLARE_RESULT_VECTOR(1, 5); + DECLARE_RESULT_VECTOR(1, 6); + DECLARE_RESULT_VECTOR(1, 7); + DECLARE_RESULT_VECTOR(2, 0); + DECLARE_RESULT_VECTOR(2, 1); + DECLARE_RESULT_VECTOR(2, 2); + DECLARE_RESULT_VECTOR(2, 3); + DECLARE_RESULT_VECTOR(2, 4); + DECLARE_RESULT_VECTOR(2, 5); + DECLARE_RESULT_VECTOR(2, 6); + DECLARE_RESULT_VECTOR(2, 7); + + if (LIKELY(packed_b != NULL)) { + if (i == 0) { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + QUADWORD_PACK_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + QUADWORD_PACK_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 7, 4, 3, 0); + } + } else { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_QUADWORD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 
1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 7, 4, 3, 0); + } + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + VECTOR_LOAD_A(pg_true, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 7, 4, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + VECTOR_STORE(pg_true, 1, 4); + VECTOR_STORE(pg_true, 1, 5); + VECTOR_STORE(pg_true, 1, 6); + VECTOR_STORE(pg_true, 1, 7); + VECTOR_STORE(pg_true, 2, 0); + VECTOR_STORE(pg_true, 2, 1); + VECTOR_STORE(pg_true, 2, 2); + VECTOR_STORE(pg_true, 2, 3); + VECTOR_STORE(pg_true, 2, 4); + VECTOR_STORE(pg_true, 2, 5); + VECTOR_STORE(pg_true, 2, 6); + VECTOR_STORE(pg_true, 2, 7); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + if (LIKELY(packed_b != NULL)) { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_QUADWORD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + 
UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + if (LIKELY(packed_b != NULL)) { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_QUADWORD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + VECTOR_STORE(pg_tail, 0, 4); + VECTOR_STORE(pg_tail, 0, 5); + VECTOR_STORE(pg_tail, 0, 6); + VECTOR_STORE(pg_tail, 0, 7); + } + } + for (; j < n4; j += 4) { + + BLASLONG i = 0; + for (; i < v_m3; i += v_size3) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + DECLARE_RESULT_VECTOR(2, 0); + DECLARE_RESULT_VECTOR(2, 1); + DECLARE_RESULT_VECTOR(2, 2); + DECLARE_RESULT_VECTOR(2, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 2, 0); + 
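+ // Third v_size row block of the 3*v_size M tile: the two B quadwords already in registers (bs0_k0, bs4_k0) are reused for the eight lane updates below.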
UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 0, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + VECTOR_STORE(pg_true, 2, 0); + VECTOR_STORE(pg_true, 2, 1); + VECTOR_STORE(pg_true, 2, 2); + VECTOR_STORE(pg_true, 2, 3); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + } + } + for (; j < N; j++) { + + BLASLONG i = 0; + for (; i < v_m3; i += v_size3) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(2, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + VECTOR_LOAD_A(pg_true, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 2, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 2, 0); + } + for (; i < v_m1; i += v_size) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + } + } + + if (pack_b) + free(packed_b); + + return 0; +} diff --git a/kernel/arm64/sgemm_small_kernel_tn_sve.c b/kernel/arm64/sgemm_small_kernel_tn_sve.c new file mode 100644 index 000000000..6fd3b12a6 --- /dev/null +++ b/kernel/arm64/sgemm_small_kernel_tn_sve.c @@ -0,0 +1,1247 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. 
Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" + +#include <arm_neon.h> +#include <arm_sve.h> +#ifdef __ARM_NEON_SVE_BRIDGE +#include <arm_neon_sve_bridge.h> +#else +#define svdup_neonq_f32(fixed_reg) \ + ({ \ + svfloat32_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#define svdup_neonq_f64(fixed_reg) \ + ({ \ + svfloat64_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#endif + +#define A_ELEMENT_K(m, offset_k) A[(i + (m)) * lda + (k + offset_k)] +#define A_ELEMENT(m) A_ELEMENT_K(m, 0) + +#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) + (j + (n)) * ldb] +#define B_ELEMENT(n) B_ELEMENT_K(n, 0) + +#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] + +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * 2 * v_size + m] +#define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) + +// ASIMD +#define DECLARE_RESULT_VECTOR4(m, n) \ + float32x4_t result##m##n = vdupq_n_f32(0.0); +#define DECLARE_RESULT(m, n) float32_t result##m##n = 0.0; +#define BROADCAST_LOAD_A4(m, offset_k) \ + float32x4_t a##m##_k##offset_k = vld1q_dup_f32(&A_ELEMENT_K(m, offset_k)); +#define LOAD_A1(m, offset_k) \ + float32_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k); +#define VECTOR_LOAD_B_K4(n, offset_k) \ + float32x4_t b##k##n##_k##offset_k = vld1q_f32(&B_ELEMENT_K(n, offset_k)); +#define TRANSPOSE_B4_K4( \ + n0, n1, n2, n3, offset_k0, offset_k1, offset_k2, offset_k3) \ + float32x4_t b##t##n0##_k##offset_k0 = \ + vzip1q_f32(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \ + float32x4_t b##t##n0##_k##offset_k1 = \ + vzip2q_f32(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \ + float32x4_t b##t##n0##_k##offset_k2 = \ + vzip1q_f32(b##k##n2##_k##offset_k0, b##k##n3##_k##offset_k0); \ + float32x4_t b##t##n0##_k##offset_k3 = \ + vzip2q_f32(b##k##n2##_k##offset_k0, b##k##n3##_k##offset_k0); \ + float32x4_t b##n0##_k##offset_k0 = vreinterpretq_f32_f64( \ + vzip1q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k0), \ + vreinterpretq_f64_f32(b##t##n0##_k##offset_k2))); \ + float32x4_t b##n0##_k##offset_k1 = vreinterpretq_f32_f64( \ + vzip2q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k0), \ +
vreinterpretq_f64_f32(b##t##n0##_k##offset_k2))); \ + float32x4_t b##n0##_k##offset_k2 = vreinterpretq_f32_f64( \ + vzip1q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k1), \ + vreinterpretq_f64_f32(b##t##n0##_k##offset_k3))); \ + float32x4_t b##n0##_k##offset_k3 = vreinterpretq_f32_f64( \ + vzip2q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k1), \ + vreinterpretq_f64_f32(b##t##n0##_k##offset_k3))); + +#define SCALE_B4_K4(n0, offset_k0, offset_k1, offset_k2, offset_k3) \ + svfloat32_t b##s##n0##_k##offset_k0 = svdup_neonq_f32(b##n0##_k##offset_k0); \ + svfloat32_t b##s##n0##_k##offset_k1 = svdup_neonq_f32(b##n0##_k##offset_k1); \ + svfloat32_t b##s##n0##_k##offset_k2 = svdup_neonq_f32(b##n0##_k##offset_k2); \ + svfloat32_t b##s##n0##_k##offset_k3 = svdup_neonq_f32(b##n0##_k##offset_k3); +#define GATHER_LOAD_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vdupq_n_f32(B_ELEMENT_K(n, offset_k)); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 2, offset_k), b##n##_k##offset_k, 2); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 3, offset_k), b##n##_k##offset_k, 3); +#define VECTOR_UNPACK_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vld1q_f32(&PACK_ELEMENT_K(n, offset_k)); +#define VECTOR_PACK_B4(n, offset_k) \ + vst1q_f32(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k); +#define PACK_B0(n, offset_k) \ + PACK_ELEMENT_K(n, offset_k) = vget_lane_f32(b##n##_k##offset_k, 0); +#define UPDATE_RESULT_VECTOR4(m, n, offset_k) \ + result##m##n = \ + vfmaq_f32(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k); +#define UPDATE_RESULT(m, n, offset_k) \ + result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k; +#ifdef B0 +#define SCATTER_STORE4(m, n) \ + result##m##n = vmulq_f32(result##m##n, vdupq_n_f32(alpha)); \ + C_ELEMENT(m, n + 0) = vgetq_lane_f32(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = vgetq_lane_f32(result##m##n, 1); \ + C_ELEMENT(m, n + 2) = vgetq_lane_f32(result##m##n, 2); \ + C_ELEMENT(m, n + 3) = vgetq_lane_f32(result##m##n, 3); +#else +#define SCATTER_STORE4(m, n) \ + result##m##n = vmulq_f32(result##m##n, vdupq_n_f32(alpha)); \ + C_ELEMENT(m, n + 0) = \ + C_ELEMENT(m, n + 0) * beta + vgetq_lane_f32(result##m##n, 0); \ + C_ELEMENT(m, n + 1) = \ + C_ELEMENT(m, n + 1) * beta + vgetq_lane_f32(result##m##n, 1); \ + C_ELEMENT(m, n + 2) = \ + C_ELEMENT(m, n + 2) * beta + vgetq_lane_f32(result##m##n, 2); \ + C_ELEMENT(m, n + 3) = \ + C_ELEMENT(m, n + 3) * beta + vgetq_lane_f32(result##m##n, 3); +#endif + +// SVE +#define DECLARE_RESULT_VECTOR(m, n) svfloat32_t result##m##n = svdup_f32(0.0); +#define BROADCAST_LOAD_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = svdup_f32(A_ELEMENT_K(m, offset_k)); +#define BROADCAST_LOAD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = svdup_f32(B_ELEMENT_K(n, offset_k)); +#define VECTOR_LOAD_A(pg, m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); +#define QUADWORD_LOAD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); +#define GATHER_LOAD_A(pg, m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1_gather_index(pg, &A_ELEMENT_K(v_size * m, offset_k), lda_vec); +#define PACK_A(m, offset_k) \ + svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); +#define VECTOR_PACK_A(m, offset_k) \ + svst1(pg_true, &PACK_ELEMENT_K(m* v_size, offset_k), a##s##m##_k##offset_k); 
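+// With A transposed, GATHER_LOAD_A needs one lda-strided gather per vector, so the first j-block packs its gathered A vectors (VECTOR_PACK_A) and later j-blocks reload the K x (2 * v_size) panel with contiguous svld1 loads (UNPACK_VECTOR_A).
+// Layout sketch, assuming 256-bit SVE (v_size == 8): row k of the panel spans packed_a[k * 16] .. packed_a[k * 16 + 15].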
+#define QUADWORD_PACK_A(m, offset_k) \ + svst1(pg_quad, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); +#define UNPACK_VECTOR_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1(pg_true, &PACK_ELEMENT_K(m * v_size, offset_k)); +#define UNPACK_BROADCAST_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = svdup_f32(PACK_ELEMENT_K(m, offset_k)); +#define UNPACK_QUADWORD_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1rq(pg_true, &PACK_ELEMENT_K(m, offset_k)); +#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \ + result##m##n = \ + svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k); +#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \ + result##m##n = svmla_lane( \ + result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); +#ifdef B0 +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); +#else +#define VECTOR_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = \ + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = svmla_m( \ + pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u32(0LL, ldc)), \ + beta_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); +#endif + +#ifndef LIKELY +#ifdef __GNUC__ +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif +#ifndef UNLIKELY +#ifdef __GNUC__ +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + + +#define GATHER_LOAD_A64(pg, m, offset_k) \ + svfloat64_t a##t##m##_k##offset_k = \ + svld1_gather_offset(pg, (double *)&A_ELEMENT_K(v64_size * m, offset_k), lda_vec64); + +#ifdef B0 +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT* C, + BLASLONG ldc) +#else +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT beta, + FLOAT* C, + BLASLONG ldc) +#endif +{ + const uint64_t v_size = svcntw(); + const uint64_t v_size2 = v_size * 2; + const uint64_t v64_size = v_size / 2; + const svbool_t pg_true = svptrue_b32(); + const svbool_t pg_quad = svwhilelt_b32(0, 4); + const svbool_t pg_first = svwhilelt_b32(0, 1); + const svfloat32_t alpha_vec = svdup_f32(alpha); +#ifndef B0 + const svfloat32_t beta_vec = svdup_f32(beta); +#endif + const svuint32_t lda_vec = svindex_u32(0LL, lda); + const svuint64_t lda_vec64 = svmul_m(pg_true, svindex_u64(0,sizeof(FLOAT)), lda); + + const BLASLONG v_m2 = M & -v_size2; + const BLASLONG v_m1 = M & -v_size; + const BLASLONG n8 = N & -8; + const BLASLONG n4 = N & -4; + const BLASLONG k4 = K & -4; + + const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + FLOAT* packed_a = + (pack_a) ? 
packed_a = (FLOAT*)malloc(K * 2 * v_size * sizeof(FLOAT)) : NULL; + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG j = 0; + for (; j < n8; j += 8) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + DECLARE_RESULT_VECTOR(1, 4); + DECLARE_RESULT_VECTOR(1, 5); + DECLARE_RESULT_VECTOR(1, 6); + DECLARE_RESULT_VECTOR(1, 7); + + if (LIKELY(packed_a != NULL)) { + if (j == 0) { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + + GATHER_LOAD_A64(pg_true, 0, 0); + GATHER_LOAD_A64(pg_true, 1, 0); + svfloat32_t as0_k0 = svuzp1(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); + svfloat32_t as0_k1 = svuzp2(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); + VECTOR_PACK_A(0, 0); + VECTOR_PACK_A(0, 1); + + // GATHER_LOAD_A(pg_true, 0, 0); + // VECTOR_PACK_A(0, 0); + // GATHER_LOAD_A(pg_true, 0, 1); + // VECTOR_PACK_A(0, 1); + + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + + GATHER_LOAD_A64(pg_true, 0, 2); + GATHER_LOAD_A64(pg_true, 1, 2); + svfloat32_t as0_k2 = svuzp1(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); + svfloat32_t as0_k3 = svuzp2(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); + VECTOR_PACK_A(0, 2); + VECTOR_PACK_A(0, 3); + + // GATHER_LOAD_A(pg_true, 0, 2); + // VECTOR_PACK_A(0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + // GATHER_LOAD_A(pg_true, 0, 3); + // VECTOR_PACK_A(0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + + GATHER_LOAD_A64(pg_true, 2, 0); + GATHER_LOAD_A64(pg_true, 3, 0); + 
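+ // Same trick as the first row-block pair: each 64-bit gather (at2_k0, at3_k0) pulls the k and k+1 floats of a row of A in a single lane; the svuzp1/svuzp2 pair below splits them into the k+0 (as1_k0) and k+1 (as1_k1) vectors.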
svfloat32_t as1_k0 = svuzp1(svreinterpret_f32(at2_k0), svreinterpret_f32(at3_k0)); + svfloat32_t as1_k1 = svuzp2(svreinterpret_f32(at2_k0), svreinterpret_f32(at3_k0)); + VECTOR_PACK_A(1, 0); + VECTOR_PACK_A(1, 1); + + // GATHER_LOAD_A(pg_true, 1, 0); + // VECTOR_PACK_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + // GATHER_LOAD_A(pg_true, 1, 1); + // VECTOR_PACK_A(1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); + + // 64-bit load x2 then unzip into 32-bit + GATHER_LOAD_A64(pg_true, 2, 2); + GATHER_LOAD_A64(pg_true, 3, 2); + svfloat32_t as1_k2 = svuzp1(svreinterpret_f32(at2_k2), svreinterpret_f32(at3_k2)); + svfloat32_t as1_k3 = svuzp2(svreinterpret_f32(at2_k2), svreinterpret_f32(at3_k2)); + VECTOR_PACK_A(1, 2); + VECTOR_PACK_A(1, 3); + + // GATHER_LOAD_A(pg_true, 1, 2); + // VECTOR_PACK_A(1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); + // GATHER_LOAD_A(pg_true, 1, 3); + // VECTOR_PACK_A(1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + VECTOR_PACK_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + VECTOR_PACK_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); + } + } else { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 
0, 1, 2, 3); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_VECTOR_A(0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + UNPACK_VECTOR_A(0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + UNPACK_VECTOR_A(0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + UNPACK_VECTOR_A(1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); + UNPACK_VECTOR_A(1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); + UNPACK_VECTOR_A(1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + 
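+ // K remainder: accumulate the packed A column against one broadcast B element per output column.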
UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); + } + } + } else { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + GATHER_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + GATHER_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + GATHER_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + GATHER_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + 
UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); + GATHER_LOAD_A(pg_true, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); + GATHER_LOAD_A(pg_true, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + VECTOR_STORE(pg_true, 1, 4); + VECTOR_STORE(pg_true, 1, 5); + VECTOR_STORE(pg_true, 1, 6); + VECTOR_STORE(pg_true, 1, 7); + } + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + if (LIKELY(packed_a != NULL)) { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_VECTOR_A(0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + UNPACK_VECTOR_A(0, 2); + 
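+ // The k+2 and k+3 columns of the packed panel repeat the same svmla_lane pattern.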
UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + UNPACK_VECTOR_A(0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UNPACK_VECTOR_A(1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + UNPACK_VECTOR_A(1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + UNPACK_VECTOR_A(1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } else { + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + GATHER_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + GATHER_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + GATHER_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + GATHER_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + GATHER_LOAD_A(pg_true, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + 
UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + GATHER_LOAD_A(pg_true, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } else { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + } + } + for (; i < v_m1; i += v_size) { + + BLASLONG j = 0; + for (; j < n8; j += 8) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + + GATHER_LOAD_A64(pg_true, 0, 0); + GATHER_LOAD_A64(pg_true, 1, 0); + svfloat32_t as0_k0 = svuzp1(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); + svfloat32_t as0_k1 = svuzp2(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); + + // GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + // GATHER_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + + GATHER_LOAD_A64(pg_true, 0, 2); + GATHER_LOAD_A64(pg_true, 1, 2); + svfloat32_t as0_k2 = svuzp1(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); + svfloat32_t as0_k3 = svuzp2(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); + + // GATHER_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + // GATHER_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + 
UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + } + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + + GATHER_LOAD_A64(pg_true, 0, 0); + GATHER_LOAD_A64(pg_true, 1, 0); + svfloat32_t as0_k0 = svuzp1(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); + svfloat32_t as0_k1 = svuzp2(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); + + // GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + // GATHER_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + + GATHER_LOAD_A64(pg_true, 0, 2); + GATHER_LOAD_A64(pg_true, 1, 2); + svfloat32_t as0_k2 = svuzp1(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); + svfloat32_t as0_k3 = svuzp2(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); + + // GATHER_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + // GATHER_LOAD_A(pg_true, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + 
UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + } + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + + BLASLONG j = 0; + for (; j < n8; j += 8) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + GATHER_LOAD_A(pg_tail, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + GATHER_LOAD_A(pg_tail, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + GATHER_LOAD_A(pg_tail, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_B_K4(4, 0); + VECTOR_LOAD_B_K4(5, 0); + VECTOR_LOAD_B_K4(6, 0); + VECTOR_LOAD_B_K4(7, 0); + TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); + SCALE_B4_K4(4, 0, 1, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + 
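+ // pg_tail predicates every A load and C store here, so the final partial row block never touches memory past M.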
UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 7, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + VECTOR_STORE(pg_tail, 0, 4); + VECTOR_STORE(pg_tail, 0, 5); + VECTOR_STORE(pg_tail, 0, 6); + VECTOR_STORE(pg_tail, 0, 7); + } + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < k4; k += 4) { + + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + GATHER_LOAD_A(pg_tail, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + GATHER_LOAD_A(pg_tail, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + GATHER_LOAD_A(pg_tail, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + } + } + + if (pack_a) + free(packed_a); + + return 0; +} diff --git a/kernel/arm64/sgemm_small_kernel_tt_sve.c b/kernel/arm64/sgemm_small_kernel_tt_sve.c new file mode 100644 index 000000000..894e7fd46 --- /dev/null +++ b/kernel/arm64/sgemm_small_kernel_tt_sve.c @@ -0,0 +1,574 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. 
Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include "common.h" + +#include <arm_neon.h> +#include <arm_sve.h> +#ifdef __ARM_NEON_SVE_BRIDGE +#include <arm_neon_sve_bridge.h> +#else +#define svdup_neonq_f32(fixed_reg) \ + ({ \ + svfloat32_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#define svdup_neonq_f64(fixed_reg) \ + ({ \ + svfloat64_t scalable_reg; \ + asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \ + scalable_reg; \ + }) +#endif + +#define A_ELEMENT_K(m, offset_k) A[(i + (m)) * lda + (k + offset_k)] +#define A_ELEMENT(m) A_ELEMENT_K(m, 0) + +#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) * ldb + (j + (n))] +#define B_ELEMENT(n) B_ELEMENT_K(n, 0) + +#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] + +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size2 + m] +#define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) + +// ASIMD +#define DECLARE_RESULT_VECTOR4(m, n) \ + float32x4_t result##m##n = vdupq_n_f32(0.0); +#define DECLARE_RESULT(m, n) float32_t result##m##n = 0.0; +#define BROADCAST_LOAD_A4(m, offset_k) \ + float32x4_t a##m##_k##offset_k = vld1q_dup_f32(&A_ELEMENT_K(m, offset_k)); +#define LOAD_A1(m, offset_k) \ + float32_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k); +#define VECTOR_LOAD_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vld1q_f32(&B_ELEMENT_K(n, offset_k)); +#define GATHER_LOAD_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vdupq_n_f32(B_ELEMENT_K(n, offset_k)); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 2, offset_k), b##n##_k##offset_k, 2); \ + b##n##_k##offset_k = \ + vsetq_lane_f32(B_ELEMENT_K(n + 3, offset_k), b##n##_k##offset_k, 3); +#define VECTOR_UNPACK_B4(n, offset_k) \ + float32x4_t b##n##_k##offset_k = vld1q_f32(&PACK_ELEMENT_K(n, offset_k)); +#define VECTOR_PACK_B4(n, offset_k) \ + vst1q_f32(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k); +#define PACK_B0(n, offset_k) \ + PACK_ELEMENT_K(n, offset_k) = vget_lane_f32(b##n##_k##offset_k, 0); +#define UPDATE_RESULT_VECTOR4(m, n, offset_k) \ + result##m##n = \ + vfmaq_f32(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k); +#define UPDATE_RESULT(m, n, offset_k) \ + result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k; +#ifdef B0 +#define VECTOR_STORE4(m, n) \ + vst1q_f32(&C_ELEMENT(m, n), vmulq_f32(result##m##n, vdupq_n_f32(alpha))); +#define STORE(m, n) C_ELEMENT(m, n) = alpha * result##m##n; +#else +#define VECTOR_STORE4(m, n) \ + 
result##m##n = vmulq_f32(result##m##n, vdupq_n_f32(alpha)); \ + result##m##n = \ + vfmaq_f32(result##m##n, vld1q_f32(&C_ELEMENT(m, n)), vdupq_n_f32(beta)); \ + vst1q_f32(&C_ELEMENT(m, n), result##m##n); +#define STORE(m, n) \ + C_ELEMENT(m, n) = C_ELEMENT(m, n) * beta + alpha * result##m##n; +#endif + +// SVE +#define DECLARE_RESULT_VECTOR(m, n) svfloat32_t result##m##n = svdup_f32(0.0); +#define BROADCAST_LOAD_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = svdup_f32(A_ELEMENT_K(m, offset_k)); +#define BROADCAST_LOAD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = svdup_f32(B_ELEMENT_K(n, offset_k)); + +#define QUADWORD_LOAD_B(n, offset_k) \ + svfloat32_t b##s##n##_k##offset_k = \ + svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); +#define GATHER_LOAD_A(pg, offset_m, m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1_gather_index(pg, &A_ELEMENT_K(offset_m, offset_k), lda_vec); +#define PACK_A(m, offset_k) \ + svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); +#define VECTOR_PACK_A(offset_m, m, offset_k) \ + svst1(pg_true, &PACK_ELEMENT_K(m* v_size, offset_k), a##s##m##_k##offset_k); +#define QUADWORD_PACK_A(m, offset_k) \ + svst1(pg_quad, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); +#define UNPACK_VECTOR_A(offset_m, m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1(pg_true, &PACK_ELEMENT_K(offset_m, offset_k)); +#define UNPACK_BROADCAST_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = svdup_f32(PACK_ELEMENT_K(m, offset_k)); +#define UNPACK_QUADWORD_A(m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = \ + svld1rq(pg_true, &PACK_ELEMENT_K(m, offset_k)); +#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \ + result##m##n = \ + svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k); +#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \ + result##m##n = svmla_lane( \ + result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); +#ifdef B0 +#define VECTOR_STORE(pg, offset_m, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1(pg, &C_ELEMENT(offset_m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); +#else +#define VECTOR_STORE(pg, offset_m, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = \ + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(offset_m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(offset_m, n), result##m##n); +#define SCATTER_STORE(pg, m, n) \ + result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ + result##m##n = svmla_m( \ + pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u32(0LL, ldc)), \ + beta_vec); \ + svst1_scatter_index( \ + pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); +#endif + +#ifndef LIKELY +#ifdef __GNUC__ +#define LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define LIKELY(x) (x) +#endif +#endif +#ifndef UNLIKELY +#ifdef __GNUC__ +#define UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define UNLIKELY(x) (x) +#endif +#endif + +#ifdef B0 +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT* C, + BLASLONG ldc) +#else +int +CNAME(BLASLONG M, + BLASLONG N, + BLASLONG K, + IFLOAT* A, + BLASLONG lda, + FLOAT alpha, + IFLOAT* B, + BLASLONG ldb, + FLOAT beta, + FLOAT* C, + BLASLONG ldc) +#endif +{ + const uint64_t v_size 
= svcntw(); + const uint64_t v_size2 = v_size * 2; + const svbool_t pg_true = svptrue_b32(); + const svfloat32_t alpha_vec = svdup_f32(alpha); +#ifndef B0 + const svfloat32_t beta_vec = svdup_f32(beta); +#endif + const svuint32_t lda_vec = svindex_u32(0LL, lda); + + const BLASLONG v_m2 = M & -v_size2; + const BLASLONG v_m1 = M & -v_size; + const BLASLONG n8 = N & -8; + const BLASLONG n4 = N & -4; + + const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + FLOAT* packed_a = + (pack_a) ? packed_a = (FLOAT*)malloc(K * 2 * v_size * sizeof(FLOAT)) : NULL; + + BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + BLASLONG j = 0; + for (; j < n8; j += 8) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + DECLARE_RESULT_VECTOR(1, 4); + DECLARE_RESULT_VECTOR(1, 5); + DECLARE_RESULT_VECTOR(1, 6); + DECLARE_RESULT_VECTOR(1, 7); + + if (LIKELY(packed_a != NULL)) { + if (j == 0) { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0, 0); + VECTOR_PACK_A(0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + GATHER_LOAD_A(pg_true, v_size, 1, 0); + VECTOR_PACK_A(v_size, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UNPACK_VECTOR_A(v_size, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + } + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + 
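/*
 * A note on the packing strategy above (a condensed sketch, not kernel code): on the first
 * pass over the columns of C (j == 0) every strided gather of A is also stored contiguously
 * into packed_a via VECTOR_PACK_A, so each later j block can reload the same rows of A with
 * a plain svld1 (UNPACK_VECTOR_A) instead of repeating the expensive gather. Roughly, with
 * a hypothetical vector a0 and the address macros of this file:
 *
 *   if (j == 0) {
 *     svfloat32_t a0 = svld1_gather_index(pg_true, &A_ELEMENT_K(0, 0), lda_vec); // strided gather
 *     svst1(pg_true, &PACK_ELEMENT_K(0, 0), a0);                                 // pack once
 *   } else {
 *     svfloat32_t a0 = svld1(pg_true, &PACK_ELEMENT_K(0, 0));                    // contiguous reload
 *   }
 */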
UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + GATHER_LOAD_A(pg_true, v_size, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0, 0); + VECTOR_STORE(pg_true, 0, 0, 1); + VECTOR_STORE(pg_true, 0, 0, 2); + VECTOR_STORE(pg_true, 0, 0, 3); + VECTOR_STORE(pg_true, 0, 0, 4); + VECTOR_STORE(pg_true, 0, 0, 5); + VECTOR_STORE(pg_true, 0, 0, 6); + VECTOR_STORE(pg_true, 0, 0, 7); + VECTOR_STORE(pg_true, v_size, 1, 0); + VECTOR_STORE(pg_true, v_size, 1, 1); + VECTOR_STORE(pg_true, v_size, 1, 2); + VECTOR_STORE(pg_true, v_size, 1, 3); + VECTOR_STORE(pg_true, v_size, 1, 4); + VECTOR_STORE(pg_true, v_size, 1, 5); + VECTOR_STORE(pg_true, v_size, 1, 6); + VECTOR_STORE(pg_true, v_size, 1, 7); + } + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_VECTOR_A(v_size, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + GATHER_LOAD_A(pg_true, v_size, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0, 0); + VECTOR_STORE(pg_true, 0, 0, 1); + VECTOR_STORE(pg_true, 0, 0, 2); + VECTOR_STORE(pg_true, 0, 0, 3); + VECTOR_STORE(pg_true, v_size, 1, 0); + VECTOR_STORE(pg_true, v_size, 1, 1); + VECTOR_STORE(pg_true, v_size, 1, 2); + VECTOR_STORE(pg_true, v_size, 1, 3); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + UNPACK_VECTOR_A(v_size, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } else { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + GATHER_LOAD_A(pg_true, v_size, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } + VECTOR_STORE(pg_true, 0, 0, 0); + VECTOR_STORE(pg_true, v_size, 1, 0); + } + } + for (; i < v_m1; i += v_size) { + + BLASLONG j = 0; + for (; j < n8; j += 8) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + 
DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0, 0); + VECTOR_STORE(pg_true, 0, 0, 1); + VECTOR_STORE(pg_true, 0, 0, 2); + VECTOR_STORE(pg_true, 0, 0, 3); + VECTOR_STORE(pg_true, 0, 0, 4); + VECTOR_STORE(pg_true, 0, 0, 5); + VECTOR_STORE(pg_true, 0, 0, 6); + VECTOR_STORE(pg_true, 0, 0, 7); + } + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0, 0); + VECTOR_STORE(pg_true, 0, 0, 1); + VECTOR_STORE(pg_true, 0, 0, 2); + VECTOR_STORE(pg_true, 0, 0, 3); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0, 0); + } + } + for (; i < M; i += v_size) { + const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + + BLASLONG j = 0; + for (; j < n8; j += 8) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + VECTOR_STORE(pg_tail, 0, 0, 0); + VECTOR_STORE(pg_tail, 0, 0, 1); + VECTOR_STORE(pg_tail, 0, 0, 2); + VECTOR_STORE(pg_tail, 0, 0, 3); + VECTOR_STORE(pg_tail, 0, 0, 4); + VECTOR_STORE(pg_tail, 0, 0, 5); + VECTOR_STORE(pg_tail, 0, 0, 6); + VECTOR_STORE(pg_tail, 0, 0, 7); + } + for (; j < n4; j += 4) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + } + VECTOR_STORE(pg_tail, 0, 0, 0); + VECTOR_STORE(pg_tail, 0, 0, 1); + 
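/*
 * Tail-handling sketch (a rephrasing of the loop above, not additional kernel code):
 * svwhilelt_b32(i, M) builds a predicate whose lane e is active iff i + e < M, so the final
 * partial row block runs the identical gather/FMA/store sequence with out-of-range lanes
 * simply masked off:
 *
 *   for (; i < M; i += v_size) {
 *     const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)M); // active lanes only
 *     // ... same GATHER_LOAD_A / UPDATE_RESULT_VECTOR / VECTOR_STORE body, predicated by pg_tail
 *   }
 */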
VECTOR_STORE(pg_tail, 0, 0, 2); + VECTOR_STORE(pg_tail, 0, 0, 3); + } + for (; j < N; j++) { + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + } + VECTOR_STORE(pg_tail, 0, 0, 0); + } + } + + if (pack_a) + free(packed_a); + + return 0; +} From 8c472ef7e30d019776dde92b96b02f4c998f7487 Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Mon, 24 Jun 2024 10:47:47 +0100 Subject: [PATCH 02/52] Further tweak small GEMM for AArch64 --- kernel/arm64/dgemm_small_kernel_nn_sve.c | 134 ++- kernel/arm64/dgemm_small_kernel_nt_sve.c | 169 ++-- kernel/arm64/dgemm_small_kernel_tn_sve.c | 137 ++- kernel/arm64/dgemm_small_kernel_tt_sve.c | 137 ++- kernel/arm64/gemm_small_kernel_permit_sve.c | 14 +- kernel/arm64/sgemm_small_kernel_nn_sve.c | 673 ++----------- kernel/arm64/sgemm_small_kernel_nt_sve.c | 406 ++------ kernel/arm64/sgemm_small_kernel_tn_sve.c | 998 ++------------------ kernel/arm64/sgemm_small_kernel_tt_sve.c | 390 ++------ 9 files changed, 772 insertions(+), 2286 deletions(-) diff --git a/kernel/arm64/dgemm_small_kernel_nn_sve.c b/kernel/arm64/dgemm_small_kernel_nn_sve.c index 8baef8277..417633471 100644 --- a/kernel/arm64/dgemm_small_kernel_nn_sve.c +++ b/kernel/arm64/dgemm_small_kernel_nn_sve.c @@ -46,13 +46,27 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. }) #endif -#define A_ELEMENT_K(m, offset_k) A[(i + (m)) + (k + offset_k) * lda] +#define RESET_A_POINTER() a_offset = A; + +#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale; +#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale; +#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k) * lda) #define A_ELEMENT(m) A_ELEMENT_K(m, 0) -#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) + (j + (n)) * ldb] +#define RESET_B_POINTER() b_offset = B; + +#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale * ldb; +#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale * ldb; +#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k)) #define B_ELEMENT(n) B_ELEMENT_K(n, 0) -#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] +#define CREATE_C_POINTER(n, scale) FLOAT* c_offset##n = c_offset + scale * ldc; +#define INCR_C_POINTER(m, incr) // c_offset ## m += incr; +#define UPDATE_C_POINTER(scale) c_offset = c_offset + scale * ldc; +#define C_ELEMENT(m, n) *(c_offset##n + ((m * v_size) + i)) + +// #undef C_ELEMENT +// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] #define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 4 + n] #define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0) @@ -112,8 +126,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define BROADCAST_LOAD_B(n, offset_k) \ svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k)); #define VECTOR_LOAD_A(pg, m, offset_k) \ - svfloat64_t a##s##m##_k##offset_k = \ - svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); + svfloat64_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k)); #define QUADWORD_LOAD_B(n, offset_k) \ svfloat64_t b##s##n##_k##offset_k = \ svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); @@ -140,26 +153,23 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
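The hunk above replaces per-element index arithmetic (A[(i + (m)) + (k + offset_k) * lda]) with running base pointers that the outer loops advance once per block, so the hot k loop does no i/j math. A minimal sketch of how the new macros expand for the two-row-block case, assuming v_size and v_size2 as defined in the kernel:

FLOAT* a_offset = A;
CREATE_A_POINTER(0, 0);      /* FLOAT* a_offset0 = a_offset;             */
CREATE_A_POINTER(1, v_size); /* FLOAT* a_offset1 = a_offset + v_size;    */
/* A_ELEMENT_K(1, 2) now expands to *(a_offset1 + (k + 2) * lda)          */
UPDATE_A_POINTER(v_size2);   /* step past both row blocks for the next i */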
#ifdef B0 #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #else #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ result##m##n = \ - svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - result##m##n = svmla_m( \ - pg, \ - result##m##n, \ - svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc)), \ - beta_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); + result##m##n = svmla_m(pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \ + beta_vec); \ + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #endif #ifndef LIKELY @@ -169,13 +179,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define LIKELY(x) (x) #endif #endif -#ifndef UNLIKELY -#ifdef __GNUC__ -#define UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define UNLIKELY(x) (x) -#endif -#endif #ifdef B0 int @@ -223,12 +226,29 @@ CNAME(BLASLONG M, FLOAT* packed_b = (pack_b) ? packed_b = (FLOAT*)malloc(K * 4 * sizeof(FLOAT)) : NULL; + FLOAT* b_offset = B; + FLOAT* a_offset = A; + FLOAT* c_offset = C; + BLASLONG j = 0; for (; j < n4; j += 4) { + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, 1); + CREATE_C_POINTER(2, 2); + CREATE_C_POINTER(3, 3); + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + BLASLONG i = 0; for (; i < v_m2; i += v_size2) { + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -372,9 +392,16 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 1, 1); VECTOR_STORE(pg_true, 1, 2); VECTOR_STORE(pg_true, 1, 3); + INCR_C_POINTER(0, v_size2); + INCR_C_POINTER(1, v_size2); + INCR_C_POINTER(2, v_size2); + INCR_C_POINTER(3, v_size2); } for (; i < v_m1; i += v_size) { + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -431,9 +458,15 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 0, 2); VECTOR_STORE(pg_true, 0, 3); + INCR_C_POINTER(0, v_size); + INCR_C_POINTER(1, v_size); + INCR_C_POINTER(2, v_size); + INCR_C_POINTER(3, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -491,13 +524,30 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_tail, 0, 1); VECTOR_STORE(pg_tail, 0, 2); VECTOR_STORE(pg_tail, 0, 3); + INCR_C_POINTER(0, 0); + INCR_C_POINTER(1, 0); + INCR_C_POINTER(2, 0); + INCR_C_POINTER(3, 0); } + + UPDATE_B_POINTER(4); + RESET_A_POINTER(); + UPDATE_C_POINTER(4); } for (; j < n2; j += 2) { + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, 1); + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); 
+ BLASLONG i = 0; for (; i < v_m2; i += v_size2) { + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -538,9 +588,14 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 1, 0); VECTOR_STORE(pg_true, 1, 1); + INCR_C_POINTER(0, v_size2); + INCR_C_POINTER(1, v_size2); } for (; i < v_m1; i += v_size) { + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -568,9 +623,13 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); + INCR_C_POINTER(0, v_size); + INCR_C_POINTER(1, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -599,13 +658,26 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_tail, 0, 0); VECTOR_STORE(pg_tail, 0, 1); + INCR_C_POINTER(0, 0); + INCR_C_POINTER(1, 0); } + + UPDATE_B_POINTER(2); + RESET_A_POINTER(); + UPDATE_C_POINTER(2); } for (; j < N; j++) { + CREATE_C_POINTER(0, 0); + CREATE_B_POINTER(0, 0); + BLASLONG i = 0; for (; i < v_m2; i += v_size2) { + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(1, 0); @@ -620,9 +692,13 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 1, 0); + INCR_C_POINTER(0, v_size2); } for (; i < v_m1; i += v_size) { + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -633,9 +709,12 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); } VECTOR_STORE(pg_true, 0, 0); + INCR_C_POINTER(0, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -647,11 +726,16 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); } VECTOR_STORE(pg_tail, 0, 0); + INCR_C_POINTER(0, 0); } + + UPDATE_B_POINTER(1); + RESET_A_POINTER(); + UPDATE_C_POINTER(1); } if (pack_b) free(packed_b); return 0; -} +} \ No newline at end of file diff --git a/kernel/arm64/dgemm_small_kernel_nt_sve.c b/kernel/arm64/dgemm_small_kernel_nt_sve.c index 982388287..241d96a6c 100644 --- a/kernel/arm64/dgemm_small_kernel_nt_sve.c +++ b/kernel/arm64/dgemm_small_kernel_nt_sve.c @@ -46,13 +46,27 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
}) #endif -#define A_ELEMENT_K(m, offset_k) A[(i + (m)) + (k + offset_k) * lda] +#define RESET_A_POINTER() a_offset = A; + +#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale; +#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale; +#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k) * lda) #define A_ELEMENT(m) A_ELEMENT_K(m, 0) -#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) * ldb + (j + (n))] +#define RESET_B_POINTER() b_offset = B; + +#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale; +#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale; +#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k) * ldb) #define B_ELEMENT(n) B_ELEMENT_K(n, 0) -#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] +#define CREATE_C_POINTER(n, scale) FLOAT* c_offset##n = c_offset + scale * ldc; +#define INCR_C_POINTER(m, incr) // c_offset ## m += incr; +#define UPDATE_C_POINTER(scale) c_offset = c_offset + scale * ldc; +#define C_ELEMENT(m, n) *(c_offset##n + ((m * v_size) + i)) + +// #undef C_ELEMENT +// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] #define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 4 + n] #define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0) @@ -97,8 +111,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define BROADCAST_LOAD_B(n, offset_k) \ svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k)); #define VECTOR_LOAD_A(pg, m, offset_k) \ - svfloat64_t a##s##m##_k##offset_k = \ - svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); + svfloat64_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k)); #define QUADWORD_LOAD_B(n, offset_k) \ svfloat64_t b##s##n##_k##offset_k = \ svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); @@ -111,26 +124,23 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifdef B0 #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #else #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ result##m##n = \ - svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - result##m##n = svmla_m( \ - pg, \ - result##m##n, \ - svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc)), \ - beta_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); + result##m##n = svmla_m(pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \ + beta_vec); \ + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #endif #ifndef LIKELY @@ -140,13 +150,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
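The QUADWORD_LOAD_B / UPDATE_RESULT_VECTOR_QUADWORD pair above is the heart of these kernels: svld1rq replicates one 128-bit chunk of B (two doubles here) across every quadword of the scalable vector, and svmla_lane then multiplies a full vector of A by a single B lane per fused multiply-add. A condensed sketch with hypothetical accumulators acc00/acc01:

svfloat64_t b01 = svld1rq(pg_true, &B_ELEMENT_K(0, 0)); /* B[k][j], B[k][j+1] replicated */
svfloat64_t a0  = svld1(pg_true, &A_ELEMENT_K(0, 0));   /* one vector of column k of A   */
acc00 = svmla_lane(acc00, a0, b01, 0);                  /* acc00 += a0 * B[k][j]         */
acc01 = svmla_lane(acc01, a0, b01, 1);                  /* acc01 += a0 * B[k][j+1]       */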
#define LIKELY(x) (x) #endif #endif -#ifndef UNLIKELY -#ifdef __GNUC__ -#define UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define UNLIKELY(x) (x) -#endif -#endif #ifdef B0 int @@ -176,8 +179,7 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntd(); - const uint64_t v_size32 = v_size * 32; - const uint64_t v_size3 = v_size * 3; + const uint64_t v_size2 = v_size * 2; const svbool_t pg_true = svptrue_b64(); const svbool_t pg_quad = svwhilelt_b64(0, 2); const svfloat64_t alpha_vec = svdup_f64(alpha); @@ -186,14 +188,31 @@ CNAME(BLASLONG M, #endif const BLASLONG n4 = N & -4; const BLASLONG n2 = N & -2; - const BLASLONG v_m3 = M - (M % v_size3); + const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; + FLOAT* b_offset = B; + FLOAT* a_offset = A; + FLOAT* c_offset = C; + BLASLONG j = 0; for (; j < n4; j += 4) { + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, 1); + CREATE_C_POINTER(2, 2); + CREATE_C_POINTER(3, 3); + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + BLASLONG i = 0; - for (; i < v_m3; i += v_size3) { + for (; i < v_m2; i += v_size2) { + + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -204,10 +223,6 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(1, 1); DECLARE_RESULT_VECTOR(1, 2); DECLARE_RESULT_VECTOR(1, 3); - DECLARE_RESULT_VECTOR(2, 0); - DECLARE_RESULT_VECTOR(2, 1); - DECLARE_RESULT_VECTOR(2, 2); - DECLARE_RESULT_VECTOR(2, 3); for (; k < K; k++) { @@ -223,11 +238,6 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); - VECTOR_LOAD_A(pg_true, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 2, 1, 0); } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); @@ -237,13 +247,16 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 1, 1); VECTOR_STORE(pg_true, 1, 2); VECTOR_STORE(pg_true, 1, 3); - VECTOR_STORE(pg_true, 2, 0); - VECTOR_STORE(pg_true, 2, 1); - VECTOR_STORE(pg_true, 2, 2); - VECTOR_STORE(pg_true, 2, 3); + INCR_C_POINTER(0, v_size2); + INCR_C_POINTER(1, v_size2); + INCR_C_POINTER(2, v_size2); + INCR_C_POINTER(3, v_size2); } for (; i < v_m1; i += v_size) { + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -264,9 +277,15 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 0, 2); VECTOR_STORE(pg_true, 0, 3); + INCR_C_POINTER(0, v_size); + INCR_C_POINTER(1, v_size); + INCR_C_POINTER(2, v_size); + INCR_C_POINTER(3, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -288,20 +307,35 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_tail, 0, 1); VECTOR_STORE(pg_tail, 0, 2); VECTOR_STORE(pg_tail, 0, 3); + INCR_C_POINTER(0, 0); + INCR_C_POINTER(1, 0); + INCR_C_POINTER(2, 0); + INCR_C_POINTER(3, 0); } + + UPDATE_B_POINTER(4); + RESET_A_POINTER(); + UPDATE_C_POINTER(4); } for (; j < n2; j += 2) { + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, 1); + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + BLASLONG i = 0; - for (; i < v_m3; i += v_size3) { + for (; i < v_m2; i += v_size2) { + + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + 
UPDATE_A_POINTER(v_size2); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(1, 0); DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(2, 0); - DECLARE_RESULT_VECTOR(2, 1); for (; k < K; k++) { @@ -312,19 +346,19 @@ CNAME(BLASLONG M, VECTOR_LOAD_A(pg_true, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - VECTOR_LOAD_A(pg_true, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 1, 0); VECTOR_STORE(pg_true, 1, 1); - VECTOR_STORE(pg_true, 2, 0); - VECTOR_STORE(pg_true, 2, 1); + INCR_C_POINTER(0, v_size2); + INCR_C_POINTER(1, v_size2); } for (; i < v_m1; i += v_size) { + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -338,9 +372,13 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); + INCR_C_POINTER(0, v_size); + INCR_C_POINTER(1, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -355,17 +393,29 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_tail, 0, 0); VECTOR_STORE(pg_tail, 0, 1); + INCR_C_POINTER(0, 0); + INCR_C_POINTER(1, 0); } + + UPDATE_B_POINTER(2); + RESET_A_POINTER(); + UPDATE_C_POINTER(2); } for (; j < N; j++) { + CREATE_C_POINTER(0, 0); + CREATE_B_POINTER(0, 0); + BLASLONG i = 0; - for (; i < v_m3; i += v_size3) { + for (; i < v_m2; i += v_size2) { + + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(2, 0); for (; k < K; k++) { @@ -374,15 +424,16 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); VECTOR_LOAD_A(pg_true, 1, 0); UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - VECTOR_LOAD_A(pg_true, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 2, 0, 0); } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 1, 0); - VECTOR_STORE(pg_true, 2, 0); + INCR_C_POINTER(0, v_size2); } for (; i < v_m1; i += v_size) { + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -393,9 +444,12 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); } VECTOR_STORE(pg_true, 0, 0); + INCR_C_POINTER(0, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -407,8 +461,13 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); } VECTOR_STORE(pg_tail, 0, 0); + INCR_C_POINTER(0, 0); } + + UPDATE_B_POINTER(1); + RESET_A_POINTER(); + UPDATE_C_POINTER(1); } return 0; -} +} \ No newline at end of file diff --git a/kernel/arm64/dgemm_small_kernel_tn_sve.c b/kernel/arm64/dgemm_small_kernel_tn_sve.c index 7158851da..b8783c1d5 100644 --- a/kernel/arm64/dgemm_small_kernel_tn_sve.c +++ b/kernel/arm64/dgemm_small_kernel_tn_sve.c @@ -46,15 +46,30 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
}) #endif -#define A_ELEMENT_K(m, offset_k) A[(i + (m)) * lda + (k + offset_k)] +#define RESET_A_POINTER() a_offset = A; + +#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale * lda; +#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale * lda; +#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k)) #define A_ELEMENT(m) A_ELEMENT_K(m, 0) -#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) + (j + (n)) * ldb] +#define RESET_B_POINTER() b_offset = B; + +#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale * ldb; +#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale * ldb; +#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k)) #define B_ELEMENT(n) B_ELEMENT_K(n, 0) -#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] +#define CREATE_C_POINTER(m, scale) FLOAT* c_offset##m = c_offset + scale; +#define INCR_C_POINTER(m, incr) // c_offset ## m += incr * ldc; +#define UPDATE_C_POINTER(scale) c_offset += scale; +#define C_ELEMENT(m, n) \ + *(c_offset##m + ((j + n) * ldc)) // C[(i+(m))+(j+(n))*ldc] -#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * 2 * v_size + m] +// #undef C_ELEMENT +// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] + +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size2 + m] #define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) // ASIMD @@ -112,14 +127,13 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define BROADCAST_LOAD_B(n, offset_k) \ svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k)); #define VECTOR_LOAD_A(pg, m, offset_k) \ - svfloat64_t a##s##m##_k##offset_k = \ - svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); + svfloat64_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k)); #define QUADWORD_LOAD_B(n, offset_k) \ svfloat64_t b##s##n##_k##offset_k = \ svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); #define GATHER_LOAD_A(pg, m, offset_k) \ svfloat64_t a##s##m##_k##offset_k = \ - svld1_gather_index(pg, &A_ELEMENT_K(v_size * m, offset_k), lda_vec); + svld1_gather_index(pg, &A_ELEMENT_K(m, offset_k), lda_vec); #define PACK_A(m, offset_k) \ svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); #define VECTOR_PACK_A(m, offset_k) \ @@ -143,26 +157,23 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
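In the transposed-A kernels the v_size elements of one k-column of the A block sit lda apart in memory, so GATHER_LOAD_A above pulls them in with an index vector built once via svindex. A condensed sketch (double precision, names as in this file):

const svuint64_t lda_vec = svindex_u64(0LL, lda);  /* offsets 0, lda, 2*lda, ... */
svfloat64_t a0 = svld1_gather_index(pg_true, &A_ELEMENT_K(0, 0), lda_vec);
/* lane v of a0 holds A[(i + v) * lda + k]: one element per row of the block */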
#ifdef B0 #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #else #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ result##m##n = \ - svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - result##m##n = svmla_m( \ - pg, \ - result##m##n, \ - svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc)), \ - beta_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); + result##m##n = svmla_m(pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \ + beta_vec); \ + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #endif #ifndef LIKELY @@ -172,13 +183,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define LIKELY(x) (x) #endif #endif -#ifndef UNLIKELY -#ifdef __GNUC__ -#define UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define UNLIKELY(x) (x) -#endif -#endif #ifdef B0 int @@ -226,14 +230,29 @@ CNAME(BLASLONG M, const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_a = - (pack_a) ? packed_a = (FLOAT*)malloc(K * 2 * v_size * sizeof(FLOAT)) : NULL; + (pack_a) ? 
packed_a = (FLOAT*)malloc(K * v_size2 * sizeof(FLOAT)) : NULL; + + FLOAT* a_offset = A; + FLOAT* b_offset = B; + FLOAT* c_offset = C; BLASLONG i = 0; for (; i < v_m2; i += v_size2) { + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, v_size); + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + BLASLONG j = 0; for (; j < n4; j += 4) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -408,9 +427,15 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 1, 1); VECTOR_STORE(pg_true, 1, 2); VECTOR_STORE(pg_true, 1, 3); + INCR_C_POINTER(0, 4); + INCR_C_POINTER(1, 4); } for (; j < n2; j += 2) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + UPDATE_B_POINTER(2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -484,9 +509,14 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 1, 0); VECTOR_STORE(pg_true, 1, 1); + INCR_C_POINTER(0, 2); + INCR_C_POINTER(1, 2); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(1, 0); @@ -512,13 +542,28 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 1, 0); + INCR_C_POINTER(0, 1); + INCR_C_POINTER(1, 1); } + + UPDATE_A_POINTER(v_size2); + RESET_B_POINTER(); + UPDATE_C_POINTER(v_size2); } for (; i < v_m1; i += v_size) { + CREATE_C_POINTER(0, 0); + CREATE_A_POINTER(0, 0); + BLASLONG j = 0; for (; j < n4; j += 4) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -562,9 +607,14 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 0, 2); VECTOR_STORE(pg_true, 0, 3); + INCR_C_POINTER(0, 4); } for (; j < n2; j += 2) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + UPDATE_B_POINTER(2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -592,9 +642,13 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); + INCR_C_POINTER(0, 2); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -605,14 +659,27 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); } VECTOR_STORE(pg_true, 0, 0); + INCR_C_POINTER(0, 1); } + + UPDATE_A_POINTER(v_size); + RESET_B_POINTER(); + UPDATE_C_POINTER(v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + CREATE_C_POINTER(0, 0); + CREATE_A_POINTER(0, 0); BLASLONG j = 0; for (; j < n4; j += 4) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -656,9 +723,14 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_tail, 0, 1); VECTOR_STORE(pg_tail, 0, 2); VECTOR_STORE(pg_tail, 0, 3); + INCR_C_POINTER(0, 4); } for (; j < n2; j += 2) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + UPDATE_B_POINTER(2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -686,9 +758,13 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_tail, 0, 0); VECTOR_STORE(pg_tail, 0, 1); + INCR_C_POINTER(0, 2); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -699,11 +775,16 @@ 
CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); } VECTOR_STORE(pg_tail, 0, 0); + INCR_C_POINTER(0, 1); } + + UPDATE_A_POINTER(0); + RESET_B_POINTER(); + UPDATE_C_POINTER(0); } if (pack_a) free(packed_a); return 0; -} +} \ No newline at end of file diff --git a/kernel/arm64/dgemm_small_kernel_tt_sve.c b/kernel/arm64/dgemm_small_kernel_tt_sve.c index 12fc0b59e..aa5bf2751 100644 --- a/kernel/arm64/dgemm_small_kernel_tt_sve.c +++ b/kernel/arm64/dgemm_small_kernel_tt_sve.c @@ -46,15 +46,30 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. }) #endif -#define A_ELEMENT_K(m, offset_k) A[(i + (m)) * lda + (k + offset_k)] +#define RESET_A_POINTER() a_offset = A; + +#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale * lda; +#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale * lda; +#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k)) #define A_ELEMENT(m) A_ELEMENT_K(m, 0) -#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) * ldb + (j + (n))] +#define RESET_B_POINTER() b_offset = B; + +#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale; +#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale; +#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k) * ldb) #define B_ELEMENT(n) B_ELEMENT_K(n, 0) -#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] +#define CREATE_C_POINTER(m, scale) FLOAT* c_offset##m = c_offset + scale; +#define INCR_C_POINTER(m, incr) // c_offset ## m += incr * ldc; +#define UPDATE_C_POINTER(scale) c_offset += scale; +#define C_ELEMENT(m, n) \ + *(c_offset##m + ((j + n) * ldc)) // C[(i+(m))+(j+(n))*ldc] -#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * 2 * v_size + m] +// #undef C_ELEMENT +// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] + +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size2 + m] #define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) // ASIMD @@ -103,14 +118,13 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define BROADCAST_LOAD_B(n, offset_k) \ svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k)); #define VECTOR_LOAD_A(pg, m, offset_k) \ - svfloat64_t a##s##m##_k##offset_k = \ - svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); + svfloat64_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k)); #define QUADWORD_LOAD_B(n, offset_k) \ svfloat64_t b##s##n##_k##offset_k = \ svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); #define GATHER_LOAD_A(pg, m, offset_k) \ svfloat64_t a##s##m##_k##offset_k = \ - svld1_gather_index(pg, &A_ELEMENT_K(v_size * m, offset_k), lda_vec); + svld1_gather_index(pg, &A_ELEMENT_K(m, offset_k), lda_vec); #define PACK_A(m, offset_k) \ svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); #define VECTOR_PACK_A(m, offset_k) \ @@ -134,26 +148,23 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
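The VECTOR_STORE hunk below keeps two compile-time variants: with B0 defined the kernel is compiled for beta == 0 and never reads C, otherwise the old value of C is blended back in. Stripped of the macro plumbing, the two paths are roughly (hypothetical names acc and cptr):

#ifdef B0                                            /* beta == 0: C = alpha * acc */
acc = svmul_m(pg, acc, alpha_vec);
svst1(pg, cptr, acc);
#else                                                /* C = alpha * acc + beta * C */
acc = svmul_m(pg, acc, alpha_vec);
acc = svmla_m(pg, acc, svld1(pg, cptr), beta_vec);   /* acc += beta * C_old        */
svst1(pg, cptr, acc);
#endif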
#ifdef B0 #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #else #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ result##m##n = \ - svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - result##m##n = svmla_m( \ - pg, \ - result##m##n, \ - svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u64(0LL, ldc)), \ - beta_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u64(0LL, ldc), result##m##n); + result##m##n = svmla_m(pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \ + beta_vec); \ + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #endif #ifndef LIKELY @@ -163,13 +174,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define LIKELY(x) (x) #endif #endif -#ifndef UNLIKELY -#ifdef __GNUC__ -#define UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define UNLIKELY(x) (x) -#endif -#endif #ifdef B0 int @@ -216,14 +220,29 @@ CNAME(BLASLONG M, const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_a = - (pack_a) ? packed_a = (FLOAT*)malloc(K * 2 * v_size * sizeof(FLOAT)) : NULL; + (pack_a) ? 
packed_a = (FLOAT*)malloc(K * v_size2 * sizeof(FLOAT)) : NULL; + + FLOAT* a_offset = A; + FLOAT* b_offset = B; + FLOAT* c_offset = C; BLASLONG i = 0; for (; i < v_m2; i += v_size2) { + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, v_size); + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + BLASLONG j = 0; for (; j < n4; j += 4) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -295,9 +314,15 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 1, 1); VECTOR_STORE(pg_true, 1, 2); VECTOR_STORE(pg_true, 1, 3); + INCR_C_POINTER(0, 4); + INCR_C_POINTER(1, 4); } for (; j < n2; j += 2) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + UPDATE_B_POINTER(2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -331,9 +356,14 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 1, 0); VECTOR_STORE(pg_true, 1, 1); + INCR_C_POINTER(0, 2); + INCR_C_POINTER(1, 2); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(1, 0); @@ -359,13 +389,28 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 1, 0); + INCR_C_POINTER(0, 1); + INCR_C_POINTER(1, 1); } + + UPDATE_A_POINTER(v_size2); + RESET_B_POINTER(); + UPDATE_C_POINTER(v_size2); } for (; i < v_m1; i += v_size) { + CREATE_C_POINTER(0, 0); + CREATE_A_POINTER(0, 0); + BLASLONG j = 0; for (; j < n4; j += 4) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -386,9 +431,14 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 0, 2); VECTOR_STORE(pg_true, 0, 3); + INCR_C_POINTER(0, 4); } for (; j < n2; j += 2) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + UPDATE_B_POINTER(2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -402,9 +452,13 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); + INCR_C_POINTER(0, 2); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -415,14 +469,27 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); } VECTOR_STORE(pg_true, 0, 0); + INCR_C_POINTER(0, 1); } + + UPDATE_A_POINTER(v_size); + RESET_B_POINTER(); + UPDATE_C_POINTER(v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M)); + CREATE_C_POINTER(0, 0); + CREATE_A_POINTER(0, 0); BLASLONG j = 0; for (; j < n4; j += 4) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -443,9 +510,14 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_tail, 0, 1); VECTOR_STORE(pg_tail, 0, 2); VECTOR_STORE(pg_tail, 0, 3); + INCR_C_POINTER(0, 4); } for (; j < n2; j += 2) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + UPDATE_B_POINTER(2); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -459,9 +531,13 @@ CNAME(BLASLONG M, } VECTOR_STORE(pg_tail, 0, 0); VECTOR_STORE(pg_tail, 0, 1); + INCR_C_POINTER(0, 2); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -472,11 +548,16 @@ 
CNAME(BLASLONG M,
         UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0);
       }
       VECTOR_STORE(pg_tail, 0, 0);
+      INCR_C_POINTER(0, 1);
     }
+
+    UPDATE_A_POINTER(0);
+    RESET_B_POINTER();
+    UPDATE_C_POINTER(0);
   }
 
   if (pack_a)
     free(packed_a);
 
   return 0;
-}
+}
\ No newline at end of file
diff --git a/kernel/arm64/gemm_small_kernel_permit_sve.c b/kernel/arm64/gemm_small_kernel_permit_sve.c
index 9526dbbe2..c1275129d 100644
--- a/kernel/arm64/gemm_small_kernel_permit_sve.c
+++ b/kernel/arm64/gemm_small_kernel_permit_sve.c
@@ -32,16 +32,14 @@ int CNAME(int transa, int transb, BLASLONG M, BLASLONG N, BLASLONG K, FLOAT alph
   BLASLONG MNK = M * N * K;
 
 #if defined(DOUBLE) // dgemm
-  // TN prefers full copies much earlier
-  if (transa && !transb && MNK > 16*16*16) {
-    return 0;
-  }
-#else // sgemm
-  // TODO!
-#endif
-
   if (MNK <= 64*64*64)
     return 1;
+#else // sgemm
+  if (MNK <= 256*256*256)
+    return 1;
+#endif
+
+  return 0;
 }
diff --git a/kernel/arm64/sgemm_small_kernel_nn_sve.c b/kernel/arm64/sgemm_small_kernel_nn_sve.c
index 85c7cfa86..2e65e61ff 100644
--- a/kernel/arm64/sgemm_small_kernel_nn_sve.c
+++ b/kernel/arm64/sgemm_small_kernel_nn_sve.c
@@ -46,15 +46,29 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 })
 #endif
 
-#define A_ELEMENT_K(m, offset_k) A[(i + (m)) + (k + offset_k) * lda]
+#define RESET_A_POINTER() a_offset = A;
+
+#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale;
+#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale;
+#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k) * lda)
 #define A_ELEMENT(m) A_ELEMENT_K(m, 0)
 
-#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) + (j + (n)) * ldb]
+#define RESET_B_POINTER() b_offset = B;
+
+#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale * ldb;
+#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale * ldb;
+#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k))
 #define B_ELEMENT(n) B_ELEMENT_K(n, 0)
 
-#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc]
+#define CREATE_C_POINTER(n, scale) FLOAT* c_offset##n = c_offset + scale * ldc;
+#define INCR_C_POINTER(m, incr) // c_offset ## m += incr;
+#define UPDATE_C_POINTER(scale) c_offset = c_offset + scale * ldc;
+#define C_ELEMENT(m, n) *(c_offset##n + ((m * v_size) + i))
 
-#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 8 + n]
+// #undef C_ELEMENT
+// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc]
+
+#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 4 + n]
 #define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0)
 
 // ASIMD
@@ -141,8 +155,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define BROADCAST_LOAD_B(n, offset_k) \
   svfloat32_t b##s##n##_k##offset_k = svdup_f32(B_ELEMENT_K(n, offset_k));
 #define VECTOR_LOAD_A(pg, m, offset_k) \
-  svfloat32_t a##s##m##_k##offset_k = \
-    svld1(pg, &A_ELEMENT_K(v_size * m, offset_k));
+  svfloat32_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k));
 #define QUADWORD_LOAD_B(n, offset_k) \
   svfloat32_t b##s##n##_k##offset_k = \
     svld1rq(pg_true, &B_ELEMENT_K(n, offset_k));
@@ -169,26 +182,23 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
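The permit change above drops the dgemm TN special case and gives sgemm its own budget. As a standalone predicate the new gate reduces to roughly the following (an illustrative sketch, with is_double standing in for the #if defined(DOUBLE) compile-time branch; the real entry point also receives the transpose flags and alpha/beta, which it now ignores):

/* Sketch only: model of the size gate above. Returns 1 to take the
 * small-kernel path, 0 to fall back to the packed GEMM. */
static int small_gemm_permit(int is_double, long M, long N, long K)
{
  long MNK = M * N * K;
  if (is_double)
    return MNK <= 64L * 64L * 64L;   /* dgemm: small-kernel path up to 64^3 work */
  return MNK <= 256L * 256L * 256L;  /* sgemm: wider f32 vectors, larger budget */
}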
#ifdef B0 #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #else #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ result##m##n = \ - svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - result##m##n = svmla_m( \ - pg, \ - result##m##n, \ - svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u32(0LL, ldc)), \ - beta_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); + result##m##n = svmla_m(pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \ + beta_vec); \ + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #endif #ifndef LIKELY @@ -198,13 +208,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define LIKELY(x) (x) #endif #endif -#ifndef UNLIKELY -#ifdef __GNUC__ -#define UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define UNLIKELY(x) (x) -#endif -#endif #ifdef B0 int @@ -234,7 +237,6 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntw(); - const uint64_t v_size2 = v_size * 2; const svbool_t pg_true = svptrue_b32(); const svbool_t pg_quad = svwhilelt_b32(0, 4); const svbool_t pg_first = svwhilelt_b32(0, 1); @@ -242,39 +244,41 @@ CNAME(BLASLONG M, #ifndef B0 const svfloat32_t beta_vec = svdup_f32(beta); #endif - const BLASLONG n8 = N & -8; const BLASLONG n4 = N & -4; - const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; const BLASLONG k4 = K & -4; - const int pack_b = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + const int pack_b = M >= v_size && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_b = - (pack_b) ? packed_b = (FLOAT*)malloc(K * 8 * sizeof(FLOAT)) : NULL; + (pack_b) ? 
packed_b = (FLOAT*)malloc(K * 4 * sizeof(FLOAT)) : NULL; + + FLOAT* b_offset = B; + FLOAT* a_offset = A; + FLOAT* c_offset = C; BLASLONG j = 0; - for (; j < n8; j += 8) { + for (; j < n4; j += 4) { + + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, 1); + CREATE_C_POINTER(2, 2); + CREATE_C_POINTER(3, 3); + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); BLASLONG i = 0; - for (; i < v_m2; i += v_size2) { + for (; i < v_m1; i += v_size) { + + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(1, 2); - DECLARE_RESULT_VECTOR(1, 3); - DECLARE_RESULT_VECTOR(1, 4); - DECLARE_RESULT_VECTOR(1, 5); - DECLARE_RESULT_VECTOR(1, 6); - DECLARE_RESULT_VECTOR(1, 7); if (LIKELY(packed_b != NULL)) { if (i == 0) { @@ -310,68 +314,6 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - VECTOR_PACK_B4(4, 0); - VECTOR_PACK_B4(4, 1); - VECTOR_PACK_B4(4, 2); - VECTOR_PACK_B4(4, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); - VECTOR_LOAD_A(pg_true, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 
4, 3, 2); - VECTOR_LOAD_A(pg_true, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); } for (; k < K; k++) { @@ -382,33 +324,12 @@ CNAME(BLASLONG M, BROADCAST_LOAD_B(1, 0); PACK_B(1, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); BROADCAST_LOAD_B(2, 0); PACK_B(2, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); BROADCAST_LOAD_B(3, 0); PACK_B(3, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); - BROADCAST_LOAD_B(4, 0); - PACK_B(4, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); - BROADCAST_LOAD_B(5, 0); - PACK_B(5, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); - BROADCAST_LOAD_B(6, 0); - PACK_B(6, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); - BROADCAST_LOAD_B(7, 0); - PACK_B(7, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); } } else { for (; k < K; k++) { @@ -419,20 +340,6 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_QUADWORD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); } } } else { @@ -464,190 +371,6 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - 
UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); - VECTOR_LOAD_A(pg_true, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); - VECTOR_LOAD_A(pg_true, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); - BROADCAST_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); - BROADCAST_LOAD_B(5, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); - BROADCAST_LOAD_B(6, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); - BROADCAST_LOAD_B(7, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); - } - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 0, 4); - VECTOR_STORE(pg_true, 0, 5); - VECTOR_STORE(pg_true, 0, 6); - VECTOR_STORE(pg_true, 0, 7); - VECTOR_STORE(pg_true, 1, 0); - VECTOR_STORE(pg_true, 1, 1); - VECTOR_STORE(pg_true, 1, 2); - VECTOR_STORE(pg_true, 1, 3); - VECTOR_STORE(pg_true, 1, 4); - VECTOR_STORE(pg_true, 1, 5); - VECTOR_STORE(pg_true, 1, 6); - VECTOR_STORE(pg_true, 1, 7); - } - for (; i < v_m1; i += v_size) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - - if (LIKELY(packed_b != NULL)) { - for (; k < K; k++) { - - UNPACK_QUADWORD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - 
UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_QUADWORD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - } - } else { - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - VECTOR_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - VECTOR_LOAD_A(pg_true, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - VECTOR_LOAD_A(pg_true, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); } for (; k < K; k++) { @@ -660,37 +383,27 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); BROADCAST_LOAD_B(3, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - BROADCAST_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); - BROADCAST_LOAD_B(5, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); - BROADCAST_LOAD_B(6, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); - BROADCAST_LOAD_B(7, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); } } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 0, 2); VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 0, 4); - VECTOR_STORE(pg_true, 0, 5); - VECTOR_STORE(pg_true, 0, 6); - VECTOR_STORE(pg_true, 0, 7); + INCR_C_POINTER(0, v_size); + INCR_C_POINTER(1, v_size); + INCR_C_POINTER(2, v_size); + INCR_C_POINTER(3, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - 
DECLARE_RESULT_VECTOR(0, 7); if (LIKELY(packed_b != NULL)) { for (; k < K; k++) { @@ -701,11 +414,6 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_QUADWORD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); } } else { for (; k < k4; k += 4) { @@ -736,28 +444,6 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); } for (; k < K; k++) { @@ -770,248 +456,33 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); BROADCAST_LOAD_B(3, 0); UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); - BROADCAST_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 4, 0); - BROADCAST_LOAD_B(5, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 5, 0); - BROADCAST_LOAD_B(6, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 6, 0); - BROADCAST_LOAD_B(7, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 7, 0); } } VECTOR_STORE(pg_tail, 0, 0); VECTOR_STORE(pg_tail, 0, 1); VECTOR_STORE(pg_tail, 0, 2); VECTOR_STORE(pg_tail, 0, 3); - VECTOR_STORE(pg_tail, 0, 4); - VECTOR_STORE(pg_tail, 0, 5); - VECTOR_STORE(pg_tail, 0, 6); - VECTOR_STORE(pg_tail, 0, 7); + INCR_C_POINTER(0, 0); + INCR_C_POINTER(1, 0); + INCR_C_POINTER(2, 0); + INCR_C_POINTER(3, 0); } - } - for (; j < n4; j += 4) { - BLASLONG i = 0; - for (; i < v_m2; i += v_size2) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(1, 2); - DECLARE_RESULT_VECTOR(1, 3); - - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - VECTOR_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - VECTOR_LOAD_A(pg_true, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); 
- UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - VECTOR_LOAD_A(pg_true, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - VECTOR_LOAD_A(pg_true, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); - VECTOR_LOAD_A(pg_true, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); - VECTOR_LOAD_A(pg_true, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 1, 0); - VECTOR_STORE(pg_true, 1, 1); - VECTOR_STORE(pg_true, 1, 2); - VECTOR_STORE(pg_true, 1, 3); - } - for (; i < v_m1; i += v_size) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - VECTOR_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - VECTOR_LOAD_A(pg_true, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - VECTOR_LOAD_A(pg_true, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - 
BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - } - for (; i < M; i += v_size) { - const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - VECTOR_LOAD_A(pg_tail, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - VECTOR_LOAD_A(pg_tail, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - VECTOR_LOAD_A(pg_tail, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - VECTOR_LOAD_A(pg_tail, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_tail, 0, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); - } - VECTOR_STORE(pg_tail, 0, 0); - VECTOR_STORE(pg_tail, 0, 1); - VECTOR_STORE(pg_tail, 0, 2); - VECTOR_STORE(pg_tail, 0, 3); - } + UPDATE_B_POINTER(4); + RESET_A_POINTER(); + UPDATE_C_POINTER(4); } for (; j < N; j++) { + CREATE_C_POINTER(0, 0); + CREATE_B_POINTER(0, 0); + BLASLONG i = 0; - for (; i < v_m2; i += v_size2) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(1, 0); - - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 1, 0); - } for (; i < v_m1; i += v_size) { + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -1022,9 +493,12 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); } VECTOR_STORE(pg_true, 0, 0); + INCR_C_POINTER(0, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -1036,11 +510,16 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); } VECTOR_STORE(pg_tail, 0, 0); + INCR_C_POINTER(0, 0); } + + UPDATE_B_POINTER(1); + RESET_A_POINTER(); + UPDATE_C_POINTER(1); } if (pack_b) free(packed_b); return 0; -} +} \ No newline at end of file diff --git a/kernel/arm64/sgemm_small_kernel_nt_sve.c b/kernel/arm64/sgemm_small_kernel_nt_sve.c index 1c3d324d0..9f99c2422 100644 --- a/kernel/arm64/sgemm_small_kernel_nt_sve.c +++ b/kernel/arm64/sgemm_small_kernel_nt_sve.c @@ -46,15 +46,29 @@ 
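With the 8-wide column blocking removed, sgemm_small_kernel_nn_sve.c walks C one block of columns at a time through the new pointer macros instead of full index arithmetic. Stripped of vectorization, unrolling, and B packing, its loop structure reduces to this scalar model (a sketch only; alpha scaling shown, beta handling omitted):

/* Scalar model of the NN kernel's pointer walk; names mirror the macros. */
static void sgemm_nn_model(const float* A, const float* B, float* C,
                           long M, long N, long K,
                           long lda, long ldb, long ldc, float alpha)
{
  const float* b_offset = B;     /* bumped per column: UPDATE_B_POINTER */
  float* c_offset = C;           /* bumped per column: UPDATE_C_POINTER */
  for (long j = 0; j < N; j++) {
    const float* a_offset = A;   /* RESET_A_POINTER() for each new column */
    for (long i = 0; i < M; i++) {
      float acc = 0.0f;
      for (long k = 0; k < K; k++)
        acc += a_offset[k * lda] * b_offset[k];  /* A_ELEMENT_K * B_ELEMENT_K */
      c_offset[i] = alpha * acc; /* B0 path of VECTOR_STORE */
      a_offset += 1;             /* next row of A */
    }
    b_offset += ldb;
    c_offset += ldc;
  }
}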
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. }) #endif -#define A_ELEMENT_K(m, offset_k) A[(i + (m)) + (k + offset_k) * lda] +#define RESET_A_POINTER() a_offset = A; + +#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale; +#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale; +#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k) * lda) #define A_ELEMENT(m) A_ELEMENT_K(m, 0) -#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) * ldb + (j + (n))] +#define RESET_B_POINTER() b_offset = B; + +#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale; +#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale; +#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k) * ldb) #define B_ELEMENT(n) B_ELEMENT_K(n, 0) -#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] +#define CREATE_C_POINTER(n, scale) FLOAT* c_offset##n = c_offset + scale * ldc; +#define INCR_C_POINTER(m, incr) // c_offset ## m += incr; +#define UPDATE_C_POINTER(scale) c_offset = c_offset + scale * ldc; +#define C_ELEMENT(m, n) *(c_offset##n + ((m * v_size) + i)) -#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 8 + n] +// #undef C_ELEMENT +// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] + +#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 4 + n] #define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0) // ASIMD @@ -113,8 +127,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define BROADCAST_LOAD_B(n, offset_k) \ svfloat32_t b##s##n##_k##offset_k = svdup_f32(B_ELEMENT_K(n, offset_k)); #define VECTOR_LOAD_A(pg, m, offset_k) \ - svfloat32_t a##s##m##_k##offset_k = \ - svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); + svfloat32_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k)); #define QUADWORD_LOAD_B(n, offset_k) \ svfloat32_t b##s##n##_k##offset_k = \ svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); @@ -141,26 +154,23 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifdef B0 #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #else #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ result##m##n = \ - svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - result##m##n = svmla_m( \ - pg, \ - result##m##n, \ - svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u32(0LL, ldc)), \ - beta_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); + result##m##n = svmla_m(pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \ + beta_vec); \ + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #endif #ifndef LIKELY @@ -170,13 +180,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
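The VECTOR_STORE and SCATTER_STORE rewrites above hoist the index vector (ldc_vec) out of the macro and drop the v_size scaling that is now folded into C_ELEMENT. For one contiguous vector of C the store expands to roughly this (a sketch assuming arm_sve.h; the B0 build skips the beta accumulate):

#include <arm_sve.h>

/* Sketch of what VECTOR_STORE does for a single accumulator vector. */
static void store_c_tile(svbool_t pg, svfloat32_t acc, float* c,
                         svfloat32_t alpha_vec, svfloat32_t beta_vec)
{
  acc = svmul_m(pg, acc, alpha_vec);              /* result *= alpha */
#ifndef B0
  acc = svmla_m(pg, acc, svld1(pg, c), beta_vec); /* result += beta * C */
#endif
  svst1(pg, c, acc);                              /* predicated store to C */
}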
#define LIKELY(x) (x) #endif #endif -#ifndef UNLIKELY -#ifdef __GNUC__ -#define UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define UNLIKELY(x) (x) -#endif -#endif #ifdef B0 int @@ -206,7 +209,6 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntw(); - const uint64_t v_size3 = v_size * 3; const svbool_t pg_true = svptrue_b32(); const svbool_t pg_quad = svwhilelt_b32(0, 4); const svbool_t pg_first = svwhilelt_b32(0, 1); @@ -214,46 +216,40 @@ CNAME(BLASLONG M, #ifndef B0 const svfloat32_t beta_vec = svdup_f32(beta); #endif - const BLASLONG n8 = N & -8; const BLASLONG n4 = N & -4; - const BLASLONG v_m3 = M - (M % v_size3); const BLASLONG v_m1 = M & -v_size; - const int pack_b = M >= v_size3 && N >= 8 && K >= 8 ? 1 : 0; + const int pack_b = M >= v_size && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_b = - (pack_b) ? packed_b = (FLOAT*)malloc(K * 8 * sizeof(FLOAT)) : NULL; + (pack_b) ? packed_b = (FLOAT*)malloc(K * 4 * sizeof(FLOAT)) : NULL; + + FLOAT* b_offset = B; + FLOAT* a_offset = A; + FLOAT* c_offset = C; BLASLONG j = 0; - for (; j < n8; j += 8) { + for (; j < n4; j += 4) { + + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, 1); + CREATE_C_POINTER(2, 2); + CREATE_C_POINTER(3, 3); + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); BLASLONG i = 0; - for (; i < v_m3; i += v_size3) { + for (; i < v_m1; i += v_size) { + + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(1, 2); - DECLARE_RESULT_VECTOR(1, 3); - DECLARE_RESULT_VECTOR(1, 4); - DECLARE_RESULT_VECTOR(1, 5); - DECLARE_RESULT_VECTOR(1, 6); - DECLARE_RESULT_VECTOR(1, 7); - DECLARE_RESULT_VECTOR(2, 0); - DECLARE_RESULT_VECTOR(2, 1); - DECLARE_RESULT_VECTOR(2, 2); - DECLARE_RESULT_VECTOR(2, 3); - DECLARE_RESULT_VECTOR(2, 4); - DECLARE_RESULT_VECTOR(2, 5); - DECLARE_RESULT_VECTOR(2, 6); - DECLARE_RESULT_VECTOR(2, 7); if (LIKELY(packed_b != NULL)) { if (i == 0) { @@ -266,30 +262,6 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - QUADWORD_PACK_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 7, 4, 3, 0); } } else { for (; k < K; k++) { @@ -300,29 +272,6 @@ 
CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_QUADWORD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 7, 4, 3, 0); } } } else { @@ -334,120 +283,27 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); - VECTOR_LOAD_A(pg_true, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 7, 4, 3, 0); } } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 0, 2); VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 0, 4); - VECTOR_STORE(pg_true, 0, 5); - VECTOR_STORE(pg_true, 0, 6); - VECTOR_STORE(pg_true, 0, 7); - VECTOR_STORE(pg_true, 1, 0); - VECTOR_STORE(pg_true, 1, 1); - VECTOR_STORE(pg_true, 1, 2); - VECTOR_STORE(pg_true, 1, 3); - VECTOR_STORE(pg_true, 1, 4); - VECTOR_STORE(pg_true, 1, 5); - VECTOR_STORE(pg_true, 1, 6); - VECTOR_STORE(pg_true, 1, 7); - VECTOR_STORE(pg_true, 2, 0); - VECTOR_STORE(pg_true, 2, 1); - VECTOR_STORE(pg_true, 2, 2); - VECTOR_STORE(pg_true, 2, 3); - VECTOR_STORE(pg_true, 2, 4); - VECTOR_STORE(pg_true, 2, 5); - VECTOR_STORE(pg_true, 2, 6); - VECTOR_STORE(pg_true, 2, 7); - } - for (; i < v_m1; i += v_size) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - - if (LIKELY(packed_b != NULL)) { - for (; k < K; k++) { - 
- UNPACK_QUADWORD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_QUADWORD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - } - } else { - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - } - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 0, 4); - VECTOR_STORE(pg_true, 0, 5); - VECTOR_STORE(pg_true, 0, 6); - VECTOR_STORE(pg_true, 0, 7); + INCR_C_POINTER(0, v_size); + INCR_C_POINTER(1, v_size); + INCR_C_POINTER(2, v_size); + INCR_C_POINTER(3, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); if (LIKELY(packed_b != NULL)) { for (; k < K; k++) { @@ -458,11 +314,6 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_QUADWORD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); } } else { for (; k < K; k++) { @@ -473,146 +324,33 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); } } VECTOR_STORE(pg_tail, 0, 0); VECTOR_STORE(pg_tail, 0, 1); VECTOR_STORE(pg_tail, 0, 2); VECTOR_STORE(pg_tail, 0, 3); - VECTOR_STORE(pg_tail, 0, 4); - VECTOR_STORE(pg_tail, 0, 5); - VECTOR_STORE(pg_tail, 0, 6); - VECTOR_STORE(pg_tail, 0, 7); + INCR_C_POINTER(0, 0); + INCR_C_POINTER(1, 0); + INCR_C_POINTER(2, 0); + INCR_C_POINTER(3, 0); } - } - for (; j < n4; j += 4) { - BLASLONG i = 0; - for (; i < v_m3; i += v_size3) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(1, 2); - DECLARE_RESULT_VECTOR(1, 3); - DECLARE_RESULT_VECTOR(2, 0); - DECLARE_RESULT_VECTOR(2, 1); - DECLARE_RESULT_VECTOR(2, 2); - DECLARE_RESULT_VECTOR(2, 3); - - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - 
UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - VECTOR_LOAD_A(pg_true, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(2, 3, 0, 3, 0); - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 1, 0); - VECTOR_STORE(pg_true, 1, 1); - VECTOR_STORE(pg_true, 1, 2); - VECTOR_STORE(pg_true, 1, 3); - VECTOR_STORE(pg_true, 2, 0); - VECTOR_STORE(pg_true, 2, 1); - VECTOR_STORE(pg_true, 2, 2); - VECTOR_STORE(pg_true, 2, 3); - } - for (; i < v_m1; i += v_size) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - } - for (; i < M; i += v_size) { - const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_tail, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - } - VECTOR_STORE(pg_tail, 0, 0); - VECTOR_STORE(pg_tail, 0, 1); - VECTOR_STORE(pg_tail, 0, 2); - VECTOR_STORE(pg_tail, 0, 3); - } + UPDATE_B_POINTER(4); + RESET_A_POINTER(); + UPDATE_C_POINTER(4); } for (; j < N; j++) { + CREATE_C_POINTER(0, 0); + CREATE_B_POINTER(0, 0); + BLASLONG i = 0; - for (; i < v_m3; i += v_size3) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(2, 0); - - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - VECTOR_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - VECTOR_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - VECTOR_LOAD_A(pg_true, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 2, 0, 0); - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 1, 0); - VECTOR_STORE(pg_true, 2, 0); - } for (; i < v_m1; i += v_size) { + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -623,9 +361,12 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); } VECTOR_STORE(pg_true, 0, 0); + INCR_C_POINTER(0, v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(0); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -637,11 +378,16 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); } VECTOR_STORE(pg_tail, 0, 0); + INCR_C_POINTER(0, 0); } + + 
UPDATE_B_POINTER(1);
+    RESET_A_POINTER();
+    UPDATE_C_POINTER(1);
   }
 
   if (pack_b)
     free(packed_b);
 
   return 0;
-}
+}
\ No newline at end of file
diff --git a/kernel/arm64/sgemm_small_kernel_tn_sve.c b/kernel/arm64/sgemm_small_kernel_tn_sve.c
index 6fd3b12a6..9cbb60d40 100644
--- a/kernel/arm64/sgemm_small_kernel_tn_sve.c
+++ b/kernel/arm64/sgemm_small_kernel_tn_sve.c
@@ -46,15 +46,30 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 })
 #endif
 
-#define A_ELEMENT_K(m, offset_k) A[(i + (m)) * lda + (k + offset_k)]
+#define RESET_A_POINTER() a_offset = A;
+
+#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale * lda;
+#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale * lda;
+#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k))
 #define A_ELEMENT(m) A_ELEMENT_K(m, 0)
 
-#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) + (j + (n)) * ldb]
+#define RESET_B_POINTER() b_offset = B;
+
+#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale * ldb;
+#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale * ldb;
+#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k))
 #define B_ELEMENT(n) B_ELEMENT_K(n, 0)
 
-#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc]
+#define CREATE_C_POINTER(m, scale) FLOAT* c_offset##m = c_offset + scale;
+#define INCR_C_POINTER(m, incr) // c_offset ## m += incr * ldc;
+#define UPDATE_C_POINTER(scale) c_offset += scale;
+#define C_ELEMENT(m, n) \
+  *(c_offset##m + ((j + n) * ldc)) // C[(i+(m))+(j+(n))*ldc]
 
-#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * 2 * v_size + m]
+// #undef C_ELEMENT
+// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc]
+
+#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size + m]
 #define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0)
 
 // ASIMD
@@ -65,36 +80,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
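In the TN kernel the element macros above index A along k within a row, so a vector of A at fixed k has stride lda across rows. The kernel therefore gathers with an lda index vector and, when packing is enabled, stores the gathered vector contiguously so later column blocks reload it with a plain svld1. Roughly (a sketch assuming arm_sve.h and lda counted in elements):

#include <arm_sve.h>

/* Sketch of the GATHER_LOAD_A / VECTOR_PACK_A pair for one k step. */
static void gather_and_pack_a(const float* a_offset0, float* packed_a,
                              uint32_t lda, uint64_t k)
{
  const svbool_t pg_true = svptrue_b32();
  const uint64_t v_size = svcntw();
  const svuint32_t lda_vec = svindex_u32(0, lda); /* 0, lda, 2*lda, ... */
  /* one k-column across v_size rows of the transposed A */
  svfloat32_t a_col = svld1_gather_index(pg_true, &a_offset0[k], lda_vec);
  /* PACK_ELEMENT_K(m, offset_k) layout: packed_a[k * v_size + m] */
  svst1(pg_true, &packed_a[k * v_size], a_col);
}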
float32x4_t a##m##_k##offset_k = vld1q_dup_f32(&A_ELEMENT_K(m, offset_k)); #define LOAD_A1(m, offset_k) \ float32_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k); -#define VECTOR_LOAD_B_K4(n, offset_k) \ - float32x4_t b##k##n##_k##offset_k = vld1q_f32(&B_ELEMENT_K(n, offset_k)); -#define TRANSPOSE_B4_K4( \ - n0, n1, n2, n3, offset_k0, offset_k1, offset_k2, offset_k3) \ - float32x4_t b##t##n0##_k##offset_k0 = \ - vzip1q_f32(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \ - float32x4_t b##t##n0##_k##offset_k1 = \ - vzip2q_f32(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \ - float32x4_t b##t##n0##_k##offset_k2 = \ - vzip1q_f32(b##k##n2##_k##offset_k0, b##k##n3##_k##offset_k0); \ - float32x4_t b##t##n0##_k##offset_k3 = \ - vzip2q_f32(b##k##n2##_k##offset_k0, b##k##n3##_k##offset_k0); \ - float32x4_t b##n0##_k##offset_k0 = vreinterpretq_f32_f64( \ - vzip1q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k0), \ - vreinterpretq_f64_f32(b##t##n0##_k##offset_k2))); \ - float32x4_t b##n0##_k##offset_k1 = vreinterpretq_f32_f64( \ - vzip2q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k0), \ - vreinterpretq_f64_f32(b##t##n0##_k##offset_k2))); \ - float32x4_t b##n0##_k##offset_k2 = vreinterpretq_f32_f64( \ - vzip1q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k1), \ - vreinterpretq_f64_f32(b##t##n0##_k##offset_k3))); \ - float32x4_t b##n0##_k##offset_k3 = vreinterpretq_f32_f64( \ - vzip2q_f64(vreinterpretq_f64_f32(b##t##n0##_k##offset_k1), \ - vreinterpretq_f64_f32(b##t##n0##_k##offset_k3))); - -#define SCALE_B4_K4(n0, offset_k0, offset_k1, offset_k2, offset_k3) \ - svfloat32_t b##s##n0##_k##offset_k0 = svdup_neonq_f32(b##n0##_k##offset_k0); \ - svfloat32_t b##s##n0##_k##offset_k1 = svdup_neonq_f32(b##n0##_k##offset_k1); \ - svfloat32_t b##s##n0##_k##offset_k2 = svdup_neonq_f32(b##n0##_k##offset_k2); \ - svfloat32_t b##s##n0##_k##offset_k3 = svdup_neonq_f32(b##n0##_k##offset_k3); #define GATHER_LOAD_B4(n, offset_k) \ float32x4_t b##n##_k##offset_k = vdupq_n_f32(B_ELEMENT_K(n, offset_k)); \ b##n##_k##offset_k = \ @@ -105,8 +90,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vsetq_lane_f32(B_ELEMENT_K(n + 3, offset_k), b##n##_k##offset_k, 3); #define VECTOR_UNPACK_B4(n, offset_k) \ float32x4_t b##n##_k##offset_k = vld1q_f32(&PACK_ELEMENT_K(n, offset_k)); -#define VECTOR_PACK_B4(n, offset_k) \ - vst1q_f32(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k); #define PACK_B0(n, offset_k) \ PACK_ELEMENT_K(n, offset_k) = vget_lane_f32(b##n##_k##offset_k, 0); #define UPDATE_RESULT_VECTOR4(m, n, offset_k) \ @@ -141,14 +124,10 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
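Throughout these kernels the final partial vector of rows is covered by a governing predicate from svwhilelt rather than a scalar remainder loop: the last pass of the i loop runs with pg_tail active only for the M - i rows that actually exist. The same pattern in isolation (a sketch assuming arm_sve.h):

#include <arm_sve.h>

/* Predicated loop over n floats; no scalar cleanup needed. */
static void scale_f32(float* x, uint64_t n, float alpha)
{
  const uint64_t v_size = svcntw();
  const svfloat32_t alpha_vec = svdup_f32(alpha);
  for (uint64_t i = 0; i < n; i += v_size) {
    /* all-true for full vectors, partially true on the final tail */
    const svbool_t pg = svwhilelt_b32((uint32_t)i, (uint32_t)n);
    svfloat32_t v = svld1(pg, &x[i]);
    v = svmul_m(pg, v, alpha_vec);
    svst1(pg, &x[i], v);
  }
}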
#define BROADCAST_LOAD_B(n, offset_k) \ svfloat32_t b##s##n##_k##offset_k = svdup_f32(B_ELEMENT_K(n, offset_k)); #define VECTOR_LOAD_A(pg, m, offset_k) \ - svfloat32_t a##s##m##_k##offset_k = \ - svld1(pg, &A_ELEMENT_K(v_size * m, offset_k)); -#define QUADWORD_LOAD_B(n, offset_k) \ - svfloat32_t b##s##n##_k##offset_k = \ - svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); + svfloat32_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k)); #define GATHER_LOAD_A(pg, m, offset_k) \ svfloat32_t a##s##m##_k##offset_k = \ - svld1_gather_index(pg, &A_ELEMENT_K(v_size * m, offset_k), lda_vec); + svld1_gather_index(pg, &A_ELEMENT_K(m, offset_k), lda_vec); #define PACK_A(m, offset_k) \ svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); #define VECTOR_PACK_A(m, offset_k) \ @@ -172,26 +151,23 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifdef B0 #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #else #define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ result##m##n = \ - svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(v_size * m, n)), beta_vec); \ - svst1(pg, &C_ELEMENT(v_size* m, n), result##m##n); + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - result##m##n = svmla_m( \ - pg, \ - result##m##n, \ - svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u32(0LL, ldc)), \ - beta_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); + result##m##n = svmla_m(pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \ + beta_vec); \ + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #endif #ifndef LIKELY @@ -201,18 +177,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define LIKELY(x) (x) #endif #endif -#ifndef UNLIKELY -#ifdef __GNUC__ -#define UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define UNLIKELY(x) (x) -#endif -#endif - - -#define GATHER_LOAD_A64(pg, m, offset_k) \ - svfloat64_t a##t##m##_k##offset_k = \ - svld1_gather_offset(pg, (double *)&A_ELEMENT_K(v64_size * m, offset_k), lda_vec64); #ifdef B0 int @@ -242,8 +206,6 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntw(); - const uint64_t v_size2 = v_size * 2; - const uint64_t v64_size = v_size / 2; const svbool_t pg_true = svptrue_b32(); const svbool_t pg_quad = svwhilelt_b32(0, 4); const svbool_t pg_first = svwhilelt_b32(0, 1); @@ -252,173 +214,41 @@ CNAME(BLASLONG M, const svfloat32_t beta_vec = svdup_f32(beta); #endif const svuint32_t lda_vec = svindex_u32(0LL, lda); - const svuint64_t lda_vec64 = svmul_m(pg_true, svindex_u64(0,sizeof(FLOAT)), lda); - const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; - const BLASLONG n8 = N & -8; const BLASLONG n4 = N & -4; - const BLASLONG k4 = K & -4; - const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + const int pack_a = M >= v_size && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_a = - (pack_a) ? 
packed_a = (FLOAT*)malloc(K * 2 * v_size * sizeof(FLOAT)) : NULL; + (pack_a) ? packed_a = (FLOAT*)malloc(K * v_size * sizeof(FLOAT)) : NULL; + + FLOAT* a_offset = A; + FLOAT* b_offset = B; + FLOAT* c_offset = C; BLASLONG i = 0; - for (; i < v_m2; i += v_size2) { + for (; i < v_m1; i += v_size) { + + CREATE_C_POINTER(0, 0); + CREATE_A_POINTER(0, 0); BLASLONG j = 0; - for (; j < n8; j += 8) { + for (; j < n4; j += 4) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(1, 2); - DECLARE_RESULT_VECTOR(1, 3); - DECLARE_RESULT_VECTOR(1, 4); - DECLARE_RESULT_VECTOR(1, 5); - DECLARE_RESULT_VECTOR(1, 6); - DECLARE_RESULT_VECTOR(1, 7); if (LIKELY(packed_a != NULL)) { if (j == 0) { - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - - GATHER_LOAD_A64(pg_true, 0, 0); - GATHER_LOAD_A64(pg_true, 1, 0); - svfloat32_t as0_k0 = svuzp1(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); - svfloat32_t as0_k1 = svuzp2(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); - VECTOR_PACK_A(0, 0); - VECTOR_PACK_A(0, 1); - - // GATHER_LOAD_A(pg_true, 0, 0); - // VECTOR_PACK_A(0, 0); - // GATHER_LOAD_A(pg_true, 0, 1); - // VECTOR_PACK_A(0, 1); - - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - - GATHER_LOAD_A64(pg_true, 0, 2); - GATHER_LOAD_A64(pg_true, 1, 2); - svfloat32_t as0_k2 = svuzp1(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); - svfloat32_t as0_k3 = svuzp2(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); - VECTOR_PACK_A(0, 2); - VECTOR_PACK_A(0, 3); - - // GATHER_LOAD_A(pg_true, 0, 2); - // VECTOR_PACK_A(0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - // GATHER_LOAD_A(pg_true, 0, 3); - // VECTOR_PACK_A(0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - 
UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); - - GATHER_LOAD_A64(pg_true, 2, 0); - GATHER_LOAD_A64(pg_true, 3, 0); - svfloat32_t as1_k0 = svuzp1(svreinterpret_f32(at2_k0), svreinterpret_f32(at3_k0)); - svfloat32_t as1_k1 = svuzp2(svreinterpret_f32(at2_k0), svreinterpret_f32(at3_k0)); - VECTOR_PACK_A(1, 0); - VECTOR_PACK_A(1, 1); - - // GATHER_LOAD_A(pg_true, 1, 0); - // VECTOR_PACK_A(1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); - // GATHER_LOAD_A(pg_true, 1, 1); - // VECTOR_PACK_A(1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); - - // 64-bit load x2 then unzip into 32-bit - GATHER_LOAD_A64(pg_true, 2, 2); - GATHER_LOAD_A64(pg_true, 3, 2); - svfloat32_t as1_k2 = svuzp1(svreinterpret_f32(at2_k2), svreinterpret_f32(at3_k2)); - svfloat32_t as1_k3 = svuzp2(svreinterpret_f32(at2_k2), svreinterpret_f32(at3_k2)); - VECTOR_PACK_A(1, 2); - VECTOR_PACK_A(1, 3); - - // GATHER_LOAD_A(pg_true, 1, 2); - // VECTOR_PACK_A(1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); - // GATHER_LOAD_A(pg_true, 1, 3); - // VECTOR_PACK_A(1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -427,117 +257,12 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); BROADCAST_LOAD_B(1, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - GATHER_LOAD_A(pg_true, 1, 0); - VECTOR_PACK_A(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); BROADCAST_LOAD_B(2, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); BROADCAST_LOAD_B(3, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); - BROADCAST_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); - BROADCAST_LOAD_B(5, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); - BROADCAST_LOAD_B(6, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 
6, 0); - BROADCAST_LOAD_B(7, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); } } else { - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_VECTOR_A(0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - UNPACK_VECTOR_A(0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - UNPACK_VECTOR_A(0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); - UNPACK_VECTOR_A(1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); - UNPACK_VECTOR_A(1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); - UNPACK_VECTOR_A(1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); - UNPACK_VECTOR_A(1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); - 
UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -545,117 +270,13 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); BROADCAST_LOAD_B(1, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - UNPACK_VECTOR_A(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); BROADCAST_LOAD_B(2, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); BROADCAST_LOAD_B(3, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); - BROADCAST_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); - BROADCAST_LOAD_B(5, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); - BROADCAST_LOAD_B(6, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); - BROADCAST_LOAD_B(7, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); } } } else { - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - GATHER_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - GATHER_LOAD_A(pg_true, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - GATHER_LOAD_A(pg_true, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); - GATHER_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - 
UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); - GATHER_LOAD_A(pg_true, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 1); - GATHER_LOAD_A(pg_true, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 2); - GATHER_LOAD_A(pg_true, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 3); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -663,207 +284,25 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); BROADCAST_LOAD_B(1, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - GATHER_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); BROADCAST_LOAD_B(2, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); BROADCAST_LOAD_B(3, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); - BROADCAST_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); - BROADCAST_LOAD_B(5, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); - BROADCAST_LOAD_B(6, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); - BROADCAST_LOAD_B(7, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); } } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); VECTOR_STORE(pg_true, 0, 2); VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 0, 4); - VECTOR_STORE(pg_true, 0, 5); - VECTOR_STORE(pg_true, 0, 6); - VECTOR_STORE(pg_true, 0, 7); - VECTOR_STORE(pg_true, 1, 0); - VECTOR_STORE(pg_true, 1, 1); - VECTOR_STORE(pg_true, 1, 2); - VECTOR_STORE(pg_true, 1, 3); - VECTOR_STORE(pg_true, 1, 4); - VECTOR_STORE(pg_true, 1, 5); - VECTOR_STORE(pg_true, 1, 6); - VECTOR_STORE(pg_true, 1, 7); - } - for (; j < n4; j += 4) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(1, 2); - DECLARE_RESULT_VECTOR(1, 3); - - if (LIKELY(packed_a != NULL)) { - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); 
- UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_VECTOR_A(0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - UNPACK_VECTOR_A(0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - UNPACK_VECTOR_A(0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - UNPACK_VECTOR_A(1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UNPACK_VECTOR_A(1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); - UNPACK_VECTOR_A(1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); - UNPACK_VECTOR_A(1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - UNPACK_VECTOR_A(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); - } - } else { - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - GATHER_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - GATHER_LOAD_A(pg_true, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - GATHER_LOAD_A(pg_true, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - GATHER_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - GATHER_LOAD_A(pg_true, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); 
- UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); - GATHER_LOAD_A(pg_true, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); - GATHER_LOAD_A(pg_true, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - GATHER_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); - } - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 1, 0); - VECTOR_STORE(pg_true, 1, 1); - VECTOR_STORE(pg_true, 1, 2); - VECTOR_STORE(pg_true, 1, 3); + INCR_C_POINTER(0, 4); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(1, 0); if (LIKELY(packed_a != NULL)) { for (; k < K; k++) { @@ -871,8 +310,6 @@ CNAME(BLASLONG M, BROADCAST_LOAD_B(0, 0); UNPACK_VECTOR_A(0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - UNPACK_VECTOR_A(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); } } else { for (; k < K; k++) { @@ -880,334 +317,36 @@ CNAME(BLASLONG M, BROADCAST_LOAD_B(0, 0); GATHER_LOAD_A(pg_true, 0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - GATHER_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); } } VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 1, 0); + INCR_C_POINTER(0, 1); } - } - for (; i < v_m1; i += v_size) { - BLASLONG j = 0; - for (; j < n8; j += 8) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - - GATHER_LOAD_A64(pg_true, 0, 0); - GATHER_LOAD_A64(pg_true, 1, 0); - svfloat32_t as0_k0 = svuzp1(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); - svfloat32_t as0_k1 = svuzp2(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); - - // GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - // GATHER_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - - GATHER_LOAD_A64(pg_true, 0, 2); - GATHER_LOAD_A64(pg_true, 1, 2); - svfloat32_t as0_k2 = svuzp1(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); - svfloat32_t as0_k3 = svuzp2(svreinterpret_f32(at0_k2), 
svreinterpret_f32(at1_k2)); - - // GATHER_LOAD_A(pg_true, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - // GATHER_LOAD_A(pg_true, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - BROADCAST_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); - BROADCAST_LOAD_B(5, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); - BROADCAST_LOAD_B(6, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); - BROADCAST_LOAD_B(7, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - VECTOR_STORE(pg_true, 0, 4); - VECTOR_STORE(pg_true, 0, 5); - VECTOR_STORE(pg_true, 0, 6); - VECTOR_STORE(pg_true, 0, 7); - } - for (; j < n4; j += 4) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - - GATHER_LOAD_A64(pg_true, 0, 0); - GATHER_LOAD_A64(pg_true, 1, 0); - svfloat32_t as0_k0 = svuzp1(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); - svfloat32_t as0_k1 = svuzp2(svreinterpret_f32(at0_k0), svreinterpret_f32(at1_k0)); - - // GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - // GATHER_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - - GATHER_LOAD_A64(pg_true, 0, 2); - GATHER_LOAD_A64(pg_true, 1, 2); - svfloat32_t as0_k2 = svuzp1(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); - svfloat32_t as0_k3 = 
svuzp2(svreinterpret_f32(at0_k2), svreinterpret_f32(at1_k2)); - - // GATHER_LOAD_A(pg_true, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - // GATHER_LOAD_A(pg_true, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - } - VECTOR_STORE(pg_true, 0, 0); - VECTOR_STORE(pg_true, 0, 1); - VECTOR_STORE(pg_true, 0, 2); - VECTOR_STORE(pg_true, 0, 3); - } - for (; j < N; j++) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - } - VECTOR_STORE(pg_true, 0, 0); - } + UPDATE_A_POINTER(v_size); + RESET_B_POINTER(); + UPDATE_C_POINTER(v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + CREATE_C_POINTER(0, 0); + CREATE_A_POINTER(0, 0); BLASLONG j = 0; - for (; j < n8; j += 8) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - GATHER_LOAD_A(pg_tail, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - GATHER_LOAD_A(pg_tail, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - GATHER_LOAD_A(pg_tail, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - GATHER_LOAD_A(pg_tail, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - VECTOR_LOAD_B_K4(4, 0); - VECTOR_LOAD_B_K4(5, 0); - VECTOR_LOAD_B_K4(6, 0); - VECTOR_LOAD_B_K4(7, 0); - TRANSPOSE_B4_K4(4, 5, 6, 7, 0, 1, 2, 3); - SCALE_B4_K4(4, 0, 1, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 2); - 
UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 3); - } - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_tail, 0, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); - BROADCAST_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 4, 0); - BROADCAST_LOAD_B(5, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 5, 0); - BROADCAST_LOAD_B(6, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 6, 0); - BROADCAST_LOAD_B(7, 0); - UPDATE_RESULT_VECTOR(pg_tail, 0, 7, 0); - } - VECTOR_STORE(pg_tail, 0, 0); - VECTOR_STORE(pg_tail, 0, 1); - VECTOR_STORE(pg_tail, 0, 2); - VECTOR_STORE(pg_tail, 0, 3); - VECTOR_STORE(pg_tail, 0, 4); - VECTOR_STORE(pg_tail, 0, 5); - VECTOR_STORE(pg_tail, 0, 6); - VECTOR_STORE(pg_tail, 0, 7); - } for (; j < n4; j += 4) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - for (; k < k4; k += 4) { - - VECTOR_LOAD_B_K4(0, 0); - VECTOR_LOAD_B_K4(1, 0); - VECTOR_LOAD_B_K4(2, 0); - VECTOR_LOAD_B_K4(3, 0); - TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); - SCALE_B4_K4(0, 0, 1, 2, 3); - GATHER_LOAD_A(pg_tail, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - GATHER_LOAD_A(pg_tail, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); - GATHER_LOAD_A(pg_tail, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); - GATHER_LOAD_A(pg_tail, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -1224,9 +363,13 @@ CNAME(BLASLONG M, VECTOR_STORE(pg_tail, 0, 1); VECTOR_STORE(pg_tail, 0, 2); VECTOR_STORE(pg_tail, 0, 3); + INCR_C_POINTER(0, 4); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); @@ -1237,11 +380,16 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); } VECTOR_STORE(pg_tail, 0, 0); + INCR_C_POINTER(0, 1); } + + UPDATE_A_POINTER(0); + RESET_B_POINTER(); + UPDATE_C_POINTER(0); } if (pack_a) free(packed_a); return 0; -} +} \ No newline at end of file diff --git a/kernel/arm64/sgemm_small_kernel_tt_sve.c b/kernel/arm64/sgemm_small_kernel_tt_sve.c index 894e7fd46..dd9840c37 100644 --- a/kernel/arm64/sgemm_small_kernel_tt_sve.c +++ b/kernel/arm64/sgemm_small_kernel_tt_sve.c @@ -46,15 +46,30 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
}) #endif -#define A_ELEMENT_K(m, offset_k) A[(i + (m)) * lda + (k + offset_k)] +#define RESET_A_POINTER() a_offset = A; + +#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale * lda; +#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale * lda; +#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k)) #define A_ELEMENT(m) A_ELEMENT_K(m, 0) -#define B_ELEMENT_K(n, offset_k) B[(k + offset_k) * ldb + (j + (n))] +#define RESET_B_POINTER() b_offset = B; + +#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale; +#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale; +#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k) * ldb) #define B_ELEMENT(n) B_ELEMENT_K(n, 0) -#define C_ELEMENT(m, n) C[(i + (m)) + (j + (n)) * ldc] +#define CREATE_C_POINTER(m, scale) FLOAT* c_offset##m = c_offset + scale; +#define INCR_C_POINTER(m, incr) // c_offset ## m += incr * ldc; +#define UPDATE_C_POINTER(scale) c_offset += scale; +#define C_ELEMENT(m, n) \ + *(c_offset##m + ((j + n) * ldc)) // C[(i+(m))+(j+(n))*ldc] -#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size2 + m] +// #undef C_ELEMENT +// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] + +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size + m] #define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) // ASIMD @@ -106,22 +121,23 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. svfloat32_t a##s##m##_k##offset_k = svdup_f32(A_ELEMENT_K(m, offset_k)); #define BROADCAST_LOAD_B(n, offset_k) \ svfloat32_t b##s##n##_k##offset_k = svdup_f32(B_ELEMENT_K(n, offset_k)); - +#define VECTOR_LOAD_A(pg, m, offset_k) \ + svfloat32_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k)); #define QUADWORD_LOAD_B(n, offset_k) \ svfloat32_t b##s##n##_k##offset_k = \ svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); -#define GATHER_LOAD_A(pg, offset_m, m, offset_k) \ +#define GATHER_LOAD_A(pg, m, offset_k) \ svfloat32_t a##s##m##_k##offset_k = \ - svld1_gather_index(pg, &A_ELEMENT_K(offset_m, offset_k), lda_vec); + svld1_gather_index(pg, &A_ELEMENT_K(m, offset_k), lda_vec); #define PACK_A(m, offset_k) \ svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); -#define VECTOR_PACK_A(offset_m, m, offset_k) \ +#define VECTOR_PACK_A(m, offset_k) \ svst1(pg_true, &PACK_ELEMENT_K(m* v_size, offset_k), a##s##m##_k##offset_k); #define QUADWORD_PACK_A(m, offset_k) \ svst1(pg_quad, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k); -#define UNPACK_VECTOR_A(offset_m, m, offset_k) \ +#define UNPACK_VECTOR_A(m, offset_k) \ svfloat32_t a##s##m##_k##offset_k = \ - svld1(pg_true, &PACK_ELEMENT_K(offset_m, offset_k)); + svld1(pg_true, &PACK_ELEMENT_K(m * v_size, offset_k)); #define UNPACK_BROADCAST_A(m, offset_k) \ svfloat32_t a##s##m##_k##offset_k = svdup_f32(PACK_ELEMENT_K(m, offset_k)); #define UNPACK_QUADWORD_A(m, offset_k) \ @@ -134,28 +150,25 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
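// A sketch, for illustration only, of how the offset-pointer macros
// introduced above replace the old absolute indexing
// B[(k + offset_k) * ldb + (j + (n))], assuming ordinary cpp expansion:
//
//   CREATE_B_POINTER(2, 2);  // FLOAT* b_offset2 = b_offset + 2;
//   UPDATE_B_POINTER(4);     // b_offset = b_offset + 4;  (step past the 4-column tile)
//   B_ELEMENT_K(2, 0);       // *(b_offset2 + (k + 0) * ldb)
//
// The inner loops then read through pre-built per-column pointers instead of
// recomputing i- and j-relative offsets every iteration; the bases are
// advanced or rewound between tiles via UPDATE_A_POINTER, UPDATE_C_POINTER
// and RESET_B_POINTER.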
result##m##n = svmla_lane( \ result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane); #ifdef B0 -#define VECTOR_STORE(pg, offset_m, m, n) \ +#define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1(pg, &C_ELEMENT(offset_m, n), result##m##n); + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #else -#define VECTOR_STORE(pg, offset_m, m, n) \ +#define VECTOR_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ result##m##n = \ - svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(offset_m, n)), beta_vec); \ - svst1(pg, &C_ELEMENT(offset_m, n), result##m##n); + svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \ + svst1(pg, &C_ELEMENT(m, n), result##m##n); #define SCATTER_STORE(pg, m, n) \ result##m##n = svmul_m(pg, result##m##n, alpha_vec); \ - result##m##n = svmla_m( \ - pg, \ - result##m##n, \ - svld1_gather_index(pg, &C_ELEMENT(v_size * m, n), svindex_u32(0LL, ldc)), \ - beta_vec); \ - svst1_scatter_index( \ - pg, &C_ELEMENT(v_size* m, n), svindex_u32(0LL, ldc), result##m##n); + result##m##n = svmla_m(pg, \ + result##m##n, \ + svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \ + beta_vec); \ + svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n); #endif #ifndef LIKELY @@ -165,13 +178,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define LIKELY(x) (x) #endif #endif -#ifndef UNLIKELY -#ifdef __GNUC__ -#define UNLIKELY(x) __builtin_expect(!!(x), 0) -#else -#define UNLIKELY(x) (x) -#endif -#endif #ifdef B0 int @@ -201,337 +207,132 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntw(); - const uint64_t v_size2 = v_size * 2; const svbool_t pg_true = svptrue_b32(); + const svbool_t pg_quad = svwhilelt_b32(0, 4); + const svbool_t pg_first = svwhilelt_b32(0, 1); const svfloat32_t alpha_vec = svdup_f32(alpha); #ifndef B0 const svfloat32_t beta_vec = svdup_f32(beta); #endif const svuint32_t lda_vec = svindex_u32(0LL, lda); - const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; - const BLASLONG n8 = N & -8; const BLASLONG n4 = N & -4; - const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; + const int pack_a = M >= v_size && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_a = - (pack_a) ? packed_a = (FLOAT*)malloc(K * 2 * v_size * sizeof(FLOAT)) : NULL; + (pack_a) ? 
packed_a = (FLOAT*)malloc(K * v_size * sizeof(FLOAT)) : NULL; + + FLOAT* a_offset = A; + FLOAT* b_offset = B; + FLOAT* c_offset = C; BLASLONG i = 0; - for (; i < v_m2; i += v_size2) { + for (; i < v_m1; i += v_size) { + + CREATE_C_POINTER(0, 0); + CREATE_A_POINTER(0, 0); BLASLONG j = 0; - for (; j < n8; j += 8) { + for (; j < n4; j += 4) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(1, 2); - DECLARE_RESULT_VECTOR(1, 3); - DECLARE_RESULT_VECTOR(1, 4); - DECLARE_RESULT_VECTOR(1, 5); - DECLARE_RESULT_VECTOR(1, 6); - DECLARE_RESULT_VECTOR(1, 7); if (LIKELY(packed_a != NULL)) { if (j == 0) { for (; k < K; k++) { QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0, 0); - VECTOR_PACK_A(0, 0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + VECTOR_PACK_A(0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - GATHER_LOAD_A(pg_true, v_size, 1, 0); - VECTOR_PACK_A(v_size, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); } } else { for (; k < K; k++) { QUADWORD_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0, 0); + UNPACK_VECTOR_A(0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - UNPACK_VECTOR_A(v_size, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); } } } else { for (; k < K; k++) { QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0, 0); + GATHER_LOAD_A(pg_true, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - GATHER_LOAD_A(pg_true, v_size, 1, 0); - 
UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); } } - VECTOR_STORE(pg_true, 0, 0, 0); - VECTOR_STORE(pg_true, 0, 0, 1); - VECTOR_STORE(pg_true, 0, 0, 2); - VECTOR_STORE(pg_true, 0, 0, 3); - VECTOR_STORE(pg_true, 0, 0, 4); - VECTOR_STORE(pg_true, 0, 0, 5); - VECTOR_STORE(pg_true, 0, 0, 6); - VECTOR_STORE(pg_true, 0, 0, 7); - VECTOR_STORE(pg_true, v_size, 1, 0); - VECTOR_STORE(pg_true, v_size, 1, 1); - VECTOR_STORE(pg_true, v_size, 1, 2); - VECTOR_STORE(pg_true, v_size, 1, 3); - VECTOR_STORE(pg_true, v_size, 1, 4); - VECTOR_STORE(pg_true, v_size, 1, 5); - VECTOR_STORE(pg_true, v_size, 1, 6); - VECTOR_STORE(pg_true, v_size, 1, 7); - } - for (; j < n4; j += 4) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(1, 0); - DECLARE_RESULT_VECTOR(1, 1); - DECLARE_RESULT_VECTOR(1, 2); - DECLARE_RESULT_VECTOR(1, 3); - - if (LIKELY(packed_a != NULL)) { - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_VECTOR_A(v_size, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - } - } else { - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - GATHER_LOAD_A(pg_true, v_size, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - } - } - VECTOR_STORE(pg_true, 0, 0, 0); - VECTOR_STORE(pg_true, 0, 0, 1); - VECTOR_STORE(pg_true, 0, 0, 2); - VECTOR_STORE(pg_true, 0, 0, 3); - VECTOR_STORE(pg_true, v_size, 1, 0); - VECTOR_STORE(pg_true, v_size, 1, 1); - VECTOR_STORE(pg_true, v_size, 1, 2); - VECTOR_STORE(pg_true, v_size, 1, 3); + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + INCR_C_POINTER(0, 4); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(1, 0); if (LIKELY(packed_a != NULL)) { for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0, 0); + UNPACK_VECTOR_A(0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - UNPACK_VECTOR_A(v_size, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); } } else { for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0, 0); + GATHER_LOAD_A(pg_true, 0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - GATHER_LOAD_A(pg_true, v_size, 1, 0); - UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); } } - VECTOR_STORE(pg_true, 0, 0, 0); - VECTOR_STORE(pg_true, v_size, 1, 0); + VECTOR_STORE(pg_true, 0, 0); + INCR_C_POINTER(0, 1); } - } - 
for (; i < v_m1; i += v_size) { - BLASLONG j = 0; - for (; j < n8; j += 8) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - } - VECTOR_STORE(pg_true, 0, 0, 0); - VECTOR_STORE(pg_true, 0, 0, 1); - VECTOR_STORE(pg_true, 0, 0, 2); - VECTOR_STORE(pg_true, 0, 0, 3); - VECTOR_STORE(pg_true, 0, 0, 4); - VECTOR_STORE(pg_true, 0, 0, 5); - VECTOR_STORE(pg_true, 0, 0, 6); - VECTOR_STORE(pg_true, 0, 0, 7); - } - for (; j < n4; j += 4) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - } - VECTOR_STORE(pg_true, 0, 0, 0); - VECTOR_STORE(pg_true, 0, 0, 1); - VECTOR_STORE(pg_true, 0, 0, 2); - VECTOR_STORE(pg_true, 0, 0, 3); - } - for (; j < N; j++) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - } - VECTOR_STORE(pg_true, 0, 0, 0); - } + UPDATE_A_POINTER(v_size); + RESET_B_POINTER(); + UPDATE_C_POINTER(v_size); } for (; i < M; i += v_size) { const svbool_t pg_tail = svwhilelt_b32((uint32_t)i, (uint32_t)(M)); + CREATE_C_POINTER(0, 0); + CREATE_A_POINTER(0, 0); BLASLONG j = 0; - for (; j < n8; j += 8) { - - BLASLONG k = 0; - DECLARE_RESULT_VECTOR(0, 0); - DECLARE_RESULT_VECTOR(0, 1); - DECLARE_RESULT_VECTOR(0, 2); - DECLARE_RESULT_VECTOR(0, 3); - DECLARE_RESULT_VECTOR(0, 4); - DECLARE_RESULT_VECTOR(0, 5); - DECLARE_RESULT_VECTOR(0, 6); - DECLARE_RESULT_VECTOR(0, 7); - - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_tail, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - QUADWORD_LOAD_B(4, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); - } - VECTOR_STORE(pg_tail, 0, 0, 0); - VECTOR_STORE(pg_tail, 0, 0, 1); - VECTOR_STORE(pg_tail, 0, 0, 2); - VECTOR_STORE(pg_tail, 0, 0, 3); - VECTOR_STORE(pg_tail, 0, 0, 4); - VECTOR_STORE(pg_tail, 0, 0, 5); - VECTOR_STORE(pg_tail, 0, 0, 6); - VECTOR_STORE(pg_tail, 0, 0, 7); - } for (; j < n4; j += 4) { + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); @@ -541,34 +342,43 @@ CNAME(BLASLONG M, for (; k < 
K; k++) { QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_tail, 0, 0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); } - VECTOR_STORE(pg_tail, 0, 0, 0); - VECTOR_STORE(pg_tail, 0, 0, 1); - VECTOR_STORE(pg_tail, 0, 0, 2); - VECTOR_STORE(pg_tail, 0, 0, 3); + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + INCR_C_POINTER(0, 4); } for (; j < N; j++) { + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_tail, 0, 0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); } - VECTOR_STORE(pg_tail, 0, 0, 0); + VECTOR_STORE(pg_tail, 0, 0); + INCR_C_POINTER(0, 1); } + + UPDATE_A_POINTER(0); + RESET_B_POINTER(); + UPDATE_C_POINTER(0); } if (pack_a) free(packed_a); return 0; -} +} \ No newline at end of file From f3cebb3ca3c0b3c75c287862462bc996f88095a2 Mon Sep 17 00:00:00 2001 From: gxw Date: Wed, 10 Jul 2024 15:11:12 +0800 Subject: [PATCH 03/52] x86: Fixed numpy CI failure when the target is ZEN. --- interface/scal.c | 4 +- kernel/x86_64/dscal.c | 302 ++++++++++++++++++++---------------------- kernel/x86_64/sscal.c | 269 ++++++++++++++++++------------------- 3 files changed, 279 insertions(+), 296 deletions(-) diff --git a/interface/scal.c b/interface/scal.c index 0a7fee640..c6638a62d 100644 --- a/interface/scal.c +++ b/interface/scal.c @@ -85,7 +85,7 @@ void CNAME(blasint n, FLOAT alpha, FLOAT *x, blasint incx){ if (nthreads == 1) { #endif - SCAL_K(n, 0, 0, alpha, x, incx, NULL, 0, NULL, 0); + SCAL_K(n, 0, 0, alpha, x, incx, NULL, 0, NULL, 1); #ifdef SMP } else { @@ -102,7 +102,7 @@ void CNAME(blasint n, FLOAT alpha, FLOAT *x, blasint incx){ #else &alpha, #endif - x, incx, NULL, 0, NULL, 0, (int (*)(void))SCAL_K, nthreads); + x, incx, NULL, 0, NULL, 1, (int (*)(void))SCAL_K, nthreads); } #endif diff --git a/kernel/x86_64/dscal.c b/kernel/x86_64/dscal.c index e7182c5ce..641f86f90 100644 --- a/kernel/x86_64/dscal.c +++ b/kernel/x86_64/dscal.c @@ -43,21 +43,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
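// A minimal sketch of the behaviour the interface change above selects,
// assuming the new trailing SCAL_K argument is the "dummy2" flag the kernels
// test: reference BLAS scal performs the multiply even when alpha == 0, so
// NaN and Inf entries must propagate (0.0 * NAN is still NaN), whereas a
// direct zero fill silently discards them; that mismatch is what the numpy
// CI caught. Roughly:
//
//   if (alpha == 0.0 && dummy2 == 0)
//     x[i] = 0.0;       // old fast path: wipes NaN/Inf
//   else
//     x[i] *= alpha;    // 0.0 * NAN stays NaN
//
// Internal callers can still pass 0 where an outright overwrite is wanted,
// e.g. the beta == 0 convention that test_gemv in patch 05 below checks.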
static void dscal_kernel_8( BLASLONG n, FLOAT *da , FLOAT *x ) { - BLASLONG i; - FLOAT alpha = *da; + BLASLONG i; + FLOAT alpha = *da; - for( i=0; i 0 ) - { - dscal_kernel_inc_8(n1, &da, x, inc_x); - i = n1 * inc_x; - j = n1; - } - - while(j < n) - { - - x[i] *= da; - i += inc_x ; - j++; - - } - - } - - return(0); - } - - BLASLONG n1 = n & -8; - if ( n1 > 0 ) - { -// if ( da == 0.0 ) -// dscal_kernel_8_zero(n1 , &da , x); -// else - dscal_kernel_8(n1 , &da , x); - } - - if ( da == 0.0 ) - { - for ( i=n1 ; i 0 ) + { + dscal_kernel_inc_8(n1, &da, x, inc_x); + i = n1 * inc_x; + j = n1; + } + while(j < n) + { + x[i] *= da; + i += inc_x ; + j++; + } + } + else + { + BLASLONG n1 = n & -8; + if ( n1 > 0) + dscal_kernel_8(n1 , &da , x); + for ( i = n1 ; i < n; i++ ) + x[i] *= da; + } + } + else + { + if ( inc_x != 1 ) + { + if( da == 0.0) + { + BLASLONG n1 = n & -2; + while(j < n1) + { + x[i] = 0.0; + x[i+inc_x] = 0.0; + i += 2 * inc_x ; + j += 2; + } + while(j < n) + { + x[i] = 0.0; + i += inc_x ; + j++; + } + } + else + { + BLASLONG n1 = n & -8; + if ( n1 > 0 ) + { + dscal_kernel_inc_8(n1, &da, x, inc_x); + i = n1 * inc_x; + j = n1; + } + while(j < n) + { + x[i] *= da; + i += inc_x ; + j++; + } + } + } + else + { + if ( da == 0.0 ) + { + BLASLONG n1 = n & -8; + if ( n1 > 0) + dscal_kernel_8_zero(n1, &da, x); + for ( i = n1 ; i < n; i++ ) + x[i] = 0.0; + } + else + { + BLASLONG n1 = n & -8; + if ( n1 > 0) + dscal_kernel_8(n1 , &da , x); + for ( i = n1 ; i < n; i++ ) + x[i] *= da; + } + } + } } - - diff --git a/kernel/x86_64/sscal.c b/kernel/x86_64/sscal.c index a85d20564..6e54f8893 100644 --- a/kernel/x86_64/sscal.c +++ b/kernel/x86_64/sscal.c @@ -39,21 +39,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. static void sscal_kernel_16( BLASLONG n, FLOAT *da , FLOAT *x ) { - BLASLONG i; - FLOAT alpha = *da; + BLASLONG i; + FLOAT alpha = *da; - for( i=0; i 0 ) - { - sscal_kernel_inc_8(n1, &da, x, inc_x); - i = n1 * inc_x; - j = n1; - } -#endif - while(j < n) - { - x[i] *= da; - i += inc_x ; - j++; - - } - - } - return(0); - } - - BLASLONG n1 = n & -16; - if ( n1 > 0 ) - { - //if ( da == 0.0 ) - // sscal_kernel_16_zero(n1 , &da , x); - //else - sscal_kernel_16(n1 , &da , x); - } - - if ( da == 0.0 ) - { - for ( i=n1 ; i 0 ) + { + sscal_kernel_inc_8(n1, &da, x, inc_x); + i = n1 * inc_x; + j = n1; + } + while(j < n) + { + x[i] *= da; + i += inc_x ; + j++; + } + } + else + { + BLASLONG n1 = n & -16; + if ( n1 > 0) + sscal_kernel_16(n1 , &da , x); + for ( i = n1 ; i < n; i++ ) + x[i] *= da; + } + } + else + { + if ( inc_x != 1 ) + { + if( da == 0.0) + { + BLASLONG n1 = n & -2; + while(j < n1) + { + x[i] = 0.0; + x[i+inc_x] = 0.0; + i += 2 * inc_x ; + j += 2; + } + while(j < n) + { + x[i] = 0.0; + i += inc_x ; + j++; + } + } + else + { + BLASLONG n1 = n & -8; + if ( n1 > 0 ) + { + sscal_kernel_inc_8(n1, &da, x, inc_x); + i = n1 * inc_x; + j = n1; + } + while(j < n) + { + x[i] *= da; + i += inc_x ; + j++; + } + } + } + else + { + if ( da == 0.0 ) + { + BLASLONG n1 = n & -16; + if ( n1 > 0) + sscal_kernel_16_zero(n1, &da, x); + for ( i = n1 ; i < n; i++ ) + x[i] = 0.0; + } + else + { + BLASLONG n1 = n & -16; + if ( n1 > 0) + sscal_kernel_16(n1 , &da , x); + for ( i = n1 ; i < n; i++ ) + x[i] *= da; + } + } + } } - - From 3f39c8f94f5d61f69a7b1c578e7fc90c4c95d6e2 Mon Sep 17 00:00:00 2001 From: gxw Date: Fri, 12 Jul 2024 16:56:35 +0800 Subject: [PATCH 04/52] LoongArch: Fixed numpy CI failure --- kernel/loongarch64/scal.S | 77 +++++++++++++++++++++++++++++++ 
kernel/loongarch64/scal_lasx.S | 79 ++++++++++++++++++++++++++++++-- kernel/loongarch64/scal_lsx.S | 82 +++++++++++++++++++++++++++++++++- 3 files changed, 233 insertions(+), 5 deletions(-) diff --git a/kernel/loongarch64/scal.S b/kernel/loongarch64/scal.S index 8de710f41..431a44c1c 100644 --- a/kernel/loongarch64/scal.S +++ b/kernel/loongarch64/scal.S @@ -53,9 +53,86 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. PROLOGUE li.d TEMP, SIZE + ld.d XX, $sp, 0 // Load dummy2 + slli.d XX, XX, BASE_SHIFT MTC a1, $r0 slli.d INCX, INCX, BASE_SHIFT bge $r0, N, .L999 + CMPEQ $fcc0, ALPHA, a1 + bceqz $fcc0, .L50 + beq XX, TEMP, .L50 // if dummy2 == 1, do not directly copy 0 + srai.d I, N, 3 + bne INCX, TEMP, .L20 + bge $r0, I, .L15 + .align 3 + +.L12: + ST a1, X, 0 * SIZE + ST a1, X, 1 * SIZE + ST a1, X, 2 * SIZE + ST a1, X, 3 * SIZE + ST a1, X, 4 * SIZE + ST a1, X, 5 * SIZE + ST a1, X, 6 * SIZE + ST a1, X, 7 * SIZE + addi.w I, I, -1 + addi.d X, X, 8 * SIZE + blt $r0, I, .L12 + .align 3 + +.L15: + andi I, N, 7 + bge $r0, I, .L999 + .align 3 +.L16: + ST a1, X, 0 * SIZE + addi.d I, I, -1 + addi.d X, X, SIZE + blt $r0, I, .L16 + move $r4, $r17 + fmov.d $f0, $f22 + jirl $r0, $r1, 0x0 + .align 3 + +.L20: + srai.d I, N, 3 + bge $r0, I, .L25 + .align 3 + +.L22: + ST a1, X, 0 * SIZE + add.d X, X, INCX + ST a1, X, 0 * SIZE + add.d X, X, INCX + ST a1, X, 0 * SIZE + add.d X, X, INCX + ST a1, X, 0 * SIZE + add.d X, X, INCX + ST a1, X, 0 * SIZE + add.d X, X, INCX + ST a1, X, 0 * SIZE + add.d X, X, INCX + ST a1, X, 0 * SIZE + add.d X, X, INCX + ST a1, X, 0 * SIZE + addi.d I, I, -1 + add.d X, X, INCX + blt $r0, I, .L22 + .align 3 + +.L25: + andi I, N, 7 + bge $r0, I, .L999 + .align 3 +.L26: + addi.d I, I, -1 + ST a1, X, 0 * SIZE + add.d X, X, INCX + blt $r0, I, .L26 + move $r4, $r17 + fmov.d $f0, $f22 + jirl $r0, $r1, 0x0 + .align 3 .L50: srai.d I, N, 3 diff --git a/kernel/loongarch64/scal_lasx.S b/kernel/loongarch64/scal_lasx.S index b4585c1b9..dd69636e6 100644 --- a/kernel/loongarch64/scal_lasx.S +++ b/kernel/loongarch64/scal_lasx.S @@ -52,17 +52,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. bge $r0, N, .L999 bge $r0, INCX, .L999 li.d TEMP, 1 + ld.d t1, $sp, 0 // Load dummy2 movgr2fr.d a1, $r0 FFINT a1, a1 movgr2fr.d a2, TEMP FFINT a2, a2 slli.d TEMP, TEMP, BASE_SHIFT slli.d INCX, INCX, BASE_SHIFT + slli.d t1, t1, BASE_SHIFT + CMPEQ $fcc0, ALPHA, a1 + bcnez $fcc0, .L20 //ALPHA==0 CMPEQ $fcc0, ALPHA, a2 bcnez $fcc0, .L999 //ALPHA==1 return - +.L1: srai.d I, N, 3 - beq INCX, TEMP, .L30 //ALPHA!=1 and INCX==1 + beq INCX, TEMP, .L30 //ALPHA !=0|1 and INCX==1 MTG TEMP, ALPHA #ifdef DOUBLE xvreplgr2vr.d VALPHA, TEMP @@ -72,7 +76,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. move XX, X .align 3 -.L10: //ALPHA!=1 and INCX!=1 +.L10: //ALPHA !=0|1 and INCX!=1 bge $r0, I, .L32 .align 3 .L11: @@ -165,6 +169,75 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
blt $r0, I, .L11 b .L32 .align 3 + +.L20: + beq t1, TEMP, .L1 // if dummp2 == 1, do not directly copy 0 + srai.d I, N, 3 + beq INCX, TEMP, .L24 + bge $r0, I, .L22 + .align 3 + +.L21: + ST a1, X, 0 + add.d X, X, INCX + ST a1, X, 0 + add.d X, X, INCX + ST a1, X, 0 + add.d X, X, INCX + ST a1, X, 0 + add.d X, X, INCX + ST a1, X, 0 + add.d X, X, INCX + ST a1, X, 0 + add.d X, X, INCX + ST a1, X, 0 + add.d X, X, INCX + ST a1, X, 0 + add.d X, X, INCX + addi.d I, I, -1 + blt $r0, I, .L21 + .align 3 + +.L22: + andi I, N, 7 + bge $r0, I, .L999 + .align 3 + +.L23: + ST a1, X, 0 * SIZE + addi.d I, I, -1 + add.d X, X, INCX + blt $r0, I, .L23 + jirl $r0, $r1, 0 + .align 3 + +.L24: + bge $r0, I, .L26 /*N<8 INCX==1*/ + .align 3 +.L25: + xvxor.v VX0, VX0, VX0 + xvst VX0, X, 0 * SIZE +#ifdef DOUBLE + xvst VX0, X, 4 * SIZE +#endif + addi.d I, I, -1 + addi.d X, X, 8 * SIZE + blt $r0, I, .L25 + .align 3 + +.L26: + andi I, N, 7 + bge $r0, I, .L999 + .align 3 + +.L27: + ST a1, X, 0 * SIZE + addi.d I, I, -1 + addi.d X, X, SIZE + blt $r0, I, .L27 + jirl $r0, $r1, 0 + .align 3 + .L30: bge $r0, I, .L32/*N<8 INCX==1*/ MTG TEMP, ALPHA diff --git a/kernel/loongarch64/scal_lsx.S b/kernel/loongarch64/scal_lsx.S index a27e050ed..57dc5d0d3 100644 --- a/kernel/loongarch64/scal_lsx.S +++ b/kernel/loongarch64/scal_lsx.S @@ -51,6 +51,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. bge $r0, N, .L999 bge $r0, INCX, .L999 + ld.d t1, $sp, 0 // Load dummy2 li.d TEMP, 1 movgr2fr.d a1, $r0 FFINT a1, a1 @@ -58,10 +59,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FFINT a2, a2 slli.d TEMP, TEMP, BASE_SHIFT slli.d INCX, INCX, BASE_SHIFT + slli.d t1, t1, BASE_SHIFT + CMPEQ $fcc0, ALPHA, a1 + bcnez $fcc0, .L20 //ALPHA==0 CMPEQ $fcc0, ALPHA, a2 bcnez $fcc0, .L999 //ALPHA==1 return +.L1: srai.d I, N, 3 - beq INCX, TEMP, .L30 //ALPHA!=1 and INCX==1 + beq INCX, TEMP, .L30 //ALPHA !=0|1 and INCX==1 MTG TEMP, ALPHA #ifdef DOUBLE vreplgr2vr.d VALPHA, TEMP @@ -71,7 +76,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. move XX, X .align 3 -.L10: //ALPHA!=1 and INCX!=1 +.L10: //ALPHA !=0|1 and INCX!=1 bge $r0, I, .L32 .align 3 @@ -169,6 +174,79 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
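The .L2x blocks added to the LoongArch scal kernels above all share one zero-fill shape: an eight-element main loop (vector stores when INCX == 1, strided scalar stores otherwise) followed by a scalar tail. A C rendering of that control flow, assuming the zero-fill path was permitted (dummy2 != 1):

static void scal_zero_fill(long n, double *x, long inc_x)
{
    long blocks = n >> 3;  /* srai.d I, N, 3 */
    long tail   = n & 7;   /* andi  I, N, 7  */
    while (blocks-- > 0)   /* .L12/.L21/.L25: eight stores per iteration */
        for (int k = 0; k < 8; k++, x += inc_x)
            *x = 0.0;
    while (tail-- > 0) {   /* .L16/.L23/.L27: remainder */
        *x = 0.0;
        x += inc_x;
    }
}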
b .L32 .align 3 +.L20: + beq t1, TEMP, .L1 // if dummp2 == 1, do not directly copy 0 + srai.d I, N, 3 + beq INCX, TEMP, .L24 + bge $r0, I, .L22 + .align 3 + +.L21: + ST a1, X, 0 + add.d X, X, INCX + ST a1, X, 0 + add.d X, X, INCX + ST a1, X, 0 + add.d X, X, INCX + ST a1, X, 0 + add.d X, X, INCX + ST a1, X, 0 + add.d X, X, INCX + ST a1, X, 0 + add.d X, X, INCX + ST a1, X, 0 + add.d X, X, INCX + ST a1, X, 0 + add.d X, X, INCX + addi.d I, I, -1 + blt $r0, I, .L21 + .align 3 + +.L22: + andi I, N, 7 + bge $r0, I, .L999 + .align 3 + +.L23: + ST a1, X, 0 * SIZE + addi.d I, I, -1 + add.d X, X, INCX + blt $r0, I, .L23 + jirl $r0, $r1, 0 + .align 3 + +.L24: + bge $r0, I, .L26 /*N<8 INCX==1*/ + .align 3 + +.L25: + vxor.v VX0, VX0, VX0 + vst VX0, X, 0 * SIZE +#ifdef DOUBLE + vst VX0, X, 2 * SIZE + vst VX0, X, 4 * SIZE + vst VX0, X, 6 * SIZE +#else + vst VX0, X, 4 * SIZE +#endif + addi.d I, I, -1 + addi.d X, X, 8 * SIZE + blt $r0, I, .L25 + .align 3 + +.L26: + andi I, N, 7 + bge $r0, I, .L999 + .align 3 + +.L27: + ST a1, X, 0 * SIZE + addi.d I, I, -1 + addi.d X, X, SIZE + blt $r0, I, .L27 + jirl $r0, $r1, 0 + .align 3 + .L30: bge $r0, I, .L32/*N<8 INCX==1*/ MTG TEMP, ALPHA From 9b3e80efe24046701a681ba53c4179c0b60ca970 Mon Sep 17 00:00:00 2001 From: gxw Date: Mon, 15 Jul 2024 16:33:09 +0800 Subject: [PATCH 05/52] utest: Add test_gemv --- utest/CMakeLists.txt | 1 + utest/Makefile | 2 +- utest/test_gemv.c | 126 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 128 insertions(+), 1 deletion(-) create mode 100644 utest/test_gemv.c diff --git a/utest/CMakeLists.txt b/utest/CMakeLists.txt index 4771d8a27..6a61899da 100644 --- a/utest/CMakeLists.txt +++ b/utest/CMakeLists.txt @@ -18,6 +18,7 @@ else () test_zscal.c test_amin.c test_axpby.c + test_gemv.c ) endif () diff --git a/utest/Makefile b/utest/Makefile index 36acf96cd..b82937093 100644 --- a/utest/Makefile +++ b/utest/Makefile @@ -14,7 +14,7 @@ UTESTEXTBIN=openblas_utest_ext include $(TOPDIR)/Makefile.system OBJS=utest_main.o test_min.o test_amax.o test_ismin.o test_rotmg.o test_axpy.o test_dotu.o test_dsdot.o test_swap.o test_rot.o test_dnrm2.o test_zscal.o \ - test_amin.o test_axpby.o + test_amin.o test_axpby.o test_gemv.o #test_rot.o test_swap.o test_axpy.o test_dotu.o test_dsdot.o test_fork.o OBJS_EXT=utest_main.o $(DIR_EXT)/xerbla.o $(DIR_EXT)/common.o OBJS_EXT+=$(DIR_EXT)/test_isamin.o $(DIR_EXT)/test_idamin.o $(DIR_EXT)/test_icamin.o $(DIR_EXT)/test_izamin.o diff --git a/utest/test_gemv.c b/utest/test_gemv.c new file mode 100644 index 000000000..c85ef3f38 --- /dev/null +++ b/utest/test_gemv.c @@ -0,0 +1,126 @@ +#include "openblas_utest.h" +#include + +#ifndef NAN +#define NAN 0.0/0.0 +#endif +#ifndef INFINITY +#define INFINITY 1.0/0.0 +#endif + +#ifdef BUILD_SINGLE + +CTEST(sgemv, 0_nan_inf) +{ + blasint N = 17; + blasint incX = 1; + blasint incY = 1; + float alpha = 0.0; + float beta = 0.0; + char trans = 'N'; + float A[N * N]; + float X[N]; + float Y[N]; + + memset(A, 0, sizeof(A)); + memset(X, 0, sizeof(X)); + for (int i = 0; i < (N - 1); i += 2) + { + Y[i] = NAN; + Y[i + 1] = INFINITY; + } + Y[N - 1] = NAN; + BLASFUNC(sgemv)(&trans, &N, &N, &alpha, A, &N, X, &incX, &beta, Y, &incY); + for (int i = 0; i < N; i ++) + ASSERT_TRUE(Y[i] == 0.0); +} + +CTEST(sgemv, 0_nan_inf_incy_2) +{ + blasint N = 17; + blasint Ny = 33; + blasint incX = 1; + blasint incY = 2; + float alpha = 0.0; + float beta = 0.0; + char trans = 'N'; + float A[N * N]; + float X[N]; + float Y[Ny]; + float *ay = Y; + + memset(A, 0, sizeof(A)); + memset(X, 0, sizeof(X)); 
+ memset(Y, 0, sizeof(Y)); + for (int i = 0; i < (N - 1); i += 2) + { + ay[0] = NAN; + ay += 2; + ay[0] = INFINITY; + ay += 2; + } + Y[Ny - 1] = NAN; + BLASFUNC(sgemv)(&trans, &N, &N, &alpha, A, &N, X, &incX, &beta, Y, &incY); + for (int i = 0; i < Ny; i ++) + ASSERT_TRUE(Y[i] == 0.0); +} + +#endif + +#ifdef BUILD_DOUBLE +CTEST(dgemv, 0_nan_inf) +{ + blasint N = 17; + blasint incX = 1; + blasint incY = 1; + double alpha = 0.0; + double beta = 0.0; + char trans = 'N'; + double A[N * N]; + double X[N]; + double Y[N]; + + memset(A, 0, sizeof(A)); + memset(X, 0, sizeof(X)); + for (int i = 0; i < (N - 1); i += 2) + { + Y[i] = NAN; + Y[i + 1] = INFINITY; + } + Y[N - 1] = NAN; + BLASFUNC(dgemv)(&trans, &N, &N, &alpha, A, &N, X, &incX, &beta, Y, &incY); + for (int i = 0; i < N; i ++) + ASSERT_TRUE(Y[i] == 0.0); +} + +CTEST(dgemv, 0_nan_inf_incy_2) +{ + blasint N = 17; + blasint Ny = 33; + blasint incX = 1; + blasint incY = 2; + double alpha = 0.0; + double beta = 0.0; + char trans = 'N'; + double A[N * N]; + double X[N]; + double Y[Ny]; + double *ay = Y; + + memset(A, 0, sizeof(A)); + memset(X, 0, sizeof(X)); + memset(Y, 0, sizeof(Y)); + for (int i = 0; i < (N - 1); i += 2) + { + ay[0] = NAN; + ay += 2; + ay[0] = INFINITY; + ay += 2; + } + Y[Ny - 1] = NAN; + BLASFUNC(dgemv)(&trans, &N, &N, &alpha, A, &N, X, &incX, &beta, Y, &incY); + for (int i = 0; i < Ny; i ++) + ASSERT_TRUE(Y[i] == 0.0); +} + +#endif From 3b715e6162686edd2ec1e62eb4d273c037b26730 Mon Sep 17 00:00:00 2001 From: Mark Ryan Date: Fri, 5 Jul 2024 10:39:07 +0000 Subject: [PATCH 06/52] Add autodetection for riscv64 Implement DYNAMIC_ARCH support for riscv64. Three cpu types are supported, riscv64_generic, riscv64_zvl256b, riscv64_zvl128b. The two non-generic kernels require CPU support for RVV 1.0 to function correctly. Detecting that a riscv64 device supports RVV 1.0 is a little complicated as there are some boards on the market that advertise support for V via hwcap but only support RVV 0.7.1, which is not binary compatible with RVV 1.0. The approach taken is to first try hwprobe. If hwprobe is not available, we fall back to hwcap + an additional check to distinguish between RVV 1.0 and RVV 0.7.1. Tested on a VM with VLEN=256, a CanMV K230 with VLEN=128 (with only the big core enabled), a Lichee Pi with RVV 0.7.1 and a VF2 with no vector. A compiler with RVV 1.0 support must be used to build OpenBLAS for riscv64 when DYNAMIC_ARCH=1. 
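The strategy in this commit message boils down to a three-step cascade. A hedged sketch (the key, bit, and syscall-number constants are the ones defined in dynamic_riscv64.c below; error handling trimmed):

#include <stdint.h>
#include <stddef.h>
#include <sys/auxv.h>
#include <unistd.h>

uint64_t detect_riscv64_rvv100(void);   /* the vsetvli/vtype probe below */

struct riscv_hwprobe { int64_t key; uint64_t value; };

/* Returns nonzero when RVV 1.0 can safely be assumed. */
static int rvv10_cascade(void)
{
    struct riscv_hwprobe pair = { 4 /* RISCV_HWPROBE_KEY_IMA_EXT_0 */, 0 };

    /* 1. hwprobe (syscall 244 + 14): authoritative when implemented. */
    if (syscall(244 + 14, &pair, 1, 0, NULL, 0) == 0)
        return (pair.value & (1 << 2) /* RISCV_HWPROBE_IMA_V */) != 0;

    /* 2. hwcap: may also be set by kernels with backported RVV 0.7.1. */
    if (!(getauxval(AT_HWCAP) & (1UL << ('V' - 'A'))))
        return 0;

    /* 3. vsetvli probe: rejects 0.7.1 hardware that passed step 2. */
    return detect_riscv64_rvv100() != 0;
}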
Signed-off-by: Mark Ryan --- .github/workflows/riscv64_vector.yml | 3 + Makefile.system | 11 ++ README.md | 2 + driver/others/Makefile | 11 ++ driver/others/detect_riscv64.c | 75 +++++++ driver/others/dynamic_riscv64.c | 269 ++++++++++++++++++++++++++ kernel/riscv64/KERNEL.RISCV64_GENERIC | 16 +- kernel/setparam-ref.c | 31 +++ lapack/laswp/riscv64/Makefile | 5 + 9 files changed, 415 insertions(+), 8 deletions(-) create mode 100644 driver/others/detect_riscv64.c create mode 100644 driver/others/dynamic_riscv64.c diff --git a/.github/workflows/riscv64_vector.yml b/.github/workflows/riscv64_vector.yml index dd6fe9ca8..9209ebb7d 100644 --- a/.github/workflows/riscv64_vector.yml +++ b/.github/workflows/riscv64_vector.yml @@ -28,6 +28,9 @@ jobs: - target: RISCV64_ZVL256B opts: TARGET=RISCV64_ZVL256B BINARY=64 ARCH=riscv64 qemu_cpu: rv64,g=true,c=true,v=true,vext_spec=v1.0,vlen=256,elen=64 + - target: DYNAMIC_ARCH=1 + opts: TARGET=RISCV64_GENERIC BINARY=64 ARCH=riscv64 DYNAMIC_ARCH=1 + qemu_cpu: rv64,g=true,c=true,v=true,vext_spec=v1.0,vlen=256,elen=64 steps: - name: Checkout repository diff --git a/Makefile.system b/Makefile.system index 4cd4e4a1c..847bab179 100644 --- a/Makefile.system +++ b/Makefile.system @@ -715,6 +715,17 @@ ifeq ($(ARCH), loongarch64) DYNAMIC_CORE = LOONGSON3R5 LOONGSON2K1000 LOONGSONGENERIC endif +ifeq ($(ARCH), riscv64) +DYNAMIC_CORE = RISCV64_GENERIC +DYNAMIC_CORE += RISCV64_ZVL128B +DYNAMIC_CORE += RISCV64_ZVL256B +ifdef DYNAMIC_LIST +override DYNAMIC_CORE = RISCV64_GENERIC $(DYNAMIC_LIST) +XCCOMMON_OPT = -DDYNAMIC_LIST -DDYN_RISCV64_GENERIC +XCCOMMON_OPT += $(foreach dcore,$(DYNAMIC_LIST),-DDYN_$(dcore)) +endif +endif + ifeq ($(ARCH), zarch) DYNAMIC_CORE = ZARCH_GENERIC diff --git a/README.md b/README.md index b52e48564..169087cec 100644 --- a/README.md +++ b/README.md @@ -234,6 +234,8 @@ For **POWER**, the list encompasses POWER6, POWER8 and POWER9. POWER10 is additi on **ZARCH** it comprises Z13 and Z14 as well as generic zarch support. +On **riscv64**, DYNAMIC_ARCH enables support for riscv64_zvl128b and riscv64_zvl256b in addition to generic riscv64 support. A compiler that supports RVV 1.0 is required to build OpenBLAS for riscv64 when DYNAMIC_ARCH is enabled. + The `TARGET` option can be used in conjunction with `DYNAMIC_ARCH=1` to specify which cpu model should be assumed for all the common code in the library, usually you will want to set this to the oldest model you expect to encounter. Please note that it is not possible to combine support for different architectures, so no combined 32 and 64 bit or x86_64 and arm64 in the same library. 
diff --git a/driver/others/Makefile b/driver/others/Makefile index ff7e3e96d..719d617c4 100644 --- a/driver/others/Makefile +++ b/driver/others/Makefile @@ -30,12 +30,16 @@ else ifeq ($(ARCH),loongarch64) COMMONOBJS += dynamic_loongarch64.$(SUFFIX) else +ifeq ($(ARCH),riscv64) +COMMONOBJS += dynamic_riscv64.$(SUFFIX) detect_riscv64.$(SUFFIX) +else COMMONOBJS += dynamic.$(SUFFIX) endif endif endif endif endif +endif else COMMONOBJS += parameter.$(SUFFIX) endif @@ -106,12 +110,16 @@ else ifeq ($(ARCH),loongarch64) HPLOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) dynamic_loongarch64.$(SUFFIX) else +ifeq ($(ARCH),riscv64) +HPLOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) dynamic_riscv64.$(SUFFIX) detect_riscv64.$(SUFFIX) +else HPLOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) dynamic.$(SUFFIX) endif endif endif endif endif +endif else HPLOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) parameter.$(SUFFIX) endif @@ -209,6 +217,9 @@ addx.$(SUFFIX) : $(ARCH)/addx.c mulx.$(SUFFIX) : $(ARCH)/mulx.c $(CC) $(CFLAGS) -c -DXDOUBLE -UCOMPLEX $< -o $(@F) +detect_riscv64.$(SUFFIX): detect_riscv64.c + $(CC) $(CFLAGS) -c -march=rv64imafdcv $< -o $(@F) + xerbla.$(PSUFFIX) : xerbla.c $(CC) $(PFLAGS) -c $< -o $(@F) diff --git a/driver/others/detect_riscv64.c b/driver/others/detect_riscv64.c new file mode 100644 index 000000000..5a5cc0391 --- /dev/null +++ b/driver/others/detect_riscv64.c @@ -0,0 +1,75 @@ +/***************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +**********************************************************************************/ + +#include + +#ifdef __riscv_v_intrinsic +#include +#endif + +unsigned detect_riscv64_get_vlenb(void) { +#ifdef __riscv_v_intrinsic + return __riscv_vlenb(); +#else + return 0; +#endif +} + +/* + * Based on the approach taken here: + * https://code.videolan.org/videolan/dav1d/-/merge_requests/1629 + * + * Only to be called after we've determined we have some sort of + * RVV support. 
+ */ + +uint64_t detect_riscv64_rvv100(void) +{ + uint64_t rvv10_supported; + + /* + * After the vsetvli statement vtype will either be a value > 0 if the + * vsetvli succeeded or less than 0 if it failed. If 0 < vtype + * we're good and the function will return 1, otherwise there's no + * RVV 1.0 and we return 0. + */ + + asm volatile("vsetvli x0, x0, e8, m1, ta, ma\n\t" + "csrr %0, vtype\n\t" + "slt %0, x0, %0\n" + : "=r" (rvv10_supported) + : + :); + + return rvv10_supported; +} + diff --git a/driver/others/dynamic_riscv64.c b/driver/others/dynamic_riscv64.c new file mode 100644 index 000000000..78e3bb67a --- /dev/null +++ b/driver/others/dynamic_riscv64.c @@ -0,0 +1,269 @@ +/***************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +**********************************************************************************/ + +#include + +#include "common.h" + +/* + * OpenBLAS contains some kernels that are optimised for RVV 1.0. Before we + * can use these kernels we need to determine whether the device supports + * RVV 1.0 and what the device's VLEN is. Our strategy will be as follows. + * + * First we'll invoke the hwprobe syscall to detect RVV 1.0. In an ideal world, + * this is all we should need to do. If the syscall is not implemented we + * should be able to deduce that RVV 1.0 is not supported (as it was added to + * Linux after hwprobe) and if the syscall is implemented we can use it to + * determine whether RVV 1.0 is supported. However, there are some riscv64 + * boards out there that implement RVV 1.0 but ship with a Linux kernel that + * predates RVV vector support and hwprobe support. These kernels contain + * the backported RVV patches but not the hwprobe patches and so they + * advertise support for RVV via hwcap. To cater for these boards we need + * to fall back to hwcap if hwprobe is not supported. Unfortunately, some + * boards indicate support for RVV via hwcap even though they only support + * RVV 0.7.1, which is incompatible with RVV 1.0. 
So an additional check is + * required to test if the devices advertising support for RVV via hwcap really + * support RVV 1.0. This test works by executing a vsetvli instruction that + * sets the tail agnostic and mask agnostic bits in the vtype register. + * These bits are not supported prior to RVV 0.9 so will cause the VIL bit to + * be set on the VTYPE register in CPUs supporting 0.7.1. If this bit is set + * we can determine that RVV 1.0 is not supported. + * + * This approach is borrowed from + * VideoLan dav1d: + * (https://code.videolan.org/videolan/dav1d/-/merge_requests/1629). + * + * We assume that if a kernel reports the presence of RVV via hwcap that + * the device supports the vsetvli instruction. + * + * For now we're just going to invoke the hwprobe syscall directly, rather than + * invoking it through glibc. Support for hwprobe has been added to glibc but + * at the time of writing this support has not yet been included in a glibc + * release. Once it has, it will be better to invoke hwprobe via glibc as doing + * so should take advantage of the vdso entry and be more efficient. + */ + +/* + * This should work on Android as well but I have no way of testing. + */ + +#if defined(OS_LINUX) +#include +#include +#include +#include + +#define DETECT_RISCV64_HWCAP_ISA_V (1 << ('V' - 'A')) + +struct riscv_hwprobe { + int64_t key; + uint64_t value; +}; + +/* The constants below are copied from + * /usr/include/riscv64-linux-gnu/asm/hwprobe.h. We duplicate the + * constants as the header file from which they are copied will only + * be present if we're building on a device with Linux 6.5 or greater. + */ + +#define RISCV_HWPROBE_KEY_IMA_EXT_0 4 +#define RISCV_HWPROBE_IMA_V (1 << 2) + +#ifndef NR_riscv_hwprobe +#ifndef NR_arch_specific_syscall +#define NR_arch_specific_syscall 244 +#endif +#define NR_riscv_hwprobe (NR_arch_specific_syscall + 14) +#endif +#endif // defined(OS_LINUX) + +unsigned detect_riscv64_get_vlenb(void); +uint64_t detect_riscv64_rvv100(void); + +extern gotoblas_t gotoblas_RISCV64_GENERIC; +#if !defined(DYNAMIC_LIST) || defined(DYN_RISCV64_ZVL256B) +extern gotoblas_t gotoblas_RISCV64_ZVL256B; +#endif +#if !defined(DYNAMIC_LIST) || defined(DYN_RISCV64_ZVL128B) +extern gotoblas_t gotoblas_RISCV64_ZVL128B; +#endif + +#define CPU_GENERIC 0 +#define CPU_RISCV64_ZVL256B 1 +#define CPU_RISCV64_ZVL128B 2 + +static char *cpuname[] = { + "riscv64_generic", + "riscv64_zvl256b", + "riscv64_zvl128b" +}; +#define NUM_CORETYPES (sizeof(cpuname)/sizeof(char*)) + +extern int openblas_verbose(void); +extern void openblas_warning(int verbose, const char* msg); + +char* gotoblas_corename(void) { +#if !defined(DYNAMIC_LIST) || defined(DYN_RISCV64_ZVL256B) + if (gotoblas == &gotoblas_RISCV64_ZVL256B) + return cpuname[CPU_RISCV64_ZVL256B]; +#endif +#if !defined(DYNAMIC_LIST) || defined(DYN_RISCV64_ZVL128B) + if (gotoblas == &gotoblas_RISCV64_ZVL128B) + return cpuname[CPU_RISCV64_ZVL128B]; +#endif + if (gotoblas == &gotoblas_RISCV64_GENERIC) + return cpuname[CPU_GENERIC]; + + return "unknown"; +} + +static gotoblas_t* get_coretype(void) { + unsigned vlenb = 0; + +#if !defined(OS_LINUX) + return NULL; +#else + + /* + * See the hwprobe documentation + * + * ( https://docs.kernel.org/arch/riscv/hwprobe.html ) + * for more details. 
+ */ + + struct riscv_hwprobe pairs[] = { + { .key = RISCV_HWPROBE_KEY_IMA_EXT_0, }, + }; + int ret = syscall(NR_riscv_hwprobe, pairs, 1, 0, NULL, 0); + if (ret == 0) { + if (!(pairs[0].value & RISCV_HWPROBE_IMA_V)) + return NULL; + } else { + if (!(getauxval(AT_HWCAP) & DETECT_RISCV64_HWCAP_ISA_V)) + return NULL; + + if (!detect_riscv64_rvv100()) + return NULL; + } + + /* + * RVV 1.0 is supported. We now just need to determine the coretype + * based on the VLEN. + */ + + vlenb = detect_riscv64_get_vlenb(); + + if (vlenb < 16) + return NULL; +#if !defined(DYNAMIC_LIST) || defined(DYN_RISCV64_ZVL256B) + if (vlenb >= 32) + return &gotoblas_RISCV64_ZVL256B; +#endif + +#if !defined(DYNAMIC_LIST) || defined(DYN_RISCV64_ZVL128B) + return &gotoblas_RISCV64_ZVL128B; +#else + return NULL; +#endif + +#endif // !defined(OS_LINUX) +} + +static gotoblas_t* force_coretype(char* coretype) { + size_t i; + char message[128]; + + for (i = 0; i < NUM_CORETYPES && strcasecmp(coretype, cpuname[i]); i++); + + if (i == CPU_GENERIC) + return &gotoblas_RISCV64_GENERIC; + + if (i == CPU_RISCV64_ZVL256B) { +#if !defined(DYNAMIC_LIST) || defined(DYN_RISCV64_ZVL256B) + return &gotoblas_RISCV64_ZVL256B; +#else + openblas_warning(1, + "riscv64_zvl256b support not compiled in\n"); + return NULL; +#endif + } + + if (i == CPU_RISCV64_ZVL128B) { +#if !defined(DYNAMIC_LIST) || defined(DYN_RISCV64_ZVL128B) + return &gotoblas_RISCV64_ZVL128B; +#else + openblas_warning(1, + "riscv64_zvl128b support not compiled in\n"); + return NULL; +#endif + } + + snprintf(message, sizeof(message), "Core not found: %s\n", coretype); + openblas_warning(1, message); + + return NULL; +} + +void gotoblas_dynamic_init(void) { + + char coremsg[128]; + char* p; + + if (gotoblas) return; + + p = getenv("OPENBLAS_CORETYPE"); + if (p) + gotoblas = force_coretype(p); + else + gotoblas = get_coretype(); + + if (!gotoblas) { + snprintf(coremsg, sizeof(coremsg), "Falling back to generic riscv64 core\n"); + openblas_warning(1, coremsg); + gotoblas = &gotoblas_RISCV64_GENERIC; + } + + if (gotoblas->init) { + snprintf(coremsg, sizeof(coremsg), "Core: %s\n", + gotoblas_corename()); + openblas_warning(2, coremsg); + gotoblas->init(); + return; + } + + openblas_warning(0, "OpenBLAS : Architecture Initialization failed. 
No initialization function found.\n"); + exit(1); +} + +void gotoblas_dynamic_quit(void) { + gotoblas = NULL; +} diff --git a/kernel/riscv64/KERNEL.RISCV64_GENERIC b/kernel/riscv64/KERNEL.RISCV64_GENERIC index 15bcd2289..67f81cacd 100644 --- a/kernel/riscv64/KERNEL.RISCV64_GENERIC +++ b/kernel/riscv64/KERNEL.RISCV64_GENERIC @@ -99,26 +99,26 @@ ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c SGEMMKERNEL = ../generic/gemmkernel_2x2.c SGEMMONCOPY = ../generic/gemm_ncopy_2.c SGEMMOTCOPY = ../generic/gemm_tcopy_2.c -SGEMMONCOPYOBJ = sgemm_oncopy.o -SGEMMOTCOPYOBJ = sgemm_otcopy.o +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) DGEMMKERNEL = ../generic/gemmkernel_2x2.c DGEMMONCOPY = ../generic/gemm_ncopy_2.c DGEMMOTCOPY = ../generic/gemm_tcopy_2.c -DGEMMONCOPYOBJ = dgemm_oncopy.o -DGEMMOTCOPYOBJ = dgemm_otcopy.o +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) CGEMMKERNEL = ../generic/zgemmkernel_2x2.c CGEMMONCOPY = ../generic/zgemm_ncopy_2.c CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -CGEMMONCOPYOBJ = cgemm_oncopy.o -CGEMMOTCOPYOBJ = cgemm_otcopy.o +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) ZGEMMKERNEL = ../generic/zgemmkernel_2x2.c ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -ZGEMMONCOPYOBJ = zgemm_oncopy.o -ZGEMMOTCOPYOBJ = zgemm_otcopy.o +ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) +ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c diff --git a/kernel/setparam-ref.c b/kernel/setparam-ref.c index c790b0a58..9d494bfc6 100644 --- a/kernel/setparam-ref.c +++ b/kernel/setparam-ref.c @@ -1244,6 +1244,36 @@ static void init_parameter(void) { } #else //ZARCH +#if (ARCH_RISCV64) +static void init_parameter(void) { + +#ifdef BUILD_BFLOAT16 + TABLE_NAME.sbgemm_p = SBGEMM_DEFAULT_P; +#endif + TABLE_NAME.sgemm_p = SGEMM_DEFAULT_P; + TABLE_NAME.dgemm_p = DGEMM_DEFAULT_P; + TABLE_NAME.cgemm_p = CGEMM_DEFAULT_P; + TABLE_NAME.zgemm_p = ZGEMM_DEFAULT_P; + +#ifdef BUILD_BFLOAT16 + TABLE_NAME.sbgemm_r = SBGEMM_DEFAULT_R; +#endif + TABLE_NAME.sgemm_r = SGEMM_DEFAULT_R; + TABLE_NAME.dgemm_r = DGEMM_DEFAULT_R; + TABLE_NAME.cgemm_r = CGEMM_DEFAULT_R; + TABLE_NAME.zgemm_r = ZGEMM_DEFAULT_R; + + +#ifdef BUILD_BFLOAT16 + TABLE_NAME.sbgemm_q = SBGEMM_DEFAULT_Q; +#endif + TABLE_NAME.sgemm_q = SGEMM_DEFAULT_Q; + TABLE_NAME.dgemm_q = DGEMM_DEFAULT_Q; + TABLE_NAME.cgemm_q = CGEMM_DEFAULT_Q; + TABLE_NAME.zgemm_q = ZGEMM_DEFAULT_Q; +} +#else //RISCV64 + #ifdef ARCH_X86 static int get_l2_size_old(void){ int i, eax, ebx, ecx, edx, cpuid_level; @@ -2046,6 +2076,7 @@ static void init_parameter(void) { } +#endif //RISCV64 #endif //POWER #endif //ZARCH #endif //(ARCH_LOONGARCH64) diff --git a/lapack/laswp/riscv64/Makefile b/lapack/laswp/riscv64/Makefile index 75411deb5..bc39a30f6 100644 --- a/lapack/laswp/riscv64/Makefile +++ b/lapack/laswp/riscv64/Makefile @@ -1,6 +1,11 @@ TOPDIR = ../../.. 
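With the dispatcher in place, the selected core can be forced or inspected the same way as on the other DYNAMIC_ARCH targets. A small hedged example (assumes a DYNAMIC_ARCH=1 build; openblas_get_corename is the existing public accessor, not part of this patch):

#include <stdio.h>
#include <stdlib.h>

extern char *openblas_get_corename(void);

int main(void)
{
    /* Optional override, resolved by force_coretype() above. */
    setenv("OPENBLAS_CORETYPE", "riscv64_zvl128b", 1);

    /* gotoblas_dynamic_init() runs before the first kernel is used;
     * afterwards the chosen core name can be queried. */
    printf("selected core: %s\n", openblas_get_corename());
    return 0;
}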
include ../../../Makefile.system +ifeq ($(DYNAMIC_ARCH), 1) +LASWP = ../generic/laswp_k_4.c +ZLASWP = ../generic/zlaswp_k_4.c +endif + ifndef LASWP LASWP = ../generic/laswp_k.c endif From 127ea5d0d95bd7665897994ea204c85096c8f3e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vladimir=20Nikoli=C4=87?= Date: Mon, 15 Jul 2024 15:12:21 -0700 Subject: [PATCH 07/52] Add missing parenthesis --- ctest/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ctest/Makefile b/ctest/Makefile index 46885f3bb..c02e04e1a 100644 --- a/ctest/Makefile +++ b/ctest/Makefile @@ -26,7 +26,7 @@ endif override CFLAGS += -DADD$(BU) -DCBLAS ifeq ($(F_COMPILER),GFORTRAN) ifneq (, $(filter $(CORE),LOONGSON3R3 LOONGSON3R4)) - override FFLAGS = $(filter_out(-O2 -O3,$(FFLAGS)) -O0 + override FFLAGS = $(filter_out(-O2 -O3,$(FFLAGS))) -O0 endif override FFLAGS += -fno-tree-vectorize endif From 56e1782ffba67271d6884e8d95e012a2980cd1b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vladimir=20Nikoli=C4=87?= Date: Mon, 15 Jul 2024 15:15:23 -0700 Subject: [PATCH 08/52] Add another missing parenthesis --- test/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/Makefile b/test/Makefile index 85bdca103..27afce8d2 100644 --- a/test/Makefile +++ b/test/Makefile @@ -2,7 +2,7 @@ TOPDIR = .. include ../Makefile.system ifeq ($(F_COMPILER),GFORTRAN) ifneq (, $(filter $(CORE),LOONGSON3R3 LOONGSON3R4)) - override FFLAGS = $(filter_out(-O2 -O3,$(FFLAGS)) -O0 + override FFLAGS = $(filter_out(-O2 -O3,$(FFLAGS))) -O0 endif override FFLAGS += -fno-tree-vectorize endif From 0985fdc82b727ec3283f78e30f34910ea48937f9 Mon Sep 17 00:00:00 2001 From: iha fujitsu Date: Tue, 16 Jul 2024 17:31:33 +0900 Subject: [PATCH 09/52] A64FX: Add support for SVE to SGEMV/DGEMV kernels. --- kernel/arm64/KERNEL.A64FX | 5 +++ kernel/arm64/gemv_n_sve.c | 92 ++++++++++++++++++++++++++++++++++++++ kernel/arm64/gemv_t_sve.c | 94 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 191 insertions(+) create mode 100644 kernel/arm64/gemv_n_sve.c create mode 100644 kernel/arm64/gemv_t_sve.c diff --git a/kernel/arm64/KERNEL.A64FX b/kernel/arm64/KERNEL.A64FX index bc5999097..4abc84040 100644 --- a/kernel/arm64/KERNEL.A64FX +++ b/kernel/arm64/KERNEL.A64FX @@ -1 +1,6 @@ include $(KERNELDIR)/KERNEL.ARMV8SVE + +SGEMVNKERNEL = gemv_n_sve.c +DGEMVNKERNEL = gemv_n_sve.c +SGEMVTKERNEL = gemv_t_sve.c +DGEMVTKERNEL = gemv_t_sve.c diff --git a/kernel/arm64/gemv_n_sve.c b/kernel/arm64/gemv_n_sve.c new file mode 100644 index 000000000..d3aa57ae3 --- /dev/null +++ b/kernel/arm64/gemv_n_sve.c @@ -0,0 +1,92 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include +#include "common.h" + +#ifdef DOUBLE +#define SV_COUNT svcntd +#define SV_TYPE svfloat64_t +#define SV_TRUE svptrue_b64 +#define SV_WHILE svwhilelt_b64 +#define SV_DUP svdup_f64 +#else +#define SV_COUNT svcntw +#define SV_TYPE svfloat32_t +#define SV_TRUE svptrue_b32 +#define SV_WHILE svwhilelt_b32 +#define SV_DUP svdup_f32 +#endif + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer) +{ + BLASLONG i; + BLASLONG ix,iy; + BLASLONG j; + FLOAT *a_ptr; + FLOAT temp; + + ix = 0; + a_ptr = a; + + if (inc_y == 1) { + uint64_t sve_size = SV_COUNT(); + for (j = 0; j < n; j++) { + SV_TYPE temp_vec = SV_DUP(alpha * x[ix]); + i = 0; + svbool_t pg = SV_WHILE(i, m); + while (svptest_any(SV_TRUE(), pg)) { + SV_TYPE a_vec = svld1(pg, a_ptr + i); + SV_TYPE y_vec = svld1(pg, y + i); + y_vec = svmla_x(pg, y_vec, temp_vec, a_vec); + svst1(pg, y + i, y_vec); + i += sve_size; + pg = SV_WHILE(i, m); + } + a_ptr += lda; + ix += inc_x; + } + return(0); + } + + for (j = 0; j < n; j++) { + temp = alpha * x[ix]; + iy = 0; + for (i = 0; i < m; i++) { + y[iy] += temp * a_ptr[i]; + iy += inc_y; + } + a_ptr += lda; + ix += inc_x; + } + return (0); +} diff --git a/kernel/arm64/gemv_t_sve.c b/kernel/arm64/gemv_t_sve.c new file mode 100644 index 000000000..bff08b257 --- /dev/null +++ b/kernel/arm64/gemv_t_sve.c @@ -0,0 +1,94 @@ +/*************************************************************************** +Copyright (c) 2024, The OpenBLAS Project +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +#include +#include "common.h" + +#ifdef DOUBLE +#define SV_COUNT svcntd +#define SV_TYPE svfloat64_t +#define SV_TRUE svptrue_b64 +#define SV_WHILE svwhilelt_b64 +#define SV_DUP svdup_f64 +#else +#define SV_COUNT svcntw +#define SV_TYPE svfloat32_t +#define SV_TRUE svptrue_b32 +#define SV_WHILE svwhilelt_b32 +#define SV_DUP svdup_f32 +#endif + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer) +{ + BLASLONG i; + BLASLONG ix,iy; + BLASLONG j; + FLOAT *a_ptr; + FLOAT temp; + + iy = 0; + a_ptr = a; + + if (inc_x == 1) { + uint64_t sve_size = SV_COUNT(); + for (j = 0; j < n; j++) { + SV_TYPE temp_vec = SV_DUP(0.0); + i = 0; + svbool_t pg = SV_WHILE(i, m); + while (svptest_any(SV_TRUE(), pg)) { + SV_TYPE a_vec = svld1(pg, a_ptr + i); + SV_TYPE x_vec = svld1(pg, x + i); + temp_vec = svmla_m(pg, temp_vec, a_vec, x_vec); + i += sve_size; + pg = SV_WHILE(i, m); + } + temp = svaddv(SV_TRUE(), temp_vec); + y[iy] += alpha * temp; + iy += inc_y; + a_ptr += lda; + } + return(0); + } + + for (j = 0; j < n; j++) { + temp = 0.0; + ix = 0; + for (i = 0; i < m; i++) { + temp += a_ptr[i] * x[ix]; + ix += inc_x; + } + y[iy] += alpha * temp; + iy += inc_y; + a_ptr += lda; + } + return (0); +} From f6d6c14a96602fb2cfb53584541017434f7492e2 Mon Sep 17 00:00:00 2001 From: gxw Date: Wed, 17 Jul 2024 09:23:49 +0800 Subject: [PATCH 10/52] mips: Fixed numpy CI failure --- kernel/mips/dscal_msa.c | 12 +++++------- kernel/mips/scal.c | 41 ++++++++++++++++++++++++----------------- kernel/mips/sscal_msa.c | 9 +++------ 3 files changed, 32 insertions(+), 30 deletions(-) diff --git a/kernel/mips/dscal_msa.c b/kernel/mips/dscal_msa.c index e95f0a655..cc8d83441 100644 --- a/kernel/mips/dscal_msa.c +++ b/kernel/mips/dscal_msa.c @@ -42,7 +42,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, if (1 == inc_x) { - if (0) //if (0.0 == da ) + if (0.0 == da && !dummy2) { v2f64 zero_v = {0.0, 0.0}; @@ -240,14 +240,12 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, } else { - if (da == 0.0) + if (da == 0.0 && !dummy2) { for (i = n; i--;) - { - if (isfinite(*x)) - *x = 0.0; - else - *x = NAN; + { + *x = 0.0; + x += inc_x; } } diff --git a/kernel/mips/scal.c b/kernel/mips/scal.c index d51fd9ccd..5f12d4271 100644 --- a/kernel/mips/scal.c +++ b/kernel/mips/scal.c @@ -29,27 +29,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
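For reference, the two SVE GEMV kernels above compute the following scalar recurrences on their unit-stride fast paths (a sketch, not the committed non-unit-stride fallback):

#include "common.h"

/* gemv_n_sve.c: y += alpha * A * x, one column-wise axpy per j; the SVE
 * loop vectorises i with svwhilelt predication and svmla_x. */
static void gemv_n_ref(BLASLONG m, BLASLONG n, FLOAT alpha, const FLOAT *a,
                       BLASLONG lda, const FLOAT *x, FLOAT *y)
{
    for (BLASLONG j = 0; j < n; j++) {
        FLOAT temp = alpha * x[j];         /* SV_DUP(alpha * x[ix]) */
        for (BLASLONG i = 0; i < m; i++)
            y[i] += temp * a[i + j * lda];
    }
}

/* gemv_t_sve.c: one dot product per output element; the SVE loop
 * accumulates with svmla_m and folds the lanes with a single svaddv. */
static void gemv_t_ref(BLASLONG m, BLASLONG n, FLOAT alpha, const FLOAT *a,
                       BLASLONG lda, const FLOAT *x, FLOAT *y)
{
    for (BLASLONG j = 0; j < n; j++) {
        FLOAT temp = 0.0;
        for (BLASLONG i = 0; i < m; i++)
            temp += a[i + j * lda] * x[i];
        y[j] += alpha * temp;
    }
}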
int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2) { - BLASLONG i=0,j=0; + BLASLONG i = 0, j = 0; - while(j < n) - { + // Resolved issue 4728 when the caller is {s/d}scal + if (da == 0.0 && dummy2 == 1) + { + while(j < n) + { + x[i] = da * x[i] ; - if ( da == 0.0 ) - if (isnan(x[i])||isinf(x[i])) - x[i]=NAN; - else - x[i]=0.0; - else if (isnan(da)) - x[i]=NAN; - else - x[i] = da * x[i] ; + i += inc_x ; + j++; + } + } + else + { + while(j < n) + { - i += inc_x ; - j++; - - } - return 0; + if ( da == 0.0 ) + x[i] = 0.0; + else + x[i] = da * x[i] ; + i += inc_x ; + j++; + } + } + return 0; } diff --git a/kernel/mips/sscal_msa.c b/kernel/mips/sscal_msa.c index bfd477b6a..953cf0fbc 100644 --- a/kernel/mips/sscal_msa.c +++ b/kernel/mips/sscal_msa.c @@ -42,7 +42,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, if (1 == inc_x) { - if (0) // if (0.0 == da) + if (0.0 == da && !dummy2) { v4f32 zero_v = {0.0, 0.0, 0.0, 0.0}; @@ -255,14 +255,11 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, } else { - if (0.0 == da) + if (0.0 == da && !dummy2) { for (i = n; i--;) { - if (isfinite(*x)) - *x = 0; - else - *x = NAN; + *x = 0; x += inc_x; } } From 34b80ce03f922ebdde8e081a7c5380f460cf215c Mon Sep 17 00:00:00 2001 From: gxw Date: Wed, 17 Jul 2024 09:52:14 +0800 Subject: [PATCH 11/52] mips64: Fixed numpy CI failure --- kernel/mips64/scal.S | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/kernel/mips64/scal.S b/kernel/mips64/scal.S index e392a9c6a..49716d3ed 100644 --- a/kernel/mips64/scal.S +++ b/kernel/mips64/scal.S @@ -48,6 +48,7 @@ #define TEMP $3 #define XX $5 +#define DUMMY2 $6 #define ALPHA $f15 @@ -73,13 +74,13 @@ blez N, .L999 dsll INCX, INCX, BASE_SHIFT - CMPEQ $fcc0, ALPHA, a1 - NOP + CMPEQ $fcc0, ALPHA, a1 + LDARG DUMMY2, 8($sp) bc1f $fcc0, .L50 - NOP + dsll DUMMY2, DUMMY2, BASE_SHIFT - bc1t $fcc0, .L50 + beq DUMMY2, TEMP, .L50 // If dummy2 == 1, do not directly copy 0 NOP bne INCX, TEMP, .L20 From eb4879e04cb93cb112c2ec5ec79cb4a837b5d517 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Wed, 17 Jul 2024 23:24:19 +0200 Subject: [PATCH 12/52] make NAN handling depend on the dummy2 parameter --- kernel/arm64/scal.S | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/kernel/arm64/scal.S b/kernel/arm64/scal.S index 5029890f6..33400b630 100644 --- a/kernel/arm64/scal.S +++ b/kernel/arm64/scal.S @@ -33,7 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define X_COPY x5 /* X vector address */ #define INC_X x4 /* X stride */ #define I x1 /* loop variable */ - +#define FLAG x9 /******************************************************************************* * Macro definitions *******************************************************************************/ @@ -168,9 +168,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
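All of these per-architecture scal fixes stem from the same IEEE-754 fact that the numpy CI exercises: multiplying by zero does not erase NaN or Inf. A standalone check (expected output: 0 nan nan, modulo NaN sign):

#include <math.h>
#include <stdio.h>

int main(void)
{
    double x[3] = { 2.0, NAN, INFINITY };
    for (int i = 0; i < 3; i++)
        x[i] = 0.0 * x[i];  /* what scal(alpha = 0) must do when dummy2 == 1 */
    printf("%g %g %g\n", x[0], x[1], x[2]);
    return 0;
}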
cmp N, xzr ble .Lscal_kernel_L999 - //fcmp DA, #0.0 - //beq .Lscal_kernel_zero + ldr FLAG, [sp] + cmp FLAG, #1 + beq .Lscal_kernel_nansafe + fcmp DA, #0.0 + beq .Lscal_kernel_zero + +.Lscal_kernel_nansafe: cmp INC_X, #1 bne .Lscal_kernel_S_BEGIN From b9bfc8ce095754231912686c3a9e41c7a8a2df94 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Wed, 17 Jul 2024 23:29:50 +0200 Subject: [PATCH 13/52] make NAN handling depend on dummy2 parameter --- kernel/power/scal.S | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/kernel/power/scal.S b/kernel/power/scal.S index 7d3e23245..5e92a88aa 100644 --- a/kernel/power/scal.S +++ b/kernel/power/scal.S @@ -47,9 +47,11 @@ #ifndef __64BIT__ #define X r6 #define INCX r7 +#define FLAG r11 #else #define X r7 #define INCX r8 +#define FLAG r12 #endif #endif @@ -57,9 +59,11 @@ #if !defined(__64BIT__) && defined(DOUBLE) #define X r8 #define INCX r9 +#define FLAG r13 #else #define X r7 #define INCX r8 +#define FLAG r12 #endif #endif @@ -84,9 +88,12 @@ cmpwi cr0, N, 0 blelr- cr0 -// fcmpu cr0, FZERO, ALPHA -// bne- cr0, LL(A1I1) - b LL(A1I1) + fcmpu cr0, FZERO, ALPHA + bne- cr0, LL(A1I1) + + ld FLAG, 48+64+8(SP) + cmpwi cr0, FLAG, 1 + beq- cr0, LL(A1I1) cmpwi cr0, INCX, SIZE bne- cr0, LL(A0IN) From 73751218a42d23017cc0f7899d9e52862867e5a3 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Wed, 17 Jul 2024 23:41:26 +0200 Subject: [PATCH 14/52] make NAN handling depend on dummy2 parameter --- kernel/arm/scal.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/kernel/arm/scal.c b/kernel/arm/scal.c index 4455d7643..1f96f9b95 100644 --- a/kernel/arm/scal.c +++ b/kernel/arm/scal.c @@ -43,9 +43,22 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS if ( (n <= 0) || (inc_x <= 0)) return(0); + if (dummy2 == 0) + while(j < n) + { - while(j < n) - { + if ( da == 0.0 ) + x[i]=0.0; + else + x[i] = da * x[i] ; + + i += inc_x ; + j++; + } + } else { + + while(j < n) + { if ( da == 0.0 ) if (!isnan(x[i]) && !isinf(x[i])) { @@ -59,6 +72,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS i += inc_x ; j++; + } } return 0; From 7284c533b56748975c1794058f61e3d044c24ec2 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Wed, 17 Jul 2024 23:50:40 +0200 Subject: [PATCH 15/52] make NAN handling depend on dummy2 parameter --- kernel/riscv64/scal.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/kernel/riscv64/scal.c b/kernel/riscv64/scal.c index 6c713aa18..bebbed67e 100644 --- a/kernel/riscv64/scal.c +++ b/kernel/riscv64/scal.c @@ -43,9 +43,9 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS if ( (n <= 0) || (inc_x <= 0)) return(0); - - while(j < n) - { + if (dummy2 == 0) { + while(j < n) + { if ( da == 0.0 ) if (isfinite(x[i])) @@ -57,7 +57,19 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS i += inc_x ; j++; + } + } else { + while(j < n) + { + if ( da == 0.0 ) + x[i]=0.0; + else + x[i] = da * x[i] ; + + i += inc_x ; + j++; + } } return 0; From 3870995f01d731dc80861e7760a8216025c639ab Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Wed, 17 Jul 2024 23:54:24 +0200 Subject: [PATCH 16/52] make NAN handling depend on dummy2 parameter --- kernel/riscv64/scal_vector.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/riscv64/scal_vector.c b/kernel/riscv64/scal_vector.c index a1ba41c4f..4792b514c 100644 --- 
a/kernel/riscv64/scal_vector.c +++ b/kernel/riscv64/scal_vector.c @@ -71,7 +71,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS FLOAT_V_T v0, v1; unsigned int gvl = 0; if(inc_x == 1){ - if (0){ //if(da == 0.0){ + if(dummy2 == 0 && da == 0.0){ memset(&x[0], 0, n * sizeof(FLOAT)); }else{ gvl = VSETVL(n); @@ -96,7 +96,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS } } }else{ - if (0) { //if(da == 0.0){ + if(dummy2 == 0 && da == 0.0){ BLASLONG stride_x = inc_x * sizeof(FLOAT); BLASLONG ix = 0; gvl = VSETVL(n); From 2020569705b4fcf40e5f4e6aa7e600b038f295bc Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Wed, 17 Jul 2024 23:55:54 +0200 Subject: [PATCH 17/52] fix NAN handling and make it depend on dummy2 parameter --- kernel/riscv64/scal_rvv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/riscv64/scal_rvv.c b/kernel/riscv64/scal_rvv.c index 2c273fb63..827ab120a 100644 --- a/kernel/riscv64/scal_rvv.c +++ b/kernel/riscv64/scal_rvv.c @@ -56,7 +56,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS FLOAT_V_T v0; if(inc_x == 1) { - if(da == 0.0) { + if(dummy2 == 0 && da == 0.0) { int gvl = VSETVL_MAX; v0 = VFMVVF_FLOAT(0.0, gvl); for (size_t vl; n > 0; n -= vl, x += vl) { @@ -75,7 +75,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS } else { BLASLONG stride_x = inc_x * sizeof(FLOAT); - if(da == 0.0) { + if(dummy2 == 0 && da == 0.0) { int gvl = VSETVL_MAX; v0 = VFMVVF_FLOAT(0.0, gvl); for (size_t vl; n > 0; n -= vl, x += vl*inc_x) { From b1c9fafabb028c73f57d90be991356ea0760a56a Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Thu, 18 Jul 2024 17:37:18 +0100 Subject: [PATCH 18/52] Remove k2 loop from DGEMM TN and use a more conservative heuristic for SGEMM --- kernel/arm64/dgemm_small_kernel_tn_sve.c | 209 +------------------- kernel/arm64/gemm_small_kernel_permit_sve.c | 4 +- 2 files changed, 2 insertions(+), 211 deletions(-) diff --git a/kernel/arm64/dgemm_small_kernel_tn_sve.c b/kernel/arm64/dgemm_small_kernel_tn_sve.c index b8783c1d5..6d3f4dd28 100644 --- a/kernel/arm64/dgemm_small_kernel_tn_sve.c +++ b/kernel/arm64/dgemm_small_kernel_tn_sve.c @@ -265,43 +265,7 @@ CNAME(BLASLONG M, if (LIKELY(packed_a != NULL)) { if (j == 0) { - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - GATHER_LOAD_A(pg_true, 0, 0); - VECTOR_PACK_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - GATHER_LOAD_A(pg_true, 0, 1); - VECTOR_PACK_A(0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - VECTOR_LOAD_B_K2(2, 0); - VECTOR_LOAD_B_K2(3, 0); - TRANSPOSE_B2_K2(2, 3, 0, 1); - SCALE_B2_K2(2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); - GATHER_LOAD_A(pg_true, 1, 0); - VECTOR_PACK_A(1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); - GATHER_LOAD_A(pg_true, 1, 1); - VECTOR_PACK_A(1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1); - 
UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1); - } for (; k < K; k++) { - BROADCAST_LOAD_B(0, 0); GATHER_LOAD_A(pg_true, 0, 0); VECTOR_PACK_A(0, 0); @@ -320,39 +284,7 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); } } else { - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UNPACK_VECTOR_A(0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - VECTOR_LOAD_B_K2(2, 0); - VECTOR_LOAD_B_K2(3, 0); - TRANSPOSE_B2_K2(2, 3, 0, 1); - SCALE_B2_K2(2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); - UNPACK_VECTOR_A(1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); - UNPACK_VECTOR_A(1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1); - } for (; k < K; k++) { - BROADCAST_LOAD_B(0, 0); UNPACK_VECTOR_A(0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); @@ -370,37 +302,6 @@ CNAME(BLASLONG M, } } } else { - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - GATHER_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - VECTOR_LOAD_B_K2(2, 0); - VECTOR_LOAD_B_K2(3, 0); - TRANSPOSE_B2_K2(2, 3, 0, 1); - SCALE_B2_K2(2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); - GATHER_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0); - GATHER_LOAD_A(pg_true, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -443,27 +344,7 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(1, 1); if (LIKELY(packed_a != NULL)) { - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UNPACK_VECTOR_A(0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - UNPACK_VECTOR_A(1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UNPACK_VECTOR_A(1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - } for (; k < K; k++) { - BROADCAST_LOAD_B(0, 0); UNPACK_VECTOR_A(0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); @@ -474,27 +355,7 
@@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); } } else { - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - GATHER_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - GATHER_LOAD_A(pg_true, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - GATHER_LOAD_A(pg_true, 1, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); - } for (; k < K; k++) { - BROADCAST_LOAD_B(0, 0); GATHER_LOAD_A(pg_true, 0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); @@ -570,27 +431,6 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - GATHER_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - VECTOR_LOAD_B_K2(2, 0); - VECTOR_LOAD_B_K2(3, 0); - TRANSPOSE_B2_K2(2, 3, 0, 1); - SCALE_B2_K2(2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -619,19 +459,6 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - GATHER_LOAD_A(pg_true, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -686,27 +513,6 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - GATHER_LOAD_A(pg_tail, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - GATHER_LOAD_A(pg_tail, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - VECTOR_LOAD_B_K2(2, 0); - VECTOR_LOAD_B_K2(3, 0); - TRANSPOSE_B2_K2(2, 3, 0, 1); - SCALE_B2_K2(2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1); - } for (; k < K; k++) { BROADCAST_LOAD_B(0, 0); @@ -735,19 +541,6 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); - for (; k < k2; k += 2) { - - VECTOR_LOAD_B_K2(0, 0); - VECTOR_LOAD_B_K2(1, 0); - TRANSPOSE_B2_K2(0, 1, 0, 1); - SCALE_B2_K2(0, 0, 1); - GATHER_LOAD_A(pg_tail, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - GATHER_LOAD_A(pg_tail, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); - } for (; k 
< K; k++) { BROADCAST_LOAD_B(0, 0); @@ -787,4 +580,4 @@ CNAME(BLASLONG M, free(packed_a); return 0; -} \ No newline at end of file +} diff --git a/kernel/arm64/gemm_small_kernel_permit_sve.c b/kernel/arm64/gemm_small_kernel_permit_sve.c index c1275129d..3d425624a 100644 --- a/kernel/arm64/gemm_small_kernel_permit_sve.c +++ b/kernel/arm64/gemm_small_kernel_permit_sve.c @@ -35,11 +35,9 @@ int CNAME(int transa, int transb, BLASLONG M, BLASLONG N, BLASLONG K, FLOAT alph if (MNK <= 64*64*64) return 1; #else // sgemm - if (MNK <= 256*256*256) + if (MNK <= 64*64*64) return 1; #endif - - return 0; } From 9984c5ce9dae74305b5bbfed0b1fa5602849ac35 Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Thu, 18 Jul 2024 17:34:43 +0000 Subject: [PATCH 19/52] Clean up k2 removal more and unroll SGEMM more --- kernel/arm64/dgemm_small_kernel_tn_sve.c | 23 +-- kernel/arm64/sgemm_small_kernel_nn_sve.c | 167 +++++++++++++++++- kernel/arm64/sgemm_small_kernel_nt_sve.c | 95 +++++++++- kernel/arm64/sgemm_small_kernel_tn_sve.c | 210 +++++++++++++++++------ kernel/arm64/sgemm_small_kernel_tt_sve.c | 189 ++++++++++++++------ 5 files changed, 555 insertions(+), 129 deletions(-) diff --git a/kernel/arm64/dgemm_small_kernel_tn_sve.c b/kernel/arm64/dgemm_small_kernel_tn_sve.c index 6d3f4dd28..1b0fada2a 100644 --- a/kernel/arm64/dgemm_small_kernel_tn_sve.c +++ b/kernel/arm64/dgemm_small_kernel_tn_sve.c @@ -80,25 +80,12 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. float64x2_t a##m##_k##offset_k = vld1q_dup_f64(&A_ELEMENT_K(m, offset_k)); #define LOAD_A1(m, offset_k) \ float64_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k); -#define VECTOR_LOAD_B_K2(n, offset_k) \ - float64x2_t b##k##n##_k##offset_k = vld1q_f64(&B_ELEMENT_K(n, offset_k)); -#define TRANSPOSE_B2_K2(n0, n1, offset_k0, offset_k1) \ - float64x2_t b##n0##_k##offset_k0 = \ - vzip1q_f64(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \ - float64x2_t b##n0##_k##offset_k1 = \ - vzip2q_f64(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); - -#define SCALE_B2_K2(n0, offset_k0, offset_k1) \ - svfloat64_t b##s##n0##_k##offset_k0 = svdup_neonq_f64(b##n0##_k##offset_k0); \ - svfloat64_t b##s##n0##_k##offset_k1 = svdup_neonq_f64(b##n0##_k##offset_k1); #define GATHER_LOAD_B2(n, offset_k) \ float64x2_t b##n##_k##offset_k = vdupq_n_f64(B_ELEMENT_K(n, offset_k)); \ b##n##_k##offset_k = \ vsetq_lane_f64(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1); #define VECTOR_UNPACK_B2(n, offset_k) \ float64x2_t b##n##_k##offset_k = vld1q_f64(&PACK_ELEMENT_K(n, offset_k)); -#define VECTOR_PACK_B2(n, offset_k) \ - vst1q_f64(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k); #define PACK_B0(n, offset_k) \ PACK_ELEMENT_K(n, offset_k) = vget_lane_f64(b##n##_k##offset_k, 0); #define UPDATE_RESULT_VECTOR2(m, n, offset_k) \ @@ -128,9 +115,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
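The gemm_small_kernel_permit_sve.c hunk above drops the SGEMM threshold from 256^3 to the same conservative 64^3 bound already used for DGEMM. In sketch form (illustrative; the committed permit function also receives transa/transb, alpha and beta):

#include "common.h"

/* Return 1 to route the call to the small-matrix kernels, 0 to fall
 * back to the regular blocked GEMM path. */
static int small_gemm_permit(BLASLONG M, BLASLONG N, BLASLONG K)
{
    const long long MNK = (long long)M * (long long)N * (long long)K;
    return MNK <= 64LL * 64LL * 64LL;  /* same bound for sgemm and dgemm */
}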
svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k)); #define VECTOR_LOAD_A(pg, m, offset_k) \ svfloat64_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k)); -#define QUADWORD_LOAD_B(n, offset_k) \ - svfloat64_t b##s##n##_k##offset_k = \ - svld1rq(pg_true, &B_ELEMENT_K(n, offset_k)); #define GATHER_LOAD_A(pg, m, offset_k) \ svfloat64_t a##s##m##_k##offset_k = \ svld1_gather_index(pg, &A_ELEMENT_K(m, offset_k), lda_vec); @@ -226,7 +210,6 @@ CNAME(BLASLONG M, const BLASLONG v_m1 = M & -v_size; const BLASLONG n4 = N & -4; const BLASLONG n2 = N & -2; - const BLASLONG k2 = K & -2; const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_a = @@ -266,6 +249,7 @@ CNAME(BLASLONG M, if (LIKELY(packed_a != NULL)) { if (j == 0) { for (; k < K; k++) { + BROADCAST_LOAD_B(0, 0); GATHER_LOAD_A(pg_true, 0, 0); VECTOR_PACK_A(0, 0); @@ -285,6 +269,7 @@ CNAME(BLASLONG M, } } else { for (; k < K; k++) { + BROADCAST_LOAD_B(0, 0); UNPACK_VECTOR_A(0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); @@ -345,6 +330,7 @@ CNAME(BLASLONG M, if (LIKELY(packed_a != NULL)) { for (; k < K; k++) { + BROADCAST_LOAD_B(0, 0); UNPACK_VECTOR_A(0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); @@ -356,6 +342,7 @@ CNAME(BLASLONG M, } } else { for (; k < K; k++) { + BROADCAST_LOAD_B(0, 0); GATHER_LOAD_A(pg_true, 0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); @@ -580,4 +567,4 @@ CNAME(BLASLONG M, free(packed_a); return 0; -} +} \ No newline at end of file diff --git a/kernel/arm64/sgemm_small_kernel_nn_sve.c b/kernel/arm64/sgemm_small_kernel_nn_sve.c index 2e65e61ff..0af073a14 100644 --- a/kernel/arm64/sgemm_small_kernel_nn_sve.c +++ b/kernel/arm64/sgemm_small_kernel_nn_sve.c @@ -237,6 +237,7 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntw(); + const uint64_t v_size2 = v_size * 2; const svbool_t pg_true = svptrue_b32(); const svbool_t pg_quad = svwhilelt_b32(0, 4); const svbool_t pg_first = svwhilelt_b32(0, 1); @@ -245,10 +246,11 @@ CNAME(BLASLONG M, const svfloat32_t beta_vec = svdup_f32(beta); #endif const BLASLONG n4 = N & -4; + const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; const BLASLONG k4 = K & -4; - const int pack_b = M >= v_size && N >= 8 && K >= 8 ? 1 : 0; + const int pack_b = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_b = (pack_b) ? 
packed_b = (FLOAT*)malloc(K * 4 * sizeof(FLOAT)) : NULL; @@ -269,16 +271,21 @@ CNAME(BLASLONG M, CREATE_B_POINTER(3, 3); BLASLONG i = 0; - for (; i < v_m1; i += v_size) { + for (; i < v_m2; i += v_size2) { CREATE_A_POINTER(0, 0); - UPDATE_A_POINTER(v_size); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); if (LIKELY(packed_b != NULL)) { if (i == 0) { @@ -314,6 +321,26 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); } for (; k < K; k++) { @@ -324,12 +351,17 @@ CNAME(BLASLONG M, BROADCAST_LOAD_B(1, 0); PACK_B(1, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); BROADCAST_LOAD_B(2, 0); PACK_B(2, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); BROADCAST_LOAD_B(3, 0); PACK_B(3, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); } } else { for (; k < K; k++) { @@ -340,11 +372,118 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); } } } else { for (; k < k4; k += 4) { + VECTOR_LOAD_B_K4(0, 0); + VECTOR_LOAD_B_K4(1, 0); + VECTOR_LOAD_B_K4(2, 0); + VECTOR_LOAD_B_K4(3, 0); + TRANSPOSE_B4_K4(0, 1, 2, 3, 0, 1, 2, 3); + SCALE_B4_K4(0, 0, 1, 2, 3); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 0, 3); + 
UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 3); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 1); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 1); + VECTOR_LOAD_A(pg_true, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 2); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 2); + VECTOR_LOAD_A(pg_true, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 3); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 3); + } + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + INCR_C_POINTER(0, v_size2); + INCR_C_POINTER(1, v_size2); + INCR_C_POINTER(2, v_size2); + INCR_C_POINTER(3, v_size2); + } + for (; i < v_m1; i += v_size) { + + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + if (LIKELY(packed_b != NULL)) { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + } + } else { + for (; k < k4; k += 4) { + VECTOR_LOAD_B_K4(0, 0); VECTOR_LOAD_B_K4(1, 0); VECTOR_LOAD_B_K4(2, 0); @@ -478,6 +617,28 @@ CNAME(BLASLONG M, CREATE_B_POINTER(0, 0); BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + INCR_C_POINTER(0, v_size2); + } for (; i < v_m1; i += v_size) { CREATE_A_POINTER(0, 0); diff --git a/kernel/arm64/sgemm_small_kernel_nt_sve.c b/kernel/arm64/sgemm_small_kernel_nt_sve.c index 9f99c2422..ed7ee6bd6 100644 --- a/kernel/arm64/sgemm_small_kernel_nt_sve.c +++ b/kernel/arm64/sgemm_small_kernel_nt_sve.c @@ -209,6 +209,7 @@ 
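
The NT hunks that follow lean on the same two ideas as the NN changes above: the M loop now retires two SVE vectors per iteration (v_size2), and four consecutive B values are fetched with one replicated 128-bit load and consumed by lane-indexed multiply-accumulates, which is plausibly what QUADWORD_LOAD_B and UPDATE_RESULT_VECTOR_QUADWORD lower to (svld1rq and svmla_lane). A self-contained sketch of that inner loop for one M vector and the alpha = 1, beta = 0 case; the names are illustrative, and the real kernel keeps a second a_vec/accumulator set fed by the same b_quad, which is why the unrolled hunks roughly double in size without adding B traffic.

#include <arm_sve.h>

/* C = A * B for one vector of M rows and four columns of a transposed B. */
void nt_inner_block(const float *A, const float *B, float *C,
                    long K, long lda, long ldb, long ldc)
{
  const svbool_t pg = svptrue_b32();
  svfloat32_t c0 = svdup_n_f32(0.0f), c1 = svdup_n_f32(0.0f);
  svfloat32_t c2 = svdup_n_f32(0.0f), c3 = svdup_n_f32(0.0f);
  for (long k = 0; k < K; k++) {
    /* B is transposed here, so the four columns' values for this k are
       contiguous: load them once, replicated across the vector. */
    svfloat32_t b_quad = svld1rq_f32(pg, &B[k * ldb]); /* QUADWORD_LOAD_B */
    svfloat32_t a_vec  = svld1_f32(pg, &A[k * lda]);   /* VECTOR_LOAD_A   */
    c0 = svmla_lane_f32(c0, a_vec, b_quad, 0); /* UPDATE_RESULT_VECTOR_QUADWORD */
    c1 = svmla_lane_f32(c1, a_vec, b_quad, 1);
    c2 = svmla_lane_f32(c2, a_vec, b_quad, 2);
    c3 = svmla_lane_f32(c3, a_vec, b_quad, 3);
  }
  svst1_f32(pg, &C[0 * ldc], c0);
  svst1_f32(pg, &C[1 * ldc], c1);
  svst1_f32(pg, &C[2 * ldc], c2);
  svst1_f32(pg, &C[3 * ldc], c3);
}
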
CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntw(); + const uint64_t v_size2 = v_size * 2; const svbool_t pg_true = svptrue_b32(); const svbool_t pg_quad = svwhilelt_b32(0, 4); const svbool_t pg_first = svwhilelt_b32(0, 1); @@ -217,9 +218,10 @@ CNAME(BLASLONG M, const svfloat32_t beta_vec = svdup_f32(beta); #endif const BLASLONG n4 = N & -4; + const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; - const int pack_b = M >= v_size && N >= 8 && K >= 8 ? 1 : 0; + const int pack_b = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_b = (pack_b) ? packed_b = (FLOAT*)malloc(K * 4 * sizeof(FLOAT)) : NULL; @@ -240,16 +242,21 @@ CNAME(BLASLONG M, CREATE_B_POINTER(3, 3); BLASLONG i = 0; - for (; i < v_m1; i += v_size) { + for (; i < v_m2; i += v_size2) { CREATE_A_POINTER(0, 0); - UPDATE_A_POINTER(v_size); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); if (LIKELY(packed_b != NULL)) { if (i == 0) { @@ -262,6 +269,11 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); } } else { for (; k < K; k++) { @@ -272,11 +284,66 @@ CNAME(BLASLONG M, UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); } } } else { for (; k < K; k++) { + QUADWORD_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + INCR_C_POINTER(0, v_size2); + INCR_C_POINTER(1, v_size2); + INCR_C_POINTER(2, v_size2); + INCR_C_POINTER(3, v_size2); + } + for (; i < v_m1; i += v_size) { + + CREATE_A_POINTER(0, 0); + UPDATE_A_POINTER(v_size); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + + if (LIKELY(packed_b != NULL)) { + for (; k < K; k++) { + + UNPACK_QUADWORD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + } + } else { + for (; k < K; k++) { + QUADWORD_LOAD_B(0, 0); VECTOR_LOAD_A(pg_true, 
0, 0); UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); @@ -346,6 +413,28 @@ CNAME(BLASLONG M, CREATE_B_POINTER(0, 0); BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + UPDATE_A_POINTER(v_size2); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + VECTOR_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + VECTOR_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + INCR_C_POINTER(0, v_size2); + } for (; i < v_m1; i += v_size) { CREATE_A_POINTER(0, 0); diff --git a/kernel/arm64/sgemm_small_kernel_tn_sve.c b/kernel/arm64/sgemm_small_kernel_tn_sve.c index 9cbb60d40..54608a47b 100644 --- a/kernel/arm64/sgemm_small_kernel_tn_sve.c +++ b/kernel/arm64/sgemm_small_kernel_tn_sve.c @@ -69,7 +69,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #undef C_ELEMENT // #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] -#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size + m] +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size2 + m] #define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) // ASIMD @@ -206,6 +206,7 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntw(); + const uint64_t v_size2 = v_size * 2; const svbool_t pg_true = svptrue_b32(); const svbool_t pg_quad = svwhilelt_b32(0, 4); const svbool_t pg_first = svwhilelt_b32(0, 1); @@ -215,18 +216,153 @@ CNAME(BLASLONG M, #endif const svuint32_t lda_vec = svindex_u32(0LL, lda); + const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; const BLASLONG n4 = N & -4; - const int pack_a = M >= v_size && N >= 8 && K >= 8 ? 1 : 0; + const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_a = - (pack_a) ? packed_a = (FLOAT*)malloc(K * v_size * sizeof(FLOAT)) : NULL; + (pack_a) ? 
packed_a = (FLOAT*)malloc(K * v_size2 * sizeof(FLOAT)) : NULL; FLOAT* a_offset = A; FLOAT* b_offset = B; FLOAT* c_offset = C; BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, v_size); + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + if (LIKELY(packed_a != NULL)) { + if (j == 0) { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + VECTOR_PACK_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + VECTOR_PACK_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } else { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } + } else { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + INCR_C_POINTER(0, 4); + INCR_C_POINTER(1, 4); + } + for (; j < N; j++) { + + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } else { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + INCR_C_POINTER(0, 1); + INCR_C_POINTER(1, 1); + } + + UPDATE_A_POINTER(v_size2); + RESET_B_POINTER(); + UPDATE_C_POINTER(v_size2); + } for (; 
i < v_m1; i += v_size) { CREATE_C_POINTER(0, 0); @@ -247,48 +383,17 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - if (LIKELY(packed_a != NULL)) { - if (j == 0) { - for (; k < K; k++) { + for (; k < K; k++) { - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - VECTOR_PACK_A(0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - } - } else { - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - } - } - } else { - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - BROADCAST_LOAD_B(1, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); - BROADCAST_LOAD_B(2, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); - BROADCAST_LOAD_B(3, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); - } + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); @@ -304,20 +409,11 @@ CNAME(BLASLONG M, BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); - if (LIKELY(packed_a != NULL)) { - for (; k < K; k++) { + for (; k < K; k++) { - BROADCAST_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - } - } else { - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - } + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); } VECTOR_STORE(pg_true, 0, 0); INCR_C_POINTER(0, 1); diff --git a/kernel/arm64/sgemm_small_kernel_tt_sve.c b/kernel/arm64/sgemm_small_kernel_tt_sve.c index dd9840c37..50dbd7399 100644 --- a/kernel/arm64/sgemm_small_kernel_tt_sve.c +++ b/kernel/arm64/sgemm_small_kernel_tt_sve.c @@ -69,7 +69,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #undef C_ELEMENT // #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc] -#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size + m] +#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size2 + m] #define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0) // ASIMD @@ -207,6 +207,7 @@ CNAME(BLASLONG M, #endif { const uint64_t v_size = svcntw(); + const uint64_t v_size2 = v_size * 2; const svbool_t pg_true = svptrue_b32(); const svbool_t pg_quad = svwhilelt_b32(0, 4); const svbool_t pg_first = svwhilelt_b32(0, 1); @@ -216,18 +217,144 @@ CNAME(BLASLONG M, #endif const svuint32_t lda_vec = svindex_u32(0LL, lda); + const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; const BLASLONG n4 = N & -4; - const int pack_a = M >= v_size && N >= 8 && K >= 8 ? 1 : 0; + const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; FLOAT* packed_a = - (pack_a) ? packed_a = (FLOAT*)malloc(K * v_size * sizeof(FLOAT)) : NULL; + (pack_a) ? 
packed_a = (FLOAT*)malloc(K * v_size2 * sizeof(FLOAT)) : NULL; FLOAT* a_offset = A; FLOAT* b_offset = B; FLOAT* c_offset = C; BLASLONG i = 0; + for (; i < v_m2; i += v_size2) { + + CREATE_C_POINTER(0, 0); + CREATE_C_POINTER(1, v_size); + CREATE_A_POINTER(0, 0); + CREATE_A_POINTER(1, v_size); + + BLASLONG j = 0; + for (; j < n4; j += 4) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + if (LIKELY(packed_a != NULL)) { + if (j == 0) { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + VECTOR_PACK_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + GATHER_LOAD_A(pg_true, 1, 0); + VECTOR_PACK_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + } + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + INCR_C_POINTER(0, 4); + INCR_C_POINTER(1, 4); + } + for (; j < N; j++) { + + CREATE_B_POINTER(0, 0); + UPDATE_B_POINTER(1); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(1, 0); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } else { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 1, 0); + INCR_C_POINTER(0, 1); + INCR_C_POINTER(1, 1); + } + + UPDATE_A_POINTER(v_size2); + RESET_B_POINTER(); + UPDATE_C_POINTER(v_size2); + } for (; i < v_m1; i += v_size) { CREATE_C_POINTER(0, 0); @@ -248,39 +375,14 @@ CNAME(BLASLONG M, 
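
The packed-A logic being widened in these TT hunks follows a pack-on-first-use pattern: the first pass over j (the j == 0 branch) pays for the strided gathers of the transposed A and stores each vector contiguously, and every later j-block replays the data with cheap unit-stride loads. A sketch of the idea for a single vector of rows, using the same overloaded gather the kernels use; the helper itself is hypothetical, and after this change the kernel actually interleaves two vectors per k, hence the v_size2 stride in PACK_ELEMENT_K.

#include <arm_sve.h>

void pack_or_unpack_a(const float *A, float *packed_a, long K,
                      long lda, long i, int first_j_block)
{
  const uint64_t v_size = svcntw();
  const svbool_t pg_true = svptrue_b32();
  const svuint32_t lda_vec = svindex_u32(0, (uint32_t)lda); /* 0, lda, 2*lda, ... */
  for (long k = 0; k < K; k++) {
    svfloat32_t a_k;
    if (first_j_block) {
      /* strided gather of one k-slice of the transposed A */
      a_k = svld1_gather_index(pg_true, &A[i * lda + k], lda_vec); /* GATHER_LOAD_A */
      svst1_f32(pg_true, &packed_a[k * v_size], a_k);              /* VECTOR_PACK_A */
    } else {
      a_k = svld1_f32(pg_true, &packed_a[k * v_size]);             /* UNPACK_VECTOR_A */
    }
    /* ... multiply-accumulate a_k against broadcast B, as in the hunks above ... */
    (void)a_k;
  }
}
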
DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); - if (LIKELY(packed_a != NULL)) { - if (j == 0) { - for (; k < K; k++) { + for (; k < K; k++) { - QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - VECTOR_PACK_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - } - } else { - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - } - } - } else { - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - } + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); } VECTOR_STORE(pg_true, 0, 0); VECTOR_STORE(pg_true, 0, 1); @@ -296,20 +398,11 @@ CNAME(BLASLONG M, BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); - if (LIKELY(packed_a != NULL)) { - for (; k < K; k++) { + for (; k < K; k++) { - BROADCAST_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - } - } else { - for (; k < K; k++) { - - BROADCAST_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); - } + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); } VECTOR_STORE(pg_true, 0, 0); INCR_C_POINTER(0, 1); From a9edddb6953cf70020f81e0d97f37b892325b459 Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Thu, 18 Jul 2024 19:03:34 +0000 Subject: [PATCH 20/52] Unroll TN further --- kernel/arm64/sgemm_small_kernel_tn_sve.c | 231 ++++++++++++++++++++++- 1 file changed, 229 insertions(+), 2 deletions(-) diff --git a/kernel/arm64/sgemm_small_kernel_tn_sve.c b/kernel/arm64/sgemm_small_kernel_tn_sve.c index 54608a47b..03406daa6 100644 --- a/kernel/arm64/sgemm_small_kernel_tn_sve.c +++ b/kernel/arm64/sgemm_small_kernel_tn_sve.c @@ -218,6 +218,7 @@ CNAME(BLASLONG M, const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; + const BLASLONG n8 = N & -8; const BLASLONG n4 = N & -4; const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 
1 : 0; @@ -237,23 +238,35 @@ CNAME(BLASLONG M, CREATE_A_POINTER(1, v_size); BLASLONG j = 0; - for (; j < n4; j += 4) { + for (; j < n8; j += 8) { CREATE_B_POINTER(0, 0); CREATE_B_POINTER(1, 1); CREATE_B_POINTER(2, 2); CREATE_B_POINTER(3, 3); - UPDATE_B_POINTER(4); + CREATE_B_POINTER(4, 4); + CREATE_B_POINTER(5, 5); + CREATE_B_POINTER(6, 6); + CREATE_B_POINTER(7, 7); + UPDATE_B_POINTER(8); BLASLONG k = 0; DECLARE_RESULT_VECTOR(0, 0); DECLARE_RESULT_VECTOR(0, 1); DECLARE_RESULT_VECTOR(0, 2); DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); DECLARE_RESULT_VECTOR(1, 0); DECLARE_RESULT_VECTOR(1, 1); DECLARE_RESULT_VECTOR(1, 2); DECLARE_RESULT_VECTOR(1, 3); + DECLARE_RESULT_VECTOR(1, 4); + DECLARE_RESULT_VECTOR(1, 5); + DECLARE_RESULT_VECTOR(1, 6); + DECLARE_RESULT_VECTOR(1, 7); if (LIKELY(packed_a != NULL)) { if (j == 0) { @@ -275,6 +288,18 @@ CNAME(BLASLONG M, BROADCAST_LOAD_B(3, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); } } else { for (; k < K; k++) { @@ -293,11 +318,109 @@ CNAME(BLASLONG M, BROADCAST_LOAD_B(3, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); } } } else { for (; k < K; k++) { + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 7, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + VECTOR_STORE(pg_true, 1, 4); + VECTOR_STORE(pg_true, 1, 5); 
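
The new n8 block (n8 = N & -8, N rounded down to a multiple of 8) scales the register blocking up to two M vectors by eight B columns. Sixteen live accumulators plus the two A vectors and eight broadcast B values still fit in the 32 SVE vector registers, which is what makes this unroll attractive. A compact sketch of the blocking shape for alpha = 1, beta = 0, with contiguous A loads for brevity where the TN kernel really gathers, and with names that are illustrative rather than the kernel's macros:

#include <arm_sve.h>

void tn_block_2x8(const float *A, const float *B, float *C,
                  long K, long lda, long ldb, long ldc)
{
  const long v = (long)svcntw();
  const svbool_t pg = svptrue_b32();
  svfloat32_t c00, c01, c02, c03, c04, c05, c06, c07;
  svfloat32_t c10, c11, c12, c13, c14, c15, c16, c17;
  c00 = c01 = c02 = c03 = c04 = c05 = c06 = c07 = svdup_n_f32(0.0f);
  c10 = c11 = c12 = c13 = c14 = c15 = c16 = c17 = svdup_n_f32(0.0f);
  for (long k = 0; k < K; k++) {
    svfloat32_t a0 = svld1_f32(pg, &A[k * lda]);     /* first vector of rows  */
    svfloat32_t a1 = svld1_f32(pg, &A[k * lda + v]); /* second vector of rows */
    svfloat32_t b0 = svdup_n_f32(B[k + 0 * ldb]);    /* BROADCAST_LOAD_B(n)   */
    svfloat32_t b1 = svdup_n_f32(B[k + 1 * ldb]);
    svfloat32_t b2 = svdup_n_f32(B[k + 2 * ldb]);
    svfloat32_t b3 = svdup_n_f32(B[k + 3 * ldb]);
    svfloat32_t b4 = svdup_n_f32(B[k + 4 * ldb]);
    svfloat32_t b5 = svdup_n_f32(B[k + 5 * ldb]);
    svfloat32_t b6 = svdup_n_f32(B[k + 6 * ldb]);
    svfloat32_t b7 = svdup_n_f32(B[k + 7 * ldb]);
    c00 = svmla_f32_x(pg, c00, a0, b0);  c10 = svmla_f32_x(pg, c10, a1, b0);
    c01 = svmla_f32_x(pg, c01, a0, b1);  c11 = svmla_f32_x(pg, c11, a1, b1);
    c02 = svmla_f32_x(pg, c02, a0, b2);  c12 = svmla_f32_x(pg, c12, a1, b2);
    c03 = svmla_f32_x(pg, c03, a0, b3);  c13 = svmla_f32_x(pg, c13, a1, b3);
    c04 = svmla_f32_x(pg, c04, a0, b4);  c14 = svmla_f32_x(pg, c14, a1, b4);
    c05 = svmla_f32_x(pg, c05, a0, b5);  c15 = svmla_f32_x(pg, c15, a1, b5);
    c06 = svmla_f32_x(pg, c06, a0, b6);  c16 = svmla_f32_x(pg, c16, a1, b6);
    c07 = svmla_f32_x(pg, c07, a0, b7);  c17 = svmla_f32_x(pg, c17, a1, b7);
  }
  svst1_f32(pg, &C[0 * ldc], c00);  svst1_f32(pg, &C[0 * ldc + v], c10);
  svst1_f32(pg, &C[1 * ldc], c01);  svst1_f32(pg, &C[1 * ldc + v], c11);
  svst1_f32(pg, &C[2 * ldc], c02);  svst1_f32(pg, &C[2 * ldc + v], c12);
  svst1_f32(pg, &C[3 * ldc], c03);  svst1_f32(pg, &C[3 * ldc + v], c13);
  svst1_f32(pg, &C[4 * ldc], c04);  svst1_f32(pg, &C[4 * ldc + v], c14);
  svst1_f32(pg, &C[5 * ldc], c05);  svst1_f32(pg, &C[5 * ldc + v], c15);
  svst1_f32(pg, &C[6 * ldc], c06);  svst1_f32(pg, &C[6 * ldc + v], c16);
  svst1_f32(pg, &C[7 * ldc], c07);  svst1_f32(pg, &C[7 * ldc + v], c17);
}
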
+ VECTOR_STORE(pg_true, 1, 6); + VECTOR_STORE(pg_true, 1, 7); + INCR_C_POINTER(0, 8); + INCR_C_POINTER(1, 8); + } + for (; j < n4; j += 4) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + UPDATE_B_POINTER(4); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + + if (LIKELY(packed_a != NULL)) { + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0); + } + } else { + for (; k < K; k++) { + BROADCAST_LOAD_B(0, 0); GATHER_LOAD_A(pg_true, 0, 0); UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); @@ -369,6 +492,58 @@ CNAME(BLASLONG M, CREATE_A_POINTER(0, 0); BLASLONG j = 0; + for (; j < n8; j += 8) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + CREATE_B_POINTER(4, 4); + CREATE_B_POINTER(5, 5); + CREATE_B_POINTER(6, 6); + CREATE_B_POINTER(7, 7); + UPDATE_B_POINTER(8); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_true, 0, 7, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + INCR_C_POINTER(0, 8); + } for (; j < n4; j += 4) { CREATE_B_POINTER(0, 0); @@ -429,6 +604,58 @@ CNAME(BLASLONG M, CREATE_A_POINTER(0, 0); BLASLONG j = 0; + for (; j < n8; j += 8) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + CREATE_B_POINTER(4, 4); + CREATE_B_POINTER(5, 5); + CREATE_B_POINTER(6, 6); + CREATE_B_POINTER(7, 7); + UPDATE_B_POINTER(8); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + for (; k < K; k++) { + + BROADCAST_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0); + BROADCAST_LOAD_B(1, 0); + UPDATE_RESULT_VECTOR(pg_tail, 
0, 1, 0); + BROADCAST_LOAD_B(2, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0); + BROADCAST_LOAD_B(3, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0); + BROADCAST_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 4, 0); + BROADCAST_LOAD_B(5, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 5, 0); + BROADCAST_LOAD_B(6, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 6, 0); + BROADCAST_LOAD_B(7, 0); + UPDATE_RESULT_VECTOR(pg_tail, 0, 7, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + VECTOR_STORE(pg_tail, 0, 4); + VECTOR_STORE(pg_tail, 0, 5); + VECTOR_STORE(pg_tail, 0, 6); + VECTOR_STORE(pg_tail, 0, 7); + INCR_C_POINTER(0, 8); + } for (; j < n4; j += 4) { CREATE_B_POINTER(0, 0); From db98f8753f71e02812b357dc32ae53a6694fa113 Mon Sep 17 00:00:00 2001 From: Hong Bo Peng Date: Fri, 19 Jul 2024 02:08:19 -0400 Subject: [PATCH 21/52] Try to fix LAPACK testing failures on P7. 1. Remove the FADD insn from the GEMV Transpose code. 2. Remove the FADD insn from GEMM and ZGEMM code. 3. Reorder the compution of the Imaginary part in ZGEMM code. --- kernel/power/gemm_kernel_power6.S | 138 ++--- kernel/power/gemv_t.S | 821 +++++++++++++---------------- kernel/power/zgemm_kernel_power6.S | 551 +++++++++---------- 3 files changed, 678 insertions(+), 832 deletions(-) diff --git a/kernel/power/gemm_kernel_power6.S b/kernel/power/gemm_kernel_power6.S index b274f7655..47771faf8 100644 --- a/kernel/power/gemm_kernel_power6.S +++ b/kernel/power/gemm_kernel_power6.S @@ -864,15 +864,15 @@ LL(22): LFD f22, 10 * SIZE(BO) LFD f23, 11 * SIZE(BO) - FMADD f2, f18, f24, f2 - FMADD f3, f19, f24, f3 - FMADD f6, f18, f25, f6 - FMADD f7, f19, f25, f7 + FMADD f0, f18, f24, f0 + FMADD f1, f19, f24, f1 + FMADD f4, f18, f25, f4 + FMADD f5, f19, f25, f5 - FMADD f10, f18, f26, f10 - FMADD f11, f19, f26, f11 - FMADD f14, f18, f27, f14 - FMADD f15, f19, f27, f15 + FMADD f8, f18, f26, f8 + FMADD f9, f19, f26, f9 + FMADD f12, f18, f27, f12 + FMADD f13, f19, f27, f13 LFD f16, 4 * SIZE(AO) LFD f17, 5 * SIZE(AO) @@ -899,15 +899,15 @@ LL(22): LFD f22, 18 * SIZE(BO) LFD f23, 19 * SIZE(BO) - FMADD f2, f18, f24, f2 - FMADD f3, f19, f24, f3 - FMADD f6, f18, f25, f6 - FMADD f7, f19, f25, f7 + FMADD f0, f18, f24, f0 + FMADD f1, f19, f24, f1 + FMADD f4, f18, f25, f4 + FMADD f5, f19, f25, f5 - FMADD f10, f18, f26, f10 - FMADD f11, f19, f26, f11 - FMADD f14, f18, f27, f14 - FMADD f15, f19, f27, f15 + FMADD f8, f18, f26, f8 + FMADD f9, f19, f26, f9 + FMADD f12, f18, f27, f12 + FMADD f13, f19, f27, f13 LFD f16, 8 * SIZE(AO) LFD f17, 9 * SIZE(AO) @@ -923,14 +923,6 @@ LL(22): addi BO, BO, 16 * SIZE bdnz LL(22) - fadd f0, f2, f0 - fadd f1, f3, f1 - fadd f4, f6, f4 - fadd f5, f7, f5 - fadd f8, f10, f8 - fadd f9, f11, f9 - fadd f12, f14, f12 - fadd f13, f15, f13 .align 4 LL(25): @@ -1161,10 +1153,10 @@ LL(32): LFD f22, 10 * SIZE(BO) LFD f23, 11 * SIZE(BO) - FMADD f1, f17, f24, f1 - FMADD f5, f17, f25, f5 - FMADD f9, f17, f26, f9 - FMADD f13, f17, f27, f13 + FMADD f0, f17, f24, f0 + FMADD f4, f17, f25, f4 + FMADD f8, f17, f26, f8 + FMADD f12, f17, f27, f12 LFD f24, 12 * SIZE(BO) LFD f25, 13 * SIZE(BO) @@ -1181,10 +1173,10 @@ LL(32): LFD f22, 18 * SIZE(BO) LFD f23, 19 * SIZE(BO) - FMADD f1, f19, f24, f1 - FMADD f5, f19, f25, f5 - FMADD f9, f19, f26, f9 - FMADD f13, f19, f27, f13 + FMADD f0, f19, f24, f0 + FMADD f4, f19, f25, f4 + FMADD f8, f19, f26, f8 + FMADD f12, f19, f27, f12 LFD f16, 4 * SIZE(AO) LFD f17, 5 * SIZE(AO) @@ -1200,10 +1192,6 @@ LL(32): addi BO, BO, 16 * SIZE bdnz LL(32) - fadd f0, 
f1, f0 - fadd f4, f5, f4 - fadd f8, f9, f8 - fadd f12, f13, f12 .align 4 LL(35): @@ -1691,10 +1679,10 @@ LL(52): FMADD f2, f16, f21, f2 FMADD f3, f17, f21, f3 - FMADD f4, f18, f22, f4 - FMADD f5, f19, f22, f5 - FMADD f6, f18, f23, f6 - FMADD f7, f19, f23, f7 + FMADD f0, f18, f22, f0 + FMADD f1, f19, f22, f1 + FMADD f2, f18, f23, f2 + FMADD f3, f19, f23, f3 LFD f16, 4 * SIZE(AO) LFD f17, 5 * SIZE(AO) @@ -1711,10 +1699,10 @@ LL(52): FMADD f2, f16, f25, f2 FMADD f3, f17, f25, f3 - FMADD f4, f18, f26, f4 - FMADD f5, f19, f26, f5 - FMADD f6, f18, f27, f6 - FMADD f7, f19, f27, f7 + FMADD f0, f18, f26, f0 + FMADD f1, f19, f26, f1 + FMADD f2, f18, f27, f2 + FMADD f3, f19, f27, f3 LFD f16, 8 * SIZE(AO) LFD f17, 9 * SIZE(AO) @@ -1775,21 +1763,11 @@ LL(58): LFD f18, 0 * SIZE(CO2) LFD f19, 1 * SIZE(CO2) - FADD f0, f4, f0 - FADD f1, f5, f1 - FADD f2, f6, f2 - FADD f3, f7, f3 - FMADD f0, f0, f30, f16 FMADD f1, f1, f30, f17 FMADD f2, f2, f30, f18 FMADD f3, f3, f30, f19 #else - FADD f0, f4, f0 - FADD f1, f5, f1 - FADD f2, f6, f2 - FADD f3, f7, f3 - FMUL f0, f0, f30 FMUL f1, f1, f30 FMUL f2, f2, f30 @@ -1916,8 +1894,8 @@ LL(60): LL(62): FMADD f0, f16, f20, f0 FMADD f1, f16, f21, f1 - FMADD f2, f17, f22, f2 - FMADD f3, f17, f23, f3 + FMADD f0, f17, f22, f0 + FMADD f1, f17, f23, f1 LFD f20, 8 * SIZE(BO) LFD f21, 9 * SIZE(BO) @@ -1926,8 +1904,8 @@ LL(62): FMADD f0, f18, f24, f0 FMADD f1, f18, f25, f1 - FMADD f2, f19, f26, f2 - FMADD f3, f19, f27, f3 + FMADD f0, f19, f26, f0 + FMADD f1, f19, f27, f1 LFD f16, 4 * SIZE(AO) LFD f17, 5 * SIZE(AO) @@ -1986,15 +1964,9 @@ LL(68): LFD f16, 0 * SIZE(CO1) LFD f18, 0 * SIZE(CO2) - FADD f0, f2, f0 - FADD f1, f3, f1 - FMADD f0, f0, f30, f16 FMADD f1, f1, f30, f18 #else - FADD f0, f2, f0 - FADD f1, f3, f1 - FMUL f0, f0, f30 FMUL f1, f1, f30 #endif @@ -2007,7 +1979,6 @@ LL(68): fmr f4, f0 fmr f5, f0 - #ifdef TRMMKERNEL #if ( defined(LEFT) && defined(TRANSA)) || \ (!defined(LEFT) && !defined(TRANSA)) @@ -2332,8 +2303,8 @@ LL(80): LL(82): FMADD f0, f16, f20, f0 FMADD f1, f17, f20, f1 - FMADD f2, f18, f21, f2 - FMADD f3, f19, f21, f3 + FMADD f0, f18, f21, f0 + FMADD f1, f19, f21, f1 LFD f16, 4 * SIZE(AO) LFD f17, 5 * SIZE(AO) @@ -2342,8 +2313,8 @@ LL(82): FMADD f0, f16, f22, f0 FMADD f1, f17, f22, f1 - FMADD f2, f18, f23, f2 - FMADD f3, f19, f23, f3 + FMADD f0, f18, f23, f0 + FMADD f1, f19, f23, f1 LFD f16, 8 * SIZE(AO) LFD f17, 9 * SIZE(AO) @@ -2401,15 +2372,9 @@ LL(88): LFD f16, 0 * SIZE(CO1) LFD f17, 1 * SIZE(CO1) - FADD f0, f2, f0 - FADD f1, f3, f1 - FMADD f0, f0, f30, f16 FMADD f1, f1, f30, f17 #else - FADD f0, f2, f0 - FADD f1, f3, f1 - FMUL f0, f0, f30 FMUL f1, f1, f30 #endif @@ -2418,9 +2383,6 @@ LL(88): STFD f1, 1 * SIZE(CO1) lfs f0, FZERO - fmr f1, f0 - fmr f2, f0 - fmr f3, f0 addi CO1, CO1, 2 * SIZE @@ -2512,9 +2474,9 @@ LL(90): LL(92): FMADD f0, f16, f20, f0 - FMADD f1, f17, f21, f1 - FMADD f2, f18, f22, f2 - FMADD f3, f19, f23, f3 + FMADD f0, f17, f21, f0 + FMADD f0, f18, f22, f0 + FMADD f0, f19, f23, f0 LFD f16, 4 * SIZE(AO) LFD f17, 5 * SIZE(AO) @@ -2527,9 +2489,9 @@ LL(92): LFD f23, 7 * SIZE(BO) FMADD f0, f16, f20, f0 - FMADD f1, f17, f21, f1 - FMADD f2, f18, f22, f2 - FMADD f3, f19, f23, f3 + FMADD f0, f17, f21, f0 + FMADD f0, f18, f22, f0 + FMADD f0, f19, f23, f0 LFD f16, 8 * SIZE(AO) LFD f17, 9 * SIZE(AO) @@ -2583,16 +2545,8 @@ LL(98): #ifndef TRMMKERNEL LFD f16, 0 * SIZE(CO1) - FADD f0, f1, f0 - FADD f2, f3, f2 - FADD f0, f2, f0 - FMADD f0, f0, f30, f16 #else - FADD f0, f1, f0 - FADD f2, f3, f2 - FADD f0, f2, f0 - FMUL f0, f0, f30 #endif diff --git 
a/kernel/power/gemv_t.S b/kernel/power/gemv_t.S index accdad702..0cabe89e0 100644 --- a/kernel/power/gemv_t.S +++ b/kernel/power/gemv_t.S @@ -409,14 +409,6 @@ LL(11): fmr y06, y01 fmr y07, y01 fmr y08, y01 - fmr y09, y01 - fmr y10, y01 - fmr y11, y01 - fmr y12, y01 - fmr y13, y01 - fmr y14, y01 - fmr y15, y01 - fmr y16, y01 DCBT(Y1, PREC) @@ -465,24 +457,24 @@ LL(12): FMADD y08, a8, b1, y08 LFD a8, 2 * SIZE(AO8) - FMADD y09, a1, b2, y09 + FMADD y01, a1, b2, y01 LFD a1, 3 * SIZE(AO1) - FMADD y10, a2, b2, y10 + FMADD y02, a2, b2, y02 LFD a2, 3 * SIZE(AO2) - FMADD y11, a3, b2, y11 + FMADD y03, a3, b2, y03 LFD a3, 3 * SIZE(AO3) - FMADD y12, a4, b2, y12 + FMADD y04, a4, b2, y04 LFD a4, 3 * SIZE(AO4) - FMADD y13, a5, b2, y13 + FMADD y05, a5, b2, y05 LFD a5, 3 * SIZE(AO5) - FMADD y14, a6, b2, y14 + FMADD y06, a6, b2, y06 LFD a6, 3 * SIZE(AO6) - FMADD y15, a7, b2, y15 + FMADD y07, a7, b2, y07 LFD a7, 3 * SIZE(AO7) - FMADD y16, a8, b2, y16 + FMADD y08, a8, b2, y08 LFD a8, 3 * SIZE(AO8) FMADD y01, a1, b3, y01 @@ -505,24 +497,24 @@ LL(12): FMADD y08, a8, b3, y08 LFD a8, 4 * SIZE(AO8) - FMADD y09, a1, b4, y09 + FMADD y01, a1, b4, y01 LFD a1, 5 * SIZE(AO1) - FMADD y10, a2, b4, y10 + FMADD y02, a2, b4, y02 LFD a2, 5 * SIZE(AO2) - FMADD y11, a3, b4, y11 + FMADD y03, a3, b4, y03 LFD a3, 5 * SIZE(AO3) - FMADD y12, a4, b4, y12 + FMADD y04, a4, b4, y04 LFD a4, 5 * SIZE(AO4) - FMADD y13, a5, b4, y13 + FMADD y05, a5, b4, y05 LFD a5, 5 * SIZE(AO5) - FMADD y14, a6, b4, y14 + FMADD y06, a6, b4, y06 LFD a6, 5 * SIZE(AO6) - FMADD y15, a7, b4, y15 + FMADD y07, a7, b4, y07 LFD a7, 5 * SIZE(AO7) - FMADD y16, a8, b4, y16 + FMADD y08, a8, b4, y08 LFD a8, 5 * SIZE(AO8) LFD b1, 9 * SIZE(BO) @@ -550,24 +542,24 @@ LL(12): FMADD y08, a8, b5, y08 LFD a8, 6 * SIZE(AO8) - FMADD y09, a1, b6, y09 + FMADD y01, a1, b6, y01 LFD a1, 7 * SIZE(AO1) - FMADD y10, a2, b6, y10 + FMADD y02, a2, b6, y02 LFD a2, 7 * SIZE(AO2) - FMADD y11, a3, b6, y11 + FMADD y03, a3, b6, y03 LFD a3, 7 * SIZE(AO3) - FMADD y12, a4, b6, y12 + FMADD y04, a4, b6, y04 LFD a4, 7 * SIZE(AO4) - FMADD y13, a5, b6, y13 + FMADD y05, a5, b6, y05 LFD a5, 7 * SIZE(AO5) - FMADD y14, a6, b6, y14 + FMADD y06, a6, b6, y06 LFD a6, 7 * SIZE(AO6) - FMADD y15, a7, b6, y15 + FMADD y07, a7, b6, y07 LFD a7, 7 * SIZE(AO7) - FMADD y16, a8, b6, y16 + FMADD y08, a8, b6, y08 LFD a8, 7 * SIZE(AO8) FMADD y01, a1, b7, y01 @@ -590,24 +582,24 @@ LL(12): FMADD y08, a8, b7, y08 LFD a8, 8 * SIZE(AO8) - FMADD y09, a1, b8, y09 + FMADD y01, a1, b8, y01 LFD a1, 9 * SIZE(AO1) - FMADD y10, a2, b8, y10 + FMADD y02, a2, b8, y02 LFD a2, 9 * SIZE(AO2) - FMADD y11, a3, b8, y11 + FMADD y03, a3, b8, y03 LFD a3, 9 * SIZE(AO3) - FMADD y12, a4, b8, y12 + FMADD y04, a4, b8, y04 LFD a4, 9 * SIZE(AO4) - FMADD y13, a5, b8, y13 + FMADD y05, a5, b8, y05 LFD a5, 9 * SIZE(AO5) - FMADD y14, a6, b8, y14 + FMADD y06, a6, b8, y06 LFD a6, 9 * SIZE(AO6) - FMADD y15, a7, b8, y15 + FMADD y07, a7, b8, y07 LFD a7, 9 * SIZE(AO7) - FMADD y16, a8, b8, y16 + FMADD y08, a8, b8, y08 LFD a8, 9 * SIZE(AO8) LFD b5, 13 * SIZE(BO) @@ -640,24 +632,24 @@ LL(12): FMADD y08, a8, b1, y08 LFD a8, 10 * SIZE(AO8) - FMADD y09, a1, b2, y09 + FMADD y01, a1, b2, y01 LFD a1, 11 * SIZE(AO1) - FMADD y10, a2, b2, y10 + FMADD y02, a2, b2, y02 LFD a2, 11 * SIZE(AO2) - FMADD y11, a3, b2, y11 + FMADD y03, a3, b2, y03 LFD a3, 11 * SIZE(AO3) - FMADD y12, a4, b2, y12 + FMADD y04, a4, b2, y04 LFD a4, 11 * SIZE(AO4) - FMADD y13, a5, b2, y13 + FMADD y05, a5, b2, y05 LFD a5, 11 * SIZE(AO5) - FMADD y14, a6, b2, y14 + FMADD y06, a6, b2, y06 LFD a6, 11 * SIZE(AO6) - 
FMADD y15, a7, b2, y15 + FMADD y07, a7, b2, y07 LFD a7, 11 * SIZE(AO7) - FMADD y16, a8, b2, y16 + FMADD y08, a8, b2, y08 LFD a8, 11 * SIZE(AO8) FMADD y01, a1, b3, y01 @@ -680,24 +672,24 @@ LL(12): FMADD y08, a8, b3, y08 LFD a8, 12 * SIZE(AO8) - FMADD y09, a1, b4, y09 + FMADD y01, a1, b4, y01 LFD a1, 13 * SIZE(AO1) - FMADD y10, a2, b4, y10 + FMADD y02, a2, b4, y02 LFD a2, 13 * SIZE(AO2) - FMADD y11, a3, b4, y11 + FMADD y03, a3, b4, y03 LFD a3, 13 * SIZE(AO3) - FMADD y12, a4, b4, y12 + FMADD y04, a4, b4, y04 LFD a4, 13 * SIZE(AO4) - FMADD y13, a5, b4, y13 + FMADD y05, a5, b4, y05 LFD a5, 13 * SIZE(AO5) - FMADD y14, a6, b4, y14 + FMADD y06, a6, b4, y06 LFD a6, 13 * SIZE(AO6) - FMADD y15, a7, b4, y15 + FMADD y07, a7, b4, y07 LFD a7, 13 * SIZE(AO7) - FMADD y16, a8, b4, y16 + FMADD y08, a8, b4, y08 LFD a8, 13 * SIZE(AO8) LFD b1, 17 * SIZE(BO) @@ -725,24 +717,24 @@ LL(12): FMADD y08, a8, b5, y08 LFD a8, 14 * SIZE(AO8) - FMADD y09, a1, b6, y09 + FMADD y01, a1, b6, y01 LFD a1, 15 * SIZE(AO1) - FMADD y10, a2, b6, y10 + FMADD y02, a2, b6, y02 LFD a2, 15 * SIZE(AO2) - FMADD y11, a3, b6, y11 + FMADD y03, a3, b6, y03 LFD a3, 15 * SIZE(AO3) - FMADD y12, a4, b6, y12 + FMADD y04, a4, b6, y04 LFD a4, 15 * SIZE(AO4) - FMADD y13, a5, b6, y13 + FMADD y05, a5, b6, y05 LFD a5, 15 * SIZE(AO5) - FMADD y14, a6, b6, y14 + FMADD y06, a6, b6, y06 LFD a6, 15 * SIZE(AO6) - FMADD y15, a7, b6, y15 + FMADD y07, a7, b6, y07 LFD a7, 15 * SIZE(AO7) - FMADD y16, a8, b6, y16 + FMADD y08, a8, b6, y08 LFD a8, 15 * SIZE(AO8) FMADD y01, a1, b7, y01 @@ -765,14 +757,14 @@ LL(12): FMADD y08, a8, b7, y08 LFD a8, 16 * SIZE(AO8) - FMADD y09, a1, b8, y09 + FMADD y01, a1, b8, y01 LFD a1, 17 * SIZE(AO1) - FMADD y10, a2, b8, y10 + FMADD y02, a2, b8, y02 LFD a2, 17 * SIZE(AO2) - FMADD y11, a3, b8, y11 + FMADD y03, a3, b8, y03 LFD a3, 17 * SIZE(AO3) - FMADD y12, a4, b8, y12 + FMADD y04, a4, b8, y04 LFD a4, 17 * SIZE(AO4) addi AO1, AO1, 16 * SIZE @@ -780,14 +772,14 @@ LL(12): addi AO3, AO3, 16 * SIZE addi AO4, AO4, 16 * SIZE - FMADD y13, a5, b8, y13 + FMADD y05, a5, b8, y05 LFD a5, 17 * SIZE(AO5) - FMADD y14, a6, b8, y14 + FMADD y06, a6, b8, y06 LFD a6, 17 * SIZE(AO6) - FMADD y15, a7, b8, y15 + FMADD y07, a7, b8, y07 LFD a7, 17 * SIZE(AO7) - FMADD y16, a8, b8, y16 + FMADD y08, a8, b8, y08 LFD a8, 17 * SIZE(AO8) LFD b5, 21 * SIZE(BO) @@ -830,24 +822,24 @@ LL(13): FMADD y08, a8, b1, y08 LFD a8, 2 * SIZE(AO8) - FMADD y09, a1, b2, y09 + FMADD y01, a1, b2, y01 LFD a1, 3 * SIZE(AO1) - FMADD y10, a2, b2, y10 + FMADD y02, a2, b2, y02 LFD a2, 3 * SIZE(AO2) - FMADD y11, a3, b2, y11 + FMADD y03, a3, b2, y03 LFD a3, 3 * SIZE(AO3) - FMADD y12, a4, b2, y12 + FMADD y04, a4, b2, y04 LFD a4, 3 * SIZE(AO4) - FMADD y13, a5, b2, y13 + FMADD y05, a5, b2, y05 LFD a5, 3 * SIZE(AO5) - FMADD y14, a6, b2, y14 + FMADD y06, a6, b2, y06 LFD a6, 3 * SIZE(AO6) - FMADD y15, a7, b2, y15 + FMADD y07, a7, b2, y07 LFD a7, 3 * SIZE(AO7) - FMADD y16, a8, b2, y16 + FMADD y08, a8, b2, y08 LFD a8, 3 * SIZE(AO8) FMADD y01, a1, b3, y01 @@ -870,24 +862,24 @@ LL(13): FMADD y08, a8, b3, y08 LFD a8, 4 * SIZE(AO8) - FMADD y09, a1, b4, y09 + FMADD y01, a1, b4, y01 LFD a1, 5 * SIZE(AO1) - FMADD y10, a2, b4, y10 + FMADD y02, a2, b4, y02 LFD a2, 5 * SIZE(AO2) - FMADD y11, a3, b4, y11 + FMADD y03, a3, b4, y03 LFD a3, 5 * SIZE(AO3) - FMADD y12, a4, b4, y12 + FMADD y04, a4, b4, y04 LFD a4, 5 * SIZE(AO4) - FMADD y13, a5, b4, y13 + FMADD y05, a5, b4, y05 LFD a5, 5 * SIZE(AO5) - FMADD y14, a6, b4, y14 + FMADD y06, a6, b4, y06 LFD a6, 5 * SIZE(AO6) - FMADD y15, a7, b4, y15 + FMADD y07, a7, b4, y07 LFD 
a7, 5 * SIZE(AO7) - FMADD y16, a8, b4, y16 + FMADD y08, a8, b4, y08 LFD a8, 5 * SIZE(AO8) LFD b1, 9 * SIZE(BO) @@ -915,24 +907,24 @@ LL(13): FMADD y08, a8, b5, y08 LFD a8, 6 * SIZE(AO8) - FMADD y09, a1, b6, y09 + FMADD y01, a1, b6, y01 LFD a1, 7 * SIZE(AO1) - FMADD y10, a2, b6, y10 + FMADD y02, a2, b6, y02 LFD a2, 7 * SIZE(AO2) - FMADD y11, a3, b6, y11 + FMADD y03, a3, b6, y03 LFD a3, 7 * SIZE(AO3) - FMADD y12, a4, b6, y12 + FMADD y04, a4, b6, y04 LFD a4, 7 * SIZE(AO4) - FMADD y13, a5, b6, y13 + FMADD y05, a5, b6, y05 LFD a5, 7 * SIZE(AO5) - FMADD y14, a6, b6, y14 + FMADD y06, a6, b6, y06 LFD a6, 7 * SIZE(AO6) - FMADD y15, a7, b6, y15 + FMADD y07, a7, b6, y07 LFD a7, 7 * SIZE(AO7) - FMADD y16, a8, b6, y16 + FMADD y08, a8, b6, y08 LFD a8, 7 * SIZE(AO8) FMADD y01, a1, b7, y01 @@ -955,24 +947,24 @@ LL(13): FMADD y08, a8, b7, y08 LFD a8, 8 * SIZE(AO8) - FMADD y09, a1, b8, y09 + FMADD y01, a1, b8, y01 LFD a1, 9 * SIZE(AO1) - FMADD y10, a2, b8, y10 + FMADD y02, a2, b8, y02 LFD a2, 9 * SIZE(AO2) - FMADD y11, a3, b8, y11 + FMADD y03, a3, b8, y03 LFD a3, 9 * SIZE(AO3) - FMADD y12, a4, b8, y12 + FMADD y04, a4, b8, y04 LFD a4, 9 * SIZE(AO4) - FMADD y13, a5, b8, y13 + FMADD y05, a5, b8, y05 LFD a5, 9 * SIZE(AO5) - FMADD y14, a6, b8, y14 + FMADD y06, a6, b8, y06 LFD a6, 9 * SIZE(AO6) - FMADD y15, a7, b8, y15 + FMADD y07, a7, b8, y07 LFD a7, 9 * SIZE(AO7) - FMADD y16, a8, b8, y16 + FMADD y08, a8, b8, y08 LFD a8, 9 * SIZE(AO8) LFD b5, 13 * SIZE(BO) @@ -1000,24 +992,24 @@ LL(13): FMADD y08, a8, b1, y08 LFD a8, 10 * SIZE(AO8) - FMADD y09, a1, b2, y09 + FMADD y01, a1, b2, y01 LFD a1, 11 * SIZE(AO1) - FMADD y10, a2, b2, y10 + FMADD y02, a2, b2, y02 LFD a2, 11 * SIZE(AO2) - FMADD y11, a3, b2, y11 + FMADD y03, a3, b2, y03 LFD a3, 11 * SIZE(AO3) - FMADD y12, a4, b2, y12 + FMADD y04, a4, b2, y04 LFD a4, 11 * SIZE(AO4) - FMADD y13, a5, b2, y13 + FMADD y05, a5, b2, y05 LFD a5, 11 * SIZE(AO5) - FMADD y14, a6, b2, y14 + FMADD y06, a6, b2, y06 LFD a6, 11 * SIZE(AO6) - FMADD y15, a7, b2, y15 + FMADD y07, a7, b2, y07 LFD a7, 11 * SIZE(AO7) - FMADD y16, a8, b2, y16 + FMADD y08, a8, b2, y08 LFD a8, 11 * SIZE(AO8) FMADD y01, a1, b3, y01 @@ -1040,24 +1032,24 @@ LL(13): FMADD y08, a8, b3, y08 LFD a8, 12 * SIZE(AO8) - FMADD y09, a1, b4, y09 + FMADD y01, a1, b4, y01 LFD a1, 13 * SIZE(AO1) - FMADD y10, a2, b4, y10 + FMADD y02, a2, b4, y02 LFD a2, 13 * SIZE(AO2) - FMADD y11, a3, b4, y11 + FMADD y03, a3, b4, y03 LFD a3, 13 * SIZE(AO3) - FMADD y12, a4, b4, y12 + FMADD y04, a4, b4, y04 LFD a4, 13 * SIZE(AO4) - FMADD y13, a5, b4, y13 + FMADD y05, a5, b4, y05 LFD a5, 13 * SIZE(AO5) - FMADD y14, a6, b4, y14 + FMADD y06, a6, b4, y06 LFD a6, 13 * SIZE(AO6) - FMADD y15, a7, b4, y15 + FMADD y07, a7, b4, y07 LFD a7, 13 * SIZE(AO7) - FMADD y16, a8, b4, y16 + FMADD y08, a8, b4, y08 LFD a8, 13 * SIZE(AO8) FMADD y01, a1, b5, y01 @@ -1080,24 +1072,24 @@ LL(13): FMADD y08, a8, b5, y08 LFD a8, 14 * SIZE(AO8) - FMADD y09, a1, b6, y09 + FMADD y01, a1, b6, y01 LFD a1, 15 * SIZE(AO1) - FMADD y10, a2, b6, y10 + FMADD y02, a2, b6, y02 LFD a2, 15 * SIZE(AO2) - FMADD y11, a3, b6, y11 + FMADD y03, a3, b6, y03 LFD a3, 15 * SIZE(AO3) - FMADD y12, a4, b6, y12 + FMADD y04, a4, b6, y04 LFD a4, 15 * SIZE(AO4) - FMADD y13, a5, b6, y13 + FMADD y05, a5, b6, y05 LFD a5, 15 * SIZE(AO5) - FMADD y14, a6, b6, y14 + FMADD y06, a6, b6, y06 LFD a6, 15 * SIZE(AO6) - FMADD y15, a7, b6, y15 + FMADD y07, a7, b6, y07 LFD a7, 15 * SIZE(AO7) - FMADD y16, a8, b6, y16 + FMADD y08, a8, b6, y08 LFD a8, 15 * SIZE(AO8) FMADD y01, a1, b7, y01 @@ -1120,20 +1112,20 @@ LL(13): FMADD y08, 
a8, b7, y08 LFD a8, 16 * SIZE(AO8) - FMADD y09, a1, b8, y09 - FMADD y10, a2, b8, y10 - FMADD y11, a3, b8, y11 - FMADD y12, a4, b8, y12 + FMADD y01, a1, b8, y01 + FMADD y02, a2, b8, y02 + FMADD y03, a3, b8, y03 + FMADD y04, a4, b8, y04 addi AO1, AO1, 16 * SIZE addi AO2, AO2, 16 * SIZE addi AO3, AO3, 16 * SIZE addi AO4, AO4, 16 * SIZE - FMADD y13, a5, b8, y13 - FMADD y14, a6, b8, y14 - FMADD y15, a7, b8, y15 - FMADD y16, a8, b8, y16 + FMADD y05, a5, b8, y05 + FMADD y06, a6, b8, y06 + FMADD y07, a7, b8, y07 + FMADD y08, a8, b8, y08 addi AO5, AO5, 16 * SIZE addi AO6, AO6, 16 * SIZE @@ -1180,21 +1172,21 @@ LL(14): FMADD y08, a8, b1, y08 LFD a8, 2 * SIZE(AO8) - FMADD y09, a1, b2, y09 + FMADD y01, a1, b2, y01 LFD a1, 3 * SIZE(AO1) - FMADD y10, a2, b2, y10 + FMADD y02, a2, b2, y02 LFD a2, 3 * SIZE(AO2) - FMADD y11, a3, b2, y11 + FMADD y03, a3, b2, y03 LFD a3, 3 * SIZE(AO3) - FMADD y12, a4, b2, y12 + FMADD y04, a4, b2, y04 LFD a4, 3 * SIZE(AO4) - FMADD y13, a5, b2, y13 + FMADD y05, a5, b2, y05 LFD a5, 3 * SIZE(AO5) - FMADD y14, a6, b2, y14 + FMADD y06, a6, b2, y06 LFD a6, 3 * SIZE(AO6) - FMADD y15, a7, b2, y15 + FMADD y07, a7, b2, y07 LFD a7, 3 * SIZE(AO7) - FMADD y16, a8, b2, y16 + FMADD y08, a8, b2, y08 LFD a8, 3 * SIZE(AO8) LFD b5, 5 * SIZE(BO) @@ -1219,21 +1211,21 @@ LL(14): FMADD y08, a8, b3, y08 LFD a8, 4 * SIZE(AO8) - FMADD y09, a1, b4, y09 + FMADD y01, a1, b4, y01 LFD a1, 5 * SIZE(AO1) - FMADD y10, a2, b4, y10 + FMADD y02, a2, b4, y02 LFD a2, 5 * SIZE(AO2) - FMADD y11, a3, b4, y11 + FMADD y03, a3, b4, y03 LFD a3, 5 * SIZE(AO3) - FMADD y12, a4, b4, y12 + FMADD y04, a4, b4, y04 LFD a4, 5 * SIZE(AO4) - FMADD y13, a5, b4, y13 + FMADD y05, a5, b4, y05 LFD a5, 5 * SIZE(AO5) - FMADD y14, a6, b4, y14 + FMADD y06, a6, b4, y06 LFD a6, 5 * SIZE(AO6) - FMADD y15, a7, b4, y15 + FMADD y07, a7, b4, y07 LFD a7, 5 * SIZE(AO7) - FMADD y16, a8, b4, y16 + FMADD y08, a8, b4, y08 LFD a8, 5 * SIZE(AO8) FMADD y01, a1, b5, y01 @@ -1253,21 +1245,21 @@ LL(14): FMADD y08, a8, b5, y08 LFD a8, 6 * SIZE(AO8) - FMADD y09, a1, b6, y09 + FMADD y01, a1, b6, y01 LFD a1, 7 * SIZE(AO1) - FMADD y10, a2, b6, y10 + FMADD y02, a2, b6, y02 LFD a2, 7 * SIZE(AO2) - FMADD y11, a3, b6, y11 + FMADD y03, a3, b6, y03 LFD a3, 7 * SIZE(AO3) - FMADD y12, a4, b6, y12 + FMADD y04, a4, b6, y04 LFD a4, 7 * SIZE(AO4) - FMADD y13, a5, b6, y13 + FMADD y05, a5, b6, y05 LFD a5, 7 * SIZE(AO5) - FMADD y14, a6, b6, y14 + FMADD y06, a6, b6, y06 LFD a6, 7 * SIZE(AO6) - FMADD y15, a7, b6, y15 + FMADD y07, a7, b6, y07 LFD a7, 7 * SIZE(AO7) - FMADD y16, a8, b6, y16 + FMADD y08, a8, b6, y08 LFD a8, 7 * SIZE(AO8) FMADD y01, a1, b7, y01 @@ -1287,21 +1279,21 @@ LL(14): FMADD y08, a8, b7, y08 LFD a8, 8 * SIZE(AO8) - FMADD y09, a1, b8, y09 + FMADD y01, a1, b8, y01 addi AO1, AO1, 8 * SIZE - FMADD y10, a2, b8, y10 + FMADD y02, a2, b8, y02 addi AO2, AO2, 8 * SIZE - FMADD y11, a3, b8, y11 + FMADD y03, a3, b8, y03 addi AO3, AO3, 8 * SIZE - FMADD y12, a4, b8, y12 + FMADD y04, a4, b8, y04 addi AO4, AO4, 8 * SIZE - FMADD y13, a5, b8, y13 + FMADD y05, a5, b8, y05 addi AO5, AO5, 8 * SIZE - FMADD y14, a6, b8, y14 + FMADD y06, a6, b8, y06 addi AO6, AO6, 8 * SIZE - FMADD y15, a7, b8, y15 + FMADD y07, a7, b8, y07 addi AO7, AO7, 8 * SIZE - FMADD y16, a8, b8, y16 + FMADD y08, a8, b8, y08 addi AO8, AO8, 8 * SIZE addi BO, BO, 8 * SIZE .align 4 @@ -1341,21 +1333,21 @@ LL(15): FMADD y08, a8, b1, y08 LFD a8, 2 * SIZE(AO8) - FMADD y09, a1, b2, y09 + FMADD y01, a1, b2, y01 LFD a1, 3 * SIZE(AO1) - FMADD y10, a2, b2, y10 + FMADD y02, a2, b2, y02 LFD a2, 3 * SIZE(AO2) - FMADD y11, a3, b2, 
y11 + FMADD y03, a3, b2, y03 LFD a3, 3 * SIZE(AO3) - FMADD y12, a4, b2, y12 + FMADD y04, a4, b2, y04 LFD a4, 3 * SIZE(AO4) - FMADD y13, a5, b2, y13 + FMADD y05, a5, b2, y05 LFD a5, 3 * SIZE(AO5) - FMADD y14, a6, b2, y14 + FMADD y06, a6, b2, y06 LFD a6, 3 * SIZE(AO6) - FMADD y15, a7, b2, y15 + FMADD y07, a7, b2, y07 LFD a7, 3 * SIZE(AO7) - FMADD y16, a8, b2, y16 + FMADD y08, a8, b2, y08 LFD a8, 3 * SIZE(AO8) FMADD y01, a1, b3, y01 @@ -1376,21 +1368,21 @@ LL(15): FMADD y08, a8, b3, y08 LFD a8, 4 * SIZE(AO8) - FMADD y09, a1, b4, y09 + FMADD y01, a1, b4, y01 addi AO1, AO1, 4 * SIZE - FMADD y10, a2, b4, y10 + FMADD y02, a2, b4, y02 addi AO2, AO2, 4 * SIZE - FMADD y11, a3, b4, y11 + FMADD y03, a3, b4, y03 addi AO3, AO3, 4 * SIZE - FMADD y12, a4, b4, y12 + FMADD y04, a4, b4, y04 addi AO4, AO4, 4 * SIZE - FMADD y13, a5, b4, y13 + FMADD y05, a5, b4, y05 addi AO5, AO5, 4 * SIZE - FMADD y14, a6, b4, y14 + FMADD y06, a6, b4, y06 addi AO6, AO6, 4 * SIZE - FMADD y15, a7, b4, y15 + FMADD y07, a7, b4, y07 addi AO7, AO7, 4 * SIZE - FMADD y16, a8, b4, y16 + FMADD y08, a8, b4, y08 addi AO8, AO8, 4 * SIZE addi BO, BO, 4 * SIZE .align 4 @@ -1428,22 +1420,22 @@ LL(16): FMADD y08, a8, b1, y08 LFD a8, 2 * SIZE(AO8) - FMADD y09, a1, b2, y09 + FMADD y01, a1, b2, y01 addi AO1, AO1, 2 * SIZE addi AO2, AO2, 2 * SIZE - FMADD y10, a2, b2, y10 + FMADD y02, a2, b2, y02 addi AO3, AO3, 2 * SIZE addi AO4, AO4, 2 * SIZE - FMADD y11, a3, b2, y11 - FMADD y12, a4, b2, y12 + FMADD y03, a3, b2, y03 + FMADD y04, a4, b2, y04 addi AO5, AO5, 2 * SIZE addi AO6, AO6, 2 * SIZE - FMADD y13, a5, b2, y13 - FMADD y14, a6, b2, y14 + FMADD y05, a5, b2, y05 + FMADD y06, a6, b2, y06 addi AO7, AO7, 2 * SIZE addi AO8, AO8, 2 * SIZE - FMADD y15, a7, b2, y15 - FMADD y16, a8, b2, y16 + FMADD y07, a7, b2, y07 + FMADD y08, a8, b2, y08 addi BO, BO, 2 * SIZE .align 4 @@ -1486,15 +1478,6 @@ LL(18): LFD a7, 7 * SIZE(CO) LFD a8, 8 * SIZE(CO) - FADD y01, y09, y01 - FADD y02, y10, y02 - FADD y03, y11, y03 - FADD y04, y12, y04 - FADD y05, y13, y05 - FADD y06, y14, y06 - FADD y07, y15, y07 - FADD y08, y16, y08 - FMADD a1, alpha, y01, a1 FMADD a2, alpha, y02, a2 FMADD a3, alpha, y03, a3 @@ -1530,15 +1513,6 @@ LL(19): LFDUX a7, CO, INCY LFDUX a8, CO, INCY - FADD y01, y09, y01 - FADD y02, y10, y02 - FADD y03, y11, y03 - FADD y04, y12, y04 - FADD y05, y13, y05 - FADD y06, y14, y06 - FADD y07, y15, y07 - FADD y08, y16, y08 - FMADD a1, alpha, f0, a1 FMADD a2, alpha, f1, a2 FMADD a3, alpha, f2, a3 @@ -1580,10 +1554,6 @@ LL(20): fmr y02, y01 fmr y03, y01 fmr y04, y01 - fmr y09, y01 - fmr y10, y01 - fmr y11, y01 - fmr y12, y01 DCBT(Y1, PREC) @@ -1621,13 +1591,13 @@ LL(22): FMADD y04, a4, b1, y04 LFD a4, 3 * SIZE(AO4) - FMADD y09, a5, b2, y09 + FMADD y01, a5, b2, y01 LFD a5, 4 * SIZE(AO1) - FMADD y10, a6, b2, y10 + FMADD y02, a6, b2, y02 LFD a6, 4 * SIZE(AO2) - FMADD y11, a7, b2, y11 + FMADD y03, a7, b2, y03 LFD a7, 4 * SIZE(AO3) - FMADD y12, a8, b2, y12 + FMADD y04, a8, b2, y04 LFD a8, 4 * SIZE(AO4) FMADD y01, a1, b3, y01 @@ -1639,13 +1609,13 @@ LL(22): FMADD y04, a4, b3, y04 LFD a4, 5 * SIZE(AO4) - FMADD y09, a5, b4, y09 + FMADD y01, a5, b4, y01 LFD a5, 6 * SIZE(AO1) - FMADD y10, a6, b4, y10 + FMADD y02, a6, b4, y02 LFD a6, 6 * SIZE(AO2) - FMADD y11, a7, b4, y11 + FMADD y03, a7, b4, y03 LFD a7, 6 * SIZE(AO3) - FMADD y12, a8, b4, y12 + FMADD y04, a8, b4, y04 LFD a8, 6 * SIZE(AO4) LFD b1, 9 * SIZE(BO) @@ -1662,13 +1632,13 @@ LL(22): FMADD y04, a4, b5, y04 LFD a4, 7 * SIZE(AO4) - FMADD y09, a5, b6, y09 + FMADD y01, a5, b6, y01 LFD a5, 8 * SIZE(AO1) - FMADD y10, a6, b6, 
y10 + FMADD y02, a6, b6, y02 LFD a6, 8 * SIZE(AO2) - FMADD y11, a7, b6, y11 + FMADD y03, a7, b6, y03 LFD a7, 8 * SIZE(AO3) - FMADD y12, a8, b6, y12 + FMADD y04, a8, b6, y04 LFD a8, 8 * SIZE(AO4) FMADD y01, a1, b7, y01 @@ -1680,13 +1650,13 @@ LL(22): FMADD y04, a4, b7, y04 LFD a4, 9 * SIZE(AO4) - FMADD y09, a5, b8, y09 + FMADD y01, a5, b8, y01 LFD a5, 10 * SIZE(AO1) - FMADD y10, a6, b8, y10 + FMADD y02, a6, b8, y02 LFD a6, 10 * SIZE(AO2) - FMADD y11, a7, b8, y11 + FMADD y03, a7, b8, y03 LFD a7, 10 * SIZE(AO3) - FMADD y12, a8, b8, y12 + FMADD y04, a8, b8, y04 LFD a8, 10 * SIZE(AO4) LFD b5, 13 * SIZE(BO) @@ -1703,13 +1673,13 @@ LL(22): FMADD y04, a4, b1, y04 LFD a4, 11 * SIZE(AO4) - FMADD y09, a5, b2, y09 + FMADD y01, a5, b2, y01 LFD a5, 12 * SIZE(AO1) - FMADD y10, a6, b2, y10 + FMADD y02, a6, b2, y02 LFD a6, 12 * SIZE(AO2) - FMADD y11, a7, b2, y11 + FMADD y03, a7, b2, y03 LFD a7, 12 * SIZE(AO3) - FMADD y12, a8, b2, y12 + FMADD y04, a8, b2, y04 LFD a8, 12 * SIZE(AO4) FMADD y01, a1, b3, y01 @@ -1721,13 +1691,13 @@ LL(22): FMADD y04, a4, b3, y04 LFD a4, 13 * SIZE(AO4) - FMADD y09, a5, b4, y09 + FMADD y01, a5, b4, y01 LFD a5, 14 * SIZE(AO1) - FMADD y10, a6, b4, y10 + FMADD y02, a6, b4, y02 LFD a6, 14 * SIZE(AO2) - FMADD y11, a7, b4, y11 + FMADD y03, a7, b4, y03 LFD a7, 14 * SIZE(AO3) - FMADD y12, a8, b4, y12 + FMADD y04, a8, b4, y04 LFD a8, 14 * SIZE(AO4) LFD b1, 17 * SIZE(BO) @@ -1744,13 +1714,13 @@ LL(22): FMADD y04, a4, b5, y04 LFD a4, 15 * SIZE(AO4) - FMADD y09, a5, b6, y09 + FMADD y01, a5, b6, y01 LFD a5, 16 * SIZE(AO1) - FMADD y10, a6, b6, y10 + FMADD y02, a6, b6, y02 LFD a6, 16 * SIZE(AO2) - FMADD y11, a7, b6, y11 + FMADD y03, a7, b6, y03 LFD a7, 16 * SIZE(AO3) - FMADD y12, a8, b6, y12 + FMADD y04, a8, b6, y04 LFD a8, 16 * SIZE(AO4) FMADD y01, a1, b7, y01 @@ -1762,13 +1732,13 @@ LL(22): FMADD y04, a4, b7, y04 LFD a4, 17 * SIZE(AO4) - FMADD y09, a5, b8, y09 + FMADD y01, a5, b8, y01 LFD a5, 18 * SIZE(AO1) - FMADD y10, a6, b8, y10 + FMADD y02, a6, b8, y02 LFD a6, 18 * SIZE(AO2) - FMADD y11, a7, b8, y11 + FMADD y03, a7, b8, y03 LFD a7, 18 * SIZE(AO3) - FMADD y12, a8, b8, y12 + FMADD y04, a8, b8, y04 LFD a8, 18 * SIZE(AO4) LFD b5, 21 * SIZE(BO) @@ -1800,13 +1770,13 @@ LL(23): FMADD y04, a4, b1, y04 LFD a4, 3 * SIZE(AO4) - FMADD y09, a5, b2, y09 + FMADD y01, a5, b2, y01 LFD a5, 4 * SIZE(AO1) - FMADD y10, a6, b2, y10 + FMADD y02, a6, b2, y02 LFD a6, 4 * SIZE(AO2) - FMADD y11, a7, b2, y11 + FMADD y03, a7, b2, y03 LFD a7, 4 * SIZE(AO3) - FMADD y12, a8, b2, y12 + FMADD y04, a8, b2, y04 LFD a8, 4 * SIZE(AO4) FMADD y01, a1, b3, y01 @@ -1818,13 +1788,13 @@ LL(23): FMADD y04, a4, b3, y04 LFD a4, 5 * SIZE(AO4) - FMADD y09, a5, b4, y09 + FMADD y01, a5, b4, y01 LFD a5, 6 * SIZE(AO1) - FMADD y10, a6, b4, y10 + FMADD y02, a6, b4, y02 LFD a6, 6 * SIZE(AO2) - FMADD y11, a7, b4, y11 + FMADD y03, a7, b4, y03 LFD a7, 6 * SIZE(AO3) - FMADD y12, a8, b4, y12 + FMADD y04, a8, b4, y04 LFD a8, 6 * SIZE(AO4) LFD b1, 9 * SIZE(BO) @@ -1841,13 +1811,13 @@ LL(23): FMADD y04, a4, b5, y04 LFD a4, 7 * SIZE(AO4) - FMADD y09, a5, b6, y09 + FMADD y01, a5, b6, y01 LFD a5, 8 * SIZE(AO1) - FMADD y10, a6, b6, y10 + FMADD y02, a6, b6, y02 LFD a6, 8 * SIZE(AO2) - FMADD y11, a7, b6, y11 + FMADD y03, a7, b6, y03 LFD a7, 8 * SIZE(AO3) - FMADD y12, a8, b6, y12 + FMADD y04, a8, b6, y04 LFD a8, 8 * SIZE(AO4) FMADD y01, a1, b7, y01 @@ -1859,13 +1829,13 @@ LL(23): FMADD y04, a4, b7, y04 LFD a4, 9 * SIZE(AO4) - FMADD y09, a5, b8, y09 + FMADD y01, a5, b8, y01 LFD a5, 10 * SIZE(AO1) - FMADD y10, a6, b8, y10 + FMADD y02, a6, b8, y02 LFD a6, 10 * 
SIZE(AO2) - FMADD y11, a7, b8, y11 + FMADD y03, a7, b8, y03 LFD a7, 10 * SIZE(AO3) - FMADD y12, a8, b8, y12 + FMADD y04, a8, b8, y04 LFD a8, 10 * SIZE(AO4) LFD b5, 13 * SIZE(BO) @@ -1882,13 +1852,13 @@ LL(23): FMADD y04, a4, b1, y04 LFD a4, 11 * SIZE(AO4) - FMADD y09, a5, b2, y09 + FMADD y01, a5, b2, y01 LFD a5, 12 * SIZE(AO1) - FMADD y10, a6, b2, y10 + FMADD y02, a6, b2, y02 LFD a6, 12 * SIZE(AO2) - FMADD y11, a7, b2, y11 + FMADD y03, a7, b2, y03 LFD a7, 12 * SIZE(AO3) - FMADD y12, a8, b2, y12 + FMADD y04, a8, b2, y04 LFD a8, 12 * SIZE(AO4) FMADD y01, a1, b3, y01 @@ -1900,13 +1870,13 @@ LL(23): FMADD y04, a4, b3, y04 LFD a4, 13 * SIZE(AO4) - FMADD y09, a5, b4, y09 + FMADD y01, a5, b4, y01 LFD a5, 14 * SIZE(AO1) - FMADD y10, a6, b4, y10 + FMADD y02, a6, b4, y02 LFD a6, 14 * SIZE(AO2) - FMADD y11, a7, b4, y11 + FMADD y03, a7, b4, y03 LFD a7, 14 * SIZE(AO3) - FMADD y12, a8, b4, y12 + FMADD y04, a8, b4, y04 LFD a8, 14 * SIZE(AO4) FMADD y01, a1, b5, y01 @@ -1918,13 +1888,13 @@ LL(23): FMADD y04, a4, b5, y04 LFD a4, 15 * SIZE(AO4) - FMADD y09, a5, b6, y09 + FMADD y01, a5, b6, y01 LFD a5, 16 * SIZE(AO1) - FMADD y10, a6, b6, y10 + FMADD y02, a6, b6, y02 LFD a6, 16 * SIZE(AO2) - FMADD y11, a7, b6, y11 + FMADD y03, a7, b6, y03 LFD a7, 16 * SIZE(AO3) - FMADD y12, a8, b6, y12 + FMADD y04, a8, b6, y04 LFD a8, 16 * SIZE(AO4) FMADD y01, a1, b7, y01 @@ -1932,10 +1902,10 @@ LL(23): FMADD y03, a3, b7, y03 FMADD y04, a4, b7, y04 - FMADD y09, a5, b8, y09 - FMADD y10, a6, b8, y10 - FMADD y11, a7, b8, y11 - FMADD y12, a8, b8, y12 + FMADD y01, a5, b8, y01 + FMADD y02, a6, b8, y02 + FMADD y03, a7, b8, y03 + FMADD y04, a8, b8, y04 addi AO1, AO1, 16 * SIZE addi AO2, AO2, 16 * SIZE @@ -1975,13 +1945,13 @@ LL(24): FMADD y04, a4, b1, y04 LFD a4, 3 * SIZE(AO4) - FMADD y09, a5, b2, y09 + FMADD y01, a5, b2, y01 LFD a5, 4 * SIZE(AO1) - FMADD y10, a6, b2, y10 + FMADD y02, a6, b2, y02 LFD a6, 4 * SIZE(AO2) - FMADD y11, a7, b2, y11 + FMADD y03, a7, b2, y03 LFD a7, 4 * SIZE(AO3) - FMADD y12, a8, b2, y12 + FMADD y04, a8, b2, y04 LFD a8, 4 * SIZE(AO4) FMADD y01, a1, b3, y01 @@ -1993,13 +1963,13 @@ LL(24): FMADD y04, a4, b3, y04 LFD a4, 5 * SIZE(AO4) - FMADD y09, a5, b4, y09 + FMADD y01, a5, b4, y01 LFD a5, 6 * SIZE(AO1) - FMADD y10, a6, b4, y10 + FMADD y02, a6, b4, y02 LFD a6, 6 * SIZE(AO2) - FMADD y11, a7, b4, y11 + FMADD y03, a7, b4, y03 LFD a7, 6 * SIZE(AO3) - FMADD y12, a8, b4, y12 + FMADD y04, a8, b4, y04 LFD a8, 6 * SIZE(AO4) LFD b1, 5 * SIZE(BO) @@ -2016,13 +1986,13 @@ LL(24): FMADD y04, a4, b1, y04 LFD a4, 7 * SIZE(AO4) - FMADD y09, a5, b2, y09 + FMADD y01, a5, b2, y01 LFD a5, 8 * SIZE(AO1) - FMADD y10, a6, b2, y10 + FMADD y02, a6, b2, y02 LFD a6, 8 * SIZE(AO2) - FMADD y11, a7, b2, y11 + FMADD y03, a7, b2, y03 LFD a7, 8 * SIZE(AO3) - FMADD y12, a8, b2, y12 + FMADD y04, a8, b2, y04 LFD a8, 8 * SIZE(AO4) FMADD y01, a1, b3, y01 @@ -2030,13 +2000,13 @@ LL(24): FMADD y03, a3, b3, y03 FMADD y04, a4, b3, y04 - FMADD y09, a5, b4, y09 + FMADD y01, a5, b4, y01 addi AO1, AO1, 8 * SIZE - FMADD y10, a6, b4, y10 + FMADD y02, a6, b4, y02 addi AO2, AO2, 8 * SIZE - FMADD y11, a7, b4, y11 + FMADD y03, a7, b4, y03 addi AO3, AO3, 8 * SIZE - FMADD y12, a8, b4, y12 + FMADD y04, a8, b4, y04 addi AO4, AO4, 8 * SIZE addi BO, BO, 8 * SIZE @@ -2070,13 +2040,13 @@ LL(25): FMADD y04, a4, b1, y04 LFD a4, 3 * SIZE(AO4) - FMADD y09, a5, b2, y09 + FMADD y01, a5, b2, y01 LFD a5, 4 * SIZE(AO1) - FMADD y10, a6, b2, y10 + FMADD y02, a6, b2, y02 LFD a6, 4 * SIZE(AO2) - FMADD y11, a7, b2, y11 + FMADD y03, a7, b2, y03 LFD a7, 4 * SIZE(AO3) - FMADD y12, a8, 
b2, y12 + FMADD y04, a8, b2, y04 LFD a8, 4 * SIZE(AO4) FMADD y01, a1, b3, y01 @@ -2084,13 +2054,13 @@ LL(25): FMADD y03, a3, b3, y03 FMADD y04, a4, b3, y04 - FMADD y09, a5, b4, y09 + FMADD y01, a5, b4, y01 addi AO1, AO1, 4 * SIZE - FMADD y10, a6, b4, y10 + FMADD y02, a6, b4, y02 addi AO2, AO2, 4 * SIZE - FMADD y11, a7, b4, y11 + FMADD y03, a7, b4, y03 addi AO3, AO3, 4 * SIZE - FMADD y12, a8, b4, y12 + FMADD y04, a8, b4, y04 addi AO4, AO4, 4 * SIZE addi BO, BO, 4 * SIZE .align 4 @@ -2117,13 +2087,13 @@ LL(26): FMADD y03, a3, b1, y03 FMADD y04, a4, b1, y04 - FMADD y09, a5, b2, y09 + FMADD y01, a5, b2, y01 addi AO1, AO1, 2 * SIZE - FMADD y10, a6, b2, y10 + FMADD y02, a6, b2, y02 addi AO2, AO2, 2 * SIZE - FMADD y11, a7, b2, y11 + FMADD y03, a7, b2, y03 addi AO3, AO3, 2 * SIZE - FMADD y12, a8, b2, y12 + FMADD y04, a8, b2, y04 addi AO4, AO4, 2 * SIZE addi BO, BO, 2 * SIZE .align 4 @@ -2156,11 +2126,6 @@ LL(28): LFD a3, 3 * SIZE(CO) LFD a4, 4 * SIZE(CO) - FADD y01, y09, y01 - FADD y02, y10, y02 - FADD y03, y11, y03 - FADD y04, y12, y04 - FMADD a1, alpha, y01, a1 FMADD a2, alpha, y02, a2 FMADD a3, alpha, y03, a3 @@ -2181,11 +2146,6 @@ LL(29): LFDUX a3, CO, INCY LFDUX a4, CO, INCY - FADD y01, y09, y01 - FADD y02, y10, y02 - FADD y03, y11, y03 - FADD y04, y12, y04 - FMADD a1, alpha, f0, a1 FMADD a2, alpha, f1, a2 FMADD a3, alpha, f2, a3 @@ -2209,12 +2169,6 @@ LL(30): lfd y01, FZERO fmr y02, y01 - fmr y03, y01 - fmr y04, y01 - fmr y09, y01 - fmr y10, y01 - fmr y11, y01 - fmr y12, y01 DCBT(Y1, PREC) @@ -2247,18 +2201,18 @@ LL(32): LFD a1, 5 * SIZE(AO1) FMADD y02, a2, b1, y02 LFD a2, 5 * SIZE(AO2) - FMADD y03, a3, b2, y03 + FMADD y01, a3, b2, y01 LFD a3, 6 * SIZE(AO1) - FMADD y04, a4, b2, y04 + FMADD y02, a4, b2, y02 LFD a4, 6 * SIZE(AO2) - FMADD y09, a5, b3, y09 + FMADD y01, a5, b3, y01 LFD a5, 7 * SIZE(AO1) - FMADD y10, a6, b3, y10 + FMADD y02, a6, b3, y02 LFD a6, 7 * SIZE(AO2) - FMADD y11, a7, b4, y11 + FMADD y01, a7, b4, y01 LFD a7, 8 * SIZE(AO1) - FMADD y12, a8, b4, y12 + FMADD y02, a8, b4, y02 LFD a8, 8 * SIZE(AO2) LFD b1, 9 * SIZE(BO) @@ -2270,18 +2224,18 @@ LL(32): LFD a1, 9 * SIZE(AO1) FMADD y02, a2, b5, y02 LFD a2, 9 * SIZE(AO2) - FMADD y03, a3, b6, y03 + FMADD y01, a3, b6, y01 LFD a3, 10 * SIZE(AO1) - FMADD y04, a4, b6, y04 + FMADD y02, a4, b6, y02 LFD a4, 10 * SIZE(AO2) - FMADD y09, a5, b7, y09 + FMADD y01, a5, b7, y01 LFD a5, 11 * SIZE(AO1) - FMADD y10, a6, b7, y10 + FMADD y02, a6, b7, y02 LFD a6, 11 * SIZE(AO2) - FMADD y11, a7, b8, y11 + FMADD y01, a7, b8, y01 LFD a7, 12 * SIZE(AO1) - FMADD y12, a8, b8, y12 + FMADD y02, a8, b8, y02 LFD a8, 12 * SIZE(AO2) LFD b5, 13 * SIZE(BO) @@ -2293,18 +2247,18 @@ LL(32): LFD a1, 13 * SIZE(AO1) FMADD y02, a2, b1, y02 LFD a2, 13 * SIZE(AO2) - FMADD y03, a3, b2, y03 + FMADD y01, a3, b2, y01 LFD a3, 14 * SIZE(AO1) - FMADD y04, a4, b2, y04 + FMADD y02, a4, b2, y02 LFD a4, 14 * SIZE(AO2) - FMADD y09, a5, b3, y09 + FMADD y01, a5, b3, y01 LFD a5, 15 * SIZE(AO1) - FMADD y10, a6, b3, y10 + FMADD y02, a6, b3, y02 LFD a6, 15 * SIZE(AO2) - FMADD y11, a7, b4, y11 + FMADD y01, a7, b4, y01 LFD a7, 16 * SIZE(AO1) - FMADD y12, a8, b4, y12 + FMADD y02, a8, b4, y02 LFD a8, 16 * SIZE(AO2) LFD b1, 17 * SIZE(BO) @@ -2316,18 +2270,18 @@ LL(32): LFD a1, 17 * SIZE(AO1) FMADD y02, a2, b5, y02 LFD a2, 17 * SIZE(AO2) - FMADD y03, a3, b6, y03 + FMADD y01, a3, b6, y01 LFD a3, 18 * SIZE(AO1) - FMADD y04, a4, b6, y04 + FMADD y02, a4, b6, y02 LFD a4, 18 * SIZE(AO2) - FMADD y09, a5, b7, y09 + FMADD y01, a5, b7, y01 LFD a5, 19 * SIZE(AO1) - FMADD y10, a6, b7, y10 + FMADD y02, a6, b7, y02 
LFD a6, 19 * SIZE(AO2) - FMADD y11, a7, b8, y11 + FMADD y01, a7, b8, y01 LFD a7, 20 * SIZE(AO1) - FMADD y12, a8, b8, y12 + FMADD y02, a8, b8, y02 LFD a8, 20 * SIZE(AO2) LFD b5, 21 * SIZE(BO) @@ -2349,18 +2303,18 @@ LL(33): LFD a1, 5 * SIZE(AO1) FMADD y02, a2, b1, y02 LFD a2, 5 * SIZE(AO2) - FMADD y03, a3, b2, y03 + FMADD y01, a3, b2, y01 LFD a3, 6 * SIZE(AO1) - FMADD y04, a4, b2, y04 + FMADD y02, a4, b2, y02 LFD a4, 6 * SIZE(AO2) - FMADD y09, a5, b3, y09 + FMADD y01, a5, b3, y01 LFD a5, 7 * SIZE(AO1) - FMADD y10, a6, b3, y10 + FMADD y02, a6, b3, y02 LFD a6, 7 * SIZE(AO2) - FMADD y11, a7, b4, y11 + FMADD y01, a7, b4, y01 LFD a7, 8 * SIZE(AO1) - FMADD y12, a8, b4, y12 + FMADD y02, a8, b4, y02 LFD a8, 8 * SIZE(AO2) LFD b1, 9 * SIZE(BO) @@ -2372,18 +2326,18 @@ LL(33): LFD a1, 9 * SIZE(AO1) FMADD y02, a2, b5, y02 LFD a2, 9 * SIZE(AO2) - FMADD y03, a3, b6, y03 + FMADD y01, a3, b6, y01 LFD a3, 10 * SIZE(AO1) - FMADD y04, a4, b6, y04 + FMADD y02, a4, b6, y02 LFD a4, 10 * SIZE(AO2) - FMADD y09, a5, b7, y09 + FMADD y01, a5, b7, y01 LFD a5, 11 * SIZE(AO1) - FMADD y10, a6, b7, y10 + FMADD y02, a6, b7, y02 LFD a6, 11 * SIZE(AO2) - FMADD y11, a7, b8, y11 + FMADD y01, a7, b8, y01 LFD a7, 12 * SIZE(AO1) - FMADD y12, a8, b8, y12 + FMADD y02, a8, b8, y02 LFD a8, 12 * SIZE(AO2) LFD b5, 13 * SIZE(BO) @@ -2395,29 +2349,29 @@ LL(33): LFD a1, 13 * SIZE(AO1) FMADD y02, a2, b1, y02 LFD a2, 13 * SIZE(AO2) - FMADD y03, a3, b2, y03 + FMADD y01, a3, b2, y01 LFD a3, 14 * SIZE(AO1) - FMADD y04, a4, b2, y04 + FMADD y02, a4, b2, y02 LFD a4, 14 * SIZE(AO2) - FMADD y09, a5, b3, y09 + FMADD y01, a5, b3, y01 LFD a5, 15 * SIZE(AO1) - FMADD y10, a6, b3, y10 + FMADD y02, a6, b3, y02 LFD a6, 15 * SIZE(AO2) - FMADD y11, a7, b4, y11 + FMADD y01, a7, b4, y01 LFD a7, 16 * SIZE(AO1) - FMADD y12, a8, b4, y12 + FMADD y02, a8, b4, y02 LFD a8, 16 * SIZE(AO2) FMADD y01, a1, b5, y01 FMADD y02, a2, b5, y02 - FMADD y03, a3, b6, y03 - FMADD y04, a4, b6, y04 + FMADD y01, a3, b6, y01 + FMADD y02, a4, b6, y02 - FMADD y09, a5, b7, y09 - FMADD y10, a6, b7, y10 - FMADD y11, a7, b8, y11 - FMADD y12, a8, b8, y12 + FMADD y01, a5, b7, y01 + FMADD y02, a6, b7, y02 + FMADD y01, a7, b8, y01 + FMADD y02, a8, b8, y02 addi AO1, AO1, 16 * SIZE addi AO2, AO2, 16 * SIZE @@ -2454,32 +2408,32 @@ LL(34): LFD a1, 5 * SIZE(AO1) FMADD y02, a2, b1, y02 LFD a2, 5 * SIZE(AO2) - FMADD y09, a3, b2, y09 + FMADD y01, a3, b2, y01 LFD a3, 6 * SIZE(AO1) - FMADD y10, a4, b2, y10 + FMADD y02, a4, b2, y02 LFD a4, 6 * SIZE(AO2) FMADD y01, a5, b3, y01 LFD a5, 7 * SIZE(AO1) FMADD y02, a6, b3, y02 LFD a6, 7 * SIZE(AO2) - FMADD y09, a7, b4, y09 + FMADD y01, a7, b4, y01 LFD a7, 8 * SIZE(AO1) - FMADD y10, a8, b4, y10 + FMADD y02, a8, b4, y02 LFD a8, 8 * SIZE(AO2) FMADD y01, a1, b5, y01 FMADD y02, a2, b5, y02 - FMADD y09, a3, b6, y09 - FMADD y10, a4, b6, y10 + FMADD y01, a3, b6, y01 + FMADD y02, a4, b6, y02 FMADD y01, a5, b7, y01 addi AO1, AO1, 8 * SIZE FMADD y02, a6, b7, y02 addi AO2, AO2, 8 * SIZE - FMADD y09, a7, b8, y09 + FMADD y01, a7, b8, y01 addi BO, BO, 8 * SIZE - FMADD y10, a8, b8, y10 + FMADD y02, a8, b8, y02 nop .align 4 @@ -2504,17 +2458,17 @@ LL(35): FMADD y01, a1, b1, y01 FMADD y02, a2, b1, y02 - FMADD y09, a3, b2, y09 - FMADD y10, a4, b2, y10 + FMADD y01, a3, b2, y01 + FMADD y02, a4, b2, y02 FMADD y01, a5, b3, y01 addi AO1, AO1, 4 * SIZE FMADD y02, a6, b3, y02 addi AO2, AO2, 4 * SIZE - FMADD y09, a7, b4, y09 + FMADD y01, a7, b4, y01 addi BO, BO, 4 * SIZE - FMADD y10, a8, b4, y10 + FMADD y02, a8, b4, y02 .align 4 LL(36): @@ -2531,8 +2485,8 @@ LL(36): FMADD y01, a1, b1, y01 
FMADD y02, a2, b1, y02 - FMADD y09, a3, b2, y09 - FMADD y10, a4, b2, y10 + FMADD y01, a3, b2, y01 + FMADD y02, a4, b2, y02 addi AO1, AO1, 2 * SIZE addi AO2, AO2, 2 * SIZE @@ -2560,14 +2514,6 @@ LL(38): LFD a1, 1 * SIZE(CO) LFD a2, 2 * SIZE(CO) - FADD y01, y03, y01 - FADD y02, y04, y02 - FADD y09, y11, y09 - FADD y10, y12, y10 - - FADD y01, y09, y01 - FADD y02, y10, y02 - FMADD a1, alpha, y01, a1 FMADD a2, alpha, y02, a2 @@ -2582,14 +2528,6 @@ LL(39): LFDUX a1, CO, INCY LFDUX a2, CO, INCY - FADD y01, y03, y01 - FADD y02, y04, y02 - FADD y09, y11, y09 - FADD y10, y12, y10 - - FADD y01, y09, y01 - FADD y02, y10, y02 - FMADD a1, alpha, f0, a1 FMADD a2, alpha, f1, a2 @@ -2606,13 +2544,6 @@ LL(40): mr BO, XP lfd y01, FZERO - fmr y02, y01 - fmr y03, y01 - fmr y04, y01 - fmr y09, y01 - fmr y10, y01 - fmr y11, y01 - fmr y12, y01 DCBT(Y1, PREC) @@ -2646,17 +2577,17 @@ LL(42): LFD a1, 9 * SIZE(AO1) LFD b1, 9 * SIZE(BO) - FMADD y02, a2, b2, y02 + FMADD y01, a2, b2, y01 nop LFD a2, 10 * SIZE(AO1) LFD b2, 10 * SIZE(BO) - FMADD y03, a3, b3, y03 + FMADD y01, a3, b3, y01 nop LFD a3, 11 * SIZE(AO1) LFD b3, 11 * SIZE(BO) - FMADD y04, a4, b4, y04 + FMADD y01, a4, b4, y01 nop LFD a4, 12 * SIZE(AO1) LFD b4, 12 * SIZE(BO) @@ -2666,17 +2597,17 @@ LL(42): LFD a5, 13 * SIZE(AO1) LFD b5, 13 * SIZE(BO) - FMADD y02, a6, b6, y02 + FMADD y01, a6, b6, y01 nop LFD a6, 14 * SIZE(AO1) LFD b6, 14 * SIZE(BO) - FMADD y03, a7, b7, y03 + FMADD y01, a7, b7, y01 nop LFD a7, 15 * SIZE(AO1) LFD b7, 15 * SIZE(BO) - FMADD y04, a8, b8, y04 + FMADD y01, a8, b8, y01 nop LFD a8, 16 * SIZE(AO1) LFD b8, 16 * SIZE(BO) @@ -2686,17 +2617,17 @@ LL(42): LFD a1, 17 * SIZE(AO1) LFD b1, 17 * SIZE(BO) - FMADD y02, a2, b2, y02 + FMADD y01, a2, b2, y01 nop LFD a2, 18 * SIZE(AO1) LFD b2, 18 * SIZE(BO) - FMADD y03, a3, b3, y03 + FMADD y01, a3, b3, y01 nop LFD a3, 19 * SIZE(AO1) LFD b3, 19 * SIZE(BO) - FMADD y04, a4, b4, y04 + FMADD y01, a4, b4, y01 nop LFD a4, 20 * SIZE(AO1) LFD b4, 20 * SIZE(BO) @@ -2706,17 +2637,17 @@ LL(42): LFD a5, 21 * SIZE(AO1) LFD b5, 21 * SIZE(BO) - FMADD y02, a6, b6, y02 + FMADD y01, a6, b6, y01 nop LFD a6, 22 * SIZE(AO1) LFD b6, 22 * SIZE(BO) - FMADD y03, a7, b7, y03 + FMADD y01, a7, b7, y01 nop LFD a7, 23 * SIZE(AO1) LFD b7, 23 * SIZE(BO) - FMADD y04, a8, b8, y04 + FMADD y01, a8, b8, y01 nop LFD a8, 24 * SIZE(AO1) LFD b8, 24 * SIZE(BO) @@ -2733,17 +2664,17 @@ LL(43): LFD a1, 9 * SIZE(AO1) LFD b1, 9 * SIZE(BO) - FMADD y02, a2, b2, y02 + FMADD y01, a2, b2, y01 nop LFD a2, 10 * SIZE(AO1) LFD b2, 10 * SIZE(BO) - FMADD y03, a3, b3, y03 + FMADD y01, a3, b3, y01 nop LFD a3, 11 * SIZE(AO1) LFD b3, 11 * SIZE(BO) - FMADD y04, a4, b4, y04 + FMADD y01, a4, b4, y01 nop LFD a4, 12 * SIZE(AO1) LFD b4, 12 * SIZE(BO) @@ -2753,34 +2684,34 @@ LL(43): LFD a5, 13 * SIZE(AO1) LFD b5, 13 * SIZE(BO) - FMADD y02, a6, b6, y02 + FMADD y01, a6, b6, y01 nop LFD a6, 14 * SIZE(AO1) LFD b6, 14 * SIZE(BO) - FMADD y03, a7, b7, y03 + FMADD y01, a7, b7, y01 nop LFD a7, 15 * SIZE(AO1) LFD b7, 15 * SIZE(BO) - FMADD y04, a8, b8, y04 + FMADD y01, a8, b8, y01 nop LFD a8, 16 * SIZE(AO1) LFD b8, 16 * SIZE(BO) FMADD y01, a1, b1, y01 - FMADD y02, a2, b2, y02 - FMADD y03, a3, b3, y03 - FMADD y04, a4, b4, y04 + FMADD y01, a2, b2, y01 + FMADD y01, a3, b3, y01 + FMADD y01, a4, b4, y01 FMADD y01, a5, b5, y01 addi AO1, AO1, 16 * SIZE - FMADD y02, a6, b6, y02 + FMADD y01, a6, b6, y01 addi BO, BO, 16 * SIZE - FMADD y03, a7, b7, y03 + FMADD y01, a7, b7, y01 nop - FMADD y04, a8, b8, y04 + FMADD y01, a8, b8, y01 nop .align 4 @@ -2811,17 +2742,17 @@ LL(44): LFD b8, 8 * SIZE(BO) 
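(In plain C, the transformation running through these hunks is the following; the snippet is an illustrative model, not kernel source. The old code kept a second bank of partial sums, y09-y16, to overlap FMADD latency, and merged the two banks with the FADD reductions deleted above; the rewrite accumulates into y01-y08 only, so the epilogue reduction disappears. The zgemm_kernel_power6.S hunks below apply the same idea to the FMA1-FMA4 complex accumulators, folding the f2/f3-style registers back into f0/f1.)

/* Before: split accumulators, merged by a final FADD. */
double dot_split(const double *a, const double *x, long n) {
  double s0 = 0.0, s1 = 0.0;            /* y01 and y09 */
  for (long i = 0; i + 1 < n; i += 2) { /* tail iteration omitted */
    s0 += a[i] * x[i];
    s1 += a[i + 1] * x[i + 1];
  }
  return s0 + s1;                       /* FADD y01, y09, y01 */
}

/* After: one accumulator per output element, no final FADD. */
double dot_single(const double *a, const double *x, long n) {
  double s = 0.0;                       /* y01 only */
  for (long i = 0; i + 1 < n; i += 2) {
    s += a[i] * x[i];
    s += a[i + 1] * x[i + 1];
  }
  return s;
}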
FMADD y01, a1, b1, y01 - FMADD y02, a2, b2, y02 - FMADD y03, a3, b3, y03 - FMADD y04, a4, b4, y04 + FMADD y01, a2, b2, y01 + FMADD y01, a3, b3, y01 + FMADD y01, a4, b4, y01 FMADD y01, a5, b5, y01 addi AO1, AO1, 8 * SIZE - FMADD y02, a6, b6, y02 + FMADD y01, a6, b6, y01 addi BO, BO, 8 * SIZE - FMADD y03, a7, b7, y03 + FMADD y01, a7, b7, y01 nop - FMADD y04, a8, b8, y04 + FMADD y01, a8, b8, y01 nop .align 4 @@ -2841,12 +2772,12 @@ LL(45): FMADD y01, a1, b1, y01 addi AO1, AO1, 4 * SIZE - FMADD y02, a2, b2, y02 + FMADD y01, a2, b2, y01 addi AO2, AO2, 4 * SIZE - FMADD y03, a3, b3, y03 + FMADD y01, a3, b3, y01 addi BO, BO, 4 * SIZE - FMADD y04, a4, b4, y04 + FMADD y01, a4, b4, y01 nop .align 4 @@ -2861,7 +2792,7 @@ LL(46): FMADD y01, a1, b1, y01 addi AO1, AO1, 2 * SIZE - FMADD y02, a2, b2, y02 + FMADD y01, a2, b2, y01 addi BO, BO, 2 * SIZE .align 4 @@ -2882,10 +2813,6 @@ LL(48): LFD a1, 1 * SIZE(CO) - FADD y01, y02, y01 - FADD y03, y04, y03 - FADD y01, y03, y01 - FMADD a1, alpha, y01, a1 STFD a1, 1 * SIZE(CO) b LL(99) @@ -2893,9 +2820,7 @@ LL(48): LL(49): LFDUX a1, CO, INCY - FADD y01, y02, y01 - FADD y03, y04, y03 - FADD y01, y03, y01 + FMADD a1, alpha, f0, a1 STFDUX a1, BO, INCY .align 4 diff --git a/kernel/power/zgemm_kernel_power6.S b/kernel/power/zgemm_kernel_power6.S index 9b47b9fc1..c513285df 100644 --- a/kernel/power/zgemm_kernel_power6.S +++ b/kernel/power/zgemm_kernel_power6.S @@ -1159,9 +1159,9 @@ LL(20): LL(22): FMA1 f0, f16, f20, f0 - FMA4 f3, f17, f20, f3 FMA2 f1, f16, f21, f1 - FMA3 f2, f17, f21, f2 + FMA4 f1, f17, f20, f1 + FMA3 f0, f17, f21, f0 LFD f28, 4 * SIZE(AO) LFD f29, 5 * SIZE(AO) @@ -1169,9 +1169,9 @@ LL(22): LFD f31, 7 * SIZE(AO) FMA1 f4, f16, f22, f4 - FMA4 f7, f17, f22, f7 FMA2 f5, f16, f23, f5 - FMA3 f6, f17, f23, f6 + FMA4 f5, f17, f22, f5 + FMA3 f4, f17, f23, f4 LFD f20, 8 * SIZE(BO) LFD f21, 9 * SIZE(BO) @@ -1179,14 +1179,14 @@ LL(22): LFD f23, 11 * SIZE(BO) FMA1 f8, f16, f24, f8 - FMA4 f11, f17, f24, f11 FMA2 f9, f16, f25, f9 - FMA3 f10, f17, f25, f10 + FMA4 f9, f17, f24, f9 + FMA3 f8, f17, f25, f8 FMA1 f12, f16, f26, f12 - FMA4 f15, f17, f26, f15 FMA2 f13, f16, f27, f13 - FMA3 f14, f17, f27, f14 + FMA4 f13, f17, f26, f13 + FMA3 f12, f17, f27, f12 LFD f24, 12 * SIZE(BO) LFD f25, 13 * SIZE(BO) @@ -1194,14 +1194,14 @@ LL(22): LFD f27, 15 * SIZE(BO) FMA1 f0, f18, f20, f0 - FMA4 f3, f19, f20, f3 FMA2 f1, f18, f21, f1 - FMA3 f2, f19, f21, f2 + FMA4 f1, f19, f20, f1 + FMA3 f0, f19, f21, f0 FMA1 f4, f18, f22, f4 - FMA4 f7, f19, f22, f7 FMA2 f5, f18, f23, f5 - FMA3 f6, f19, f23, f6 + FMA4 f5, f19, f22, f5 + FMA3 f4, f19, f23, f4 LFD f20, 16 * SIZE(BO) LFD f21, 17 * SIZE(BO) @@ -1209,14 +1209,14 @@ LL(22): LFD f23, 19 * SIZE(BO) FMA1 f8, f18, f24, f8 - FMA4 f11, f19, f24, f11 FMA2 f9, f18, f25, f9 - FMA3 f10, f19, f25, f10 + FMA4 f9, f19, f24, f9 + FMA3 f8, f19, f25, f8 FMA1 f12, f18, f26, f12 - FMA4 f15, f19, f26, f15 FMA2 f13, f18, f27, f13 - FMA3 f14, f19, f27, f14 + FMA4 f13, f19, f26, f13 + FMA3 f12, f19, f27, f12 LFD f24, 20 * SIZE(BO) LFD f25, 21 * SIZE(BO) @@ -1224,9 +1224,9 @@ LL(22): LFD f27, 23 * SIZE(BO) FMA1 f0, f28, f20, f0 - FMA4 f3, f29, f20, f3 FMA2 f1, f28, f21, f1 - FMA3 f2, f29, f21, f2 + FMA4 f1, f29, f20, f1 + FMA3 f0, f29, f21, f0 LFD f16, 8 * SIZE(AO) LFD f17, 9 * SIZE(AO) @@ -1234,9 +1234,9 @@ LL(22): LFD f19, 11 * SIZE(AO) FMA1 f4, f28, f22, f4 - FMA4 f7, f29, f22, f7 FMA2 f5, f28, f23, f5 - FMA3 f6, f29, f23, f6 + FMA4 f5, f29, f22, f5 + FMA3 f4, f29, f23, f4 LFD f20, 24 * SIZE(BO) LFD f21, 25 * SIZE(BO) @@ -1244,14 +1244,14 @@ LL(22): LFD f23, 
27 * SIZE(BO) FMA1 f8, f28, f24, f8 - FMA4 f11, f29, f24, f11 FMA2 f9, f28, f25, f9 - FMA3 f10, f29, f25, f10 + FMA4 f9, f29, f24, f9 + FMA3 f8, f29, f25, f8 FMA1 f12, f28, f26, f12 - FMA4 f15, f29, f26, f15 FMA2 f13, f28, f27, f13 - FMA3 f14, f29, f27, f14 + FMA4 f13, f29, f26, f13 + FMA3 f12, f29, f27, f12 LFD f24, 28 * SIZE(BO) LFD f25, 29 * SIZE(BO) @@ -1259,14 +1259,14 @@ LL(22): LFD f27, 31 * SIZE(BO) FMA1 f0, f30, f20, f0 - FMA4 f3, f31, f20, f3 FMA2 f1, f30, f21, f1 - FMA3 f2, f31, f21, f2 + FMA4 f1, f31, f20, f1 + FMA3 f0, f31, f21, f0 FMA1 f4, f30, f22, f4 - FMA4 f7, f31, f22, f7 FMA2 f5, f30, f23, f5 - FMA3 f6, f31, f23, f6 + FMA4 f5, f31, f22, f5 + FMA3 f4, f31, f23, f4 LFD f20, 32 * SIZE(BO) LFD f21, 33 * SIZE(BO) @@ -1274,14 +1274,14 @@ LL(22): LFD f23, 35 * SIZE(BO) FMA1 f8, f30, f24, f8 - FMA4 f11, f31, f24, f11 FMA2 f9, f30, f25, f9 - FMA3 f10, f31, f25, f10 + FMA4 f9, f31, f24, f9 + FMA3 f8, f31, f25, f8 FMA1 f12, f30, f26, f12 - FMA4 f15, f31, f26, f15 FMA2 f13, f30, f27, f13 - FMA3 f14, f31, f27, f14 + FMA4 f13, f31, f26, f13 + FMA3 f12, f31, f27, f12 LFD f24, 36 * SIZE(BO) LFD f25, 37 * SIZE(BO) @@ -1318,14 +1318,14 @@ LL(25): LL(26): FMA1 f0, f16, f20, f0 - FMA4 f3, f17, f20, f3 FMA2 f1, f16, f21, f1 - FMA3 f2, f17, f21, f2 + FMA4 f1, f17, f20, f1 + FMA3 f0, f17, f21, f0 FMA1 f4, f16, f22, f4 - FMA4 f7, f17, f22, f7 FMA2 f5, f16, f23, f5 - FMA3 f6, f17, f23, f6 + FMA4 f5, f17, f22, f5 + FMA3 f4, f17, f23, f4 LFD f20, 8 * SIZE(BO) LFD f21, 9 * SIZE(BO) @@ -1333,14 +1333,14 @@ LL(26): LFD f23, 11 * SIZE(BO) FMA1 f8, f16, f24, f8 - FMA4 f11, f17, f24, f11 FMA2 f9, f16, f25, f9 - FMA3 f10, f17, f25, f10 + FMA4 f9, f17, f24, f9 + FMA3 f8, f17, f25, f8 FMA1 f12, f16, f26, f12 - FMA4 f15, f17, f26, f15 FMA2 f13, f16, f27, f13 - FMA3 f14, f17, f27, f14 + FMA4 f13, f17, f26, f13 + FMA3 f12, f17, f27, f12 LFD f16, 2 * SIZE(AO) LFD f17, 3 * SIZE(AO) @@ -1363,47 +1363,42 @@ LL(28): LFD f18, 0 * SIZE(CO2) LFD f19, 1 * SIZE(CO2) - FADD f0, f0, f2 - FADD f1, f1, f3 - FADD f4, f4, f6 - FADD f5, f5, f7 - LFD f20, 0 * SIZE(CO3) LFD f21, 1 * SIZE(CO3) LFD f22, 0 * SIZE(CO4) LFD f23, 1 * SIZE(CO4) - FADD f8, f8, f10 - FADD f9, f9, f11 - FADD f12, f12, f14 - FADD f13, f13, f15 + fmr f2, f0 + fmr f3, f1 + fmr f6, f4 + fmr f7, f5 - FNMSUB f24, f31, f1, f16 - FMADD f25, f31, f0, f17 - FNMSUB f26, f31, f5, f18 - FMADD f27, f31, f4, f19 + FMADD f24, f30, f0, f16 + FMADD f25, f30, f1, f17 + FMADD f26, f30, f4, f18 + FMADD f27, f30, f5, f19 - FMADD f0, f30, f0, f24 - FMADD f1, f30, f1, f25 - FMADD f4, f30, f4, f26 - FMADD f5, f30, f5, f27 + FNMSUB f0, f31, f3, f24 + FMADD f1, f31, f2, f25 + FNMSUB f4, f31, f7, f26 + FMADD f5, f31, f6, f27 - FNMSUB f24, f31, f9, f20 - FMADD f25, f31, f8, f21 - FNMSUB f26, f31, f13, f22 - FMADD f27, f31, f12, f23 + fmr f10, f8 + fmr f11, f9 + fmr f14, f12 + fmr f15, f13 - FMADD f8, f30, f8, f24 - FMADD f9, f30, f9, f25 - FMADD f12, f30, f12, f26 - FMADD f13, f30, f13, f27 + FMADD f24, f30, f8, f20 + FMADD f25, f30, f9, f21 + FMADD f26, f30, f12, f22 + FMADD f27, f30, f13, f23 + + FNMSUB f8, f31, f11, f24 + FMADD f9, f31, f10, f25 + FNMSUB f12, f31, f15, f26 + FMADD f13, f31, f14, f27 #else - FADD f0, f0, f2 - FADD f1, f1, f3 - FADD f4, f4, f6 - FADD f5, f5, f7 - FMUL f16, f31, f1 FMUL f17, f31, f0 FMUL f18, f31, f5 @@ -1414,11 +1409,6 @@ LL(28): FMSUB f4, f30, f4, f18 FMADD f5, f30, f5, f19 - FADD f8, f8, f10 - FADD f9, f9, f11 - FADD f12, f12, f14 - FADD f13, f13, f15 - FMUL f20, f31, f9 FMUL f21, f31, f8 FMUL f22, f31, f13 @@ -1616,15 +1606,15 @@ LL(32): FMA2 
f5, f16, f23, f5 FMA2 f7, f18, f23, f7 - FMA4 f9, f17, f20, f9 - FMA4 f11, f19, f20, f11 - FMA3 f8, f17, f21, f8 - FMA3 f10, f19, f21, f10 + FMA4 f1, f17, f20, f1 + FMA4 f3, f19, f20, f3 + FMA3 f0, f17, f21, f0 + FMA3 f2, f19, f21, f2 - FMA4 f13, f17, f22, f13 - FMA4 f15, f19, f22, f15 - FMA3 f12, f17, f23, f12 - FMA3 f14, f19, f23, f14 + FMA4 f5, f17, f22, f5 + FMA4 f7, f19, f22, f7 + FMA3 f4, f17, f23, f4 + FMA3 f6, f19, f23, f6 LFD f20, 8 * SIZE(BO) LFD f21, 9 * SIZE(BO) @@ -1646,15 +1636,15 @@ LL(32): FMA2 f5, f28, f27, f5 FMA2 f7, f30, f27, f7 - FMA4 f9, f29, f24, f9 - FMA4 f11, f31, f24, f11 - FMA3 f8, f29, f25, f8 - FMA3 f10, f31, f25, f10 + FMA4 f1, f29, f24, f1 + FMA4 f3, f31, f24, f3 + FMA3 f0, f29, f25, f0 + FMA3 f2, f31, f25, f2 - FMA4 f13, f29, f26, f13 - FMA4 f15, f31, f26, f15 - FMA3 f12, f29, f27, f12 - FMA3 f14, f31, f27, f14 + FMA4 f5, f29, f26, f5 + FMA4 f7, f31, f26, f7 + FMA3 f4, f29, f27, f4 + FMA3 f6, f31, f27, f6 LFD f24, 12 * SIZE(BO) LFD f25, 13 * SIZE(BO) @@ -1676,15 +1666,15 @@ LL(32): FMA2 f5, f16, f23, f5 FMA2 f7, f18, f23, f7 - FMA4 f9, f17, f20, f9 - FMA4 f11, f19, f20, f11 - FMA3 f8, f17, f21, f8 - FMA3 f10, f19, f21, f10 + FMA4 f1, f17, f20, f1 + FMA4 f3, f19, f20, f3 + FMA3 f0, f17, f21, f0 + FMA3 f2, f19, f21, f2 - FMA4 f13, f17, f22, f13 - FMA4 f15, f19, f22, f15 - FMA3 f12, f17, f23, f12 - FMA3 f14, f19, f23, f14 + FMA4 f5, f17, f22, f5 + FMA4 f7, f19, f22, f7 + FMA3 f4, f17, f23, f4 + FMA3 f6, f19, f23, f6 LFD f20, 16 * SIZE(BO) LFD f21, 17 * SIZE(BO) @@ -1706,15 +1696,15 @@ LL(32): FMA2 f5, f28, f27, f5 FMA2 f7, f30, f27, f7 - FMA4 f9, f29, f24, f9 - FMA4 f11, f31, f24, f11 - FMA3 f8, f29, f25, f8 - FMA3 f10, f31, f25, f10 + FMA4 f1, f29, f24, f1 + FMA4 f3, f31, f24, f3 + FMA3 f0, f29, f25, f0 + FMA3 f2, f31, f25, f2 - FMA4 f13, f29, f26, f13 - FMA4 f15, f31, f26, f15 - FMA3 f12, f29, f27, f12 - FMA3 f14, f31, f27, f14 + FMA4 f5, f29, f26, f5 + FMA4 f7, f31, f26, f7 + FMA3 f4, f29, f27, f4 + FMA3 f6, f31, f27, f6 LFD f24, 20 * SIZE(BO) LFD f25, 21 * SIZE(BO) @@ -1736,15 +1726,15 @@ LL(32): FMA2 f5, f16, f23, f5 FMA2 f7, f18, f23, f7 - FMA4 f9, f17, f20, f9 - FMA4 f11, f19, f20, f11 - FMA3 f8, f17, f21, f8 - FMA3 f10, f19, f21, f10 + FMA4 f1, f17, f20, f1 + FMA4 f3, f19, f20, f3 + FMA3 f0, f17, f21, f0 + FMA3 f2, f19, f21, f2 - FMA4 f13, f17, f22, f13 - FMA4 f15, f19, f22, f15 - FMA3 f12, f17, f23, f12 - FMA3 f14, f19, f23, f14 + FMA4 f5, f17, f22, f5 + FMA4 f7, f19, f22, f7 + FMA3 f4, f17, f23, f4 + FMA3 f6, f19, f23, f6 LFD f20, 24 * SIZE(BO) LFD f21, 25 * SIZE(BO) @@ -1766,15 +1756,15 @@ LL(32): FMA2 f5, f28, f27, f5 FMA2 f7, f30, f27, f7 - FMA4 f9, f29, f24, f9 - FMA4 f11, f31, f24, f11 - FMA3 f8, f29, f25, f8 - FMA3 f10, f31, f25, f10 + FMA4 f1, f29, f24, f1 + FMA4 f3, f31, f24, f3 + FMA3 f0, f29, f25, f0 + FMA3 f2, f31, f25, f2 - FMA4 f13, f29, f26, f13 - FMA4 f15, f31, f26, f15 - FMA3 f12, f29, f27, f12 - FMA3 f14, f31, f27, f14 + FMA4 f5, f29, f26, f5 + FMA4 f7, f31, f26, f7 + FMA3 f4, f29, f27, f4 + FMA3 f6, f31, f27, f6 LFD f24, 28 * SIZE(BO) LFD f25, 29 * SIZE(BO) @@ -1796,15 +1786,15 @@ LL(32): FMA2 f5, f16, f23, f5 FMA2 f7, f18, f23, f7 - FMA4 f9, f17, f20, f9 - FMA4 f11, f19, f20, f11 - FMA3 f8, f17, f21, f8 - FMA3 f10, f19, f21, f10 + FMA4 f1, f17, f20, f1 + FMA4 f3, f19, f20, f3 + FMA3 f0, f17, f21, f0 + FMA3 f2, f19, f21, f2 - FMA4 f13, f17, f22, f13 - FMA4 f15, f19, f22, f15 - FMA3 f12, f17, f23, f12 - FMA3 f14, f19, f23, f14 + FMA4 f5, f17, f22, f5 + FMA4 f7, f19, f22, f7 + FMA3 f4, f17, f23, f4 + FMA3 f6, f19, f23, f6 LFD f20, 32 * 
SIZE(BO) LFD f21, 33 * SIZE(BO) @@ -1826,15 +1816,15 @@ LL(32): FMA2 f5, f28, f27, f5 FMA2 f7, f30, f27, f7 - FMA4 f9, f29, f24, f9 - FMA4 f11, f31, f24, f11 - FMA3 f8, f29, f25, f8 - FMA3 f10, f31, f25, f10 + FMA4 f1, f29, f24, f1 + FMA4 f3, f31, f24, f3 + FMA3 f0, f29, f25, f0 + FMA3 f2, f31, f25, f2 - FMA4 f13, f29, f26, f13 - FMA4 f15, f31, f26, f15 - FMA3 f12, f29, f27, f12 - FMA3 f14, f31, f27, f14 + FMA4 f5, f29, f26, f5 + FMA4 f7, f31, f26, f7 + FMA3 f4, f29, f27, f4 + FMA3 f6, f31, f27, f6 LFD f24, 36 * SIZE(BO) LFD f25, 37 * SIZE(BO) @@ -1883,20 +1873,20 @@ LL(36): FMA2 f5, f16, f23, f5 FMA2 f7, f18, f23, f7 - FMA4 f9, f17, f20, f9 - FMA4 f11, f19, f20, f11 - FMA3 f8, f17, f21, f8 - FMA3 f10, f19, f21, f10 + FMA4 f1, f17, f20, f1 + FMA4 f3, f19, f20, f3 + FMA3 f0, f17, f21, f0 + FMA3 f2, f19, f21, f2 LFD f16, 4 * SIZE(AO) LFD f18, 6 * SIZE(AO) LFD f20, 4 * SIZE(BO) LFD f21, 5 * SIZE(BO) - FMA4 f13, f17, f22, f13 - FMA4 f15, f19, f22, f15 - FMA3 f12, f17, f23, f12 - FMA3 f14, f19, f23, f14 + FMA4 f5, f17, f22, f5 + FMA4 f7, f19, f22, f7 + FMA3 f4, f17, f23, f4 + FMA3 f6, f19, f23, f6 LFD f17, 5 * SIZE(AO) LFD f19, 7 * SIZE(AO) @@ -1916,52 +1906,42 @@ LL(38): LFD f18, 2 * SIZE(CO1) LFD f19, 3 * SIZE(CO1) - FADD f0, f0, f8 - FADD f1, f1, f9 - FADD f2, f2, f10 - FADD f3, f3, f11 - LFD f20, 0 * SIZE(CO2) LFD f21, 1 * SIZE(CO2) LFD f22, 2 * SIZE(CO2) LFD f23, 3 * SIZE(CO2) - FADD f4, f4, f12 - FADD f5, f5, f13 - FADD f6, f6, f14 - FADD f7, f7, f15 + fmr f8, f0 + fmr f9, f1 + fmr f10, f2 + fmr f11, f3 - FNMSUB f24, f31, f1, f16 - FMADD f25, f31, f0, f17 - FNMSUB f26, f31, f3, f18 - FMADD f27, f31, f2, f19 + FMADD f24, f30, f0, f16 + FMADD f25, f30, f1, f17 + FMADD f26, f30, f2, f18 + FMADD f27, f30, f3, f19 - FMADD f0, f30, f0, f24 - FMADD f1, f30, f1, f25 - FMADD f2, f30, f2, f26 - FMADD f3, f30, f3, f27 + FNMSUB f0, f31, f9, f24 + FMADD f1, f31, f8, f25 + FNMSUB f2, f31, f11, f26 + FMADD f3, f31, f10, f27 - FNMSUB f24, f31, f5, f20 - FMADD f25, f31, f4, f21 - FNMSUB f26, f31, f7, f22 - FMADD f27, f31, f6, f23 + fmr f12, f4 + fmr f13, f5 + fmr f14, f6 + fmr f15, f7 - FMADD f4, f30, f4, f24 - FMADD f5, f30, f5, f25 - FMADD f6, f30, f6, f26 - FMADD f7, f30, f7, f27 + FMADD f24, f30, f4, f20 + FMADD f25, f30, f5, f21 + FMADD f26, f30, f6, f22 + FMADD f27, f30, f7, f23 + + FNMSUB f4, f31, f13, f24 + FMADD f5, f31, f12, f25 + FNMSUB f6, f31, f15, f26 + FMADD f7, f31, f14, f27 #else - FADD f0, f0, f8 - FADD f1, f1, f9 - FADD f2, f2, f10 - FADD f3, f3, f11 - - FADD f4, f4, f12 - FADD f5, f5, f13 - FADD f6, f6, f14 - FADD f7, f7, f15 - FMUL f16, f31, f1 FMUL f17, f31, f0 FMUL f18, f31, f3 @@ -2101,14 +2081,14 @@ LL(40): LL(42): FMA1 f0, f16, f20, f0 - FMA4 f3, f17, f20, f3 FMA2 f1, f16, f21, f1 - FMA3 f2, f17, f21, f2 + FMA4 f1, f17, f20, f1 + FMA3 f0, f17, f21, f0 FMA1 f4, f16, f22, f4 - FMA4 f7, f17, f22, f7 FMA2 f5, f16, f23, f5 - FMA3 f6, f17, f23, f6 + FMA4 f5, f17, f22, f5 + FMA3 f4, f17, f23, f4 LFD f16, 2 * SIZE(AO) LFD f17, 3 * SIZE(AO) @@ -2119,14 +2099,14 @@ LL(42): LFD f23, 7 * SIZE(BO) FMA1 f0, f16, f20, f0 - FMA4 f3, f17, f20, f3 FMA2 f1, f16, f21, f1 - FMA3 f2, f17, f21, f2 + FMA4 f1, f17, f20, f1 + FMA3 f0, f17, f21, f0 FMA1 f4, f16, f22, f4 - FMA4 f7, f17, f22, f7 FMA2 f5, f16, f23, f5 - FMA3 f6, f17, f23, f6 + FMA4 f5, f17, f22, f5 + FMA3 f4, f17, f23, f4 LFD f16, 4 * SIZE(AO) LFD f17, 5 * SIZE(AO) @@ -2137,14 +2117,14 @@ LL(42): LFD f23, 11 * SIZE(BO) FMA1 f0, f16, f20, f0 - FMA4 f3, f17, f20, f3 FMA2 f1, f16, f21, f1 - FMA3 f2, f17, f21, f2 + FMA4 f1, f17, f20, f1 + FMA3 
f0, f17, f21, f0 FMA1 f4, f16, f22, f4 - FMA4 f7, f17, f22, f7 FMA2 f5, f16, f23, f5 - FMA3 f6, f17, f23, f6 + FMA4 f5, f17, f22, f5 + FMA3 f4, f17, f23, f4 LFD f16, 6 * SIZE(AO) LFD f17, 7 * SIZE(AO) @@ -2155,14 +2135,14 @@ LL(42): LFD f23, 15 * SIZE(BO) FMA1 f0, f16, f20, f0 - FMA4 f3, f17, f20, f3 FMA2 f1, f16, f21, f1 - FMA3 f2, f17, f21, f2 + FMA4 f1, f17, f20, f1 + FMA3 f0, f17, f21, f0 FMA1 f4, f16, f22, f4 - FMA4 f7, f17, f22, f7 FMA2 f5, f16, f23, f5 - FMA3 f6, f17, f23, f6 + FMA4 f5, f17, f22, f5 + FMA3 f4, f17, f23, f4 LFD f16, 8 * SIZE(AO) LFD f17, 9 * SIZE(AO) @@ -2202,14 +2182,14 @@ LL(45): LL(46): FMA1 f0, f16, f20, f0 - FMA4 f3, f17, f20, f3 FMA2 f1, f16, f21, f1 - FMA3 f2, f17, f21, f2 + FMA4 f1, f17, f20, f1 + FMA3 f0, f17, f21, f0 FMA1 f4, f16, f22, f4 - FMA4 f7, f17, f22, f7 FMA2 f5, f16, f23, f5 - FMA3 f6, f17, f23, f6 + FMA4 f5, f17, f22, f5 + FMA3 f4, f17, f23, f4 LFD f16, 2 * SIZE(AO) LFD f17, 3 * SIZE(AO) @@ -2231,27 +2211,22 @@ LL(48): LFD f20, 0 * SIZE(CO2) LFD f21, 1 * SIZE(CO2) - FADD f0, f0, f2 - FADD f1, f1, f3 - FADD f4, f4, f6 - FADD f5, f5, f7 + fmr f2, f0 + fmr f3, f1 + fmr f6, f4 + fmr f7, f5 - FNMSUB f24, f31, f1, f16 - FMADD f25, f31, f0, f17 - FNMSUB f26, f31, f5, f20 - FMADD f27, f31, f4, f21 + FMADD f24, f30, f0, f16 + FMADD f25, f30, f1, f17 + FMADD f26, f30, f4, f20 + FMADD f27, f30, f5, f21 - FMADD f0, f30, f0, f24 - FMADD f1, f30, f1, f25 - FMADD f4, f30, f4, f26 - FMADD f5, f30, f5, f27 + FNMSUB f0, f31, f3, f24 + FMADD f1, f31, f2, f25 + FNMSUB f4, f31, f7, f26 + FMADD f5, f31, f6, f27 #else - FADD f0, f0, f2 - FADD f1, f1, f3 - FADD f4, f4, f6 - FADD f5, f5, f7 - FMUL f16, f31, f1 FMUL f17, f31, f0 FMUL f18, f31, f5 @@ -2401,10 +2376,10 @@ LL(52): FMA2 f1, f16, f21, f1 FMA2 f3, f18, f21, f3 - FMA4 f9, f17, f20, f9 - FMA4 f11, f19, f20, f11 - FMA3 f8, f17, f21, f8 - FMA3 f10, f19, f21, f10 + FMA4 f1, f17, f20, f1 + FMA4 f3, f19, f20, f3 + FMA3 f0, f17, f21, f0 + FMA3 f2, f19, f21, f2 LFD f16, 4 * SIZE(AO) LFD f17, 5 * SIZE(AO) @@ -2416,10 +2391,10 @@ LL(52): FMA2 f1, f16, f23, f1 FMA2 f3, f18, f23, f3 - FMA4 f9, f17, f22, f9 - FMA4 f11, f19, f22, f11 - FMA3 f8, f17, f23, f8 - FMA3 f10, f19, f23, f10 + FMA4 f1, f17, f22, f1 + FMA4 f3, f19, f22, f3 + FMA3 f0, f17, f23, f0 + FMA3 f2, f19, f23, f2 LFD f16, 8 * SIZE(AO) LFD f17, 9 * SIZE(AO) @@ -2436,10 +2411,10 @@ LL(52): FMA2 f1, f16, f21, f1 FMA2 f3, f18, f21, f3 - FMA4 f9, f17, f20, f9 - FMA4 f11, f19, f20, f11 - FMA3 f8, f17, f21, f8 - FMA3 f10, f19, f21, f10 + FMA4 f1, f17, f20, f1 + FMA4 f3, f19, f20, f3 + FMA3 f0, f17, f21, f0 + FMA3 f2, f19, f21, f2 LFD f16, 12 * SIZE(AO) LFD f17, 13 * SIZE(AO) @@ -2451,10 +2426,10 @@ LL(52): FMA2 f1, f16, f23, f1 FMA2 f3, f18, f23, f3 - FMA4 f9, f17, f22, f9 - FMA4 f11, f19, f22, f11 - FMA3 f8, f17, f23, f8 - FMA3 f10, f19, f23, f10 + FMA4 f1, f17, f22, f1 + FMA4 f3, f19, f22, f3 + FMA3 f0, f17, f23, f0 + FMA3 f2, f19, f23, f2 LFD f16, 16 * SIZE(AO) LFD f17, 17 * SIZE(AO) @@ -2471,10 +2446,10 @@ LL(52): FMA2 f1, f16, f21, f1 FMA2 f3, f18, f21, f3 - FMA4 f9, f17, f20, f9 - FMA4 f11, f19, f20, f11 - FMA3 f8, f17, f21, f8 - FMA3 f10, f19, f21, f10 + FMA4 f1, f17, f20, f1 + FMA4 f3, f19, f20, f3 + FMA3 f0, f17, f21, f0 + FMA3 f2, f19, f21, f2 LFD f16, 20 * SIZE(AO) LFD f17, 21 * SIZE(AO) @@ -2486,10 +2461,10 @@ LL(52): FMA2 f1, f16, f23, f1 FMA2 f3, f18, f23, f3 - FMA4 f9, f17, f22, f9 - FMA4 f11, f19, f22, f11 - FMA3 f8, f17, f23, f8 - FMA3 f10, f19, f23, f10 + FMA4 f1, f17, f22, f1 + FMA4 f3, f19, f22, f3 + FMA3 f0, f17, f23, f0 + FMA3 f2, f19, f23, f2 LFD 
f16, 24 * SIZE(AO) LFD f17, 25 * SIZE(AO) @@ -2506,10 +2481,10 @@ LL(52): FMA2 f1, f16, f21, f1 FMA2 f3, f18, f21, f3 - FMA4 f9, f17, f20, f9 - FMA4 f11, f19, f20, f11 - FMA3 f8, f17, f21, f8 - FMA3 f10, f19, f21, f10 + FMA4 f1, f17, f20, f1 + FMA4 f3, f19, f20, f3 + FMA3 f0, f17, f21, f0 + FMA3 f2, f19, f21, f2 LFD f16, 28 * SIZE(AO) LFD f17, 29 * SIZE(AO) @@ -2521,10 +2496,10 @@ LL(52): FMA2 f1, f16, f23, f1 FMA2 f3, f18, f23, f3 - FMA4 f9, f17, f22, f9 - FMA4 f11, f19, f22, f11 - FMA3 f8, f17, f23, f8 - FMA3 f10, f19, f23, f10 + FMA4 f1, f17, f22, f1 + FMA4 f3, f19, f22, f3 + FMA3 f0, f17, f23, f0 + FMA3 f2, f19, f23, f2 LFD f16, 32 * SIZE(AO) LFD f17, 33 * SIZE(AO) @@ -2573,10 +2548,10 @@ LL(56): LFD f16, 4 * SIZE(AO) LFD f18, 6 * SIZE(AO) - FMA4 f9, f17, f20, f9 - FMA4 f11, f19, f20, f11 - FMA3 f8, f17, f21, f8 - FMA3 f10, f19, f21, f10 + FMA4 f1, f17, f20, f1 + FMA4 f3, f19, f20, f3 + FMA3 f0, f17, f21, f0 + FMA3 f2, f19, f21, f2 LFD f17, 5 * SIZE(AO) LFD f19, 7 * SIZE(AO) @@ -2595,27 +2570,22 @@ LL(58): LFD f18, 2 * SIZE(CO1) LFD f19, 3 * SIZE(CO1) - FADD f0, f0, f8 - FADD f1, f1, f9 - FADD f2, f2, f10 - FADD f3, f3, f11 + fmr f8, f0 + fmr f9, f1 + fmr f10, f2 + fmr f11, f3 - FNMSUB f24, f31, f1, f16 - FMADD f25, f31, f0, f17 - FNMSUB f26, f31, f3, f18 - FMADD f27, f31, f2, f19 + FMADD f24, f30, f0, f16 + FMADD f25, f30, f1, f17 + FMADD f26, f30, f2, f18 + FMADD f27, f30, f3, f19 - FMADD f0, f30, f0, f24 - FMADD f1, f30, f1, f25 - FMADD f2, f30, f2, f26 - FMADD f3, f30, f3, f27 + FNMSUB f0, f31, f9, f24 + FMADD f1, f31, f8, f25 + FNMSUB f2, f31, f11, f26 + FMADD f3, f31, f10, f27 #else - FADD f0, f0, f8 - FADD f1, f1, f9 - FADD f2, f2, f10 - FADD f3, f3, f11 - FMUL f16, f31, f1 FMUL f17, f31, f0 FMUL f18, f31, f3 @@ -2735,9 +2705,9 @@ LL(60): LL(62): FMA1 f0, f16, f20, f0 - FMA4 f3, f17, f20, f3 FMA2 f1, f16, f21, f1 - FMA3 f2, f17, f21, f2 + FMA4 f1, f17, f20, f1 + FMA3 f0, f17, f21, f0 LFD f16, 4 * SIZE(AO) LFD f17, 5 * SIZE(AO) @@ -2745,9 +2715,9 @@ LL(62): LFD f21, 5 * SIZE(BO) FMA1 f0, f18, f22, f0 - FMA4 f3, f19, f22, f3 FMA2 f1, f18, f23, f1 - FMA3 f2, f19, f23, f2 + FMA4 f1, f19, f22, f1 + FMA3 f0, f19, f23, f0 LFD f18, 6 * SIZE(AO) LFD f19, 7 * SIZE(AO) @@ -2755,9 +2725,9 @@ LL(62): LFD f23, 7 * SIZE(BO) FMA1 f0, f16, f20, f0 - FMA4 f3, f17, f20, f3 FMA2 f1, f16, f21, f1 - FMA3 f2, f17, f21, f2 + FMA4 f1, f17, f20, f1 + FMA3 f0, f17, f21, f0 LFD f16, 8 * SIZE(AO) LFD f17, 9 * SIZE(AO) @@ -2765,9 +2735,9 @@ LL(62): LFD f21, 9 * SIZE(BO) FMA1 f0, f18, f22, f0 - FMA4 f3, f19, f22, f3 FMA2 f1, f18, f23, f1 - FMA3 f2, f19, f23, f2 + FMA4 f1, f19, f22, f1 + FMA3 f0, f19, f23, f0 LFD f18, 10 * SIZE(AO) LFD f19, 11 * SIZE(AO) @@ -2803,11 +2773,11 @@ LL(65): LL(66): FMA1 f0, f16, f20, f0 - FMA4 f3, f17, f20, f3 - LFD f20, 2 * SIZE(BO) FMA2 f1, f16, f21, f1 LFD f16, 2 * SIZE(AO) - FMA3 f2, f17, f21, f2 + FMA4 f1, f17, f20, f1 + LFD f20, 2 * SIZE(BO) + FMA3 f0, f17, f21, f0 LFD f17, 3 * SIZE(AO) LFD f21, 3 * SIZE(BO) @@ -2821,20 +2791,17 @@ LL(68): LFD f16, 0 * SIZE(CO1) LFD f17, 1 * SIZE(CO1) - FADD f0, f0, f2 - FADD f1, f1, f3 + fmr f2, f0 + fmr f3, f1 - FNMSUB f24, f31, f1, f16 - FMADD f25, f31, f0, f17 + FMADD f24, f30, f0, f16 + FMADD f25, f30, f1, f17 - FMADD f0, f30, f0, f24 - FMADD f1, f30, f1, f25 + FNMSUB f0, f31, f3, f24 + FMADD f1, f31, f2, f25 #else - FADD f0, f0, f2 - FADD f1, f1, f3 - FMUL f16, f31, f1 FMUL f17, f31, f0 From dd6c33d34d9925a7c02284c47a86afa4bf117237 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Fri, 19 Jul 2024 16:14:55 +0200 Subject: [PATCH 22/52] 
make NAN handling depend on dummy2 parameter --- kernel/x86/scal.S | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/kernel/x86/scal.S b/kernel/x86/scal.S index b0c232b1b..7e12a52ab 100644 --- a/kernel/x86/scal.S +++ b/kernel/x86/scal.S @@ -57,19 +57,24 @@ #ifdef XDOUBLE movl 44(%esp),%edi movl 48(%esp),%esi + movl 64(%esp),%ecx #elif defined(DOUBLE) movl 36(%esp),%edi movl 40(%esp),%esi + movl 56(%esp),%ecx #else movl 32(%esp),%edi movl 36(%esp),%esi + movl 54(%esp),%ecx #endif ftst fnstsw %ax andb $68, %ah -// je .L300 # Alpha != ZERO - jmp .L300 + je .L300 # Alpha != ZERO + + cmpl $1,%ecx # dummy2 flag + je .L300 /* Alpha == ZERO */ cmpl $1,%esi From 7311d9301650adc0672c2990e1997702630b9d47 Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Fri, 19 Jul 2024 16:50:50 +0000 Subject: [PATCH 23/52] Unroll TT further --- kernel/arm64/sgemm_small_kernel_tt_sve.c | 262 ++++++++++++++++++++--- 1 file changed, 231 insertions(+), 31 deletions(-) diff --git a/kernel/arm64/sgemm_small_kernel_tt_sve.c b/kernel/arm64/sgemm_small_kernel_tt_sve.c index 50dbd7399..c66330fb5 100644 --- a/kernel/arm64/sgemm_small_kernel_tt_sve.c +++ b/kernel/arm64/sgemm_small_kernel_tt_sve.c @@ -219,6 +219,7 @@ CNAME(BLASLONG M, const BLASLONG v_m2 = M & -v_size2; const BLASLONG v_m1 = M & -v_size; + const BLASLONG n8 = N & -8; const BLASLONG n4 = N & -4; const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0; @@ -238,6 +239,132 @@ CNAME(BLASLONG M, CREATE_A_POINTER(1, v_size); BLASLONG j = 0; + for (; j < n8; j += 8) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + CREATE_B_POINTER(4, 4); + CREATE_B_POINTER(5, 5); + CREATE_B_POINTER(6, 6); + CREATE_B_POINTER(7, 7); + UPDATE_B_POINTER(8); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + DECLARE_RESULT_VECTOR(1, 0); + DECLARE_RESULT_VECTOR(1, 1); + DECLARE_RESULT_VECTOR(1, 2); + DECLARE_RESULT_VECTOR(1, 3); + DECLARE_RESULT_VECTOR(1, 4); + DECLARE_RESULT_VECTOR(1, 5); + DECLARE_RESULT_VECTOR(1, 6); + DECLARE_RESULT_VECTOR(1, 7); + + if (LIKELY(packed_a != NULL)) { + if (j == 0) { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + VECTOR_PACK_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + GATHER_LOAD_A(pg_true, 1, 0); + VECTOR_PACK_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + 
UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + } + } + } else { + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + GATHER_LOAD_A(pg_true, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 7, 4, 3, 0); + } + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + VECTOR_STORE(pg_true, 1, 0); + VECTOR_STORE(pg_true, 1, 1); + VECTOR_STORE(pg_true, 1, 2); + VECTOR_STORE(pg_true, 1, 3); + VECTOR_STORE(pg_true, 1, 4); + VECTOR_STORE(pg_true, 1, 5); + VECTOR_STORE(pg_true, 1, 6); + VECTOR_STORE(pg_true, 1, 7); + INCR_C_POINTER(0, 8); + INCR_C_POINTER(1, 8); + } for (; j < n4; j += 4) { CREATE_B_POINTER(0, 0); @@ -257,38 +384,19 @@ CNAME(BLASLONG M, DECLARE_RESULT_VECTOR(1, 3); if (LIKELY(packed_a != NULL)) { - if (j == 0) { - for (; k < K; k++) { + for (; k < K; k++) { - QUADWORD_LOAD_B(0, 0); - GATHER_LOAD_A(pg_true, 0, 0); - VECTOR_PACK_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - GATHER_LOAD_A(pg_true, 1, 0); - VECTOR_PACK_A(1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - } - } else { - for (; k < K; k++) { - - QUADWORD_LOAD_B(0, 0); - UNPACK_VECTOR_A(0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); - UNPACK_VECTOR_A(1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); - UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); - } + QUADWORD_LOAD_B(0, 0); + UNPACK_VECTOR_A(0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + 
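(A scalar model of what these unrolled loops compute: eight columns of C are produced per pass, so every A element fetched feeds eight accumulators, and the strided gather of A is paid only once — the j == 0 pass writes what it gathered into packed_a, which the remaining column blocks re-read. The sketch below is illustrative only; names and layouts are simplified and the real kernel holds the accumulators in SVE registers with quadword loads of B.)

/* Illustrative sketch, assuming column-major C and the TT layouts. */
static void block8_with_packing(long M, long N, long K,
                                const float *A, long lda, /* op(A)(i,k) = A[k + i*lda] */
                                const float *B, long ldb, /* op(B)(k,j) = B[j + k*ldb] */
                                float *C, long ldc,       /* C(i,j) = C[i + j*ldc]     */
                                float *packed_a)
{
  for (long j = 0; j + 8 <= N; j += 8)
    for (long i = 0; i < M; i++) {
      float acc[8] = { 0.0f };
      for (long k = 0; k < K; k++) {
        float a;
        if (j == 0)
          packed_a[k * M + i] = a = A[k + i * lda]; /* strided gather + pack */
        else
          a = packed_a[k * M + i];                  /* cheap re-read         */
        for (int jj = 0; jj < 8; jj++)
          acc[jj] += a * B[(j + jj) + k * ldb];
      }
      for (int jj = 0; jj < 8; jj++)
        C[i + (j + jj) * ldc] += acc[jj];
    }
}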
UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + UNPACK_VECTOR_A(1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 0, 3, 0); } } else { for (; k < K; k++) { @@ -361,6 +469,52 @@ CNAME(BLASLONG M, CREATE_A_POINTER(0, 0); BLASLONG j = 0; + for (; j < n8; j += 8) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + CREATE_B_POINTER(4, 4); + CREATE_B_POINTER(5, 5); + CREATE_B_POINTER(6, 6); + CREATE_B_POINTER(7, 7); + UPDATE_B_POINTER(8); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_true, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + VECTOR_STORE(pg_true, 0, 0); + VECTOR_STORE(pg_true, 0, 1); + VECTOR_STORE(pg_true, 0, 2); + VECTOR_STORE(pg_true, 0, 3); + VECTOR_STORE(pg_true, 0, 4); + VECTOR_STORE(pg_true, 0, 5); + VECTOR_STORE(pg_true, 0, 6); + VECTOR_STORE(pg_true, 0, 7); + INCR_C_POINTER(0, 8); + } for (; j < n4; j += 4) { CREATE_B_POINTER(0, 0); @@ -418,6 +572,52 @@ CNAME(BLASLONG M, CREATE_A_POINTER(0, 0); BLASLONG j = 0; + for (; j < n8; j += 8) { + + CREATE_B_POINTER(0, 0); + CREATE_B_POINTER(1, 1); + CREATE_B_POINTER(2, 2); + CREATE_B_POINTER(3, 3); + CREATE_B_POINTER(4, 4); + CREATE_B_POINTER(5, 5); + CREATE_B_POINTER(6, 6); + CREATE_B_POINTER(7, 7); + UPDATE_B_POINTER(8); + + BLASLONG k = 0; + DECLARE_RESULT_VECTOR(0, 0); + DECLARE_RESULT_VECTOR(0, 1); + DECLARE_RESULT_VECTOR(0, 2); + DECLARE_RESULT_VECTOR(0, 3); + DECLARE_RESULT_VECTOR(0, 4); + DECLARE_RESULT_VECTOR(0, 5); + DECLARE_RESULT_VECTOR(0, 6); + DECLARE_RESULT_VECTOR(0, 7); + + for (; k < K; k++) { + + QUADWORD_LOAD_B(0, 0); + GATHER_LOAD_A(pg_tail, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 0, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 0, 3, 0); + QUADWORD_LOAD_B(4, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 4, 4, 0, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 5, 4, 1, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 6, 4, 2, 0); + UPDATE_RESULT_VECTOR_QUADWORD(0, 7, 4, 3, 0); + } + VECTOR_STORE(pg_tail, 0, 0); + VECTOR_STORE(pg_tail, 0, 1); + VECTOR_STORE(pg_tail, 0, 2); + VECTOR_STORE(pg_tail, 0, 3); + VECTOR_STORE(pg_tail, 0, 4); + VECTOR_STORE(pg_tail, 0, 5); + VECTOR_STORE(pg_tail, 0, 6); + VECTOR_STORE(pg_tail, 0, 7); + INCR_C_POINTER(0, 8); + } for (; j < n4; j += 4) { CREATE_B_POINTER(0, 0); From ea4ab3b31081a4344905929a9c911b0a53eeca45 Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Sat, 20 Jul 2024 13:39:22 +0000 Subject: [PATCH 24/52] Better header guard around bridge --- kernel/arm64/dgemm_small_kernel_nn_sve.c | 3 ++- kernel/arm64/dgemm_small_kernel_nt_sve.c | 3 ++- kernel/arm64/dgemm_small_kernel_tn_sve.c | 3 ++- 
 kernel/arm64/dgemm_small_kernel_tt_sve.c | 3 ++-
 kernel/arm64/sgemm_small_kernel_nn_sve.c | 3 ++-
 kernel/arm64/sgemm_small_kernel_nt_sve.c | 3 ++-
 kernel/arm64/sgemm_small_kernel_tn_sve.c | 3 ++-
 kernel/arm64/sgemm_small_kernel_tt_sve.c | 3 ++-
 8 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/kernel/arm64/dgemm_small_kernel_nn_sve.c b/kernel/arm64/dgemm_small_kernel_nn_sve.c
index 417633471..fa39103d0 100644
--- a/kernel/arm64/dgemm_small_kernel_nn_sve.c
+++ b/kernel/arm64/dgemm_small_kernel_nn_sve.c
@@ -29,7 +29,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <arm_neon.h>
 #include <arm_sve.h>
-#ifdef __ARM_NEON_SVE_BRIDGE
+#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \
+  __has_include(<arm_neon_sve_bridge.h>)
 #include <arm_neon_sve_bridge.h>
 #else
 #define svdup_neonq_f32(fixed_reg) \
diff --git a/kernel/arm64/dgemm_small_kernel_nt_sve.c b/kernel/arm64/dgemm_small_kernel_nt_sve.c
index 241d96a6c..0b306e754 100644
--- a/kernel/arm64/dgemm_small_kernel_nt_sve.c
+++ b/kernel/arm64/dgemm_small_kernel_nt_sve.c
@@ -29,7 +29,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <arm_neon.h>
 #include <arm_sve.h>
-#ifdef __ARM_NEON_SVE_BRIDGE
+#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \
+  __has_include(<arm_neon_sve_bridge.h>)
 #include <arm_neon_sve_bridge.h>
 #else
 #define svdup_neonq_f32(fixed_reg) \
diff --git a/kernel/arm64/dgemm_small_kernel_tn_sve.c b/kernel/arm64/dgemm_small_kernel_tn_sve.c
index 1b0fada2a..daca8e1be 100644
--- a/kernel/arm64/dgemm_small_kernel_tn_sve.c
+++ b/kernel/arm64/dgemm_small_kernel_tn_sve.c
@@ -29,7 +29,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <arm_neon.h>
 #include <arm_sve.h>
-#ifdef __ARM_NEON_SVE_BRIDGE
+#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \
+  __has_include(<arm_neon_sve_bridge.h>)
 #include <arm_neon_sve_bridge.h>
 #else
 #define svdup_neonq_f32(fixed_reg) \
diff --git a/kernel/arm64/dgemm_small_kernel_tt_sve.c b/kernel/arm64/dgemm_small_kernel_tt_sve.c
index aa5bf2751..efe11a9f9 100644
--- a/kernel/arm64/dgemm_small_kernel_tt_sve.c
+++ b/kernel/arm64/dgemm_small_kernel_tt_sve.c
@@ -29,7 +29,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <arm_neon.h>
 #include <arm_sve.h>
-#ifdef __ARM_NEON_SVE_BRIDGE
+#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \
+  __has_include(<arm_neon_sve_bridge.h>)
 #include <arm_neon_sve_bridge.h>
 #else
 #define svdup_neonq_f32(fixed_reg) \
diff --git a/kernel/arm64/sgemm_small_kernel_nn_sve.c b/kernel/arm64/sgemm_small_kernel_nn_sve.c
index 0af073a14..8ea9cf5a7 100644
--- a/kernel/arm64/sgemm_small_kernel_nn_sve.c
+++ b/kernel/arm64/sgemm_small_kernel_nn_sve.c
@@ -29,7 +29,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <arm_neon.h>
 #include <arm_sve.h>
-#ifdef __ARM_NEON_SVE_BRIDGE
+#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \
+  __has_include(<arm_neon_sve_bridge.h>)
 #include <arm_neon_sve_bridge.h>
 #else
 #define svdup_neonq_f32(fixed_reg) \
diff --git a/kernel/arm64/sgemm_small_kernel_nt_sve.c b/kernel/arm64/sgemm_small_kernel_nt_sve.c
index ed7ee6bd6..ac7e067cd 100644
--- a/kernel/arm64/sgemm_small_kernel_nt_sve.c
+++ b/kernel/arm64/sgemm_small_kernel_nt_sve.c
@@ -29,7 +29,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <arm_neon.h>
 #include <arm_sve.h>
-#ifdef __ARM_NEON_SVE_BRIDGE
+#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \
+  __has_include(<arm_neon_sve_bridge.h>)
 #include <arm_neon_sve_bridge.h>
 #else
 #define svdup_neonq_f32(fixed_reg) \
diff --git a/kernel/arm64/sgemm_small_kernel_tn_sve.c b/kernel/arm64/sgemm_small_kernel_tn_sve.c
index 03406daa6..114640950 100644
--- a/kernel/arm64/sgemm_small_kernel_tn_sve.c
+++ b/kernel/arm64/sgemm_small_kernel_tn_sve.c
@@ -29,7 +29,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <arm_neon.h>
 #include <arm_sve.h>
-#ifdef __ARM_NEON_SVE_BRIDGE
+#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \
+  __has_include(<arm_neon_sve_bridge.h>)
 #include <arm_neon_sve_bridge.h>
 #else
 #define svdup_neonq_f32(fixed_reg) \
diff --git a/kernel/arm64/sgemm_small_kernel_tt_sve.c b/kernel/arm64/sgemm_small_kernel_tt_sve.c
index c66330fb5..731c9861b 100644
--- a/kernel/arm64/sgemm_small_kernel_tt_sve.c
+++ b/kernel/arm64/sgemm_small_kernel_tt_sve.c
@@ -29,7 +29,8 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <arm_neon.h>
 #include <arm_sve.h>
-#ifdef __ARM_NEON_SVE_BRIDGE
+#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \
+  __has_include(<arm_neon_sve_bridge.h>)
 #include <arm_neon_sve_bridge.h>
 #else
 #define svdup_neonq_f32(fixed_reg) \

From c2ffd90e8c2cfc93c61d72bcef6fd19bb169e072 Mon Sep 17 00:00:00 2001
From: Martin Kroeker
Date: Sat, 20 Jul 2024 17:31:00 +0200
Subject: [PATCH 25/52] make NAN handling depend on dummy2 parameter

---
 kernel/x86_64/scal_sse.S  | 6 +++++-
 kernel/x86_64/scal_sse2.S | 7 ++++++-
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/kernel/x86_64/scal_sse.S b/kernel/x86_64/scal_sse.S
index 91149af3f..88ef4a3ed 100644
--- a/kernel/x86_64/scal_sse.S
+++ b/kernel/x86_64/scal_sse.S
@@ -60,8 +60,10 @@
 #ifdef WINDOWS_ABI
 	movq	40(%rsp), X
 	movq	48(%rsp), INCX
-
+	movq	64(%rsp), %r9
 	movaps	%xmm3, %xmm0
+#else
+	movq	24(%rsp), %r9
 #endif
 
 	SAVEREGISTERS
@@ -76,6 +78,8 @@
 	shufps	$0, %xmm0, %xmm0
 	jne	.L100		# Alpha != ZERO
+
+	cmpq	$1, %r9
 	je	.L100
 
 /* Alpha == ZERO */
 	cmpq	$SIZE, INCX
diff --git a/kernel/x86_64/scal_sse2.S b/kernel/x86_64/scal_sse2.S
index b778895ba..485e6ef46 100644
--- a/kernel/x86_64/scal_sse2.S
+++ b/kernel/x86_64/scal_sse2.S
@@ -48,6 +48,7 @@
 #define X ARG2
 #define INCX ARG3
 #endif
+#define FLAG %r9
 
 #define XX %r10
 #define I %rax
@@ -60,8 +61,10 @@
 #ifdef WINDOWS_ABI
 	movq	40(%rsp), X
 	movq	48(%rsp), INCX
-
+	movq	64(%rsp), FLAG
 	movaps	%xmm3, %xmm0
+#else
+	movq	24(%rsp), FLAG
 #endif
 
 	SAVEREGISTERS
@@ -75,6 +78,8 @@
 	comisd	%xmm0, %xmm1
 	jne	.L100		# Alpha != ZERO
 	jp	.L100		# For Alpha = NaN
+
+	cmpq	$1, FLAG
 	je	.L100	# disable the Alpha=zero path as it does not handle x=inf or nan
 /* Alpha == ZERO */
 	cmpq	$SIZE, INCX

From c064319ecb47838babe32999adf2149ed4f5cd83 Mon Sep 17 00:00:00 2001
From: Martin Kroeker
Date: Sat, 20 Jul 2024 17:42:31 +0200
Subject: [PATCH 26/52] fix alpha=NAN case

---
 kernel/x86_64/zscal_sse.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/x86_64/zscal_sse.S b/kernel/x86_64/zscal_sse.S
index 8505c67bf..acd6c3654 100644
--- a/kernel/x86_64/zscal_sse.S
+++ b/kernel/x86_64/zscal_sse.S
@@ -76,7 +76,7 @@
 	pxor	%xmm15, %xmm15
 	comiss	%xmm0, %xmm15
 	jne	.L100		# Alpha_r != ZERO
-
+	jp	.L100		# Alpha_r == NAN
 	comiss	%xmm1, %xmm15
 	jne	.L100		# Alpha_i != ZERO
 

From dfbc2348a88cbc20ec8016ed65a740639c3e45ae Mon Sep 17 00:00:00 2001
From: Martin Kroeker
Date: Sat, 20 Jul 2024 18:27:15 +0200
Subject: [PATCH 27/52] fix NAN handling

---
 kernel/x86_64/scal_atom.S  | 8 +++++++-
 kernel/x86_64/zscal_atom.S | 2 +-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git
a/kernel/x86_64/scal_atom.S b/kernel/x86_64/scal_atom.S index 11350ea19..284ea4515 100644 --- a/kernel/x86_64/scal_atom.S +++ b/kernel/x86_64/scal_atom.S @@ -60,8 +60,10 @@ #ifdef WINDOWS_ABI movq 40(%rsp), X movq 48(%rsp), INCX - + movq 64(%rsp), %r9 movaps %xmm3, %xmm0 +#else + movq 24(%rsp), %r9 #endif SAVEREGISTERS @@ -73,6 +75,10 @@ lea (, INCX, SIZE), INCX comisd %xmm0, %xmm1 jne .L100 + jp .L100 + + cmpq $1, %r9 + je .L100 /* Alpha == ZERO */ cmpq $SIZE, INCX diff --git a/kernel/x86_64/zscal_atom.S b/kernel/x86_64/zscal_atom.S index 1649b855b..7713626c9 100644 --- a/kernel/x86_64/zscal_atom.S +++ b/kernel/x86_64/zscal_atom.S @@ -74,7 +74,7 @@ pxor %xmm15, %xmm15 comisd %xmm0, %xmm15 jne .L30 # Alpha_r != ZERO - + jp .L30 comisd %xmm1, %xmm15 jne .L30 # Alpha_i != ZERO From 73f8866ffba84ca7c8f75b7e59030ce0611e5528 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Sun, 21 Jul 2024 13:42:47 +0200 Subject: [PATCH 28/52] make NAN handling depend on DUMMY2 parameter --- kernel/power/dscal.c | 34 +++++++++++++++++++++++++++++++--- kernel/power/sscal.c | 44 ++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 71 insertions(+), 7 deletions(-) diff --git a/kernel/power/dscal.c b/kernel/power/dscal.c index 2bbc1ea6d..da8083976 100644 --- a/kernel/power/dscal.c +++ b/kernel/power/dscal.c @@ -73,6 +73,15 @@ static void dscal_kernel_8_zero (BLASLONG n, FLOAT *x) for( i=0; i= 16 ) { BLASLONG align = ((32 - ((uintptr_t)x & (uintptr_t)0x1F)) >> 3) & 0x3; + if (dummy2 == 0) + for (j = 0; j < align; j++) { + x [j] = 0.0; + } + else for (j = 0; j < align; j++) { if (isfinite(x[j])) x[j] = 0.0; @@ -151,7 +166,13 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS j=n1; } #endif - + if (dummy2 == 0) + while(j < n) + { + x[j]=0.0; + j++; + } + else while(j < n) { if (!isfinite(x[j])) @@ -202,7 +223,14 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS if ( da == 0.0 ) { - + if (dummy2 == 0) + while(j < n) + { + x[i]=0.0; + i += inc_x; + j++; + } + else while(j < n) { if (!isfinite(x[i])) diff --git a/kernel/power/sscal.c b/kernel/power/sscal.c index 12246b0a3..54047a856 100644 --- a/kernel/power/sscal.c +++ b/kernel/power/sscal.c @@ -74,7 +74,24 @@ static void sscal_kernel_16_zero( BLASLONG n, FLOAT *x ) for( i=0; i= 32 ) { BLASLONG align = ((32 - ((uintptr_t)x & (uintptr_t)0x1F)) >> 2) & 0x7; + if (dummy2 == 0) + for (j = 0; j < align; j++){ + x[j] = 0.0; + } + else for (j = 0; j < align; j++) { if (isfinite(x[j])) x[j] = 0.0; @@ -153,9 +176,15 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS j=n1; } #endif - + if (dummy2 == 0) while(j < n) { + x[j] = 0.0; + j++; + } + else + while(j < n) + { if (isfinite(x[j])) x[j]=0.0; else @@ -204,7 +233,14 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS if ( da == 0.0 ) { - + if (dummy2 == 0) + while(j < n) + { + x[i]=0.0; + i += inc_x; + j++; + } + else while(j < n) { if (isfinite(x[i])) From 29f3e759b9796ea427029ab362fea407c04fc757 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Tue, 23 Jul 2024 11:20:48 +0200 Subject: [PATCH 29/52] work around a gcc14.1 bug observed on Loongarch --- utest/test_potrs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utest/test_potrs.c b/utest/test_potrs.c index f39287d6f..642ce1e37 100644 --- a/utest/test_potrs.c +++ b/utest/test_potrs.c @@ -32,7 +32,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
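Condensed, the convention the dummy2 flag establishes across the preceding assembly and C scal kernels is the following (an illustrative sketch, not OpenBLAS API):

/* alpha == 0.0 is false for a NaN alpha, which the assembly versions
 * catch with jp after comisd/comiss; dummy2 != 0 additionally forbids
 * the zeroing shortcut so that NaN/Inf already present in x survive
 * the multiply, matching the isfinite() loops in dscal.c/sscal.c. */
static void scal_sketch(long n, double alpha, double *x, long dummy2)
{
  if (alpha == 0.0 && dummy2 == 0) {
    for (long i = 0; i < n; i++)
      x[i] = 0.0;           /* shortcut: overwrites NaN/Inf */
  } else {
    for (long i = 0; i < n; i++)
      x[i] *= alpha;        /* IEEE 754: 0 * NaN = NaN, 0 * Inf = NaN */
  }
}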
**********************************************************************************/ #include "openblas_utest.h" - +#pragma GCC optimize("no-gcse") /* void BLASFUNC(cpotrf)(char*, BLASINT*, complex float*, BLASINT*, BLASINT*); void BLASFUNC(zpotrs_(char*, BLASINT*, BLASINT*, complex double*, From 821ef34635db922f875b5c40dba4a12bd2b94fa5 Mon Sep 17 00:00:00 2001 From: yamazaki-mitsufumi Date: Tue, 23 Jul 2024 20:44:39 +0900 Subject: [PATCH 30/52] Add A64FX to the list of CPUs supported by DYNAMIC_ARCH --- Makefile.system | 1 + cmake/arch.cmake | 2 +- cmake/prebuild.cmake | 31 +++++++++++++++++++++++++++++++ cmake/system.cmake | 12 ++++++++++++ driver/others/dynamic_arm64.c | 21 ++++++++++++++++++++- 5 files changed, 65 insertions(+), 2 deletions(-) diff --git a/Makefile.system b/Makefile.system index 847bab179..6c1f01ba7 100644 --- a/Makefile.system +++ b/Makefile.system @@ -689,6 +689,7 @@ ifneq ($(NO_SVE), 1) DYNAMIC_CORE += NEOVERSEV1 DYNAMIC_CORE += NEOVERSEN2 DYNAMIC_CORE += ARMV8SVE +DYNAMIC_CORE += A64FX endif DYNAMIC_CORE += THUNDERX DYNAMIC_CORE += THUNDERX2T99 diff --git a/cmake/arch.cmake b/cmake/arch.cmake index eb974456b..27c5650ab 100644 --- a/cmake/arch.cmake +++ b/cmake/arch.cmake @@ -46,7 +46,7 @@ if (DYNAMIC_ARCH) if (ARM64) set(DYNAMIC_CORE ARMV8 CORTEXA53 CORTEXA57 THUNDERX THUNDERX2T99 TSV110 EMAG8180 NEOVERSEN1 THUNDERX3T110) if (${CMAKE_C_COMPILER_VERSION} VERSION_GREATER 9.99) - set(DYNAMIC_CORE ${DYNAMIC_CORE} NEOVERSEV1 NEOVERSEN2 ARMV8SVE) + set(DYNAMIC_CORE ${DYNAMIC_CORE} NEOVERSEV1 NEOVERSEN2 ARMV8SVE A64FX) endif () if (DYNAMIC_LIST) set(DYNAMIC_CORE ARMV8 ${DYNAMIC_LIST}) diff --git a/cmake/prebuild.cmake b/cmake/prebuild.cmake index e64352f4a..609fbe241 100644 --- a/cmake/prebuild.cmake +++ b/cmake/prebuild.cmake @@ -1218,6 +1218,37 @@ endif () set(ZGEMM_UNROLL_M 4) set(ZGEMM_UNROLL_N 4) set(SYMV_P 16) + elseif ("${TCORE}" STREQUAL "A64FX") + file(APPEND ${TARGET_CONF_TEMP} + "#define L1_CODE_SIZE\t65536\n" + "#define L1_CODE_LINESIZE\t256\n" + "#define L1_CODE_ASSOCIATIVE\t8\n" + "#define L1_DATA_SIZE\t32768\n" + "#define L1_DATA_LINESIZE\t256\n" + "#define L1_DATA_ASSOCIATIVE\t8\n" + "#define L2_SIZE\t8388608\n\n" + "#define L2_LINESIZE\t256\n" + "#define L2_ASSOCIATIVE\t8\n" + "#define L3_SIZE\t0\n\n" + "#define L3_LINESIZE\t0\n\n" + "#define L3_ASSOCIATIVE\t0\n\n" + "#define DTB_DEFAULT_ENTRIES\t64\n" + "#define DTB_SIZE\t4096\n" + "#define HAVE_VFPV4\n" + "#define HAVE_VFPV3\n" + "#define HAVE_VFP\n" + "#define HAVE_NEON\n" + "#define HAVE_SVE\n" + "#define ARMV8\n") + set(SGEMM_UNROLL_M 4) + set(SGEMM_UNROLL_N 8) + set(DGEMM_UNROLL_M 2) + set(DGEMM_UNROLL_N 8) + set(CGEMM_UNROLL_M 2) + set(CGEMM_UNROLL_N 4) + set(ZGEMM_UNROLL_M 2) + set(ZGEMM_UNROLL_N 4) + set(SYMV_P 16) elseif ("${TCORE}" STREQUAL "P5600") file(APPEND ${TARGET_CONF_TEMP} "#define L2_SIZE 1048576\n" diff --git a/cmake/system.cmake b/cmake/system.cmake index e4778249f..b682c3af8 100644 --- a/cmake/system.cmake +++ b/cmake/system.cmake @@ -310,6 +310,18 @@ if (${TARGET} STREQUAL NEOVERSEV1) set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=armv8.2-a+sve") endif() endif() + if (${TARGET} STREQUAL A64FX) + if (${CMAKE_C_COMPILER_ID} STREQUAL "PGI" AND NOT NO_SVE) + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -Msve-intrinsics -march=armv8.2-a+sve -mtune=a64fx") + else () + execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION) + if (${GCC_VERSION} VERSION_GREATER 10.4 OR ${GCC_VERSION} VERSION_EQUAL 10.4) + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} 
-march=armv8.2-a+sve -mtune=a64fx") + else () + message(FATAL_ERROR "Compiler $${CMAKE_C_COMPILER} {GCC_VERSION} does not support A64FX.") + endif() + endif() + endif() endif() diff --git a/driver/others/dynamic_arm64.c b/driver/others/dynamic_arm64.c index 6b21028d1..dc88d816f 100644 --- a/driver/others/dynamic_arm64.c +++ b/driver/others/dynamic_arm64.c @@ -120,6 +120,11 @@ extern gotoblas_t gotoblas_CORTEXA55; #else #define gotoblas_CORTEXA55 gotoblas_ARMV8 #endif +#ifdef DYN_A64FX +extern gotoblas_t gotoblas_A64FX; +#else +#define gotoblas_A64FX gotoblas_ARMV8 +#endif #else extern gotoblas_t gotoblas_CORTEXA53; #define gotoblas_CORTEXA55 gotoblas_CORTEXA53 @@ -136,10 +141,12 @@ extern gotoblas_t gotoblas_NEOVERSEN1; extern gotoblas_t gotoblas_NEOVERSEV1; extern gotoblas_t gotoblas_NEOVERSEN2; extern gotoblas_t gotoblas_ARMV8SVE; +extern gotoblas_t gotoblas_A64FX; #else #define gotoblas_NEOVERSEV1 gotoblas_ARMV8 #define gotoblas_NEOVERSEN2 gotoblas_ARMV8 #define gotoblas_ARMV8SVE gotoblas_ARMV8 +#define gotoblas_A64FX gotoblas_ARMV8 #endif extern gotoblas_t gotoblas_THUNDERX3T110; #endif @@ -149,7 +156,7 @@ extern void openblas_warning(int verbose, const char * msg); #define FALLBACK_VERBOSE 1 #define NEOVERSEN1_FALLBACK "OpenBLAS : Your OS does not support SVE instructions. OpenBLAS is using Neoverse N1 kernels as a fallback, which may give poorer performance.\n" -#define NUM_CORETYPES 17 +#define NUM_CORETYPES 18 /* * In case asm/hwcap.h is outdated on the build system, make sure @@ -184,6 +191,7 @@ static char *corename[] = { "thunderx3t110", "cortexa55", "armv8sve", + "a64fx", "unknown" }; @@ -205,6 +213,7 @@ char *gotoblas_corename(void) { if (gotoblas == &gotoblas_THUNDERX3T110) return corename[14]; if (gotoblas == &gotoblas_CORTEXA55) return corename[15]; if (gotoblas == &gotoblas_ARMV8SVE) return corename[16]; + if (gotoblas == &gotoblas_A64FX) return corename[17]; return corename[NUM_CORETYPES]; } @@ -241,6 +250,7 @@ static gotoblas_t *force_coretype(char *coretype) { case 14: return (&gotoblas_THUNDERX3T110); case 15: return (&gotoblas_CORTEXA55); case 16: return (&gotoblas_ARMV8SVE); + case 17: return (&gotoblas_A64FX); } snprintf(message, 128, "Core not found: %s\n", coretype); openblas_warning(1, message); @@ -346,6 +356,15 @@ static gotoblas_t *get_coretype(void) { return &gotoblas_THUNDERX3T110; } break; + case 0x46: // Fujitsu + switch (part) + { +#ifndef NO_SVE + case 0x001: // A64FX + return &gotoblas_A64FX; +#endif + } + break; case 0x48: // HiSilicon switch (part) { From 0096482f0383aa96f2652aa76ee98be79c920717 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Tue, 23 Jul 2024 15:01:26 +0200 Subject: [PATCH 31/52] fix incompatible definitions of MAXLOC --- lapack-netlib/SRC/cpstf2.c | 2 +- lapack-netlib/SRC/cpstrf.c | 2 +- lapack-netlib/SRC/spstf2.c | 2 +- lapack-netlib/SRC/spstrf.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lapack-netlib/SRC/cpstf2.c b/lapack-netlib/SRC/cpstf2.c index ae7fb05c4..f983f879f 100644 --- a/lapack-netlib/SRC/cpstf2.c +++ b/lapack-netlib/SRC/cpstf2.c @@ -256,7 +256,7 @@ static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n"; #define myceiling_(w) {ceil(w)} #define myhuge_(w) {HUGE_VAL} //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);} -#define mymaxloc_(w,s,e,n) dmaxloc_(w,*(s),*(e),n) +#define mymaxloc_(w,s,e,n) smaxloc_(w,*(s),*(e),n) /* procedure parameter types for -A and -C++ */ diff --git a/lapack-netlib/SRC/cpstrf.c 
b/lapack-netlib/SRC/cpstrf.c index ab4811273..a271835c2 100644 --- a/lapack-netlib/SRC/cpstrf.c +++ b/lapack-netlib/SRC/cpstrf.c @@ -256,7 +256,7 @@ static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n"; #define myceiling_(w) {ceil(w)} #define myhuge_(w) {HUGE_VAL} //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);} -#define mymaxloc_(w,s,e,n) dmaxloc_(w,*(s),*(e),n) +#define mymaxloc_(w,s,e,n) smaxloc_(w,*(s),*(e),n) /* procedure parameter types for -A and -C++ */ diff --git a/lapack-netlib/SRC/spstf2.c b/lapack-netlib/SRC/spstf2.c index 7b2abce03..c9e01583a 100644 --- a/lapack-netlib/SRC/spstf2.c +++ b/lapack-netlib/SRC/spstf2.c @@ -256,7 +256,7 @@ static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n"; #define myceiling_(w) {ceil(w)} #define myhuge_(w) {HUGE_VAL} //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);} -#define mymaxloc_(w,s,e,n) dmaxloc_(w,*(s),*(e),n) +#define mymaxloc_(w,s,e,n) smaxloc_(w,*(s),*(e),n) /* procedure parameter types for -A and -C++ */ diff --git a/lapack-netlib/SRC/spstrf.c b/lapack-netlib/SRC/spstrf.c index 75e1d65b7..ebb4fe934 100644 --- a/lapack-netlib/SRC/spstrf.c +++ b/lapack-netlib/SRC/spstrf.c @@ -256,7 +256,7 @@ static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n"; #define myceiling_(w) {ceil(w)} #define myhuge_(w) {HUGE_VAL} //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);} -#define mymaxloc_(w,s,e,n) dmaxloc_(w,*(s),*(e),n) +#define mymaxloc_(w,s,e,n) smaxloc_(w,*(s),*(e),n) /* procedure parameter types for -A and -C++ */ From b613754143d68a2635f0232f29a04318f15e34d3 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Wed, 24 Jul 2024 14:31:29 +0200 Subject: [PATCH 32/52] Update scal..c --- kernel/arm/scal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/arm/scal.c b/kernel/arm/scal.c index 1f96f9b95..6a2c37631 100644 --- a/kernel/arm/scal.c +++ b/kernel/arm/scal.c @@ -43,7 +43,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS if ( (n <= 0) || (inc_x <= 0)) return(0); - if (dummy2 == 0) + if (dummy2 == 0) { while(j < n) { From 88caf02f6286c7e84948b788c01bed2cb79ba86d Mon Sep 17 00:00:00 2001 From: yamazaki-mitsufumi Date: Thu, 25 Jul 2024 22:43:13 +0900 Subject: [PATCH 33/52] Fix ambiguous error on Mac OS --- kernel/arm64/gemv_n_sve.c | 4 ++-- kernel/arm64/gemv_t_sve.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/kernel/arm64/gemv_n_sve.c b/kernel/arm64/gemv_n_sve.c index d3aa57ae3..295055561 100644 --- a/kernel/arm64/gemv_n_sve.c +++ b/kernel/arm64/gemv_n_sve.c @@ -37,13 +37,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define SV_COUNT svcntd #define SV_TYPE svfloat64_t #define SV_TRUE svptrue_b64 -#define SV_WHILE svwhilelt_b64 +#define SV_WHILE svwhilelt_b64_s64 #define SV_DUP svdup_f64 #else #define SV_COUNT svcntw #define SV_TYPE svfloat32_t #define SV_TRUE svptrue_b32 -#define SV_WHILE svwhilelt_b32 +#define SV_WHILE svwhilelt_b32_s64 #define SV_DUP svdup_f32 #endif diff --git a/kernel/arm64/gemv_t_sve.c b/kernel/arm64/gemv_t_sve.c index bff08b257..ab700a374 100644 --- a/kernel/arm64/gemv_t_sve.c +++ b/kernel/arm64/gemv_t_sve.c @@ -37,13 +37,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define SV_COUNT svcntd #define SV_TYPE svfloat64_t #define SV_TRUE svptrue_b64 -#define SV_WHILE svwhilelt_b64 +#define SV_WHILE svwhilelt_b64_s64 #define SV_DUP svdup_f64 #else #define SV_COUNT svcntw #define SV_TYPE svfloat32_t #define SV_TRUE svptrue_b32 -#define SV_WHILE svwhilelt_b32 +#define SV_WHILE svwhilelt_b32_s64 #define SV_DUP svdup_f32 #endif From 24acdd6bbbd0f9726d867851f62e75ed4c35644a Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Fri, 26 Jul 2024 09:49:24 +0200 Subject: [PATCH 34/52] correct offset --- kernel/x86/scal.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/x86/scal.S b/kernel/x86/scal.S index 7e12a52ab..6620d3169 100644 --- a/kernel/x86/scal.S +++ b/kernel/x86/scal.S @@ -65,7 +65,7 @@ #else movl 32(%esp),%edi movl 36(%esp),%esi - movl 54(%esp),%ecx + movl 52(%esp),%ecx #endif ftst From a875304eb0da7cc7966cf45317cf57392192430f Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Fri, 26 Jul 2024 09:50:20 +0200 Subject: [PATCH 35/52] fix inverted conditional for NAN handling --- kernel/riscv64/scal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/riscv64/scal.c b/kernel/riscv64/scal.c index bebbed67e..bd53fcff9 100644 --- a/kernel/riscv64/scal.c +++ b/kernel/riscv64/scal.c @@ -43,7 +43,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS if ( (n <= 0) || (inc_x <= 0)) return(0); - if (dummy2 == 0) { + if (dummy2 == 1) { while(j < n) { From d9ae4609fb4c5623b30b06abcfef5f5e1ae30ab1 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Fri, 26 Jul 2024 11:15:33 +0200 Subject: [PATCH 36/52] remove C99 requirement --- utest/test_gemv.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/utest/test_gemv.c b/utest/test_gemv.c index c85ef3f38..d5b8d2c6c 100644 --- a/utest/test_gemv.c +++ b/utest/test_gemv.c @@ -12,6 +12,7 @@ CTEST(sgemv, 0_nan_inf) { + int i; blasint N = 17; blasint incX = 1; blasint incY = 1; @@ -24,19 +25,20 @@ CTEST(sgemv, 0_nan_inf) memset(A, 0, sizeof(A)); memset(X, 0, sizeof(X)); - for (int i = 0; i < (N - 1); i += 2) + for (i = 0; i < (N - 1); i += 2) { Y[i] = NAN; Y[i + 1] = INFINITY; } Y[N - 1] = NAN; BLASFUNC(sgemv)(&trans, &N, &N, &alpha, A, &N, X, &incX, &beta, Y, &incY); - for (int i = 0; i < N; i ++) + for (i = 0; i < N; i ++) ASSERT_TRUE(Y[i] == 0.0); } CTEST(sgemv, 0_nan_inf_incy_2) { + int i; blasint N = 17; blasint Ny = 33; blasint incX = 1; @@ -52,7 +54,7 @@ CTEST(sgemv, 0_nan_inf_incy_2) memset(A, 0, sizeof(A)); memset(X, 0, sizeof(X)); memset(Y, 0, sizeof(Y)); - for (int i = 0; i < (N - 1); i += 2) + for (i = 0; i < (N - 1); i += 2) { ay[0] = NAN; ay += 2; @@ -61,7 +63,7 @@ CTEST(sgemv, 0_nan_inf_incy_2) } Y[Ny - 1] = NAN; BLASFUNC(sgemv)(&trans, &N, &N, &alpha, A, &N, X, &incX, &beta, Y, &incY); - for (int i = 0; i < Ny; i ++) + for (i = 0; i < Ny; i ++) ASSERT_TRUE(Y[i] == 0.0); } @@ -70,6 +72,7 @@ CTEST(sgemv, 0_nan_inf_incy_2) #ifdef BUILD_DOUBLE CTEST(dgemv, 0_nan_inf) { + int i; blasint N = 17; blasint incX = 1; blasint incY = 1; @@ -82,19 +85,20 @@ CTEST(dgemv, 0_nan_inf) memset(A, 0, sizeof(A)); memset(X, 0, sizeof(X)); - for (int i = 0; i < (N - 1); i += 2) + for (i = 0; i < (N - 1); i += 2) { Y[i] = NAN; Y[i + 1] = INFINITY; } Y[N - 1] = NAN; BLASFUNC(dgemv)(&trans, &N, &N, &alpha, A, &N, X, &incX, &beta, Y, &incY); - for (int i = 0; i < N; i ++) + for (i = 0; i < N; i ++) ASSERT_TRUE(Y[i] == 0.0); } CTEST(dgemv, 0_nan_inf_incy_2) { + int i; blasint N = 17; blasint Ny = 33; blasint incX = 1; @@ -110,7 
+114,7 @@ CTEST(dgemv, 0_nan_inf_incy_2) memset(A, 0, sizeof(A)); memset(X, 0, sizeof(X)); memset(Y, 0, sizeof(Y)); - for (int i = 0; i < (N - 1); i += 2) + for (i = 0; i < (N - 1); i += 2) { ay[0] = NAN; ay += 2; @@ -119,7 +123,7 @@ CTEST(dgemv, 0_nan_inf_incy_2) } Y[Ny - 1] = NAN; BLASFUNC(dgemv)(&trans, &N, &N, &alpha, A, &N, X, &incX, &beta, Y, &incY); - for (int i = 0; i < Ny; i ++) + for (i = 0; i < Ny; i ++) ASSERT_TRUE(Y[i] == 0.0); } From db5328e85bb1a9e11781ab92ce15273c28dffec0 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Fri, 26 Jul 2024 12:45:39 +0200 Subject: [PATCH 37/52] make array dimensions constant --- utest/test_gemv.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/utest/test_gemv.c b/utest/test_gemv.c index d5b8d2c6c..deae6d2a8 100644 --- a/utest/test_gemv.c +++ b/utest/test_gemv.c @@ -7,13 +7,14 @@ #ifndef INFINITY #define INFINITY 1.0/0.0 #endif +#define N 17 +#define Ny 33 #ifdef BUILD_SINGLE CTEST(sgemv, 0_nan_inf) { int i; - blasint N = 17; blasint incX = 1; blasint incY = 1; float alpha = 0.0; @@ -39,8 +40,6 @@ CTEST(sgemv, 0_nan_inf) CTEST(sgemv, 0_nan_inf_incy_2) { int i; - blasint N = 17; - blasint Ny = 33; blasint incX = 1; blasint incY = 2; float alpha = 0.0; @@ -73,7 +72,6 @@ CTEST(sgemv, 0_nan_inf_incy_2) CTEST(dgemv, 0_nan_inf) { int i; - blasint N = 17; blasint incX = 1; blasint incY = 1; double alpha = 0.0; @@ -99,8 +97,6 @@ CTEST(dgemv, 0_nan_inf) CTEST(dgemv, 0_nan_inf_incy_2) { int i; - blasint N = 17; - blasint Ny = 33; blasint incX = 1; blasint incY = 2; double alpha = 0.0; From 700649286351e763ff1263867e962a20e60987fc Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Fri, 26 Jul 2024 12:49:57 +0200 Subject: [PATCH 38/52] replace "Preview" in the MSVC vcvarsall path with "Community" --- docs/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/install.md b/docs/install.md index 1b04165c7..ffb4659d8 100644 --- a/docs/install.md +++ b/docs/install.md @@ -245,7 +245,7 @@ newer installed. On Windows 11 with Visual Studio 2022, this would be done by invoking: ```shell - "c:\Program Files\Microsoft Visual Studio\2022\Preview\vc\Auxiliary\Build\vcvars64.bat" + "c:\Program Files\Microsoft Visual Studio\2022\Community\vc\Auxiliary\Build\vcvars64.bat" ``` With VS2019, the command should be the same (except for the year number of course). 
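A note on the convention these gemv tests pin down: with alpha == 0 and beta == 0 the destination vector is overwritten rather than accumulated, so stale NaN or Inf values must not survive, while the dummy2 flag threaded through the preceding scal patches selects the IEEE-propagating variant when set. A minimal scalar sketch of that contract, written in the same C89 style the tests were just converted to (set_zero is a hypothetical name for illustration, not an OpenBLAS entry point):

    #include <math.h>

    /* Hypothetical reference for the flag-dependent zeroing convention:
     * dummy2 == 0 overwrites unconditionally (the behaviour test_gemv.c
     * asserts), anything else keeps IEEE semantics (0 * Inf = NaN). */
    static void set_zero(int n, float *y, int inc_y, int dummy2)
    {
        int i, k;
        for (i = 0, k = 0; k < n; k++, i += inc_y) {
            if (dummy2 == 0)
                y[i] = 0.0f;                         /* plain overwrite */
            else
                y[i] = isfinite(y[i]) ? 0.0f : NAN;  /* propagate NaN/Inf */
        }
    }

The two branches differ only for non-finite inputs: test_gemv.c exercises the first, while the dummy2 != 0 path is what the kernel patches above add for standards-conforming scal.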
From a090011fbff0d15b63a7edcc0156ed790e87f4b1 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Fri, 26 Jul 2024 12:56:12 +0200 Subject: [PATCH 39/52] just use numeric constants in dimensions --- utest/test_gemv.c | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/utest/test_gemv.c b/utest/test_gemv.c index deae6d2a8..dab6d2f11 100644 --- a/utest/test_gemv.c +++ b/utest/test_gemv.c @@ -7,22 +7,21 @@ #ifndef INFINITY #define INFINITY 1.0/0.0 #endif -#define N 17 -#define Ny 33 #ifdef BUILD_SINGLE CTEST(sgemv, 0_nan_inf) { int i; + blasint N = 17; blasint incX = 1; blasint incY = 1; float alpha = 0.0; float beta = 0.0; char trans = 'N'; - float A[N * N]; - float X[N]; - float Y[N]; + float A[17 * 17]; + float X[17]; + float Y[17]; memset(A, 0, sizeof(A)); memset(X, 0, sizeof(X)); @@ -40,14 +39,16 @@ CTEST(sgemv, 0_nan_inf) CTEST(sgemv, 0_nan_inf_incy_2) { int i; + blasint N = 17; + blasint Ny = 33; blasint incX = 1; blasint incY = 2; float alpha = 0.0; float beta = 0.0; char trans = 'N'; - float A[N * N]; - float X[N]; - float Y[Ny]; + float A[17 * 17]; + float X[17]; + float Y[33]; float *ay = Y; memset(A, 0, sizeof(A)); @@ -72,14 +73,15 @@ CTEST(sgemv, 0_nan_inf_incy_2) CTEST(dgemv, 0_nan_inf) { int i; + blasint N = 17; blasint incX = 1; blasint incY = 1; double alpha = 0.0; double beta = 0.0; char trans = 'N'; - double A[N * N]; - double X[N]; - double Y[N]; + double A[17 * 17]; + double X[17]; + double Y[17]; memset(A, 0, sizeof(A)); memset(X, 0, sizeof(X)); @@ -97,14 +99,16 @@ CTEST(dgemv, 0_nan_inf) CTEST(dgemv, 0_nan_inf_incy_2) { int i; + blasint N = 17; + blasint Ny = 33; blasint incX = 1; blasint incY = 2; double alpha = 0.0; double beta = 0.0; char trans = 'N'; - double A[N * N]; - double X[N]; - double Y[Ny]; + double A[17 * 17]; + double X[17]; + double Y[33]; double *ay = Y; memset(A, 0, sizeof(A)); From 4460d3ee7f2324d2642350a9d1650776d4fac981 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Fri, 26 Jul 2024 15:07:52 +0200 Subject: [PATCH 40/52] re-enable the sgesdd benchmark --- benchmark/pybench/benchmarks/bench_blas.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/benchmark/pybench/benchmarks/bench_blas.py b/benchmark/pybench/benchmarks/bench_blas.py index 628c0cb2a..8127dd0c7 100644 --- a/benchmark/pybench/benchmarks/bench_blas.py +++ b/benchmark/pybench/benchmarks/bench_blas.py @@ -234,14 +234,10 @@ def test_gesdd(benchmark, mn, variant): gesdd = ow.get_func('gesdd', variant) u, s, vt, info = benchmark(run_gesdd, a, lwork, gesdd) - if variant != 's': - # On entry to SLASCL parameter number 4 had an illegal value - # under codspeed (cannot repro locally or on CI w/o codspeed) - # https://github.com/OpenMathLib/OpenBLAS/issues/4776 - assert info == 0 + assert info == 0 - atol = {'s': 1e-5, 'd': 1e-13} - np.testing.assert_allclose(u @ np.diag(s) @ vt, a, atol=atol[variant]) + atol = {'s': 1e-5, 'd': 1e-13} + np.testing.assert_allclose(u @ np.diag(s) @ vt, a, atol=atol[variant]) # linalg.eigh From 175008caf8c58a35688a08a6200872772da146e0 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Sat, 27 Jul 2024 19:08:02 +0200 Subject: [PATCH 41/52] harden against a dashed suffix to the gcc version number --- c_check | 3 +++ 1 file changed, 3 insertions(+) diff --git a/c_check b/c_check index 114eed60c..c2b52c81b 100755 --- a/c_check +++ b/c_check @@ -356,6 +356,9 @@ if [ "$compiler" = "GCC" ]; then no_avx2=0 oldgcc=0 data=`$compiler_name -dumpversion` + case "$data" in *-*) + data="${data%-*}" + 
esac case "$data" in *.*.*) data="${data%.*}" esac From 85ca003ae7174ac00857c9c75ddfd53589c4918e Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Mon, 29 Jul 2024 11:14:25 +0100 Subject: [PATCH 42/52] Add fallback compile options for A64FX target --- Makefile.arm64 | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/Makefile.arm64 b/Makefile.arm64 index 6ba63e94e..2c68d215f 100644 --- a/Makefile.arm64 +++ b/Makefile.arm64 @@ -276,12 +276,19 @@ endif endif endif -ifeq (1, $(filter 1,$(GCCVERSIONGTEQ11) $(ISCLANG))) ifeq ($(CORE), A64FX) +ifeq (1, $(filter 1,$(GCCVERSIONGTEQ10) $(ISCLANG))) +ifeq (1, $(filter 1,$(GCCMINORVERSIONGTEQ3))) CCOMMON_OPT += -march=armv8.2-a+sve -mtune=a64fx ifneq ($(F_COMPILER), NAG) FCOMMON_OPT += -march=armv8.2-a+sve -mtune=a64fx endif +else +CCOMMON_OPT += -march=armv8.4-a+sve -mtune=neoverse-n1 +ifneq ($(F_COMPILER), NAG) +FCOMMON_OPT += -march=armv8.4-a -mtune=neoverse-n1 +endif +endif endif endif From 3ed226d3f8a724b163a69986a679e329b2fce056 Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Mon, 29 Jul 2024 11:32:59 +0100 Subject: [PATCH 43/52] Re-add ISCLANG filter --- Makefile.arm64 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.arm64 b/Makefile.arm64 index 2c68d215f..d3ea7522d 100644 --- a/Makefile.arm64 +++ b/Makefile.arm64 @@ -278,7 +278,7 @@ endif ifeq ($(CORE), A64FX) ifeq (1, $(filter 1,$(GCCVERSIONGTEQ10) $(ISCLANG))) -ifeq (1, $(filter 1,$(GCCMINORVERSIONGTEQ3))) +ifeq (1, $(filter 1,$(GCCMINORVERSIONGTEQ3) $(ISCLANG))) CCOMMON_OPT += -march=armv8.2-a+sve -mtune=a64fx ifneq ($(F_COMPILER), NAG) FCOMMON_OPT += -march=armv8.2-a+sve -mtune=a64fx From 54ce33e85110911ab56f1b67c681e84cfe9b07fc Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Mon, 29 Jul 2024 15:28:59 +0100 Subject: [PATCH 44/52] Fix GCC11 check for A64FX target --- Makefile.arm64 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.arm64 b/Makefile.arm64 index d3ea7522d..fccc0d0d0 100644 --- a/Makefile.arm64 +++ b/Makefile.arm64 @@ -278,7 +278,7 @@ endif ifeq ($(CORE), A64FX) ifeq (1, $(filter 1,$(GCCVERSIONGTEQ10) $(ISCLANG))) -ifeq (1, $(filter 1,$(GCCMINORVERSIONGTEQ3) $(ISCLANG))) +ifeq (1, $(filter 1,$(GCCMINORVERSIONGTEQ3) $(GCCVERSIONGTEQ11) $(ISCLANG))) CCOMMON_OPT += -march=armv8.2-a+sve -mtune=a64fx ifneq ($(F_COMPILER), NAG) FCOMMON_OPT += -march=armv8.2-a+sve -mtune=a64fx From a13015b65689b71136fc71c0de8aac9f78be4b85 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Tue, 30 Jul 2024 19:10:18 +0200 Subject: [PATCH 45/52] try requesting ubuntu22 instead of latest --- Jenkinsfile.pwr | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile.pwr b/Jenkinsfile.pwr index 96e18b8ad..ea27ef98f 100644 --- a/Jenkinsfile.pwr +++ b/Jenkinsfile.pwr @@ -1,7 +1,7 @@ pipeline { agent { docker { - image 'osuosl/ubuntu-ppc64le' + image 'osuosl/ubuntu-ppc64le:22.04' } } stages { From 86c15f028b37bc4285f0235c635b62aa639a6c82 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Tue, 30 Jul 2024 21:21:34 +0200 Subject: [PATCH 46/52] Update Jenkinsfile.pwr --- Jenkinsfile.pwr | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile.pwr b/Jenkinsfile.pwr index ea27ef98f..b2f8ce2e5 100644 --- a/Jenkinsfile.pwr +++ b/Jenkinsfile.pwr @@ -1,7 +1,7 @@ pipeline { agent { docker { - image 'osuosl/ubuntu-ppc64le:22.04' + image 'osuosl/ubuntu-ppc64le:18.04' } } stages { From 3db5dbc88e62ff6676202eb80645fe41e901f08d Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Mon, 20 May 2024 
22:40:04 +0200 Subject: [PATCH 47/52] forward to GEMV when one argument is actually a vector --- interface/gemm.c | 49 ++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 45 insertions(+), 4 deletions(-) diff --git a/interface/gemm.c b/interface/gemm.c index 4537b6a78..c3118672f 100644 --- a/interface/gemm.c +++ b/interface/gemm.c @@ -47,22 +47,29 @@ #define SMP_THRESHOLD_MIN 65536.0 #ifdef XDOUBLE #define ERROR_NAME "QGEMM " +#define GEMV BLASFUNC(qgemv) #elif defined(DOUBLE) #define ERROR_NAME "DGEMM " +#define GEMV BLASFUNC(dgemv) #elif defined(BFLOAT16) #define ERROR_NAME "SBGEMM " +#define GEMV BLASFUNC(sbgemv) #else #define ERROR_NAME "SGEMM " +#define GEMV BLASFUNC(sgemv) #endif #else #define SMP_THRESHOLD_MIN 8192.0 #ifndef GEMM3M #ifdef XDOUBLE #define ERROR_NAME "XGEMM " +#define GEMV BLASFUNC(xgemv) #elif defined(DOUBLE) #define ERROR_NAME "ZGEMM " +#define GEMV BLASFUNC(zgemv) #else #define ERROR_NAME "CGEMM " +#define GEMV BLASFUNC(cgemv) #endif #else #ifdef XDOUBLE @@ -485,9 +492,38 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS } #endif #endif // defined(__linux__) && defined(__x86_64__) && defined(BFLOAT16) + // fprintf(stderr,"G E M M interface m n k %d %d %d\n",args.m,args.n,args.k); if ((args.m == 0) || (args.n == 0)) return; +#if 1 +#ifndef GEMM3M + if (args.m == 1) { + char *NT=(char*)malloc(2*sizeof(char)); + if (transb&1)strcpy(NT,"T"); + else NT="N"; +// fprintf(stderr,"G E M V\n"); + GEMV(NT, &args.n ,&args.k, args.alpha, args.b, &args.ldb, args.a, &args.m, args.beta, args.c, &args.m); +//SUBROUTINE SGEMV(TRANS,M,N,ALPHA,A,LDA,X,INCX,BETA,Y,INCY) +return; + } else { + if (args.n == 1) { +#ifndef CBLAS + char *NT=(char*)malloc(2*sizeof(char)); + strcpy(NT,"N"); +#else + char *NT=(char*)malloc(2*sizeof(char)); + if (transb&1)strcpy(NT,"T"); + else strcpy(NT,"N"); +#endif +// fprintf(stderr,"G E M V ! ! ! lda=%d ldb=%d ldc=%d\n",args.lda,args.ldb,args.ldc); + GEMV(NT, &args.m ,&args.k, args.alpha, args.a, &args.lda, args.b, &args.n, args.beta, args.c, &args.n); +//SUBROUTINE SGEMV(TRANS,M,N,ALPHA,A,LDA,X,INCX,BETA,Y,INCY) + return; + } + } +#endif +#endif #if 0 fprintf(stderr, "m = %4d n = %d k = %d lda = %4d ldb = %4d ldc = %4d\n", args.m, args.n, args.k, args.lda, args.ldb, args.ldc); @@ -521,10 +557,15 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS buffer = (XFLOAT *)blas_memory_alloc(0); -//For target LOONGSON3R5, applying an offset to the buffer is essential -//for minimizing cache conflicts and optimizing performance. -#if defined(ARCH_LOONGARCH64) && !defined(NO_AFFINITY) - sa = (XFLOAT *)((BLASLONG)buffer + (WhereAmI() & 0xf) * GEMM_OFFSET_A); +//For Loongson servers, like the 3C5000 (featuring 16 cores), applying an +//offset to the buffer is essential for minimizing cache conflicts and optimizing performance. 
+#if defined(LOONGSON3R5) && !defined(NO_AFFINITY) + char model_name[128]; + get_cpu_model(model_name); + if ((strstr(model_name, "3C5000") != NULL) || (strstr(model_name, "3D5000") != NULL)) + sa = (XFLOAT *)((BLASLONG)buffer + (WhereAmI() & 0xf) * GEMM_OFFSET_A); + else + sa = (XFLOAT *)((BLASLONG)buffer + GEMM_OFFSET_A); #else sa = (XFLOAT *)((BLASLONG)buffer +GEMM_OFFSET_A); #endif From 28b5334f22dc972613493bb8cd207657c002e240 Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Tue, 23 Jul 2024 20:42:39 +0000 Subject: [PATCH 48/52] Complete implementation of GEMV forwarding --- interface/gemm.c | 89 +++++++++++++++++++++++++----------------------- 1 file changed, 47 insertions(+), 42 deletions(-) diff --git a/interface/gemm.c b/interface/gemm.c index c3118672f..c8a20d119 100644 --- a/interface/gemm.c +++ b/interface/gemm.c @@ -1,4 +1,5 @@ /*********************************************************************/ +/* Copyright 2024 The OpenBLAS Project */ /* Copyright 2009, 2010 The University of Texas at Austin. */ /* All rights reserved. */ /* */ @@ -63,13 +64,10 @@ #ifndef GEMM3M #ifdef XDOUBLE #define ERROR_NAME "XGEMM " -#define GEMV BLASFUNC(xgemv) #elif defined(DOUBLE) #define ERROR_NAME "ZGEMM " -#define GEMV BLASFUNC(zgemv) #else #define ERROR_NAME "CGEMM " -#define GEMV BLASFUNC(cgemv) #endif #else #ifdef XDOUBLE @@ -492,42 +490,54 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS } #endif #endif // defined(__linux__) && defined(__x86_64__) && defined(BFLOAT16) - // fprintf(stderr,"G E M M interface m n k %d %d %d\n",args.m,args.n,args.k); if ((args.m == 0) || (args.n == 0)) return; -#if 1 -#ifndef GEMM3M - if (args.m == 1) { - char *NT=(char*)malloc(2*sizeof(char)); - if (transb&1)strcpy(NT,"T"); - else NT="N"; -// fprintf(stderr,"G E M V\n"); - GEMV(NT, &args.n ,&args.k, args.alpha, args.b, &args.ldb, args.a, &args.m, args.beta, args.c, &args.m); -//SUBROUTINE SGEMV(TRANS,M,N,ALPHA,A,LDA,X,INCX,BETA,Y,INCY) -return; - } else { - if (args.n == 1) { -#ifndef CBLAS - char *NT=(char*)malloc(2*sizeof(char)); - strcpy(NT,"N"); -#else - char *NT=(char*)malloc(2*sizeof(char)); - if (transb&1)strcpy(NT,"T"); - else strcpy(NT,"N"); -#endif -// fprintf(stderr,"G E M V ! ! ! 
lda=%d ldb=%d ldc=%d\n",args.lda,args.ldb,args.ldc); - GEMV(NT, &args.m ,&args.k, args.alpha, args.a, &args.lda, args.b, &args.n, args.beta, args.c, &args.n); -//SUBROUTINE SGEMV(TRANS,M,N,ALPHA,A,LDA,X,INCX,BETA,Y,INCY) - return; +#if !defined(GEMM3M) && !defined(COMPLEX) + // Check if we can convert GEMM -> GEMV + if (args.k != 0) { + if (args.n == 1) { + blasint inc_x = 1; + blasint inc_y = 1; + // These were passed in as blasint, but the struct translates them to blaslong + blasint m = args.m; + blasint n = args.k; + blasint lda = args.lda; + // Create new transpose parameters + char NT = 'N'; + if (transa & 1) { + NT = 'T'; + m = args.k; + n = args.m; + } + if (transb & 1) { + inc_x = args.ldb; + } + GEMV(&NT, &m, &n, args.alpha, args.a, &lda, args.b, &inc_x, args.beta, args.c, &inc_y); + return; + } + if (args.m == 1) { + blasint inc_x = args.lda; + blasint inc_y = args.ldc; + // These were passed in as blasint, but the struct translates them to blaslong + blasint m = args.k; + blasint n = args.n; + blasint ldb = args.ldb; + // Create new transpose parameters + char NT = 'T'; + if (transa & 1) { + inc_x = 1; + } + if (transb & 1) { + NT = 'N'; + m = args.n; + n = args.k; + } + GEMV(&NT, &m, &n, args.alpha, args.b, &ldb, args.a, &inc_x, args.beta, args.c, &inc_y); + return; } } #endif -#endif -#if 0 - fprintf(stderr, "m = %4d n = %d k = %d lda = %4d ldb = %4d ldc = %4d\n", - args.m, args.n, args.k, args.lda, args.ldb, args.ldc); -#endif IDEBUG_START; @@ -557,15 +567,10 @@ return; buffer = (XFLOAT *)blas_memory_alloc(0); -//For Loongson servers, like the 3C5000 (featuring 16 cores), applying an -//offset to the buffer is essential for minimizing cache conflicts and optimizing performance. -#if defined(LOONGSON3R5) && !defined(NO_AFFINITY) - char model_name[128]; - get_cpu_model(model_name); - if ((strstr(model_name, "3C5000") != NULL) || (strstr(model_name, "3D5000") != NULL)) - sa = (XFLOAT *)((BLASLONG)buffer + (WhereAmI() & 0xf) * GEMM_OFFSET_A); - else - sa = (XFLOAT *)((BLASLONG)buffer + GEMM_OFFSET_A); +//For target LOONGSON3R5, applying an offset to the buffer is essential +//for minimizing cache conflicts and optimizing performance. 
+#if defined(ARCH_LOONGARCH64) && !defined(NO_AFFINITY) + sa = (XFLOAT *)((BLASLONG)buffer + (WhereAmI() & 0xf) * GEMM_OFFSET_A); #else sa = (XFLOAT *)((BLASLONG)buffer +GEMM_OFFSET_A); #endif From 90eb863d4b3d836b5b4a42202fcca84283d9ea4a Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Tue, 23 Jul 2024 23:39:07 +0100 Subject: [PATCH 49/52] Re-add accidental removal --- interface/gemm.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/interface/gemm.c b/interface/gemm.c index c8a20d119..7004622a8 100644 --- a/interface/gemm.c +++ b/interface/gemm.c @@ -493,6 +493,11 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS if ((args.m == 0) || (args.n == 0)) return; +#if 0 + fprintf(stderr, "m = %4d n = %d k = %d lda = %4d ldb = %4d ldc = %4d\n", + args.m, args.n, args.k, args.lda, args.ldb, args.ldc); +#endif + #if !defined(GEMM3M) && !defined(COMPLEX) // Check if we can convert GEMM -> GEMV if (args.k != 0) { From b26424c6a20f02c3d7269a021c9b648f1fe04be3 Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Wed, 24 Jul 2024 11:25:28 +0100 Subject: [PATCH 50/52] Allow opt into GEMM -> GEMV forwarding --- Makefile.system | 9 +++++++++ cmake/system.cmake | 7 +++++++ interface/gemm.c | 2 +- 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/Makefile.system b/Makefile.system index 9c42dc115..436245c35 100644 --- a/Makefile.system +++ b/Makefile.system @@ -274,9 +274,18 @@ endif ifeq ($(ARCH), loongarch64) SMALL_MATRIX_OPT = 1 endif +ifeq ($(ARCH), arm64) +GEMM_GEMV_FORWARD = 1 +endif + ifeq ($(SMALL_MATRIX_OPT), 1) CCOMMON_OPT += -DSMALL_MATRIX_OPT endif +ifeq ($(GEMM_GEMV_FORWARD), 1) +ifneq ($(ONLY_CBLAS), 1) +CCOMMON_OPT += -DGEMM_GEMV_FORWARD +endif +endif # This operation is expensive, so execution should be once. ifndef GOTOBLAS_MAKEFILE diff --git a/cmake/system.cmake b/cmake/system.cmake index b682c3af8..efb7aef94 100644 --- a/cmake/system.cmake +++ b/cmake/system.cmake @@ -391,6 +391,13 @@ endif () if (X86_64 OR ${CORE} STREQUAL POWER10) set(SMALL_MATRIX_OPT TRUE) endif () +if (ARM64) + set(GEMM_GEMV_FORWARD TRUE) +endif () + +if (GEMM_GEMV_FORWARD AND NOT ONLY_CBLAS) + set(CCOMMON_OPT "${CCOMMON_OPT} -DGEMM_GEMV_FORWARD") +endif () if (SMALL_MATRIX_OPT) set(CCOMMON_OPT "${CCOMMON_OPT} -DSMALL_MATRIX_OPT") endif () diff --git a/interface/gemm.c b/interface/gemm.c index 7004622a8..ac58cf27f 100644 --- a/interface/gemm.c +++ b/interface/gemm.c @@ -498,7 +498,7 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS args.m, args.n, args.k, args.lda, args.ldb, args.ldc); #endif -#if !defined(GEMM3M) && !defined(COMPLEX) +#if defined(GEMM_GEMV_FORWARD) && !defined(GEMM3M) && !defined(COMPLEX) // Check if we can convert GEMM -> GEMV if (args.k != 0) { if (args.n == 1) { From ba2e989c67f9d2b3403c6fbaebc05b504e601fab Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Wed, 31 Jul 2024 13:07:35 +0100 Subject: [PATCH 51/52] Add accumulators to AArch64 GEMV Kernels This helps to reduce values going missing as we accumulate. 
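The diff below widens the transposed-GEMV inner loops from four to eight elements and, more importantly, gives each lane group its own accumulator. A scalar sketch of the idea, assuming plain C doubles in place of the NEON/SVE registers (v1..v4, temp_vec_v2_*) that the kernels actually use: independent partial sums break the loop-carried FMA dependency and keep any single accumulator from growing so large that small products are rounded away.

    /* Illustrative multi-accumulator dot product, not the kernel itself. */
    static double dot_acc4(const double *a, const double *x, long n)
    {
        double s0 = 0.0, s1 = 0.0, s2 = 0.0, s3 = 0.0;
        long i = 0;
        for (; i + 4 <= n; i += 4) {  /* unrolled body, one sum per lane group */
            s0 += a[i + 0] * x[i + 0];
            s1 += a[i + 1] * x[i + 1];
            s2 += a[i + 2] * x[i + 2];
            s3 += a[i + 3] * x[i + 3];
        }
        for (; i < n; i++)            /* scalar tail, like the F1 loop */
            s0 += a[i] * x[i];
        return (s0 + s1) + (s2 + s3); /* pairwise final reduction */
    }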
--- kernel/arm64/KERNEL.NEOVERSEV1 | 3 ++ kernel/arm64/gemv_t.S | 67 +++++++++++++++++++--------------- kernel/arm64/gemv_t_sve.c | 40 ++++++++++++++++---- 3 files changed, 74 insertions(+), 36 deletions(-) diff --git a/kernel/arm64/KERNEL.NEOVERSEV1 b/kernel/arm64/KERNEL.NEOVERSEV1 index bc5999097..53d157a0a 100644 --- a/kernel/arm64/KERNEL.NEOVERSEV1 +++ b/kernel/arm64/KERNEL.NEOVERSEV1 @@ -1 +1,4 @@ include $(KERNELDIR)/KERNEL.ARMV8SVE + +SGEMVTKERNEL = gemv_t_sve.c +DGEMVTKERNEL = gemv_t_sve.c diff --git a/kernel/arm64/gemv_t.S b/kernel/arm64/gemv_t.S index b04367ab3..a98eef49b 100644 --- a/kernel/arm64/gemv_t.S +++ b/kernel/arm64/gemv_t.S @@ -1,5 +1,5 @@ /******************************************************************************* -Copyright (c) 2015, The OpenBLAS Project +Copyright (c) 2015, 2024 The OpenBLAS Project All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -170,39 +170,48 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F32_FINALIZE #if !defined(DOUBLE) - fadd v1.4s, v1.4s, v2.4s + // F8 only has 2 accumulators + // so add into those pairs fadd v1.4s, v1.4s, v3.4s - fadd v1.4s, v1.4s, v4.4s -#else - fadd v1.2d, v1.2d, v2.2d - fadd v1.2d, v1.2d, v3.2d - fadd v1.2d, v1.2d, v4.2d + fadd v2.4s, v2.4s, v4.4s #endif .endm -.macro KERNEL_F4 +.macro KERNEL_F8 #if !defined(DOUBLE) - ld1 {v2.4s}, [A_PTR], #16 - ld1 {v3.4s}, [X_PTR], #16 - fmla v1.4s, v2.4s, v3.4s -#else - ld1 {v2.2d}, [A_PTR], #16 - ld1 {v3.2d}, [X_PTR], #16 - fmla v1.2d, v2.2d, v3.2d - - ld1 {v4.2d}, [A_PTR], #16 - ld1 {v5.2d}, [X_PTR], #16 - fmla v1.2d, v4.2d, v5.2d + ld1 {v13.4s, v14.4s}, [A_PTR], #32 + ld1 {v17.4s, v18.4s}, [X_PTR], #32 + fmla v1.4s, v13.4s, v17.4s + fmla v2.4s, v14.4s, v18.4s +#else + ld1 {v13.2d, v14.2d, v15.2d, v16.2d}, [A_PTR], #64 + ld1 {v17.2d, v18.2d, v19.2d, v20.2d}, [X_PTR], #64 + fmla v1.2d, v13.2d, v17.2d + fmla v2.2d, v14.2d, v18.2d + fmla v3.2d, v15.2d, v19.2d + fmla v4.2d, v16.2d, v20.2d #endif .endm -.macro KERNEL_F4_FINALIZE +.macro KERNEL_F8_FINALIZE #if !defined(DOUBLE) - ext v2.16b, v1.16b, v1.16b, #8 + // Take the top two elements of v1 and + // put them into the first two lanes of v3 + ext v3.16b, v1.16b, v1.16b, #8 + fadd v1.2s, v1.2s, v3.2s + ext v4.16b, v2.16b, v2.16b, #8 + fadd v2.2s, v2.2s, v4.2s + // Final pair fadd v1.2s, v1.2s, v2.2s faddp TEMP, v1.2s #else faddp TEMP, v1.2d + faddp TEMP1, v2.2d + faddp TEMP2, v3.2d + faddp TEMP3, v4.2d + fadd TEMP, TEMP, TEMP1 + fadd TEMP2, TEMP2, TEMP3 + fadd TEMP, TEMP, TEMP2 #endif .endm @@ -258,7 +267,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. asr I, M, #5 cmp I, xzr - beq .Lgemv_t_kernel_F4 + beq .Lgemv_t_kernel_F8 .Lgemv_t_kernel_F320: @@ -269,24 +278,24 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
KERNEL_F32_FINALIZE -.Lgemv_t_kernel_F4: +.Lgemv_t_kernel_F8: ands I, M, #31 - asr I, I, #2 + asr I, I, #3 cmp I, xzr beq .Lgemv_t_kernel_F1 -.Lgemv_t_kernel_F40: +.Lgemv_t_kernel_F80: - KERNEL_F4 + KERNEL_F8 subs I, I, #1 - bne .Lgemv_t_kernel_F40 + bne .Lgemv_t_kernel_F80 .Lgemv_t_kernel_F1: - KERNEL_F4_FINALIZE + KERNEL_F8_FINALIZE - ands I, M, #3 + ands I, M, #7 ble .Lgemv_t_kernel_F_END .Lgemv_t_kernel_F10: diff --git a/kernel/arm64/gemv_t_sve.c b/kernel/arm64/gemv_t_sve.c index ab700a374..183d9c3d1 100644 --- a/kernel/arm64/gemv_t_sve.c +++ b/kernel/arm64/gemv_t_sve.c @@ -59,20 +59,46 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO a_ptr = a; if (inc_x == 1) { + svbool_t pg_true = SV_TRUE(); uint64_t sve_size = SV_COUNT(); + uint64_t sve_size2 = sve_size * 2; + BLASLONG m1 = m & -sve_size; + BLASLONG m2 = m & -sve_size2; + for (j = 0; j < n; j++) { + BLASLONG i = 0; + + SV_TYPE temp_vec_v2_0 = SV_DUP(0.0); + SV_TYPE temp_vec_v2_1 = SV_DUP(0.0); + for (; i < m2; i += sve_size2) { + SV_TYPE a_vec0 = svld1(pg_true, a_ptr + i); + SV_TYPE x_vec0 = svld1(pg_true, x + i); + SV_TYPE a_vec1 = svld1(pg_true, a_ptr + i + sve_size); + SV_TYPE x_vec1 = svld1(pg_true, x + i + sve_size); + temp_vec_v2_0 = svmla_m(pg_true, temp_vec_v2_0, a_vec0, x_vec0); + temp_vec_v2_1 = svmla_m(pg_true, temp_vec_v2_1, a_vec1, x_vec1); + } + + SV_TYPE temp_vec_v1 = SV_DUP(0.0); + for (; i < m1; i += sve_size) { + SV_TYPE a_vec0 = svld1(pg_true, a_ptr + i); + SV_TYPE x_vec0 = svld1(pg_true, x + i); + temp_vec_v1 = svmla_m(pg_true, temp_vec_v1, a_vec0, x_vec0); + } + SV_TYPE temp_vec = SV_DUP(0.0); - i = 0; - svbool_t pg = SV_WHILE(i, m); - while (svptest_any(SV_TRUE(), pg)) { + for (; i < m; i += sve_size) { + svbool_t pg = SV_WHILE(i, m); SV_TYPE a_vec = svld1(pg, a_ptr + i); SV_TYPE x_vec = svld1(pg, x + i); temp_vec = svmla_m(pg, temp_vec, a_vec, x_vec); - i += sve_size; - pg = SV_WHILE(i, m); } - temp = svaddv(SV_TRUE(), temp_vec); - y[iy] += alpha * temp; + + y[iy] += alpha * ( + (svaddv(SV_TRUE(), temp_vec_v2_0) + svaddv(SV_TRUE(), temp_vec)) + + (svaddv(SV_TRUE(), temp_vec_v2_1) + svaddv(SV_TRUE(), temp_vec_v1)) + ); + iy += inc_y; a_ptr += lda; } From edbf093c98930b0b2144da638309b94a1a2bb5d0 Mon Sep 17 00:00:00 2001 From: Martin Kroeker Date: Wed, 31 Jul 2024 19:45:15 +0200 Subject: [PATCH 52/52] Update zarch SCAL kernels to handle INF and NAN arguments (#4829) * handle INF and NAN in input (for S/D only if DUMMY2 argument is set) --- kernel/zarch/cscal.c | 60 ++++++++++++++++++++++++++--------- kernel/zarch/dscal.c | 44 +++++++++++++++----------- kernel/zarch/sscal.c | 75 ++++++++++++++++++++++++++++---------------- kernel/zarch/zscal.c | 43 ++++++++++++++++++------- 4 files changed, 152 insertions(+), 70 deletions(-) diff --git a/kernel/zarch/cscal.c b/kernel/zarch/cscal.c index 57bb89c0a..e623f306b 100644 --- a/kernel/zarch/cscal.c +++ b/kernel/zarch/cscal.c @@ -234,12 +234,23 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, } else { while (j < n1) { - - temp0 = -da_i * x[i + 1]; - x[i + 1] = da_i * x[i]; + if (isnan(x[i]) || isinf(x[i])) + temp0 = NAN; + else + temp0 = -da_i * x[i + 1]; + if (!isinf(x[i + 1])) + x[i + 1] = da_i * x[i]; + else + x[i + 1] = NAN; x[i] = temp0; - temp1 = -da_i * x[i + 1 + inc_x]; - x[i + 1 + inc_x] = da_i * x[i + inc_x]; + if (isnan(x[i+inc_x]) || isinf(x[i+inc_x])) + temp1 = NAN; + else + temp1 = -da_i * x[i + 1 + inc_x]; + if (!isinf(x[i + 1 + inc_x])) + x[i + 1 + inc_x] = da_i * x[i + inc_x]; + else + 
x[i + 1 + inc_x] = NAN; x[i + inc_x] = temp1; i += 2 * inc_x; j += 2; @@ -247,9 +258,14 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, } while (j < n) { - - temp0 = -da_i * x[i + 1]; - x[i + 1] = da_i * x[i]; + if (isnan(x[i]) || isinf(x[i])) + temp0 = NAN; + else + temp0 = -da_i * x[i + 1]; + if (isinf(x[i + 1])) + x[i + 1] = NAN; + else + x[i + 1] = da_i * x[i]; x[i] = temp0; i += inc_x; j++; @@ -332,26 +348,42 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, j = n1; } - if (da_r == 0.0) { + if (da_r == 0.0 || isnan(da_r)) { if (da_i == 0.0) { - + float res = 0.0; + if (isnan(da_r)) res = da_r; while (j < n) { - x[i] = 0.0; - x[i + 1] = 0.0; + x[i] = res; + x[i + 1] = res; i += 2; j++; } + } else if (isinf(da_r)) { + while(j < n) + { + + x[i]= NAN; + x[i+1] = da_r; + i += 2 ; + j++; + + } } else { while (j < n) { temp0 = -da_i * x[i + 1]; - x[i + 1] = da_i * x[i]; - x[i] = temp0; + if (isinf(x[i])) temp0 = NAN; + if (!isinf(x[i + 1])) + x[i + 1] = da_i * x[i]; + else + x[i + 1] = NAN; + if (x[i] == x[i]) + x[i] = temp0; i += 2; j++; diff --git a/kernel/zarch/dscal.c b/kernel/zarch/dscal.c index a5a5e3468..14695602d 100644 --- a/kernel/zarch/dscal.c +++ b/kernel/zarch/dscal.c @@ -96,20 +96,28 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, if (inc_x == 1) { if (da == 0.0) { + + if (dummy2 == 0) { + BLASLONG n1 = n & -16; + if (n1 > 0) { + dscal_kernel_16_zero(n1, x); + j = n1; + } - BLASLONG n1 = n & -16; - if (n1 > 0) { - - dscal_kernel_16_zero(n1, x); - j = n1; + while (j < n) { + x[j] = 0.0; + j++; + } + } else { + while (j < n) { + if (isfinite(x[j])) + x[j] = 0.0; + else + x[j] = NAN; + j++; + } } - - while (j < n) { - - x[j] = 0.0; - j++; - } - + } else { BLASLONG n1 = n & -16; @@ -127,11 +135,9 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, } else { if (da == 0.0) { - + if (dummy2 == 0) { BLASLONG n1 = n & -4; - while (j < n1) { - x[i] = 0.0; x[i + inc_x] = 0.0; x[i + 2 * inc_x] = 0.0; @@ -139,11 +145,13 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, i += inc_x * 4; j += 4; - } + } while (j < n) { - - x[i] = 0.0; + if (dummy2==0 || isfinite(x[i])) + x[i] = 0.0; + else + x[i] = NAN; i += inc_x; j++; } diff --git a/kernel/zarch/sscal.c b/kernel/zarch/sscal.c index da2f49eaf..677727515 100644 --- a/kernel/zarch/sscal.c +++ b/kernel/zarch/sscal.c @@ -95,21 +95,31 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, if (inc_x == 1) { - if (da == 0.0) { + if (da == 0.0 || !isfinite(da)) { + if (dummy2 == 0) { + BLASLONG n1 = n & -32; + if (n1 > 0) { - BLASLONG n1 = n & -32; - if (n1 > 0) { + sscal_kernel_32_zero(n1, x); + j = n1; + } - sscal_kernel_32_zero(n1, x); - j = n1; + while (j < n) { + + x[j] = 0.0; + j++; + } + } else { + float res = 0.0; + if (!isfinite(da)) res = NAN; + while (j < n) { + if (isfinite(x[i])) + x[j] = res; + else + x[j] = NAN; + j++; + } } - - while (j < n) { - - x[j] = 0.0; - j++; - } - } else { BLASLONG n1 = n & -32; @@ -126,26 +136,37 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, } else { - if (da == 0.0) { + if (da == 0.0 || !isfinite(da)) { + if (dummy2 == 0) { + BLASLONG n1 = n & -2; - BLASLONG n1 = n & -2; + while (j < n1) { - while (j < n1) { + x[i] = 0.0; + x[i + inc_x] = 0.0; - x[i] = 0.0; - x[i + inc_x] = 0.0; + i += inc_x * 2; + j += 2; - i += inc_x * 2; - j += 2; - - } - while (j < n) { - - x[i] = 0.0; - i += inc_x; - j++; - } + 
} + while (j < n) { + x[i] = 0.0; + i += inc_x; + j++; + } + } else { + while (j < n) { + float res = 0.0; + if (!isfinite(da)) res = NAN; + if (isfinite(x[i])) + x[i] = res; + else + x[i] = NAN; + i += inc_x; + j++; + } + } } else { BLASLONG n1 = n & -2; diff --git a/kernel/zarch/zscal.c b/kernel/zarch/zscal.c index 4160a1a76..36466a6e0 100644 --- a/kernel/zarch/zscal.c +++ b/kernel/zarch/zscal.c @@ -237,13 +237,19 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, temp0 = NAN; else temp0 = -da_i * x[i + 1]; - x[i + 1] = da_i * x[i]; + if (!isinf(x[i + 1])) + x[i + 1] = da_i * x[i]; + else + x[i + 1] = NAN; x[i] = temp0; if (isnan(x[i + inc_x]) || isinf(x[i + inc_x])) temp1 = NAN; else temp1 = -da_i * x[i + 1 + inc_x]; - x[i + 1 + inc_x] = da_i * x[i + inc_x]; + if (!isinf(x[i + 1 + inc_x])) + x[i + 1 + inc_x] = da_i * x[i + inc_x]; + else + x[i + 1 + inc_x] = NAN; x[i + inc_x] = temp1; i += 2 * inc_x; j += 2; @@ -256,7 +262,10 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, temp0 = NAN; else temp0 = -da_i * x[i + 1]; - x[i + 1] = da_i * x[i]; + if (!isinf(x[i +1])) + x[i + 1] = da_i * x[i]; + else + x[i + 1] = NAN; x[i] = temp0; i += inc_x; j++; @@ -330,7 +339,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, zscal_kernel_8_zero(n1, x); else zscal_kernel_8(n1, da_r, da_i, x); - else if (da_i == 0) + else if (da_i == 0 && da_r == da_r) zscal_kernel_8_zero_i(n1, alpha, x); else zscal_kernel_8(n1, da_r, da_i, x); @@ -339,29 +348,41 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, j = n1; } - if (da_r == 0.0) { + if (da_r == 0.0 || isnan(da_r)) { if (da_i == 0.0) { - + double res= 0.0; + if (isnan(da_r)) res = da_r; while (j < n) { - x[i] = 0.0; - x[i + 1] = 0.0; + x[i] = res; + x[i + 1] = res; i += 2; j++; } + } else if (isinf(da_r)) { + while (j < n) { + x[i] = NAN; + x[i + 1] = da_r; + i += 2; + j++; + } } else { while (j < n) { - if (isnan(x[i]) || isinf(x[i])) + if (isinf(x[i])) temp0 = NAN; else temp0 = -da_i * x[i + 1]; - x[i + 1] = da_i * x[i]; - x[i] = temp0; + if (!isinf(x[i + 1])) + x[i + 1] = da_i * x[i]; + else + x[i + 1] = NAN; + if (x[i]==x[i]) + x[i] = temp0; i += 2; j++;
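The pattern repeated through these cscal/zscal hunks is the IEEE behaviour of multiplying by (0 + i*da_i): the real part becomes -da_i*x_i and the imaginary part da_i*x_r, with NaN surfacing wherever an Inf or NaN input demands it instead of being silently zeroed. A scalar reference of the da_r == 0 case, distilled from the hunks above (a hypothetical helper for illustration, not the kernel's entry point):

    #include <math.h>

    /* Reference semantics for complex scal with da_r == 0:
     * (0 + i*da_i) * (re + i*im) = -da_i*im + i*(da_i*re),
     * except that Inf/NaN inputs must yield NaN rather than be zeroed. */
    static void cscal_da_r_zero(long n, double da_i, double *x)
    {
        long i;
        for (i = 0; i < 2 * n; i += 2) {
            double re = x[i], im = x[i + 1];
            double t = (isnan(re) || isinf(re)) ? NAN : -da_i * im;
            x[i + 1] = isinf(im) ? NAN : da_i * re;  /* imaginary part */
            x[i] = t;                                /* real part */
        }
    }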