From 19fdbee291bfcfbf9319df69bec4597acdb41009 Mon Sep 17 00:00:00 2001 From: Ashwin Sekhar T K Date: Mon, 2 Nov 2015 17:45:24 +0530 Subject: [PATCH] Improve the sgemm kernel for CORTEXA57 --- kernel/arm64/KERNEL.CORTEXA57 | 8 + kernel/arm64/sgemm_kernel_4x4.S | 1516 ++++++++++++++++++------------- 2 files changed, 884 insertions(+), 640 deletions(-) diff --git a/kernel/arm64/KERNEL.CORTEXA57 b/kernel/arm64/KERNEL.CORTEXA57 index 94dfd9d4b..77a1f609c 100644 --- a/kernel/arm64/KERNEL.CORTEXA57 +++ b/kernel/arm64/KERNEL.CORTEXA57 @@ -60,3 +60,11 @@ DGEMVTKERNEL = gemv_t.S CGEMVTKERNEL = zgemv_t.S ZGEMVTKERNEL = zgemv_t.S +STRMMKERNEL = ../generic/trmmkernel_4x4.c + +SGEMMKERNEL = sgemm_kernel_4x4.S +SGEMMONCOPY = ../generic/gemm_ncopy_4.c +SGEMMOTCOPY = ../generic/gemm_tcopy_4.c +SGEMMONCOPYOBJ = sgemm_oncopy.o +SGEMMOTCOPYOBJ = sgemm_otcopy.o + diff --git a/kernel/arm64/sgemm_kernel_4x4.S b/kernel/arm64/sgemm_kernel_4x4.S index 78633297f..bfa80d589 100644 --- a/kernel/arm64/sgemm_kernel_4x4.S +++ b/kernel/arm64/sgemm_kernel_4x4.S @@ -1,5 +1,5 @@ -/*************************************************************************** -Copyright (c) 2013, The OpenBLAS Project +/******************************************************************************* +Copyright (c) 2015, The OpenBLAS Project All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -23,57 +23,43 @@ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*****************************************************************************/ - -/************************************************************************************** -* 2013/11/23 Saar -* BLASTEST : OK -* CTEST : OK -* TEST : OK -* -* -* 2013/11/02 Saar -* UNROLL_N 4 -* UNROLL_M 4 -* DGEMM_P 128 -* DGEMM_Q 240 -* DGEMM_R 12288 -* A_PRE 128 -* B_PRE 128 -* C_PRE 32 -* -* Performance on Odroid U2: -* -* 3072x3072 1 Core: 2.62 GFLOPS ATLAS: 2.69 GFLOPS -* 3072x3072 2 Cores: 5.23 GFLOPS ATLAS: 5.27 GFLOPS -* 3072x3072 3 Cores: 7.78 GFLOPS ATLAS: 7.87 GFLOPS -* 3072x3072 4 Cores: 10.10 GFLOPS ATLAS: 9.98 GFLOPS -**************************************************************************************/ +*******************************************************************************/ #define ASSEMBLER #include "common.h" -/* X0 X1 X2 s0 X3 x4 x5 x6*/ -/*int CNAME(BLASLONG bm,BLASLONG bn,BLASLONG bk,FLOAT alpha,FLOAT* ba,FLOAT* bb,FLOAT* C,BLASLONG ldc*/ +/* X0 X1 X2 s0 X3 x4 x5 x6 */ +/*int CNAME(BLASLONG bm,BLASLONG bn,BLASLONG bk,FLOAT alpha0,FLOAT* ba,FLOAT* bb,FLOAT* C,BLASLONG ldc */ + +#define origM x0 +#define origN x1 +#define origK x2 +#define origPA x3 +#define origPB x4 +#define pC x5 +#define LDC x6 +#define temp x7 +#define counterL x8 +#define counterI x9 +#define counterJ x10 +#define pB x11 +#define pCRow0 x12 +#define pCRow1 x13 +#define pCRow2 x14 +#define pA_0 x15 +#define pA_1 x16 +#define pA_2 x17 +#define pA_3 x18 -#define origM x0 -#define origN x1 -#define origK x2 -#define origPA x3 -#define origPB x4 -#define pC x5 -#define LDC x6 -#define offset x7 -#define counterL x8 -#define counterI x9 -#define pB x10 -#define counterJ x11 -#define tempALPHA x12 -#define pCRow0 x13 -#define pCRow1 x14 -#define pCRow2 x15 -#define pA x16 +#define alpha0 s10 +#define alphaV0 v10.s[0] +#define alpha1 s11 +#define alphaV1 v11.s[0] +#define alpha2 s14 +#define alphaV2 v14.s[0] +#define alpha3 s15 +#define alphaV3 v15.s[0] // 00 origM // 01 origN @@ -82,18 +68,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // 04 origPB // 05 pC // 06 origLDC -> LDC -// 07 offset +// 07 offset -> temp // 08 counterL // 09 counterI -// 10 pB -// 11 counterJ -// 12 tempALPHA -// 13 pCRow0 -// 14 pCRow1 -// 15 pCRow2 -// 16 pA -// 17 -// 18 must save +// 10 counterJ +// 11 pB +// 12 pCRow0 +// 13 pCRow1 +// 14 pCRow2 +// 15 pA_0 +// 16 pA_1 +// 17 pA_2 +// 18 must save pA_3 // 19 must save // 20 must save // 21 must save @@ -108,558 +94,719 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// 30 link // 31 sp -//v00 orig ALPHA -> a00 -//v01 a01 -//v02 a02 -//v03 a03 -//v04 a10 -//v05 a11 -//v06 a12 -//v07 a13 -//v08 must save b00 -//v09 must save b01 -//v10 must save b02 -//v11 must save b03 -//v12 must save b10 -//v13 must save b11 -//v14 must save b12 -//v15 must save b13 -//v16 must save C00 -//v17 must save C01 -//v18 C02 -//v19 C03 -//v20 C10 -//v21 C11 -//v22 C12 -//v23 C13 -//v24 C20 -//v25 C21 -//v26 C22 -//v27 C23 -//v28 C30 -//v29 C31 -//v30 C32 -//v31 C33 +/***************************** FOR 16x4 ***************************************/ +//v00 ALPHA -> pA00_0, pA01_0, pA02_0, pA03_0 +//v01 pA10_0, pA11_0, pA12_0, pA13_0 +//v02 pA00_1, pA01_1, pA02_1, pA03_1 +//v03 pA10_1, pA11_1, pA12_1, pA13_1 +//v04 pA00_2, pA01_2, pA02_2, pA03_2 +//v05 pA10_2, pA11_2, pA12_2, pA13_2 +//v06 pA00_3, pA01_3, pA02_3, pA03_3 +//v07 pA10_3, pA11_3, pA12_3, pA13_3 +//v08 must save pB00, pB01, pB02, pB03 +//v09 must save +//v10 must save ALPHA0 +//v11 must save ALPHA1 +//v12 must save pB10, pB11, pB12, pB13 +//v13 must save +//v14 must save ALPHA2 +//v15 must save ALPHA3 +//v16 must save C00_0, C01_0, C02_0, C03_0 +//v17 must save C10_0, C11_0, C12_0, C13_0 +//v18 C20_0, C21_0, C22_0, C23_0 +//v19 C30_0, C31_0, C32_0, C33_0 +//v20 C00_1, C01_1, C02_1, C03_1 +//v21 C10_1, C11_1, C12_1, C13_1 +//v22 C20_1, C21_1, C22_1, C23_1 +//v23 C30_1, C31_1, C32_1, C33_1 +//v24 C00_2, C01_2, C02_2, C03_2 +//v25 C10_2, C11_2, C12_2, C13_2 +//v26 C20_2, C21_2, C22_2, C23_2 +//v27 C30_2, C31_2, C32_2, C33_2 +//v28 C00_3, C01_3, C02_3, C03_3 +//v29 C10_3, C11_3, C12_3, C13_3 +//v30 C20_3, C21_3, C22_3, C23_3 +//v31 C30_3, C31_3, C32_3, C33_3 -// add sp,sp,#-(6*16) -// stp x18,x19,[sp,#(0*16)] -// stp x20,x21,[sp,#(1*16)] +/***************************** EXCEPT FOR 16x4 ********************************/ +//v00 ALPHA -> pA00, pA01 +//v01 pA02, pA03 +//v02 ppA00, ppA01 +//v03 ppA02, ppA03 +//v04 pA10, pA11 +//v05 pA12, pA13 +//v06 ppA10, ppA11 +//v07 ppA12, ppA13 +//v08 must save pB00, pB01 +//v09 must save pB02, pB03 +//v10 must save ALPHA0 +//v11 must save ALPHA1 +//v12 must save pB10, pB11 +//v13 must save pB12, pB13 +//v14 must save ALPHA2 +//v15 must save ALPHA3 +//v16 must save C00, C01 +//v17 must save C02, C03 +//v18 ppC00, ppC01 +//v19 ppC02, ppC03 +//v20 C10, C11 +//v21 C12, C13 +//v22 ppC10, ppC11 +//v23 ppC12, ppC13 +//v24 C20, C21 +//v25 C22, C23 +//v26 ppC20, ppC21 +//v27 ppC22, ppC23 +//v28 C30, C31 +//v29 C32, C33 +//v30 ppC30, ppC31 +//v31 ppC32, ppC33 - -/************************************************************************************** +/******************************************************************************* * Macro definitions -**************************************************************************************/ +*******************************************************************************/ + +.macro INIT16x4 + fmov s16, wzr + fmov s17, s16 + fmov s18, s17 + fmov s19, s16 + fmov s20, s17 + fmov s21, s16 + fmov s22, s17 + fmov s23, s16 + fmov s24, s17 + fmov s25, s16 + fmov s26, s17 + fmov s27, s16 + fmov s28, s17 + fmov s29, s16 + fmov s30, s17 + fmov s31, s16 +.endm + +.macro KERNEL16x4_I + ld1 {v8.4s}, [pB] + add pB, pB, #16 + + ld1 {v0.4s}, [pA_0] + add pA_0, pA_0, #16 + + fmul v16.4s, v0.4s, v8.4s[0] + fmul v20.4s, v0.4s, v8.4s[1] + + ld1 {v2.4s}, [pA_1] + add pA_1, pA_1, #16 + + fmul v24.4s, v0.4s, v8.4s[2] + fmul v28.4s, v0.4s, v8.4s[3] + + ld1 {v4.4s}, [pA_2] + add pA_2, pA_2, #16 + + fmul v17.4s, v2.4s, v8.4s[0] + fmul v21.4s, v2.4s, v8.4s[1] + + ld1 {v6.4s}, 
[pA_3] + add pA_3, pA_3, #16 + + fmul v25.4s, v2.4s, v8.4s[2] + fmul v29.4s, v2.4s, v8.4s[3] + + ld1 {v12.4s}, [pB] // for next round + add pB, pB, #16 + + fmul v18.4s, v4.4s, v8.4s[0] + fmul v19.4s, v6.4s, v8.4s[0] + + ld1 {v1.4s}, [pA_0] // for next round + add pA_0, pA_0, #16 + + fmul v22.4s, v4.4s, v8.4s[1] + fmul v23.4s, v6.4s, v8.4s[1] + + ld1 {v3.4s}, [pA_1] // for next round + add pA_1, pA_1, #16 + + fmul v26.4s, v4.4s, v8.4s[2] + fmul v27.4s, v6.4s, v8.4s[2] + + ld1 {v5.4s}, [pA_2] // for next round + add pA_2, pA_2, #16 + + fmul v30.4s, v4.4s, v8.4s[3] + fmul v31.4s, v6.4s, v8.4s[3] + + ld1 {v7.4s}, [pA_3] // for next round + add pA_3, pA_3, #16 +.endm + +.macro KERNEL16x4_M2 + fmla v16.4s, v1.4s, v12.4s[0] + fmla v17.4s, v3.4s, v12.4s[0] + + ld1 {v8.4s}, [pB] // for next round + add pB, pB, #16 + + fmla v18.4s, v5.4s, v12.4s[0] + fmla v19.4s, v7.4s, v12.4s[0] + + ld1 {v0.4s}, [pA_0] // for next round + add pA_0, pA_0, #16 + + fmla v20.4s, v1.4s, v12.4s[1] + fmla v21.4s, v3.4s, v12.4s[1] + + ld1 {v2.4s}, [pA_1] // for next round + add pA_1, pA_1, #16 + + fmla v22.4s, v5.4s, v12.4s[1] + fmla v23.4s, v7.4s, v12.4s[1] + + ld1 {v4.4s}, [pA_2] // for next round + add pA_2, pA_2, #16 + + fmla v24.4s, v1.4s, v12.4s[2] + fmla v25.4s, v3.4s, v12.4s[2] + + ld1 {v6.4s}, [pA_3] // for next round + add pA_3, pA_3, #16 + + fmla v26.4s, v5.4s, v12.4s[2] + fmla v27.4s, v7.4s, v12.4s[2] + + prfm PLDL1KEEP, [pA_2, #512] + + fmla v28.4s, v1.4s, v12.4s[3] + fmla v29.4s, v3.4s, v12.4s[3] + + prfm PLDL1KEEP, [pA_3, #512] + + fmla v30.4s, v5.4s, v12.4s[3] + fmla v31.4s, v7.4s, v12.4s[3] + + prfm PLDL1KEEP, [pB, #512] +.endm + +.macro KERNEL16x4_M1 + fmla v16.4s, v0.4s, v8.4s[0] + fmla v17.4s, v2.4s, v8.4s[0] + + ld1 {v12.4s}, [pB] // for next round + add pB, pB, #16 + + fmla v18.4s, v4.4s, v8.4s[0] + fmla v19.4s, v6.4s, v8.4s[0] + + ld1 {v1.4s}, [pA_0] // for next round + add pA_0, pA_0, #16 + + fmla v20.4s, v0.4s, v8.4s[1] + fmla v21.4s, v2.4s, v8.4s[1] + + ld1 {v3.4s}, [pA_1] // for next round + add pA_1, pA_1, #16 + + fmla v22.4s, v4.4s, v8.4s[1] + fmla v23.4s, v6.4s, v8.4s[1] + + ld1 {v5.4s}, [pA_2] // for next round + add pA_2, pA_2, #16 + + fmla v24.4s, v0.4s, v8.4s[2] + fmla v25.4s, v2.4s, v8.4s[2] + + ld1 {v7.4s}, [pA_3] // for next round + add pA_3, pA_3, #16 + + fmla v26.4s, v4.4s, v8.4s[2] + fmla v27.4s, v6.4s, v8.4s[2] + + prfm PLDL1KEEP, [pA_0, #512] + + fmla v28.4s, v0.4s, v8.4s[3] + fmla v29.4s, v2.4s, v8.4s[3] + + prfm PLDL1KEEP, [pA_1, #512] + + fmla v30.4s, v4.4s, v8.4s[3] + fmla v31.4s, v6.4s, v8.4s[3] +.endm + +.macro KERNEL16x4_E + fmla v16.4s, v1.4s, v12.4s[0] + fmla v17.4s, v3.4s, v12.4s[0] + fmla v18.4s, v5.4s, v12.4s[0] + fmla v19.4s, v7.4s, v12.4s[0] + fmla v20.4s, v1.4s, v12.4s[1] + fmla v21.4s, v3.4s, v12.4s[1] + fmla v22.4s, v5.4s, v12.4s[1] + fmla v23.4s, v7.4s, v12.4s[1] + fmla v24.4s, v1.4s, v12.4s[2] + fmla v25.4s, v3.4s, v12.4s[2] + fmla v26.4s, v5.4s, v12.4s[2] + fmla v27.4s, v7.4s, v12.4s[2] + fmla v28.4s, v1.4s, v12.4s[3] + fmla v29.4s, v3.4s, v12.4s[3] + fmla v30.4s, v5.4s, v12.4s[3] + fmla v31.4s, v7.4s, v12.4s[3] +.endm + +.macro KERNEL16x4_SUB + ld1 {v8.4s}, [pB] + add pB, pB, #16 + + ld1 {v0.4s}, [pA_0] + add pA_0, pA_0, #16 + + fmla v16.4s, v0.4s, v8.4s[0] + fmla v20.4s, v0.4s, v8.4s[1] + fmla v24.4s, v0.4s, v8.4s[2] + fmla v28.4s, v0.4s, v8.4s[3] + + ld1 {v2.4s}, [pA_1] + add pA_1, pA_1, #16 + + fmla v17.4s, v2.4s, v8.4s[0] + fmla v21.4s, v2.4s, v8.4s[1] + fmla v25.4s, v2.4s, v8.4s[2] + fmla v29.4s, v2.4s, v8.4s[3] + + ld1 {v4.4s}, [pA_2] + add pA_2, pA_2, 
#16 + + fmla v18.4s, v4.4s, v8.4s[0] + fmla v22.4s, v4.4s, v8.4s[1] + fmla v26.4s, v4.4s, v8.4s[2] + fmla v30.4s, v4.4s, v8.4s[3] + + ld1 {v6.4s}, [pA_3] + add pA_3, pA_3, #16 + + fmla v19.4s, v6.4s, v8.4s[0] + fmla v23.4s, v6.4s, v8.4s[1] + fmla v27.4s, v6.4s, v8.4s[2] + fmla v31.4s, v6.4s, v8.4s[3] +.endm + +.macro SAVE16x4 + mov pCRow1, pCRow0 + + ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [pCRow1] + fmla v0.4s, v16.4s, alphaV0 + fmla v1.4s, v17.4s, alphaV1 + fmla v2.4s, v18.4s, alphaV2 + fmla v3.4s, v19.4s, alphaV3 + st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [pCRow1] + + add pCRow1, pCRow1, LDC + + ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [pCRow1] + fmla v4.4s, v20.4s, alphaV0 + fmla v5.4s, v21.4s, alphaV1 + fmla v6.4s, v22.4s, alphaV2 + fmla v7.4s, v23.4s, alphaV3 + st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [pCRow1] + + add pCRow1, pCRow1, LDC + + ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [pCRow1] + fmla v0.4s, v24.4s, alphaV0 + fmla v1.4s, v25.4s, alphaV1 + fmla v2.4s, v26.4s, alphaV2 + fmla v3.4s, v27.4s, alphaV3 + st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [pCRow1] + + add pCRow1, pCRow1, LDC + + ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [pCRow1] + fmla v4.4s, v28.4s, alphaV0 + fmla v5.4s, v29.4s, alphaV1 + fmla v6.4s, v30.4s, alphaV2 + fmla v7.4s, v31.4s, alphaV3 + st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [pCRow1] + + add pCRow0, pCRow0, #64 +.endm + +/******************************************************************************/ + +.macro INIT8x4 + fmov s16, wzr + fmov s17, s16 + fmov s18, s17 + fmov s19, s16 + fmov s20, s17 + fmov s21, s16 + fmov s22, s17 + fmov s23, s16 + fmov s24, s17 + fmov s25, s16 + fmov s26, s17 + fmov s27, s16 + fmov s28, s17 + fmov s29, s16 + fmov s30, s17 + fmov s31, s16 +.endm + +.macro KERNEL8x4_SUB + ld1 {v8.2s, v9.2s}, [pB] + add pB, pB, #16 + ld1 {v0.2s, v1.2s}, [pA_0] + add pA_0, pA_0, #16 + + fmla v16.2s, v0.2s, v8.2s[0] + fmla v29.2s, v1.2s, v9.2s[1] + fmla v20.2s, v0.2s, v8.2s[1] + fmla v25.2s, v1.2s, v9.2s[0] + + ld1 {v2.2s, v3.2s}, [pA_1] + add pA_1, pA_1, #16 + + fmla v24.2s, v0.2s, v9.2s[0] + fmla v21.2s, v1.2s, v8.2s[1] + fmla v28.2s, v0.2s, v9.2s[1] + fmla v17.2s, v1.2s, v8.2s[0] + + fmla v18.2s, v2.2s, v8.2s[0] + fmla v31.2s, v3.2s, v9.2s[1] + fmla v22.2s, v2.2s, v8.2s[1] + fmla v27.2s, v3.2s, v9.2s[0] + + fmla v26.2s, v2.2s, v9.2s[0] + fmla v23.2s, v3.2s, v8.2s[1] + fmla v30.2s, v2.2s, v9.2s[1] + fmla v19.2s, v3.2s, v8.2s[0] +.endm + +.macro SAVE8x4 + mov pCRow1, pCRow0 + + ld1 {v0.2s, v1.2s}, [pCRow1] + fmla v0.2s, v16.2s, alphaV0 + fmla v1.2s, v17.2s, alphaV1 + st1 {v0.2s, v1.2s}, [pCRow1] + + add pCRow2, pCRow1, LDC + add pCRow1, pCRow1, #16 + + ld1 {v2.2s, v3.2s}, [pCRow1] + fmla v2.2s, v18.2s, alphaV2 + fmla v3.2s, v19.2s, alphaV3 + st1 {v2.2s, v3.2s}, [pCRow1] + + ld1 {v4.2s, v5.2s}, [pCRow2] + fmla v4.2s, v20.2s, alphaV0 + fmla v5.2s, v21.2s, alphaV1 + st1 {v4.2s, v5.2s}, [pCRow2] + + add pCRow1, pCRow2, LDC + add pCRow2, pCRow2, #16 + + ld1 {v6.2s, v7.2s}, [pCRow2] + fmla v6.2s, v22.2s, alphaV2 + fmla v7.2s, v23.2s, alphaV3 + st1 {v6.2s, v7.2s}, [pCRow2] + + ld1 {v0.2s, v1.2s}, [pCRow1] + fmla v0.2s, v24.2s, alphaV0 + fmla v1.2s, v25.2s, alphaV1 + st1 {v0.2s, v1.2s}, [pCRow1] + + add pCRow2, pCRow1, LDC + add pCRow1, pCRow1, #16 + + ld1 {v2.2s, v3.2s}, [pCRow1] + fmla v2.2s, v26.2s, alphaV2 + fmla v3.2s, v27.2s, alphaV3 + st1 {v2.2s, v3.2s}, [pCRow1] + + ld1 {v4.2s, v5.2s}, [pCRow2] + fmla v4.2s, v28.2s, alphaV0 + fmla v5.2s, v29.2s, alphaV1 + st1 {v4.2s, v5.2s}, [pCRow2] + + add pCRow2, pCRow2, #16 + + ld1 {v6.2s, v7.2s}, [pCRow2] + fmla v6.2s, v30.2s, alphaV2 + fmla v7.2s, v31.2s, alphaV3 
+ st1 {v6.2s, v7.2s}, [pCRow2] + + add pCRow0, pCRow0, #32 +.endm + +/******************************************************************************/ .macro INIT4x4 - - fsub v16.4s , v16.4s , v16.4s - fsub v20.4s , v20.4s , v20.4s - fsub v24.4s , v24.4s , v24.4s - fsub v28.4s , v28.4s , v28.4s - + fmov s16, wzr + fmov s17, s16 + fmov s20, s17 + fmov s21, s16 + fmov s24, s17 + fmov s25, s16 + fmov s28, s17 + fmov s29, s16 .endm -.macro KERNEL4x4_I - - ld1 {v8.2s},[pB],#8 - ld1 {v10.2s},[pB],#8 - ld1 {v0.4s},[pA],#16 - - fmulx v16.4s, v0.4s, v8.4s[0] - fmulx v20.4s, v0.4s, v8.4s[1] - fmulx v24.4s, v0.4s, v10.4s[0] - fmulx v28.4s, v0.4s, v10.4s[1] - - ld1 {v12.2s},[pB],#8 // for next round - ld1 {v14.2s},[pB],#8 // for next round - ld1 {v4.4s},[pA],#16 // for next round - - -.endm - - -.macro KERNEL4x4_M2 - - fmla v16.4s, v4.4s, v12.s[0] - fmla v20.4s, v4.4s, v12.s[1] - fmla v24.4s, v4.4s, v14.s[0] - fmla v28.4s, v4.4s, v14.s[1] - - ld1 {v8.2s},[pB],#8 - ld1 {v10.2s},[pB],#8 - ld1 {v0.4s},[pA],#16 - -.endm - - -.macro KERNEL4x4_M1 - - fmla v16.4s, v0.4s, v8.s[0] - fmla v20.4s, v0.4s, v8.s[1] - fmla v24.4s, v0.4s, v10.s[0] - fmla v28.4s, v0.4s, v10.s[1] - - ld1 {v12.2s},[pB],#8 - ld1 {v14.2s},[pB],#8 - ld1 {v4.4s},[pA],#16 - -.endm - - - -.macro KERNEL4x4_E - - fmla v16.4s, v4.4s, v12.s[0] - fmla v20.4s, v4.4s, v12.s[1] - fmla v24.4s, v4.4s, v14.s[0] - fmla v28.4s, v4.4s, v14.s[1] - -.endm - - - - .macro KERNEL4x4_SUB + ld1 {v8.2s, v9.2s}, [pB] + add pB, pB, #16 + ld1 {v0.2s, v1.2s}, [pA_0] + add pA_0, pA_0, #16 - ld1 {v8.2s},[pB],#8 - ld1 {v10.2s},[pB],#8 - ld1 {v0.4s} , [pA],#16 + fmla v16.2s, v0.2s, v8.2s[0] + fmla v29.2s, v1.2s, v9.2s[1] - fmla v16.4s, v0.4s, v8.s[0] - fmla v20.4s, v0.4s, v8.s[1] - fmla v24.4s, v0.4s, v10.s[0] - fmla v28.4s, v0.4s, v10.s[1] + fmla v20.2s, v0.2s, v8.2s[1] + fmla v25.2s, v1.2s, v9.2s[0] + fmla v24.2s, v0.2s, v9.2s[0] + fmla v21.2s, v1.2s, v8.2s[1] + + fmla v28.2s, v0.2s, v9.2s[1] + fmla v17.2s, v1.2s, v8.2s[0] .endm - - - .macro SAVE4x4 + ld1 {v8.2s, v9.2s}, [pCRow0] + fmla v8.2s, v16.2s, alphaV0 + fmla v9.2s, v17.2s, alphaV1 + st1 {v8.2s, v9.2s}, [pCRow0] - add pCRow1, pCRow0, LDC // create a second row pointer from the first row pointer - mov v0.d[0], tempALPHA + add pCRow1, pCRow0, LDC - ld1 {v8.4s},[pCRow0] // load 4 values of C from first row - fmla v8.4s ,v16.4s,v0.s[0] - st1 {v8.4s},[pCRow0],#16 // store C from first row + ld1 {v12.2s, v13.2s}, [pCRow1] + fmla v12.2s, v20.2s, alphaV2 + fmla v13.2s, v21.2s, alphaV3 + st1 {v12.2s, v13.2s}, [pCRow1] - ld1 {v12.4s},[pCRow1] // load 4 values of C from second row - fmla v12.4s ,v20.4s,v0.s[0] - st1 {v12.4s},[pCRow1] // store C from second row + add pCRow2, pCRow1, LDC - add pCRow2, pCRow1, LDC // Row2 points to third row + ld1 {v8.2s, v9.2s}, [pCRow2] + fmla v8.2s, v24.2s, alphaV0 + fmla v9.2s, v25.2s, alphaV1 + st1 {v8.2s, v9.2s}, [pCRow2] - ld1 {v8.4s},[pCRow2] // load 4 values of C from third row - fmla v8.4s ,v24.4s,v0.s[0] - st1 {v8.4s} ,[pCRow2] // store C from third row + add pCRow1, pCRow2, LDC - add pCRow1, pCRow2 , LDC // row1 points to fourth row - - ld1 {v12.4s},[pCRow1] // load 4 values of C from fourth row - fmla v12.4s ,v28.4s,v0.s[0] - st1 {v12.4s},[pCRow1] // store fourth row + ld1 {v12.2s, v13.2s}, [pCRow1] + fmla v12.2s, v28.2s, alphaV2 + fmla v13.2s, v29.2s, alphaV3 + st1 {v12.2s, v13.2s}, [pCRow1] + add pCRow0, pCRow0, #16 .endm /******************************************************************************/ .macro INIT2x4 - - fsub s16 , s16 , s16 - fmov s17, s16 + fmov s16, wzr fmov 
s20, s16 - fmov s21, s16 - fmov s24, s16 - fmov s25, s16 + fmov s24, s20 fmov s28, s16 - fmov s29, s16 - .endm - - .macro KERNEL2x4_SUB + ld1 {v8.2s, v9.2s}, [pB] + add pB, pB, #16 + ld1 {v0.2s}, [pA_0] + add pA_0, pA_0, #8 - ldr s8 , [ pB ] - ldr s9 , [ pB, #4 ] - ldr s10, [ pB, #8 ] - ldr s11, [ pB, #12 ] - - ldr s0 , [ pA ] - ldr s1 , [ pA, #4 ] - - fmadd s16 , s0, s8, s16 - fmadd s17 , s1, s8, s17 - - fmadd s20 , s0, s9, s20 - fmadd s21 , s1, s9, s21 - - fmadd s24 , s0, s10, s24 - fmadd s25 , s1, s10, s25 - - fmadd s28 , s0, s11, s28 - fmadd s29 , s1, s11, s29 - add pA , pA, #8 - add pB , pB, #16 - + fmla v16.2s, v0.2s, v8.2s[0] + fmla v20.2s, v0.2s, v8.2s[1] + fmla v24.2s, v0.2s, v9.2s[0] + fmla v28.2s, v0.2s, v9.2s[1] .endm - #define F1ST( op1, op2, op3) fmadd op1, op2, op3, op1 - #define L1ST( op1, op2, op3) ldr op1, [op2, op3] - .macro SAVE2x4 + ld1 {v8.2s}, [pCRow0] + fmla v8.2s, v16.2s, alphaV0 + st1 {v8.2s}, [pCRow0] - add pCRow1 , pCRow0, LDC - add pCRow2 , pCRow1, LDC - mov v0.d[0], tempALPHA + add pCRow1, pCRow0, LDC - L1ST ( s8,pCRow0, #0) - L1ST ( s9,pCRow0, #4 ) + ld1 {v12.2s}, [pCRow1] + fmla v12.2s, v20.2s, alphaV1 + st1 {v12.2s}, [pCRow1] - F1ST ( s8 , s0 , s16) - F1ST ( s9 , s0 , s17) + add pCRow2, pCRow1, LDC - str s8 , [pCRow0, #0] - str s9 , [pCRow0, #4 ] + ld1 {v8.2s}, [pCRow2] + fmla v8.2s, v24.2s, alphaV2 + st1 {v8.2s}, [pCRow2] - ldr s12, [pCRow1, #0] - ldr s13, [pCRow1, #4 ] + add pCRow1, pCRow2, LDC - F1ST ( s12, s0 , s20) - F1ST ( s13, s0 , s21) - - str s12, [pCRow1, #0] - str s13, [pCRow1, #4 ] - - L1ST ( s8,pCRow2 , #0) - L1ST ( s9,pCRow2 , #4 ) - - F1ST ( s8 , s0 , s24) - F1ST ( s9 , s0 , s25) - - str s8 , [pCRow2 , #0] - str s9 , [pCRow2 , #4 ] - - add pCRow1, pCRow2 , LDC - - ldr s12, [pCRow1, #0] - ldr s13, [pCRow1, #4 ] - - F1ST ( s12, s0 , s28) - F1ST ( s13, s0 , s29) - - str s12, [pCRow1, #0] - str s13, [pCRow1, #4 ] + ld1 {v12.2s}, [pCRow1] + fmla v12.2s, v28.2s, alphaV3 + st1 {v12.2s}, [pCRow1] add pCRow0, pCRow0, #8 - .endm - /******************************************************************************/ .macro INIT1x4 - - fsub s16 , s16 , s16 + fmov s16, wzr fmov s20, s16 - fmov s24, s16 - fmov s28, s16 - .endm - - .macro KERNEL1x4_SUB + ldr s0, [pA_0] + add pA_0, pA_0, #4 - ldr s8 , [ pB ] - ldr s9 , [ pB, #4 ] - ldr s10, [ pB, #8 ] - ldr s11, [ pB, #12 ] - - ldr s0 , [ pA ] - - fmadd s16 , s0, s8, s16 - fmadd s20 , s0, s9, s20 - fmadd s24 , s0, s10, s24 - fmadd s28 , s0, s11, s28 - - add pA , pA, #4 - add pB , pB, #16 + ld1 {v8.2s, v9.2s}, [pB] + add pB, pB, #16 + fmla v16.2s, v8.2s, v0.s[0] + fmla v20.2s, v9.2s, v0.s[0] .endm .macro SAVE1x4 + add pCRow1, pCRow0, LDC - add pCRow1 , pCRow0, LDC - add pCRow2 , pCRow1, LDC + ld1 {v8.s}[0], [pCRow0] + ld1 {v8.s}[1], [pCRow1] + fmla v8.2s, v16.2s, alphaV0 + st1 {v8.s}[0], [pCRow0] + st1 {v8.s}[1], [pCRow1] - mov v0.d[0], tempALPHA + add pCRow2, pCRow1, LDC + add pCRow1, pCRow2, LDC - L1ST ( s8,pCRow0, #0) - F1ST ( s8 , s0 , s16) - str s8 , [pCRow0, #0] - - L1ST ( s12,pCRow1, #0) - F1ST ( s12, s0 , s20) - str s12, [pCRow1, #0] - - L1ST ( s8,pCRow2 , #0) - F1ST ( s8 , s0 , s24) - str s8 , [pCRow2 , #0] - - add pCRow1, pCRow2 , LDC - - L1ST ( s12,pCRow1, #0) - F1ST ( s12, s0 , s28) - str s12, [pCRow1, #0] + ld1 {v12.s}[0], [pCRow2] + ld1 {v12.s}[1], [pCRow1] + fmla v12.2s, v20.2s, alphaV1 + st1 {v12.s}[0], [pCRow2] + st1 {v12.s}[1], [pCRow1] add pCRow0, pCRow0, #4 - .endm -/******************************************************************************/ 
/******************************************************************************/ .macro INIT4x2 - - fsub s16 , s16 , s16 - fmov s17, s16 - fmov s18, s16 - fmov s19, s16 - fmov s20, s16 - fmov s21, s16 - fmov s22, s16 - fmov s23, s16 - + fmov s16, wzr + fmov s17, s16 + fmov s20, s17 + fmov s21, s16 .endm - - .macro KERNEL4x2_SUB + ld1 {v8.2s}, [pB] + add pB, pB, #8 + ld1 {v0.2s, v1.2s}, [pA_0] + add pA_0, pA_0, #16 - ldr s8 , [ pB ] - ldr s9 , [ pB, #4 ] - - ldr s0 , [ pA ] - ldr s1 , [ pA, #4 ] - ldr s2 , [ pA, #8 ] - ldr s3 , [ pA, #12 ] - - fmadd s16 , s0, s8, s16 - fmadd s17 , s1, s8, s17 - fmadd s18 , s2, s8, s18 - fmadd s19 , s3, s8, s19 - - fmadd s20 , s0, s9, s20 - fmadd s21 , s1, s9, s21 - fmadd s22 , s2, s9, s22 - fmadd s23 , s3, s9, s23 - - add pA , pA, #16 - add pB , pB, #8 - + fmla v16.2s, v0.2s, v8.2s[0] + fmla v17.2s, v1.2s, v8.2s[0] + fmla v20.2s, v0.2s, v8.2s[1] + fmla v21.2s, v1.2s, v8.2s[1] .endm .macro SAVE4x2 + ld1 {v8.2s, v9.2s}, [pCRow0] + fmla v8.2s, v16.2s, alphaV0 + fmla v9.2s, v17.2s, alphaV1 + st1 {v8.2s, v9.2s}, [pCRow0] - add pCRow1 , pCRow0, LDC + add pCRow1, pCRow0, LDC - mov v0.d[0], tempALPHA - - L1ST ( s8,pCRow0, #0) - L1ST ( s9,pCRow0, #4 ) - L1ST ( s10,pCRow0, #8 ) - L1ST ( s11,pCRow0, #12 ) - - F1ST ( s8 , s0 , s16) - F1ST ( s9 , s0 , s17) - F1ST ( s10, s0 , s18) - F1ST ( s11, s0 , s19) - - str s8 , [pCRow0] - str s9 , [pCRow0, #4 ] - str s10, [pCRow0, #8 ] - str s11, [pCRow0, #12 ] - - L1ST ( s12,pCRow1, #0) - L1ST ( s13,pCRow1, #4 ) - L1ST ( s14,pCRow1, #8 ) - L1ST ( s15,pCRow1, #12 ) - - F1ST ( s12, s0 , s20) - F1ST ( s13, s0 , s21) - F1ST ( s14, s0 , s22) - F1ST ( s15, s0 , s23) - - str s12, [pCRow1] - str s13, [pCRow1, #4 ] - str s14, [pCRow1, #8 ] - str s15, [pCRow1, #12 ] + ld1 {v12.2s, v13.2s}, [pCRow1] + fmla v12.2s, v20.2s, alphaV2 + fmla v13.2s, v21.2s, alphaV3 + st1 {v12.2s, v13.2s}, [pCRow1] add pCRow0, pCRow0, #16 - .endm - /******************************************************************************/ .macro INIT2x2 - - fsub s16 , s16 , s16 - fmov s17, s16 + fmov s16, wzr fmov s20, s16 - fmov s21, s16 - .endm - - .macro KERNEL2x2_SUB + ld1 {v8.2s}, [pB] + add pB, pB, #8 - ldr s8 , [ pB ] - ldr s9 , [ pB, #4 ] - - ldr s0 , [ pA ] - ldr s1 , [ pA, #4 ] - - fmadd s16 , s0, s8, s16 - fmadd s17 , s1, s8, s17 - - fmadd s20 , s0, s9, s20 - fmadd s21 , s1, s9, s21 - - add pA , pA, #8 - add pB , pB, #8 + ld1 {v0.2s}, [pA_0] + add pA_0, pA_0, #8 + fmla v16.2s, v0.2s, v8.2s[0] + fmla v20.2s, v0.2s, v8.2s[1] .endm .macro SAVE2x2 + ld1 {v8.2s}, [pCRow0] + fmla v8.2s, v16.2s, alphaV0 + st1 {v8.2s}, [pCRow0] add pCRow1 , pCRow0, LDC - mov v0.d[0], tempALPHA - - L1ST ( s8,pCRow0, #0 ) - L1ST ( s9,pCRow0, #4 ) - - F1ST ( s8 , s0 , s16) - F1ST ( s9 , s0 , s17) - - str s8 , [pCRow0] - str s9 , [pCRow0, #4 ] - - L1ST ( s12,pCRow1, #0 ) - L1ST ( s13,pCRow1, #4 ) - - F1ST ( s12, s0 , s20) - F1ST ( s13, s0 , s21) - - str s12, [pCRow1] - str s13, [pCRow1, #4 ] + ld1 {v12.2s}, [pCRow1] + fmla v12.2s, v20.2s, alphaV1 + st1 {v12.2s}, [pCRow1] add pCRow0, pCRow0, #8 - .endm /******************************************************************************/ .macro INIT1x2 - - fsub s16 , s16 , s16 - fmov s20, s16 - + fmov s16, wzr .endm - - .macro KERNEL1x2_SUB - - ldr s8 , [ pB ] - ldr s9 , [ pB, #4 ] - - ldr s0 , [ pA ] - fmadd s16 , s0, s8, s16 - fmadd s20 , s0, s9, s20 - - add pA , pA, #4 + ld1 {v8.2s} , [pB] add pB , pB, #8 + ldr s0 , [pA_0] + add pA_0, pA_0, #4 + + fmla v16.2s, v8.2s, v0.2s[0] .endm .macro SAVE1x2 - add pCRow1 , pCRow0, LDC - mov v0.d[0], 
tempALPHA - - L1ST ( s8,pCRow0, #0) - F1ST ( s8 , s0 , s16) - str s8 , [pCRow0] - - L1ST ( s12,pCRow1, #0) - F1ST ( s12, s0 , s20) - str s12, [pCRow1] + ld1 {v8.s}[0], [pCRow0] + ld1 {v8.s}[1], [pCRow1] + fmla v8.2s, v16.2s, alphaV0 + st1 {v8.s}[0], [pCRow0] + st1 {v8.s}[1], [pCRow1] add pCRow0, pCRow0, #4 - .endm -/******************************************************************************/ /******************************************************************************/ .macro INIT4x1 - - fsub s16 , s16 , s16 - fmov s17, s16 - fmov s18, s16 - fmov s19, s16 - + fmov s16, wzr + fmov s17, s16 .endm - - .macro KERNEL4x1_SUB - - ldr s8 , [ pB ] - - ldr s0 , [ pA ] - ldr s1 , [ pA, #4 ] - ldr s2 , [ pA, #8 ] - ldr s3 , [ pA, #12 ] - - fmadd s16 , s0, s8, s16 - fmadd s17 , s1, s8, s17 - fmadd s18 , s2, s8, s18 - fmadd s19 , s3, s8, s19 - - add pA , pA, #16 + ldr s8, [pB] add pB , pB, #4 + ld1 {v0.2s, v1.2s}, [pA_0] + add pA_0 , pA_0, #16 + + fmla v16.2s, v0.2s, v8.2s[0] + fmla v17.2s, v1.2s, v8.2s[0] .endm .macro SAVE4x1 - - - mov v0.d[0], tempALPHA - - L1ST ( s8,pCRow0, #0 ) - L1ST ( s9,pCRow0, #4 ) - L1ST ( s10,pCRow0, #8 ) - L1ST ( s11,pCRow0, #12 ) - - F1ST ( s8 , s0 , s16) - F1ST ( s9 , s0 , s17) - F1ST ( s10, s0 , s18) - F1ST ( s11, s0 , s19) - - str s8 , [pCRow0] - str s9 , [pCRow0, #4 ] - str s10, [pCRow0, #8 ] - str s11, [pCRow0, #12 ] + ld1 {v8.2s, v9.2s}, [pCRow0] + fmla v8.2s, v16.2s, alphaV0 + fmla v9.2s, v17.2s, alphaV1 + st1 {v8.2s, v9.2s}, [pCRow0] add pCRow0, pCRow0, #16 - .endm @@ -668,186 +815,271 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /******************************************************************************/ .macro INIT2x1 - - fsub s16 , s16 , s16 - fmov s17, s16 - + fmov s16, wzr .endm - - .macro KERNEL2x1_SUB - - ldr s8 , [ pB ] - - ldr s0 , [ pA ] - ldr s1 , [ pA, #4 ] - - fmadd s16 , s0, s8, s16 - fmadd s17 , s1, s8, s17 - - add pA , pA, #8 + ldr s8, [pB] add pB , pB, #4 + ld1 {v0.2s}, [pA_0] + add pA_0 , pA_0, #8 + + fmla v16.2s, v0.2s, v8.2s[0] .endm .macro SAVE2x1 - - - mov v0.d[0], tempALPHA - - L1ST ( s8,pCRow0, #0 ) - L1ST ( s9,pCRow0, #4 ) - - F1ST ( s8 , s0 , s16) - F1ST ( s9 , s0 , s17) - - str s8 , [pCRow0] - str s9 , [pCRow0, #4 ] + ld1 {v8.2s}, [pCRow0] + fmla v8.2s, v16.2s, alphaV0 + st1 {v8.2s}, [pCRow0] add pCRow0, pCRow0, #8 - .endm /******************************************************************************/ .macro INIT1x1 - - fsub s16 , s16 , s16 - + fmov s16, wzr .endm - - .macro KERNEL1x1_SUB - - ldr s8 , [ pB ] - - ldr s0 , [ pA ] - - fmadd s16 , s0, s8, s16 - - add pA , pA, #4 + ldr s8, [pB] add pB , pB, #4 + ldr s0, [pA_0] + add pA_0 , pA_0, #4 + + fmadd s16, s0, s8, s16 .endm .macro SAVE1x1 - - - mov v0.d[0], tempALPHA - - L1ST ( s8,pCRow0, #0 ) - F1ST ( s8 , s0 , s16) - str s8 , [pCRow0] + ldr s8, [pCRow0] + fmadd s8, s16, alpha0, s8 + str s8, [pCRow0] add pCRow0, pCRow0, #4 - .endm - - - - -/************************************************************************************** +/******************************************************************************* * End of macro definitions -**************************************************************************************/ +*******************************************************************************/ PROLOGUE .align 5 - add sp,sp,#-(5*16) - stp d8,d9,[sp,#(0*16)] - stp d10,d11,[sp,#(1*16)] - stp d12,d13,[sp,#(2*16)] - stp d14,d15,[sp,#(3*16)] - stp d16,d17,[sp,#(4*16)] + add sp, sp, #-(11 * 16) + stp d8, d9, [sp, #(0 * 16)] + stp d10, d11, [sp, 
#(1 * 16)] + stp d12, d13, [sp, #(2 * 16)] + stp d14, d15, [sp, #(3 * 16)] + stp d16, d17, [sp, #(4 * 16)] + stp x18, x19, [sp, #(5 * 16)] + stp x20, x21, [sp, #(6 * 16)] + stp x22, x23, [sp, #(7 * 16)] + stp x24, x25, [sp, #(8 * 16)] + stp x26, x27, [sp, #(9 * 16)] + str x28, [sp, #(10 * 16)] - mov tempALPHA, v0.d[0] - lsl LDC, LDC, #2 // ldc = ldc * 4 + fmov alpha0, s0 + fmov alpha1, s0 + fmov alpha2, s0 + fmov alpha3, s0 + + lsl LDC, LDC, #2 // ldc = ldc * 4 mov pB, origPB mov counterJ, origN - asr counterJ, counterJ, #2 // J = J / 4 + asr counterJ, counterJ, #2 // J = J / 4 cmp counterJ, #0 ble sgemm_kernel_L2_BEGIN +/******************************************************************************/ + sgemm_kernel_L4_BEGIN: + mov pCRow0, pC // pCRow0 = C + add pC, pC, LDC, lsl #2 - mov pCRow0, pC // pCRow0 = C - add pC,pC,LDC, lsl #2 + lsl temp, origK, #4 // k * 4 * 4 + mov pA_0, origPA // pA_0 = start of A array + add pA_1, temp, pA_0 + add pA_2, temp, pA_1 + add pA_3, temp, pA_2 - mov pA, origPA // pA = start of A array - - - -sgemm_kernel_L4_M4_BEGIN: +sgemm_kernel_L4_M16_BEGIN: mov counterI, origM - asr counterI, counterI, #2 // counterI = counterI / 4 + asr counterI, counterI, #4 // counterI = counterI / 16 cmp counterI, #0 + ble sgemm_kernel_L4_M8_BEGIN + +sgemm_kernel_L4_M16_20: + + mov pB, origPB + asr counterL , origK, #1 // L = K / 2 + cmp counterL , #2 // is there at least 4 to do? + blt sgemm_kernel_L4_M16_32 + + KERNEL16x4_I // do one in the K + KERNEL16x4_M2 // do another in the K + + subs counterL, counterL, #2 + ble sgemm_kernel_L4_M16_22a + .align 5 + +sgemm_kernel_L4_M16_22: + + KERNEL16x4_M1 + KERNEL16x4_M2 + + subs counterL, counterL, #1 + bgt sgemm_kernel_L4_M16_22 + + +sgemm_kernel_L4_M16_22a: + + KERNEL16x4_M1 + KERNEL16x4_E + + b sgemm_kernel_L4_M16_44 + +sgemm_kernel_L4_M16_32: + + tst counterL, #1 + ble sgemm_kernel_L4_M16_40 + + KERNEL16x4_I + + KERNEL16x4_E + + b sgemm_kernel_L4_M16_44 + + +sgemm_kernel_L4_M16_40: + + INIT16x4 + +sgemm_kernel_L4_M16_44: + + ands counterL , origK, #1 + ble sgemm_kernel_L4_M16_100 + +sgemm_kernel_L4_M16_46: + + KERNEL16x4_SUB + +sgemm_kernel_L4_M16_100: + + SAVE16x4 + +sgemm_kernel_L4_M16_END: + lsl temp, origK, #4 // k * 4 * 4 = Four rows of A + add pA_0, pA_0, temp + add pA_0, pA_0, temp + add pA_0, pA_0, temp + add pA_1, pA_0, temp + add pA_2, pA_1, temp + add pA_3, pA_2, temp + subs counterI, counterI, #1 + bne sgemm_kernel_L4_M16_20 + +sgemm_kernel_L4_M8_BEGIN: + mov counterI, origM + tst counterI , #15 + ble sgemm_kernel_L4_END + + tst counterI, #8 + ble sgemm_kernel_L4_M4_BEGIN + +sgemm_kernel_L4_M8_20: + + INIT8x4 + + mov pB, origPB + asr counterL, origK, #3 // counterL = counterL / 8 + cmp counterL, #0 + ble sgemm_kernel_L4_M8_40 + +sgemm_kernel_L4_M8_22: + + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + + subs counterL, counterL, #1 + bgt sgemm_kernel_L4_M8_22 + + +sgemm_kernel_L4_M8_40: + + ands counterL , origK, #7 // counterL = counterL % 8 + ble sgemm_kernel_L4_M8_100 + +sgemm_kernel_L4_M8_42: + + KERNEL8x4_SUB + + subs counterL, counterL, #1 + bgt sgemm_kernel_L4_M8_42 + +sgemm_kernel_L4_M8_100: + + SAVE8x4 + +sgemm_kernel_L4_M8_END: + lsl temp, origK, #4 // k * 4 * 4 + add pA_0, pA_0, temp + +sgemm_kernel_L4_M4_BEGIN: + mov counterI, origM + tst counterI , #7 + ble sgemm_kernel_L4_END + + tst counterI, #4 ble sgemm_kernel_L4_M2_BEGIN sgemm_kernel_L4_M4_20: + INIT4x4 + mov pB, origPB - asr counterL , origK, #1 // L = K / 2 - cmp 
counterL , #2 // is there at least 4 to do? - blt sgemm_kernel_L4_M4_32 - - - - KERNEL4x4_I //do one in the K - KERNEL4x4_M2 //do another in the K - - subs counterL, counterL, #2 // subtract 2, since one is always done at the tail - ble sgemm_kernel_L4_M4_22a - .align 5 + asr counterL, origK, #3 // counterL = counterL / 8 + cmp counterL, #0 + ble sgemm_kernel_L4_M4_40 sgemm_kernel_L4_M4_22: - KERNEL4x4_M1 - KERNEL4x4_M2 + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB subs counterL, counterL, #1 bgt sgemm_kernel_L4_M4_22 -sgemm_kernel_L4_M4_22a: - - KERNEL4x4_M1 - KERNEL4x4_E - - b sgemm_kernel_L4_M4_44 - -sgemm_kernel_L4_M4_32: // less than 4 to do in the K direction - - tst counterL, #1 - ble sgemm_kernel_L4_M4_40 - - KERNEL4x4_I - - KERNEL4x4_E - - b sgemm_kernel_L4_M4_44 - sgemm_kernel_L4_M4_40: - INIT4x4 - - -sgemm_kernel_L4_M4_44: - - ands counterL , origK, #1 + ands counterL , origK, #7 // counterL = counterL % 8 ble sgemm_kernel_L4_M4_100 -sgemm_kernel_L4_M4_46: +sgemm_kernel_L4_M4_42: KERNEL4x4_SUB subs counterL, counterL, #1 - bne sgemm_kernel_L4_M4_46 + bgt sgemm_kernel_L4_M4_42 sgemm_kernel_L4_M4_100: @@ -855,9 +1087,6 @@ sgemm_kernel_L4_M4_100: sgemm_kernel_L4_M4_END: - subs counterI, counterI, #1 - bne sgemm_kernel_L4_M4_20 - sgemm_kernel_L4_M2_BEGIN: @@ -865,7 +1094,7 @@ sgemm_kernel_L4_M2_BEGIN: tst counterI , #3 ble sgemm_kernel_L4_END - tst counterI, #2 // counterI = counterI / 2 + tst counterI, #2 // counterI = counterI / 2 ble sgemm_kernel_L4_M1_BEGIN sgemm_kernel_L4_M2_20: @@ -873,7 +1102,7 @@ sgemm_kernel_L4_M2_20: INIT2x4 mov pB, origPB - asr counterL , origK, #3 // counterL = counterL / 8 + asr counterL , origK, #3 // counterL = counterL / 8 cmp counterL , #0 ble sgemm_kernel_L4_M2_40 @@ -895,7 +1124,7 @@ sgemm_kernel_L4_M2_22: sgemm_kernel_L4_M2_40: - ands counterL , origK, #7 // counterL = counterL % 8 + ands counterL , origK, #7 // counterL = counterL % 8 ble sgemm_kernel_L4_M2_100 sgemm_kernel_L4_M2_42: @@ -914,7 +1143,7 @@ sgemm_kernel_L4_M2_END: sgemm_kernel_L4_M1_BEGIN: - tst counterI, #1 // counterI = counterI % 2 + tst counterI, #1 // counterI = counterI % 2 ble sgemm_kernel_L4_END sgemm_kernel_L4_M1_20: @@ -922,7 +1151,7 @@ sgemm_kernel_L4_M1_20: INIT1x4 mov pB, origPB - asr counterL , origK, #3 // counterL = counterL / 8 + asr counterL , origK, #3 // counterL = counterL / 8 cmp counterL , #0 ble sgemm_kernel_L4_M1_40 @@ -943,7 +1172,7 @@ sgemm_kernel_L4_M1_22: sgemm_kernel_L4_M1_40: - ands counterL , origK, #7 // counterL = counterL % 8 + ands counterL , origK, #7 // counterL = counterL % 8 ble sgemm_kernel_L4_M1_100 sgemm_kernel_L4_M1_42: @@ -960,35 +1189,36 @@ sgemm_kernel_L4_M1_100: sgemm_kernel_L4_END: - add origPB, origPB, origK, lsl #4 // B = B + K * 4 * 4 + lsl temp, origK, #4 + add origPB, origPB, temp // B = B + K * 4 * 4 - subs counterJ, counterJ , #1 // j-- + subs counterJ, counterJ , #1 // j-- bgt sgemm_kernel_L4_BEGIN - -/*********************************************************************************************/ +/******************************************************************************/ sgemm_kernel_L2_BEGIN: // less than 2 left in N direction mov counterJ , origN tst counterJ , #3 - ble sgemm_kernel_L999 // error, N was less than 4? 
+ ble sgemm_kernel_L999 tst counterJ , #2 ble sgemm_kernel_L1_BEGIN - mov pCRow0, pC // pCRow0 = pC - add pC , pC, LDC, lsl #1 + mov pCRow0, pC // pCRow0 = pC - mov pA, origPA // pA = A + add pC,pC,LDC, lsl #1 + + mov pA_0, origPA // pA_0 = A sgemm_kernel_L2_M4_BEGIN: mov counterI, origM - asr counterI, counterI, #2 // counterI = counterI / 4 + asr counterI, counterI, #2 // counterI = counterI / 4 cmp counterI,#0 ble sgemm_kernel_L2_M2_BEGIN @@ -997,7 +1227,7 @@ sgemm_kernel_L2_M4_20: INIT4x2 mov pB, origPB - asr counterL , origK, #3 // counterL = counterL / 8 + asr counterL , origK, #3 // counterL = counterL / 8 cmp counterL,#0 ble sgemm_kernel_L2_M4_40 .align 5 @@ -1019,7 +1249,7 @@ sgemm_kernel_L2_M4_22: sgemm_kernel_L2_M4_40: - ands counterL , origK, #7 // counterL = counterL % 8 + ands counterL , origK, #7 // counterL = counterL % 8 ble sgemm_kernel_L2_M4_100 sgemm_kernel_L2_M4_42: @@ -1045,7 +1275,7 @@ sgemm_kernel_L2_M2_BEGIN: tst counterI , #3 ble sgemm_kernel_L2_END - tst counterI, #2 // counterI = counterI / 2 + tst counterI, #2 // counterI = counterI / 2 ble sgemm_kernel_L2_M1_BEGIN sgemm_kernel_L2_M2_20: @@ -1053,7 +1283,7 @@ sgemm_kernel_L2_M2_20: INIT2x2 mov pB, origPB - asr counterL , origK, #3 // counterL = counterL / 8 + asr counterL , origK, #3 // counterL = counterL / 8 cmp counterL,#0 ble sgemm_kernel_L2_M2_40 @@ -1075,7 +1305,7 @@ sgemm_kernel_L2_M2_22: sgemm_kernel_L2_M2_40: - ands counterL , origK, #7 // counterL = counterL % 8 + ands counterL , origK, #7 // counterL = counterL % 8 ble sgemm_kernel_L2_M2_100 sgemm_kernel_L2_M2_42: @@ -1094,7 +1324,7 @@ sgemm_kernel_L2_M2_END: sgemm_kernel_L2_M1_BEGIN: - tst counterI, #1 // counterI = counterI % 2 + tst counterI, #1 // counterI = counterI % 2 ble sgemm_kernel_L2_END sgemm_kernel_L2_M1_20: @@ -1102,7 +1332,7 @@ sgemm_kernel_L2_M1_20: INIT1x2 mov pB, origPB - asr counterL , origK, #3 // counterL = counterL / 8 + asr counterL , origK, #3 // counterL = counterL / 8 cmp counterL, #0 ble sgemm_kernel_L2_M1_40 @@ -1123,7 +1353,7 @@ sgemm_kernel_L2_M1_22: sgemm_kernel_L2_M1_40: - ands counterL , origK, #7 // counterL = counterL % 8 + ands counterL , origK, #7 // counterL = counterL % 8 ble sgemm_kernel_L2_M1_100 sgemm_kernel_L2_M1_42: @@ -1139,9 +1369,9 @@ sgemm_kernel_L2_M1_100: sgemm_kernel_L2_END: - add origPB, origPB, origK, lsl #3 // B = B + K * 2 * 4 + add origPB, origPB, origK, lsl #3 // B = B + K * 2 * 4 -/*********************************************************************************************/ +/******************************************************************************/ sgemm_kernel_L1_BEGIN: @@ -1150,17 +1380,17 @@ sgemm_kernel_L1_BEGIN: ble sgemm_kernel_L999 // done - mov pCRow0, pC // pCRow0 = C - add pC , pCRow0 , LDC // C01 is the current line, update pC to point to next + mov pCRow0, pC // pCRow0 = C + add pC , pC , LDC // Update pC to point to next - mov pA, origPA // pA = A + mov pA_0, origPA // pA_0 = A sgemm_kernel_L1_M4_BEGIN: mov counterI, origM - asr counterI, counterI, #2 // counterI = counterI / 4 + asr counterI, counterI, #2 // counterI = counterI / 4 cmp counterI, #0 ble sgemm_kernel_L1_M2_BEGIN @@ -1169,7 +1399,7 @@ sgemm_kernel_L1_M4_20: INIT4x1 mov pB, origPB - asr counterL , origK, #3 // counterL = counterL / 8 + asr counterL , origK, #3 // counterL = counterL / 8 cmp counterL , #0 ble sgemm_kernel_L1_M4_40 .align 5 @@ -1191,7 +1421,7 @@ sgemm_kernel_L1_M4_22: sgemm_kernel_L1_M4_40: - ands counterL , origK, #7 // counterL = counterL % 8 + ands counterL , origK, #7 // counterL = counterL 
% 8 ble sgemm_kernel_L1_M4_100 sgemm_kernel_L1_M4_42: @@ -1217,7 +1447,7 @@ sgemm_kernel_L1_M2_BEGIN: tst counterI , #3 ble sgemm_kernel_L1_END - tst counterI, #2 // counterI = counterI / 2 + tst counterI, #2 // counterI = counterI / 2 ble sgemm_kernel_L1_M1_BEGIN sgemm_kernel_L1_M2_20: @@ -1225,7 +1455,7 @@ sgemm_kernel_L1_M2_20: INIT2x1 mov pB, origPB - asr counterL , origK, #3 // counterL = counterL / 8 + asr counterL , origK, #3 // counterL = counterL / 8 cmp counterL , #0 ble sgemm_kernel_L1_M2_40 @@ -1247,7 +1477,7 @@ sgemm_kernel_L1_M2_22: sgemm_kernel_L1_M2_40: - ands counterL , origK, #7 // counterL = counterL % 8 + ands counterL , origK, #7 // counterL = counterL % 8 ble sgemm_kernel_L1_M2_100 sgemm_kernel_L1_M2_42: @@ -1266,7 +1496,7 @@ sgemm_kernel_L1_M2_END: sgemm_kernel_L1_M1_BEGIN: - tst counterI, #1 // counterI = counterI % 2 + tst counterI, #1 // counterI = counterI % 2 ble sgemm_kernel_L1_END sgemm_kernel_L1_M1_20: @@ -1274,7 +1504,7 @@ sgemm_kernel_L1_M1_20: INIT1x1 mov pB, origPB - asr counterL , origK, #3 // counterL = counterL / 8 + asr counterL , origK, #3 // counterL = counterL / 8 cmp counterL , #0 ble sgemm_kernel_L1_M1_40 @@ -1295,7 +1525,7 @@ sgemm_kernel_L1_M1_22: sgemm_kernel_L1_M1_40: - ands counterL , origK, #7 // counterL = counterL % 8 + ands counterL , origK, #7 // counterL = counterL % 8 ble sgemm_kernel_L1_M1_100 sgemm_kernel_L1_M1_42: @@ -1314,13 +1544,19 @@ sgemm_kernel_L1_END: sgemm_kernel_L999: - mov x0, #0 // set return value - ldp d8,d9,[sp,#(0*16)] - ldp d10,d11,[sp,#(1*16)] - ldp d12,d13,[sp,#(2*16)] - ldp d14,d15,[sp,#(3*16)] - ldp d16,d17,[sp,#(4*16)] - add sp,sp,#(5*16) + mov x0, #0 // set return value + ldp d8, d9, [sp, #(0 * 16)] + ldp d10, d11, [sp, #(1 * 16)] + ldp d12, d13, [sp, #(2 * 16)] + ldp d14, d15, [sp, #(3 * 16)] + ldp d16, d17, [sp, #(4 * 16)] + ldp x18, x19, [sp, #(5 * 16)] + ldp x20, x21, [sp, #(6 * 16)] + ldp x22, x23, [sp, #(7 * 16)] + ldp x24, x25, [sp, #(8 * 16)] + ldp x26, x27, [sp, #(9 * 16)] + ldr x28, [sp, #(10 * 16)] + add sp, sp, #(11*16) ret EPILOGUE
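
Note (not part of the patch): the new kernel's main path computes a 16x4 block of C per M-loop iteration, walking four packed 4-row A panels in parallel (pA_0..pA_3, each K*4 floats apart, as set up before sgemm_kernel_L4_M16_BEGIN) against one packed 4-column B panel, accumulating into v16..v31, and applying alpha in SAVE16x4. The C sketch below is an illustrative reference for what that 16x4 micro-kernel computes; the function name, argument names, and the assumption that C is column-major with leading dimension ldc are mine for illustration, not taken from the patch.

    /* Reference sketch of the 16x4 micro-kernel: C(0:15, 0:3) += alpha * A*B,
     * with A given as four consecutive packed 4xK panels (pA_0..pA_3) and B as
     * one packed 4xK panel (4 values per k).  Illustrative only; names and the
     * column-major C layout are assumptions. */
    static void sgemm_micro_16x4_ref(long K, float alpha,
                                     const float *A,   /* 4 panels of 4*K floats */
                                     const float *B,   /* 4*K floats             */
                                     float *C, long ldc)
    {
        float acc[4][16] = {{0.0f}};           /* mirrors v16..v31: 4 cols x 16 rows */

        for (long k = 0; k < K; k++) {
            for (int p = 0; p < 4; p++) {      /* pA_0 .. pA_3 */
                const float *a = A + 4 * K * p + 4 * k;   /* 4 A values at step k */
                for (int j = 0; j < 4; j++)    /* B element v8.s[j] at step k */
                    for (int i = 0; i < 4; i++)
                        acc[j][4 * p + i] += a[i] * B[4 * k + j];
            }
        }

        /* SAVE16x4: column j of C gets C(i,j) += alpha * acc[j][i] */
        for (int j = 0; j < 4; j++)
            for (int i = 0; i < 16; i++)
                C[i + (long)j * ldc] += alpha * acc[j][i];
    }

The pipelined KERNEL16x4_I/M1/M2/E variants compute the same result as KERNEL16x4_SUB; they only overlap the loads for step k+1 (v1/v3/v5/v7 and v12, marked "for next round") with the multiply-accumulates for step k, so two K iterations are kept in flight at a time.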