/* OpenBLAS/kernel/arm64/dgemm_kernel_4x4.S
   (extraction metadata: 1339 lines, 24 KiB, ArmAsm) */

/*******************************************************************************
Copyright (c) 2015, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define ASSEMBLER
#include "common.h"
/* X0 X1 X2 s0 X3 x4 x5 x6 */
/*int CNAME(BLASLONG bm,BLASLONG bn,BLASLONG bk,FLOAT alpha0,FLOAT* ba,FLOAT* bb,FLOAT* C,BLASLONG ldc */
/* Symbolic register map for the DGEMM kernel. All uses below go through
 * these names, so changing a mapping here retargets the whole file. */
#define origM x0
#define origN x1
#define origK x2
#define origPA x3
#define origPB x4
#define pC x5
#define LDC x6
#define temp x7
#define counterL x8
#define counterI x9
#define counterJ x10
#define pB x11
#define pCRow0 x12
#define pCRow1 x13
#define pCRow2 x14
#define pA x15
#define ppC x16
#define ppCRow0 x17
/* NOTE: ppCRow1 was previously x18. x18 is the AAPCS64 platform register
 * (reserved on Apple and Windows targets; to be avoided for portability
 * on Linux as well), so use x21 instead — it is callee-saved and already
 * preserved by this function's prologue/epilogue, and was otherwise unused. */
#define ppCRow1 x21
#define ppCRow2 x19
#define ppA x20
#define alpha0 d10
#define alphaV0 v10.d[0]
#define alpha1 d11
#define alphaV1 v11.d[0]
#define alpha2 d14
#define alphaV2 v14.d[0]
#define alpha3 d15
#define alphaV3 v15.d[0]
// 00 origM
// 01 origN
// 02 origK
// 03 origPA
// 04 origPB
// 05 pC
// 06 origLDC -> LDC
// 07 offset -> temp
// 08 counterL
// 09 counterI
// 10 counterJ
// 11 pB
// 12 pCRow0
// 13 pCRow1
// 14 pCRow2
// 15 pA
// 16 ppC
// 17 ppCRow0
// 18 platform register (AAPCS64) -- intentionally unused
// 19 must save ppCRow2
// 20 must save ppA
// 21 must save ppCRow1
// 22 must save
// 23 must save
// 24 must save
// 25 must save
// 26 must save
// 27 must save
// 28 must save
// 29 frame
// 30 link
// 31 sp
//v00 ALPHA -> pA00, pA01
//v01 pA02, pA03
//v02 ppA00, ppA01
//v03 ppA02, ppA03
//v04 pA10, pA11
//v05 pA12, pA13
//v06 ppA10, ppA11
//v07 ppA12, ppA13
//v08 must save pB00, pB01
//v09 must save pB02, pB03
//v10 must save ALPHA0
//v11 must save ALPHA1
//v12 must save pB10, pB11
//v13 must save pB12, pB13
//v14 must save ALPHA2
//v15 must save ALPHA3
//v16 must save C00, C01
//v17 must save C02, C03
//v18 ppC00, ppC01
//v19 ppC02, ppC03
//v20 C10, C11
//v21 C12, C13
//v22 ppC10, ppC11
//v23 ppC12, ppC13
//v24 C20, C21
//v25 C22, C23
//v26 ppC20, ppC21
//v27 ppC22, ppC23
//v28 C30, C31
//v29 C32, C33
//v30 ppC30, ppC31
//v31 ppC32, ppC33
/*******************************************************************************
* Macro definitions
*******************************************************************************/
// Zero the sixteen 8x4-tile accumulators v16..v31.
// movi vN.2d, #0 clears the full 128-bit register, exactly as the
// scalar fmov-from-xzr/chained-copy idiom leaves it.
.macro INIT8x4
movi v16.2d, #0
movi v17.2d, #0
movi v18.2d, #0
movi v19.2d, #0
movi v20.2d, #0
movi v21.2d, #0
movi v22.2d, #0
movi v23.2d, #0
movi v24.2d, #0
movi v25.2d, #0
movi v26.2d, #0
movi v27.2d, #0
movi v28.2d, #0
movi v29.2d, #0
movi v30.2d, #0
movi v31.2d, #0
.endm
// First (initialising) step of the software-pipelined 8x4 kernel.
// Loads one k-step of B (v8/v9, 4 doubles from pB) and of A
// (v0/v1 from pA = rows 0..3, v2/v3 from ppA = rows 4..7 of the
// packed panel), then uses fmul (not fmla) so the accumulators
// v16..v31 need no prior zeroing. It also preloads the NEXT
// k-step into v12/v13 (B) and v4..v7 (A) for KERNEL8x4_M2.
.macro KERNEL8x4_I
ld1 {v8.2d, v9.2d}, [pB]
add pB, pB, #32
ld1 {v0.2d, v1.2d}, [pA]
add pA, pA, #32
fmul v16.2d, v0.2d, v8.2d[0]
fmul v29.2d, v1.2d, v9.2d[1]
ld1 {v2.2d, v3.2d}, [ppA]
add ppA, ppA, #32
fmul v20.2d, v0.2d, v8.2d[1]
fmul v25.2d, v1.2d, v9.2d[0]
fmul v18.2d, v2.2d, v8.2d[0]
fmul v31.2d, v3.2d, v9.2d[1]
fmul v22.2d, v2.2d, v8.2d[1]
fmul v27.2d, v3.2d, v9.2d[0]
ld1 {v12.2d, v13.2d}, [pB] // for next round
add pB, pB, #32
fmul v24.2d, v0.2d, v9.2d[0]
fmul v21.2d, v1.2d, v8.2d[1]
ld1 {v4.2d, v5.2d} , [pA] // for next round
add pA, pA, #32
fmul v26.2d, v2.2d, v9.2d[0]
fmul v23.2d, v3.2d, v8.2d[1]
ld1 {v6.2d, v7.2d} , [ppA] // for next round
add ppA, ppA, #32
fmul v28.2d, v0.2d, v9.2d[1]
fmul v17.2d, v1.2d, v8.2d[0]
fmul v30.2d, v2.2d, v9.2d[1]
fmul v19.2d, v3.2d, v8.2d[0]
.endm
// Pipelined "even" step: accumulates from the registers preloaded by
// KERNEL8x4_I / KERNEL8x4_M1 (A in v4..v7, B in v12/v13) while loading
// the NEXT k-step into v0..v3 / v8,v9. Loads and prefetch are interleaved
// with the fmla chain deliberately; do not reorder.
.macro KERNEL8x4_M2
fmla v16.2d, v4.2d, v12.2d[0]
fmla v29.2d, v5.2d, v13.2d[1]
ld1 {v8.2d, v9.2d}, [pB]
add pB, pB, #32
fmla v18.2d, v6.2d, v12.2d[0]
fmla v31.2d, v7.2d, v13.2d[1]
fmla v20.2d, v4.2d, v12.2d[1]
fmla v25.2d, v5.2d, v13.2d[0]
prfm PLDL1KEEP, [pB, #512]
fmla v22.2d, v6.2d, v12.2d[1]
fmla v27.2d, v7.2d, v13.2d[0]
fmla v24.2d, v4.2d, v13.2d[0]
fmla v21.2d, v5.2d, v12.2d[1]
ld1 {v0.2d, v1.2d}, [pA]
add pA, pA, #32
fmla v26.2d, v6.2d, v13.2d[0]
fmla v23.2d, v7.2d, v12.2d[1]
fmla v28.2d, v4.2d, v13.2d[1]
fmla v17.2d, v5.2d, v12.2d[0]
ld1 {v2.2d, v3.2d}, [ppA]
add ppA, ppA, #32
fmla v30.2d, v6.2d, v13.2d[1]
fmla v19.2d, v7.2d, v12.2d[0]
.endm
// Pipelined "odd" step: accumulates from the current registers
// (A in v0..v3, B in v8/v9) while loading the NEXT k-step into
// v4..v7 / v12,v13 for the following KERNEL8x4_M2 (or KERNEL8x4_E).
.macro KERNEL8x4_M1
fmla v16.2d, v0.2d, v8.2d[0]
fmla v29.2d, v1.2d, v9.2d[1]
ld1 {v12.2d, v13.2d}, [pB] // for next round
add pB, pB, #32
fmla v18.2d, v2.2d, v8.2d[0]
fmla v31.2d, v3.2d, v9.2d[1]
fmla v20.2d, v0.2d, v8.2d[1]
fmla v25.2d, v1.2d, v9.2d[0]
prfm PLDL1KEEP, [pA, #512]
fmla v22.2d, v2.2d, v8.2d[1]
fmla v27.2d, v3.2d, v9.2d[0]
prfm PLDL1KEEP, [ppA, #512]
fmla v24.2d, v0.2d, v9.2d[0]
fmla v21.2d, v1.2d, v8.2d[1]
ld1 {v4.2d, v5.2d} , [pA] // for next round
add pA, pA, #32
fmla v26.2d, v2.2d, v9.2d[0]
fmla v23.2d, v3.2d, v8.2d[1]
fmla v28.2d, v0.2d, v9.2d[1]
fmla v17.2d, v1.2d, v8.2d[0]
ld1 {v6.2d, v7.2d} , [ppA] // for next round
add ppA, ppA, #32
fmla v30.2d, v2.2d, v9.2d[1]
fmla v19.2d, v3.2d, v8.2d[0]
.endm
// Pipeline drain: consumes the final preloaded k-step (A in v4..v7,
// B in v12/v13) with no further loads or pointer updates.
.macro KERNEL8x4_E
fmla v16.2d, v4.2d, v12.2d[0]
fmla v25.2d, v5.2d, v13.2d[0]
fmla v18.2d, v6.2d, v12.2d[0]
fmla v27.2d, v7.2d, v13.2d[0]
fmla v20.2d, v4.2d, v12.2d[1]
fmla v29.2d, v5.2d, v13.2d[1]
fmla v22.2d, v6.2d, v12.2d[1]
fmla v31.2d, v7.2d, v13.2d[1]
fmla v24.2d, v4.2d, v13.2d[0]
fmla v17.2d, v5.2d, v12.2d[0]
fmla v26.2d, v6.2d, v13.2d[0]
fmla v19.2d, v7.2d, v12.2d[0]
fmla v28.2d, v4.2d, v13.2d[1]
fmla v21.2d, v5.2d, v12.2d[1]
fmla v30.2d, v6.2d, v13.2d[1]
fmla v23.2d, v7.2d, v12.2d[1]
.endm
// Standalone (non-pipelined) single-k update of the 8x4 tile:
// loads one column of B (4 doubles) and one column of A (8 doubles,
// split between pA and ppA), advances all three pointers by 32 bytes,
// and accumulates the rank-1 outer product into v16..v31.
// Used for the K%2 remainder and as the scalar fallback path.
.macro KERNEL8x4_SUB
ld1 {v8.2d, v9.2d}, [pB]
add pB, pB, #32
ld1 {v0.2d, v1.2d}, [pA]
add pA, pA, #32
fmla v16.2d, v0.2d, v8.2d[0]
fmla v29.2d, v1.2d, v9.2d[1]
fmla v20.2d, v0.2d, v8.2d[1]
fmla v25.2d, v1.2d, v9.2d[0]
ld1 {v2.2d, v3.2d}, [ppA]
add ppA, ppA, #32
fmla v24.2d, v0.2d, v9.2d[0]
fmla v21.2d, v1.2d, v8.2d[1]
fmla v28.2d, v0.2d, v9.2d[1]
fmla v17.2d, v1.2d, v8.2d[0]
fmla v18.2d, v2.2d, v8.2d[0]
fmla v31.2d, v3.2d, v9.2d[1]
fmla v22.2d, v2.2d, v8.2d[1]
fmla v27.2d, v3.2d, v9.2d[0]
fmla v26.2d, v2.2d, v9.2d[0]
fmla v23.2d, v3.2d, v8.2d[1]
fmla v30.2d, v2.2d, v9.2d[1]
fmla v19.2d, v3.2d, v8.2d[0]
.endm
// Write back the 8x4 tile: C[r][c] += alpha * acc, where alphaV0..3 all
// hold alpha (broadcast in the prologue). For each of the 4 columns,
// pCRow* addresses rows 0..3 (first 32 bytes) and ppCRow* addresses
// rows 4..7 (pCRow + 32). Columns are stepped by LDC (bytes).
// Finally pCRow0 advances by 64 bytes = 8 doubles to the next tile.
.macro SAVE8x4
add ppCRow0, pCRow0, #32
ld1 {v0.2d, v1.2d}, [pCRow0]
fmla v0.2d, v16.2d, alphaV0
fmla v1.2d, v17.2d, alphaV1
st1 {v0.2d, v1.2d}, [pCRow0]
ld1 {v2.2d, v3.2d}, [ppCRow0]
fmla v2.2d, v18.2d, alphaV2
fmla v3.2d, v19.2d, alphaV3
st1 {v2.2d, v3.2d}, [ppCRow0]
add pCRow1, pCRow0, LDC
add ppCRow1, ppCRow0, LDC
ld1 {v4.2d, v5.2d}, [pCRow1]
fmla v4.2d, v20.2d, alphaV0
fmla v5.2d, v21.2d, alphaV1
st1 {v4.2d, v5.2d}, [pCRow1]
ld1 {v6.2d, v7.2d}, [ppCRow1]
fmla v6.2d, v22.2d, alphaV2
fmla v7.2d, v23.2d, alphaV3
st1 {v6.2d, v7.2d}, [ppCRow1]
add pCRow2, pCRow1, LDC
add ppCRow2, ppCRow1, LDC
ld1 {v0.2d, v1.2d}, [pCRow2]
fmla v0.2d, v24.2d, alphaV0
fmla v1.2d, v25.2d, alphaV1
st1 {v0.2d, v1.2d}, [pCRow2]
ld1 {v2.2d, v3.2d}, [ppCRow2]
fmla v2.2d, v26.2d, alphaV2
fmla v3.2d, v27.2d, alphaV3
st1 {v2.2d, v3.2d}, [ppCRow2]
add pCRow1, pCRow2, LDC
add ppCRow1, ppCRow2, LDC
ld1 {v4.2d, v5.2d}, [pCRow1]
fmla v4.2d, v28.2d, alphaV0
fmla v5.2d, v29.2d, alphaV1
st1 {v4.2d, v5.2d}, [pCRow1]
ld1 {v6.2d, v7.2d}, [ppCRow1]
fmla v6.2d, v30.2d, alphaV2
fmla v7.2d, v31.2d, alphaV3
st1 {v6.2d, v7.2d}, [ppCRow1]
add pCRow0, pCRow0, #64
.endm
/******************************************************************************/
// Zero the eight accumulators used by the 4x4 tile
// (v16,v17 / v20,v21 / v24,v25 / v28,v29 — one pair per column).
.macro INIT4x4
movi v16.2d, #0
movi v17.2d, #0
movi v20.2d, #0
movi v21.2d, #0
movi v24.2d, #0
movi v25.2d, #0
movi v28.2d, #0
movi v29.2d, #0
.endm
// One k-step of the 4x4 tile: load 4 doubles of B (v8/v9) and 4 of A
// (v0/v1), advance both pointers by 32 bytes, accumulate the outer
// product into v16/v17, v20/v21, v24/v25, v28/v29 (one pair per column).
.macro KERNEL4x4_SUB
ld1 {v8.2d, v9.2d}, [pB]
add pB, pB, #32
ld1 {v0.2d, v1.2d}, [pA]
add pA, pA, #32
fmla v16.2d, v0.2d, v8.2d[0]
fmla v29.2d, v1.2d, v9.2d[1]
fmla v20.2d, v0.2d, v8.2d[1]
fmla v25.2d, v1.2d, v9.2d[0]
fmla v24.2d, v0.2d, v9.2d[0]
fmla v21.2d, v1.2d, v8.2d[1]
fmla v28.2d, v0.2d, v9.2d[1]
fmla v17.2d, v1.2d, v8.2d[0]
.endm
// Write back the 4x4 tile: for each of the 4 columns (stepped by LDC),
// C[0..3][c] += alpha * acc. alphaV0..3 all hold alpha. Advances
// pCRow0 by 32 bytes = 4 doubles to the next tile.
.macro SAVE4x4
ld1 {v8.2d, v9.2d}, [pCRow0]
fmla v8.2d, v16.2d, alphaV0
fmla v9.2d, v17.2d, alphaV1
st1 {v8.2d, v9.2d}, [pCRow0]
add pCRow1, pCRow0, LDC
ld1 {v12.2d, v13.2d}, [pCRow1]
fmla v12.2d, v20.2d, alphaV2
fmla v13.2d, v21.2d, alphaV3
st1 {v12.2d, v13.2d}, [pCRow1]
add pCRow2, pCRow1, LDC
ld1 {v8.2d, v9.2d}, [pCRow2]
fmla v8.2d, v24.2d, alphaV0
fmla v9.2d, v25.2d, alphaV1
st1 {v8.2d, v9.2d}, [pCRow2]
add pCRow1, pCRow2, LDC
ld1 {v12.2d, v13.2d}, [pCRow1]
fmla v12.2d, v28.2d, alphaV2
fmla v13.2d, v29.2d, alphaV3
st1 {v12.2d, v13.2d}, [pCRow1]
add pCRow0, pCRow0, #32
.endm
/******************************************************************************/
// Zero the four accumulators of the 2x4 tile (one register per column).
.macro INIT2x4
movi v16.2d, #0
movi v20.2d, #0
movi v24.2d, #0
movi v28.2d, #0
.endm
// One k-step of the 2x4 tile: 4 doubles of B (v8/v9), 2 of A (v0);
// pB advances 32 bytes, pA 16. One fmla per output column.
.macro KERNEL2x4_SUB
ld1 {v8.2d, v9.2d}, [pB]
add pB, pB, #32
ld1 {v0.2d}, [pA]
add pA, pA, #16
fmla v16.2d, v0.2d, v8.2d[0]
fmla v20.2d, v0.2d, v8.2d[1]
fmla v24.2d, v0.2d, v9.2d[0]
fmla v28.2d, v0.2d, v9.2d[1]
.endm
// Write back the 2x4 tile: C[0..1][c] += alpha * acc for each of the
// 4 columns (stepped by LDC); then advance pCRow0 by 16 bytes.
.macro SAVE2x4
ld1 {v8.2d}, [pCRow0]
fmla v8.2d, v16.2d, alphaV0
st1 {v8.2d}, [pCRow0]
add pCRow1, pCRow0, LDC
ld1 {v12.2d}, [pCRow1]
fmla v12.2d, v20.2d, alphaV1
st1 {v12.2d}, [pCRow1]
add pCRow2, pCRow1, LDC
ld1 {v8.2d}, [pCRow2]
fmla v8.2d, v24.2d, alphaV2
st1 {v8.2d}, [pCRow2]
add pCRow1, pCRow2, LDC
ld1 {v12.2d}, [pCRow1]
fmla v12.2d, v28.2d, alphaV3
st1 {v12.2d}, [pCRow1]
add pCRow0, pCRow0, #16
.endm
/******************************************************************************/
// Zero the two accumulators of the 1x4 tile (v16 = cols 0-1, v20 = cols 2-3).
.macro INIT1x4
movi v16.2d, #0
movi v20.2d, #0
.endm
// One k-step of the 1x4 tile: one A double (d0) times 4 B doubles
// (v8/v9); note here the single A value is the broadcast operand.
.macro KERNEL1x4_SUB
ldr d0, [pA]
add pA, pA, #8
ld1 {v8.2d, v9.2d}, [pB]
add pB, pB, #32
fmla v16.2d, v8.2d, v0.d[0]
fmla v20.2d, v9.2d, v0.d[0]
.endm
// Write back the 1x4 tile: one C element per column, gathered pairwise
// into vector lanes (cols 0/1 -> v8, cols 2/3 -> v12) so a single fmla
// applies alpha to two columns at once. Advances pCRow0 by 8 bytes.
.macro SAVE1x4
add pCRow1, pCRow0, LDC
ld1 {v8.d}[0], [pCRow0]
ld1 {v8.d}[1], [pCRow1]
fmla v8.2d, v16.2d, alphaV0
st1 {v8.d}[0], [pCRow0]
st1 {v8.d}[1], [pCRow1]
add pCRow2, pCRow1, LDC
add pCRow1, pCRow2, LDC
ld1 {v12.d}[0], [pCRow2]
ld1 {v12.d}[1], [pCRow1]
fmla v12.2d, v20.2d, alphaV1
st1 {v12.d}[0], [pCRow2]
st1 {v12.d}[1], [pCRow1]
add pCRow0, pCRow0, #8
.endm
/******************************************************************************/
// Zero the four accumulators of the 4x2 tile.
.macro INIT4x2
movi v16.2d, #0
movi v17.2d, #0
movi v20.2d, #0
movi v21.2d, #0
.endm
// One k-step of the 4x2 tile: 2 doubles of B (v8), 4 of A (v0/v1);
// pB advances 16 bytes, pA 32.
.macro KERNEL4x2_SUB
ld1 {v8.2d}, [pB]
add pB, pB, #16
ld1 {v0.2d, v1.2d}, [pA]
add pA, pA, #32
fmla v16.2d, v0.2d, v8.2d[0]
fmla v17.2d, v1.2d, v8.2d[0]
fmla v20.2d, v0.2d, v8.2d[1]
fmla v21.2d, v1.2d, v8.2d[1]
.endm
// Write back the 4x2 tile: two columns (stepped by LDC), 4 rows each;
// C += alpha * acc. Advances pCRow0 by 32 bytes.
.macro SAVE4x2
ld1 {v8.2d, v9.2d}, [pCRow0]
fmla v8.2d, v16.2d, alphaV0
fmla v9.2d, v17.2d, alphaV1
st1 {v8.2d, v9.2d}, [pCRow0]
add pCRow1, pCRow0, LDC
ld1 {v12.2d, v13.2d}, [pCRow1]
fmla v12.2d, v20.2d, alphaV2
fmla v13.2d, v21.2d, alphaV3
st1 {v12.2d, v13.2d}, [pCRow1]
add pCRow0, pCRow0, #32
.endm
/******************************************************************************/
// Zero the two accumulators of the 2x2 tile.
.macro INIT2x2
movi v16.2d, #0
movi v20.2d, #0
.endm
// One k-step of the 2x2 tile: 2 doubles of B (v8), 2 of A (v0);
// both pointers advance 16 bytes.
.macro KERNEL2x2_SUB
ld1 {v8.2d}, [pB]
add pB, pB, #16
ld1 {v0.2d}, [pA]
add pA, pA, #16
fmla v16.2d, v0.2d, v8.2d[0]
fmla v20.2d, v0.2d, v8.2d[1]
.endm
// Write back the 2x2 tile: two columns (stepped by LDC), 2 rows each;
// C += alpha * acc. Advances pCRow0 by 16 bytes.
.macro SAVE2x2
ld1 {v8.2d}, [pCRow0]
fmla v8.2d, v16.2d, alphaV0
st1 {v8.2d}, [pCRow0]
add pCRow1 , pCRow0, LDC
ld1 {v12.2d}, [pCRow1]
fmla v12.2d, v20.2d, alphaV1
st1 {v12.2d}, [pCRow1]
add pCRow0, pCRow0, #16
.endm
/******************************************************************************/
// Zero the single accumulator of the 1x2 tile (one lane per column).
.macro INIT1x2
movi v16.2d, #0
.endm
// One k-step of the 1x2 tile: 2 doubles of B (v8) scaled by one A
// double (d0); both output columns live in the two lanes of v16.
.macro KERNEL1x2_SUB
ld1 {v8.2d} , [pB]
add pB , pB, #16
ldr d0 , [pA]
add pA, pA, #8
fmla v16.2d, v8.2d, v0.2d[0]
.endm
// Write back the 1x2 tile: one C element per column, gathered into the
// two lanes of v8 so a single fmla applies alpha to both columns.
// Advances pCRow0 by 8 bytes.
.macro SAVE1x2
add pCRow1 , pCRow0, LDC
ld1 {v8.d}[0], [pCRow0]
ld1 {v8.d}[1], [pCRow1]
fmla v8.2d, v16.2d, alphaV0
st1 {v8.d}[0], [pCRow0]
st1 {v8.d}[1], [pCRow1]
add pCRow0, pCRow0, #8
.endm
/******************************************************************************/
// Zero the two accumulators of the 4x1 tile (rows 0-1 and 2-3).
.macro INIT4x1
movi v16.2d, #0
movi v17.2d, #0
.endm
// One k-step of the 4x1 tile: one B double (d8) times 4 A doubles (v0/v1).
.macro KERNEL4x1_SUB
ldr d8, [pB]
add pB , pB, #8
ld1 {v0.2d, v1.2d}, [pA]
add pA , pA, #32
fmla v16.2d, v0.2d, v8.2d[0]
fmla v17.2d, v1.2d, v8.2d[0]
.endm
// Write back the 4x1 tile: one column, 4 rows; C += alpha * acc.
// Advances pCRow0 by 32 bytes.
.macro SAVE4x1
ld1 {v8.2d, v9.2d}, [pCRow0]
fmla v8.2d, v16.2d, alphaV0
fmla v9.2d, v17.2d, alphaV1
st1 {v8.2d, v9.2d}, [pCRow0]
add pCRow0, pCRow0, #32
.endm
/******************************************************************************/
// Zero the single accumulator of the 2x1 tile.
.macro INIT2x1
movi v16.2d, #0
.endm
// One k-step of the 2x1 tile: one B double (d8) times 2 A doubles (v0).
.macro KERNEL2x1_SUB
ldr d8, [pB]
add pB , pB, #8
ld1 {v0.2d}, [pA]
add pA , pA, #16
fmla v16.2d, v0.2d, v8.2d[0]
.endm
// Write back the 2x1 tile: one column, 2 rows; C += alpha * acc.
// Advances pCRow0 by 16 bytes.
.macro SAVE2x1
ld1 {v8.2d}, [pCRow0]
fmla v8.2d, v16.2d, alphaV0
st1 {v8.2d}, [pCRow0]
add pCRow0, pCRow0, #16
.endm
/******************************************************************************/
// Zero the scalar accumulator of the 1x1 tile.
.macro INIT1x1
movi v16.2d, #0
.endm
// One k-step of the 1x1 tile: scalar fused multiply-add d16 += d0 * d8.
.macro KERNEL1x1_SUB
ldr d8, [pB]
add pB , pB, #8
ldr d0, [pA]
add pA , pA, #8
fmadd d16, d0, d8, d16
.endm
// Write back the 1x1 tile: C[0][0] += alpha * acc (scalar fmadd).
// Advances pCRow0 by 8 bytes.
.macro SAVE1x1
ldr d8, [pCRow0]
fmadd d8, d16, alpha0, d8
str d8, [pCRow0]
add pCRow0, pCRow0, #8
.endm
/*******************************************************************************
* End of macro definitions
*******************************************************************************/
// Function entry (AAPCS64): bm=x0, bn=x1, bk=x2, alpha=d0, ba=x3, bb=x4,
// C=x5, ldc=x6. Saves callee-saved d8-d15 and x19-x28 (d16/d17 and x18
// are saved as well, beyond what the ABI requires), broadcasts alpha
// into the four alpha registers, scales LDC to bytes (*8 for doubles),
// and computes counterJ = N/4 for the main 4-column loop.
PROLOGUE
.align 5
add sp, sp, #-(11 * 16)
stp d8, d9, [sp, #(0 * 16)]
stp d10, d11, [sp, #(1 * 16)]
stp d12, d13, [sp, #(2 * 16)]
stp d14, d15, [sp, #(3 * 16)]
stp d16, d17, [sp, #(4 * 16)]
stp x18, x19, [sp, #(5 * 16)]
stp x20, x21, [sp, #(6 * 16)]
stp x22, x23, [sp, #(7 * 16)]
stp x24, x25, [sp, #(8 * 16)]
stp x26, x27, [sp, #(9 * 16)]
str x28, [sp, #(10 * 16)]
// Broadcast alpha (d0) into alpha0..alpha3 so the SAVE macros can use
// four independent registers and avoid a shared dependency.
fmov alpha0, d0
fmov alpha1, d0
fmov alpha2, d0
fmov alpha3, d0
lsl LDC, LDC, #3 // ldc = ldc * 8
mov pB, origPB
mov counterJ, origN
asr counterJ, counterJ, #2 // J = J / 4
cmp counterJ, #0
ble dgemm_kernel_L2_BEGIN
// ---- N-loop body for blocks of 4 columns -------------------------------
// Per 4-column block: M is walked in tiles of 8 (software-pipelined
// 8x4 kernel), then remainders of 4, 2, 1 rows. pA covers rows 0..3 of
// the packed A panel and ppA = pA + K*4*8 covers rows 4..7.
dgemm_kernel_L4_BEGIN:
mov pCRow0, pC // pCRow0 = C
add pC, pC, LDC, lsl #2
lsl temp, origK, #5 // k * 4 * 8
mov pA, origPA // pA = start of A array
add ppA, temp, pA
//------------------------------------------------------------------------------
dgemm_kernel_L4_M8_BEGIN:
mov counterI, origM
asr counterI, counterI, #3 // counterI = counterI / 8
cmp counterI, #0
ble dgemm_kernel_L4_M4_BEGIN
// 8x4 tile: K is processed two steps at a time through the pipelined
// kernel (I -> [M1 M2]* -> M1 E), with INIT/SUB fallbacks for short K.
dgemm_kernel_L4_M8_20:
mov pB, origPB
asr counterL , origK, #1 // L = K / 2
cmp counterL , #2 // is there at least 4 to do?
blt dgemm_kernel_L4_M8_32
KERNEL8x4_I // do one in the K
KERNEL8x4_M2 // do another in the K
subs counterL, counterL, #2 // subtract 2
ble dgemm_kernel_L4_M8_22a
.align 5
dgemm_kernel_L4_M8_22:
KERNEL8x4_M1
KERNEL8x4_M2
subs counterL, counterL, #1
bgt dgemm_kernel_L4_M8_22
dgemm_kernel_L4_M8_22a:
KERNEL8x4_M1
KERNEL8x4_E
b dgemm_kernel_L4_M8_44
dgemm_kernel_L4_M8_32:
// K/2 == 1: a single I/E pair; K/2 == 0: just zero the accumulators.
tst counterL, #1
ble dgemm_kernel_L4_M8_40
KERNEL8x4_I
KERNEL8x4_E
b dgemm_kernel_L4_M8_44
dgemm_kernel_L4_M8_40:
INIT8x4
dgemm_kernel_L4_M8_44:
// Odd-K remainder (at most one step).
ands counterL , origK, #1
ble dgemm_kernel_L4_M8_100
dgemm_kernel_L4_M8_46:
KERNEL8x4_SUB
dgemm_kernel_L4_M8_100:
SAVE8x4
dgemm_kernel_L4_M8_END:
// Advance both A-panel pointers past the 4 rows each just consumed.
lsl temp, origK, #5 // k * 4 * 8
add pA, pA, temp
add ppA, ppA, temp
subs counterI, counterI, #1
bne dgemm_kernel_L4_M8_20
// 4-row remainder (M % 8 >= 4): plain 4x4 kernel, K unrolled by 8.
dgemm_kernel_L4_M4_BEGIN:
mov counterI, origM
tst counterI , #7
ble dgemm_kernel_L4_END
tst counterI, #4
ble dgemm_kernel_L4_M2_BEGIN
dgemm_kernel_L4_M4_20:
INIT4x4
mov pB, origPB
asr counterL, origK, #3 // counterL = counterL / 8
cmp counterL, #0
ble dgemm_kernel_L4_M4_40
dgemm_kernel_L4_M4_22:
KERNEL4x4_SUB
KERNEL4x4_SUB
KERNEL4x4_SUB
KERNEL4x4_SUB
KERNEL4x4_SUB
KERNEL4x4_SUB
KERNEL4x4_SUB
KERNEL4x4_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L4_M4_22
dgemm_kernel_L4_M4_40:
ands counterL , origK, #7 // counterL = counterL % 8
ble dgemm_kernel_L4_M4_100
dgemm_kernel_L4_M4_42:
KERNEL4x4_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L4_M4_42
dgemm_kernel_L4_M4_100:
SAVE4x4
dgemm_kernel_L4_M4_END:
// 2-row remainder (M % 4 >= 2).
dgemm_kernel_L4_M2_BEGIN:
mov counterI, origM
tst counterI , #3
ble dgemm_kernel_L4_END
tst counterI, #2 // counterI = counterI / 2
ble dgemm_kernel_L4_M1_BEGIN
dgemm_kernel_L4_M2_20:
INIT2x4
mov pB, origPB
asr counterL , origK, #3 // counterL = counterL / 8
cmp counterL , #0
ble dgemm_kernel_L4_M2_40
dgemm_kernel_L4_M2_22:
KERNEL2x4_SUB
KERNEL2x4_SUB
KERNEL2x4_SUB
KERNEL2x4_SUB
KERNEL2x4_SUB
KERNEL2x4_SUB
KERNEL2x4_SUB
KERNEL2x4_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L4_M2_22
dgemm_kernel_L4_M2_40:
ands counterL , origK, #7 // counterL = counterL % 8
ble dgemm_kernel_L4_M2_100
dgemm_kernel_L4_M2_42:
KERNEL2x4_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L4_M2_42
dgemm_kernel_L4_M2_100:
SAVE2x4
dgemm_kernel_L4_M2_END:
// 1-row remainder (M odd).
dgemm_kernel_L4_M1_BEGIN:
tst counterI, #1 // counterI = counterI % 2
ble dgemm_kernel_L4_END
dgemm_kernel_L4_M1_20:
INIT1x4
mov pB, origPB
asr counterL , origK, #3 // counterL = counterL / 8
cmp counterL , #0
ble dgemm_kernel_L4_M1_40
dgemm_kernel_L4_M1_22:
KERNEL1x4_SUB
KERNEL1x4_SUB
KERNEL1x4_SUB
KERNEL1x4_SUB
KERNEL1x4_SUB
KERNEL1x4_SUB
KERNEL1x4_SUB
KERNEL1x4_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L4_M1_22
dgemm_kernel_L4_M1_40:
ands counterL , origK, #7 // counterL = counterL % 8
ble dgemm_kernel_L4_M1_100
dgemm_kernel_L4_M1_42:
KERNEL1x4_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L4_M1_42
dgemm_kernel_L4_M1_100:
SAVE1x4
dgemm_kernel_L4_END:
// Advance B past the 4 columns consumed; next 4-column block.
lsl temp, origK, #5
add origPB, origPB, temp // B = B + K * 4 * 8
subs counterJ, counterJ , #1 // j--
bgt dgemm_kernel_L4_BEGIN
/******************************************************************************/
// ---- N-remainder: 2 columns (N % 4 >= 2) -------------------------------
// Same M tiling (4 / 2 / 1 rows), each with K unrolled by 8.
dgemm_kernel_L2_BEGIN: // less than 2 left in N direction
mov counterJ , origN
tst counterJ , #3
ble dgemm_kernel_L999 // error, N was less than 4?
tst counterJ , #2
ble dgemm_kernel_L1_BEGIN
mov pCRow0, pC // pCRow0 = pC
add pC,pC,LDC, lsl #1
mov pA, origPA // pA = A
dgemm_kernel_L2_M4_BEGIN:
mov counterI, origM
asr counterI, counterI, #2 // counterI = counterI / 4
cmp counterI,#0
ble dgemm_kernel_L2_M2_BEGIN
dgemm_kernel_L2_M4_20:
INIT4x2
mov pB, origPB
asr counterL , origK, #3 // counterL = counterL / 8
cmp counterL,#0
ble dgemm_kernel_L2_M4_40
.align 5
dgemm_kernel_L2_M4_22:
KERNEL4x2_SUB
KERNEL4x2_SUB
KERNEL4x2_SUB
KERNEL4x2_SUB
KERNEL4x2_SUB
KERNEL4x2_SUB
KERNEL4x2_SUB
KERNEL4x2_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L2_M4_22
dgemm_kernel_L2_M4_40:
ands counterL , origK, #7 // counterL = counterL % 8
ble dgemm_kernel_L2_M4_100
dgemm_kernel_L2_M4_42:
KERNEL4x2_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L2_M4_42
dgemm_kernel_L2_M4_100:
SAVE4x2
dgemm_kernel_L2_M4_END:
subs counterI, counterI, #1
bgt dgemm_kernel_L2_M4_20
// 2-row remainder.
dgemm_kernel_L2_M2_BEGIN:
mov counterI, origM
tst counterI , #3
ble dgemm_kernel_L2_END
tst counterI, #2 // counterI = counterI / 2
ble dgemm_kernel_L2_M1_BEGIN
dgemm_kernel_L2_M2_20:
INIT2x2
mov pB, origPB
asr counterL , origK, #3 // counterL = counterL / 8
cmp counterL,#0
ble dgemm_kernel_L2_M2_40
dgemm_kernel_L2_M2_22:
KERNEL2x2_SUB
KERNEL2x2_SUB
KERNEL2x2_SUB
KERNEL2x2_SUB
KERNEL2x2_SUB
KERNEL2x2_SUB
KERNEL2x2_SUB
KERNEL2x2_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L2_M2_22
dgemm_kernel_L2_M2_40:
ands counterL , origK, #7 // counterL = counterL % 8
ble dgemm_kernel_L2_M2_100
dgemm_kernel_L2_M2_42:
KERNEL2x2_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L2_M2_42
dgemm_kernel_L2_M2_100:
SAVE2x2
dgemm_kernel_L2_M2_END:
// 1-row remainder.
dgemm_kernel_L2_M1_BEGIN:
tst counterI, #1 // counterI = counterI % 2
ble dgemm_kernel_L2_END
dgemm_kernel_L2_M1_20:
INIT1x2
mov pB, origPB
asr counterL , origK, #3 // counterL = counterL / 8
cmp counterL, #0
ble dgemm_kernel_L2_M1_40
dgemm_kernel_L2_M1_22:
KERNEL1x2_SUB
KERNEL1x2_SUB
KERNEL1x2_SUB
KERNEL1x2_SUB
KERNEL1x2_SUB
KERNEL1x2_SUB
KERNEL1x2_SUB
KERNEL1x2_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L2_M1_22
dgemm_kernel_L2_M1_40:
ands counterL , origK, #7 // counterL = counterL % 8
ble dgemm_kernel_L2_M1_100
dgemm_kernel_L2_M1_42:
KERNEL1x2_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L2_M1_42
dgemm_kernel_L2_M1_100:
SAVE1x2
dgemm_kernel_L2_END:
// Advance B past the 2 columns consumed.
add origPB, origPB, origK, lsl #4 // B = B + K * 2 * 8
/******************************************************************************/
// ---- N-remainder: final single column (N odd) ---------------------------
// Same M tiling (4 / 2 / 1 rows), each with K unrolled by 8.
dgemm_kernel_L1_BEGIN:
mov counterJ , origN
tst counterJ , #1
ble dgemm_kernel_L999 // done
mov pCRow0, pC // pCRow0 = C
add pC , pC , LDC // update pC to point to next
mov pA, origPA // pA = A
dgemm_kernel_L1_M4_BEGIN:
mov counterI, origM
asr counterI, counterI, #2 // counterI = counterI / 4
cmp counterI, #0
ble dgemm_kernel_L1_M2_BEGIN
dgemm_kernel_L1_M4_20:
INIT4x1
mov pB, origPB
asr counterL , origK, #3 // counterL = counterL / 8
cmp counterL , #0
ble dgemm_kernel_L1_M4_40
.align 5
dgemm_kernel_L1_M4_22:
KERNEL4x1_SUB
KERNEL4x1_SUB
KERNEL4x1_SUB
KERNEL4x1_SUB
KERNEL4x1_SUB
KERNEL4x1_SUB
KERNEL4x1_SUB
KERNEL4x1_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L1_M4_22
dgemm_kernel_L1_M4_40:
ands counterL , origK, #7 // counterL = counterL % 8
ble dgemm_kernel_L1_M4_100
dgemm_kernel_L1_M4_42:
KERNEL4x1_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L1_M4_42
dgemm_kernel_L1_M4_100:
SAVE4x1
dgemm_kernel_L1_M4_END:
subs counterI, counterI, #1
bgt dgemm_kernel_L1_M4_20
// 2-row remainder.
dgemm_kernel_L1_M2_BEGIN:
mov counterI, origM
tst counterI , #3
ble dgemm_kernel_L1_END
tst counterI, #2 // counterI = counterI / 2
ble dgemm_kernel_L1_M1_BEGIN
dgemm_kernel_L1_M2_20:
INIT2x1
mov pB, origPB
asr counterL , origK, #3 // counterL = counterL / 8
cmp counterL , #0
ble dgemm_kernel_L1_M2_40
dgemm_kernel_L1_M2_22:
KERNEL2x1_SUB
KERNEL2x1_SUB
KERNEL2x1_SUB
KERNEL2x1_SUB
KERNEL2x1_SUB
KERNEL2x1_SUB
KERNEL2x1_SUB
KERNEL2x1_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L1_M2_22
dgemm_kernel_L1_M2_40:
ands counterL , origK, #7 // counterL = counterL % 8
ble dgemm_kernel_L1_M2_100
dgemm_kernel_L1_M2_42:
KERNEL2x1_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L1_M2_42
dgemm_kernel_L1_M2_100:
SAVE2x1
dgemm_kernel_L1_M2_END:
// 1-row remainder (scalar).
dgemm_kernel_L1_M1_BEGIN:
tst counterI, #1 // counterI = counterI % 2
ble dgemm_kernel_L1_END
dgemm_kernel_L1_M1_20:
INIT1x1
mov pB, origPB
asr counterL , origK, #3 // counterL = counterL / 8
cmp counterL , #0
ble dgemm_kernel_L1_M1_40
dgemm_kernel_L1_M1_22:
KERNEL1x1_SUB
KERNEL1x1_SUB
KERNEL1x1_SUB
KERNEL1x1_SUB
KERNEL1x1_SUB
KERNEL1x1_SUB
KERNEL1x1_SUB
KERNEL1x1_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L1_M1_22
dgemm_kernel_L1_M1_40:
ands counterL , origK, #7 // counterL = counterL % 8
ble dgemm_kernel_L1_M1_100
dgemm_kernel_L1_M1_42:
KERNEL1x1_SUB
subs counterL, counterL, #1
bgt dgemm_kernel_L1_M1_42
dgemm_kernel_L1_M1_100:
SAVE1x1
dgemm_kernel_L1_END:
// Exit path: set return value 0, restore all saved registers in the
// same frame layout as the prologue, release the stack frame, return.
dgemm_kernel_L999:
mov x0, #0 // set return value
ldp d8, d9, [sp, #(0 * 16)]
ldp d10, d11, [sp, #(1 * 16)]
ldp d12, d13, [sp, #(2 * 16)]
ldp d14, d15, [sp, #(3 * 16)]
ldp d16, d17, [sp, #(4 * 16)]
ldp x18, x19, [sp, #(5 * 16)]
ldp x20, x21, [sp, #(6 * 16)]
ldp x22, x23, [sp, #(7 * 16)]
ldp x24, x25, [sp, #(8 * 16)]
ldp x26, x27, [sp, #(9 * 16)]
ldr x28, [sp, #(10 * 16)]
add sp, sp, #(11*16)
ret
EPILOGUE