Merge pull request #3793 from imzhuhl/new_sbgemm

New sbgemm implementation for Neoverse N2

Commit: 03bd1157d8
@@ -50,6 +50,7 @@ typedef struct {
 #ifdef BUILD_BFLOAT16
 int sbgemm_p, sbgemm_q, sbgemm_r;
 int sbgemm_unroll_m, sbgemm_unroll_n, sbgemm_unroll_mn;
+int sbgemm_align_k;

 void (*sbstobf16_k) (BLASLONG, float *, BLASLONG, bfloat16 *, BLASLONG);
 void (*sbdtobf16_k) (BLASLONG, double *, BLASLONG, bfloat16 *, BLASLONG);
@@ -304,6 +304,15 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,
 while (gemm_p * min_l > l2size) gemm_p -= GEMM_UNROLL_M;
 }

+BLASLONG pad_min_l = min_l;
+#if defined(HALF)
+#if defined(DYNAMIC_ARCH)
+pad_min_l = (min_l + gotoblas->sbgemm_align_k - 1) & ~(gotoblas->sbgemm_align_k-1);
+#else
+pad_min_l = (min_l + SBGEMM_ALIGN_K - 1) & ~(SBGEMM_ALIGN_K - 1);;
+#endif
+#endif
+
 /* First, we have to move data A to L2 cache */
 min_i = m_to - m_from;
 l1stride = 1;
@@ -350,7 +359,7 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,
 START_RPCC();

 OCOPY_OPERATION(min_l, min_jj, b, ldb, ls, jjs,
-                sb + min_l * (jjs - js) * COMPSIZE * l1stride);
+                sb + pad_min_l * (jjs - js) * COMPSIZE * l1stride);

 STOP_RPCC(outercost);

@@ -358,10 +367,10 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,

 #if !defined(XDOUBLE) || !defined(QUAD_PRECISION)
 KERNEL_OPERATION(min_i, min_jj, min_l, alpha,
-                 sa, sb + min_l * (jjs - js) * COMPSIZE * l1stride, c, ldc, m_from, jjs);
+                 sa, sb + pad_min_l * (jjs - js) * COMPSIZE * l1stride, c, ldc, m_from, jjs);
 #else
 KERNEL_OPERATION(min_i, min_jj, min_l, (void *)&xalpha,
-                 sa, sb + min_l * (jjs - js) * COMPSIZE * l1stride, c, ldc, m_from, jjs);
+                 sa, sb + pad_min_l * (jjs - js) * COMPSIZE * l1stride, c, ldc, m_from, jjs);
 #endif

 STOP_RPCC(kernelcost);
@@ -325,6 +325,16 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,
 if (min_l > GEMM_Q) min_l = (min_l + 1) / 2;
 }

+BLASLONG pad_min_l = min_l;
+
+#if defined(HALF)
+#if defined(DYNAMIC_ARCH)
+pad_min_l = (min_l + gotoblas->sbgemm_align_k - 1) & ~(gotoblas->sbgemm_align_k-1);
+#else
+pad_min_l = (min_l + SBGEMM_ALIGN_K - 1) & ~(SBGEMM_ALIGN_K - 1);;
+#endif
+#endif
+
 /* Determine step size in m
  * Note: We are currently on the first step in m
  */
@@ -382,13 +392,13 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,
 /* Copy part of local region of B into workspace */
 START_RPCC();
 OCOPY_OPERATION(min_l, min_jj, b, ldb, ls, jjs,
-                buffer[bufferside] + min_l * (jjs - js) * COMPSIZE * l1stride);
+                buffer[bufferside] + pad_min_l * (jjs - js) * COMPSIZE * l1stride);
 STOP_RPCC(copy_B);

 /* Apply kernel with local region of A and part of local region of B */
 START_RPCC();
 KERNEL_OPERATION(min_i, min_jj, min_l, alpha,
-                 sa, buffer[bufferside] + min_l * (jjs - js) * COMPSIZE * l1stride,
+                 sa, buffer[bufferside] + pad_min_l * (jjs - js) * COMPSIZE * l1stride,
 c, ldc, m_from, jjs);
 STOP_RPCC(kernel);

@@ -190,10 +190,10 @@ ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX)

 SBGEMM_BETA = sbgemm_beta_neoversen2.c
 SBGEMMKERNEL = sbgemm_kernel_$(SBGEMM_UNROLL_M)x$(SBGEMM_UNROLL_N)_neoversen2.c
-SBGEMMINCOPY = sbgemm_ncopy_neoversen2.c
-SBGEMMITCOPY = sbgemm_tcopy_neoversen2.c
-SBGEMMONCOPY = sbgemm_ncopy_neoversen2.c
-SBGEMMOTCOPY = sbgemm_tcopy_neoversen2.c
+SBGEMMINCOPY = sbgemm_ncopy_$(SBGEMM_UNROLL_N)_neoversen2.c
+SBGEMMITCOPY = sbgemm_tcopy_$(SBGEMM_UNROLL_M)_neoversen2.c
+SBGEMMONCOPY = sbgemm_ncopy_$(SBGEMM_UNROLL_N)_neoversen2.c
+SBGEMMOTCOPY = sbgemm_tcopy_$(SBGEMM_UNROLL_M)_neoversen2.c
 SBGEMMINCOPYOBJ = sbgemm_incopy$(TSUFFIX).$(SUFFIX)
 SBGEMMITCOPYOBJ = sbgemm_itcopy$(TSUFFIX).$(SUFFIX)
 SBGEMMONCOPYOBJ = sbgemm_oncopy$(TSUFFIX).$(SUFFIX)
@@ -30,100 +30,37 @@

 #include "common.h"

-#ifdef ALPHA_ONE
-#define LOAD_C(M, N) \
-  mc##M##N = svld1_gather_index(pg32, ptr_c0##N + 2 * M , off_vc);
-
-#define LOAD_C_LOW(M, N) \
-  mc##M##N = svld1_gather_index(pg32_low, ptr_c0##N + 2 * M, off_vc);
-
-#define LOAD_C_EVEN(M, N) \
-  mc##M##N = svld1_gather_index(pg32_even, ptr_c0##N + 2 * M, off_vc);
-
-#define LOAD_C_FIRST(M, N) \
-  mc##M##N = svld1_gather_index(pg32_first, ptr_c0##N + 2 * M, off_vc);
-
-#define STORE_C(M, N) \
-  svst1_scatter_index(pg32, ptr_c0##N + 2 * M, off_vc, mc##M##N);
-
-#define STORE_C_LOW(M, N) \
-  svst1_scatter_index(pg32_low, ptr_c0##N + 2 * M, off_vc, mc##M##N);
-
-#define STORE_C_EVEN(M, N) \
-  svst1_scatter_index(pg32_even, ptr_c0##N + 2 * M, off_vc, mc##M##N);
-
-#define STORE_C_FIRST(M, N) \
-  svst1_scatter_index(pg32_first, ptr_c0##N + 2 * M, off_vc, mc##M##N);
-
-#else
-#define LOAD_C(M, N) \
-  mc##M##N = svdup_f32(0); \
-  oc##M##N = svld1_gather_index(pg32, ptr_c0##N + 2 * M , off_vc);
-
-#define LOAD_C_LOW(M, N) \
-  mc##M##N = svdup_f32(0); \
-  oc##M##N = svld1_gather_index(pg32_low, ptr_c0##N + 2 * M , off_vc);
-
-#define LOAD_C_EVEN(M, N) \
-  mc##M##N = svdup_f32(0); \
-  oc##M##N = svld1_gather_index(pg32_even, ptr_c0##N + 2 * M , off_vc);
-
-#define LOAD_C_FIRST(M, N) \
-  mc##M##N = svdup_f32(0); \
-  oc##M##N = svld1_gather_index(pg32_first, ptr_c0##N + 2 * M , off_vc);
-
-#define STORE_C(M, N) \
-  mc##M##N = svmad_z(pg32, svalpha, mc##M##N, oc##M##N); \
-  svst1_scatter_index(pg32, ptr_c0##N + 2 * M, off_vc, mc##M##N);
-
-#define STORE_C_LOW(M, N) \
-  mc##M##N = svmad_z(pg32_low, svalpha, mc##M##N, oc##M##N); \
-  svst1_scatter_index(pg32_low, ptr_c0##N + 2 * M, off_vc, mc##M##N);
-
-#define STORE_C_EVEN(M, N) \
-  mc##M##N = svmad_z(pg32_even, svalpha, mc##M##N, oc##M##N); \
-  svst1_scatter_index(pg32_even, ptr_c0##N + 2 * M, off_vc, mc##M##N);
-
-#define STORE_C_FIRST(M, N) \
-  mc##M##N = svmad_z(pg32_first, svalpha, mc##M##N, oc##M##N); \
-  svst1_scatter_index(pg32_first, ptr_c0##N + 2 * M, off_vc, mc##M##N);
-
-#endif
-
-#define LOAD_A(M) ma##M = svld1_bf16(pg16, ptr_a##M);
-
-#define LOAD_B(N) mb##N = svld1_bf16(pg16, ptr_b##N);
+#define INIT_C(M, N) mc##M##N = svdup_f32(0);

 #define MATMUL(M, N) mc##M##N = svbfmmla(mc##M##N, ma##M, mb##N);

-#define LOAD_KREST_1(NAME, M) \
-  m##NAME##M = svdupq_bf16(*(ptr_##NAME##M), zero, zero, zero, \
-                           *(ptr_##NAME##M + 1), zero, zero, zero);
-
-#define LOAD_KREST_1_LOW(NAME, M) \
-  m##NAME##M = svdupq_bf16(*(ptr_##NAME##M), zero, zero, zero, zero, zero, \
-                           zero, zero);
-
-#define LOAD_KREST_2(NAME, M) \
-  m##NAME##M = \
-      svdupq_bf16(*(ptr_##NAME##M), *(ptr_##NAME##M + 1), zero, zero, \
-                  *(ptr_##NAME##M + 2), *(ptr_##NAME##M + 3), zero, zero);
-
-#define LOAD_KREST_2_LOW(NAME, M) \
-  m##NAME##M = svdupq_bf16(*(ptr_##NAME##M), *(ptr_##NAME##M + 1), zero, \
-                           zero, zero, zero, zero, zero);
-
-#define LOAD_KREST_3(NAME, M) \
-  m##NAME##M = \
-      svdupq_bf16(*(ptr_##NAME##M), *(ptr_##NAME##M + 1), \
-                  *(ptr_##NAME##M + 2), zero, *(ptr_##NAME##M + 3), \
-                  *(ptr_##NAME##M + 4), *(ptr_##NAME##M + 5), zero);
-
-#define LOAD_KREST_3_LOW(NAME, M) \
-  m##NAME##M = \
-      svdupq_bf16(*(ptr_##NAME##M), *(ptr_##NAME##M + 1), \
-                  *(ptr_##NAME##M + 2), zero, zero, zero, zero, zero);
+#define INIT_C_8x4 \
+  do { \
+    INIT_C(0, 0); \
+    INIT_C(0, 1); \
+    INIT_C(1, 0); \
+    INIT_C(1, 1); \
+    INIT_C(2, 0); \
+    INIT_C(2, 1); \
+    INIT_C(3, 0); \
+    INIT_C(3, 1); \
+  } while (0);
+
+#ifdef ALPHA_ONE
+#define UPDATE_C(PG, PTR, DST, SRC) \
+  do { \
+    DST = svld1_f32((PG), (PTR)); \
+    DST = svadd_z((PG), SRC, DST); \
+    svst1_f32((PG), (PTR), DST); \
+  } while (0);
+#else
+#define UPDATE_C(PG, PTR, DST, SRC) \
+  do { \
+    DST = svld1_f32((PG), (PTR)); \
+    DST = svmad_z((PG), svalpha, SRC, DST); \
+    svst1_f32((PG), (PTR), DST); \
+  } while (0);
+#endif

 #ifdef ALPHA_ONE
 int sbgemm_kernel_neoversen2_alpha_one(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT * B, FLOAT * C, BLASLONG ldc)
@@ -131,396 +68,329 @@ int sbgemm_kernel_neoversen2_alpha_one(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT
int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT * B, FLOAT * C, BLASLONG ldc)
|
int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT * B, FLOAT * C, BLASLONG ldc)
|
||||||
#endif
|
#endif
|
||||||
{
|
{
|
||||||
|
BLASLONG pad_k = (k + 3) & ~3;
|
||||||
|
|
||||||
|
svbfloat16_t ma0, ma1, ma2, ma3, mb0, mb1;
|
||||||
|
svfloat32_t mc00, mc01, mc10, mc11, mc20, mc21, mc30, mc31,
|
||||||
|
vc0, vc1, vc2, vc3, vc4, vc5, vc6, vc7,
|
||||||
|
oc0, oc1, oc2, oc3, oc4, oc5, oc6, oc7;
|
||||||
|
svfloat32_t svalpha = svdup_f32(alpha);
|
||||||
|
|
||||||
|
svbool_t pg16 = svptrue_b16();
|
||||||
|
svbool_t pg16_low = svdupq_b16(1, 1, 1, 1, 0, 0, 0, 0);
|
||||||
|
svbool_t pg32 = svptrue_b32();
|
||||||
|
svbool_t pg32_low = svdupq_b32(1, 1, 0, 0);
|
||||||
|
svbool_t pg32_first = svdupq_b32(1, 0, 0, 0);
|
||||||
|
|
||||||
bfloat16_t *ptr_a = (bfloat16_t *)A;
|
bfloat16_t *ptr_a = (bfloat16_t *)A;
|
||||||
bfloat16_t *ptr_b = (bfloat16_t *)B;
|
bfloat16_t *ptr_b = (bfloat16_t *)B;
|
||||||
FLOAT *ptr_c = C;
|
FLOAT *ptr_c = C;
|
||||||
|
|
||||||
bfloat16_t *ptr_a0, *ptr_a1, *ptr_a2, *ptr_a3;
|
bfloat16_t *ptr_a0, *ptr_a1, *ptr_a2, *ptr_a3;
|
||||||
bfloat16_t *ptr_b0, *ptr_b1;
|
bfloat16_t *ptr_b0, *ptr_b1;
|
||||||
FLOAT *ptr_c00, *ptr_c01;
|
FLOAT *ptr_c0, *ptr_c1, *ptr_c2, *ptr_c3;
|
||||||
|
|
||||||
svbfloat16_t ma0, ma1, ma2, ma3, mb0, mb1;
|
|
||||||
svfloat32_t mc00, mc01, mc10, mc11, mc20, mc21, mc30, mc31;
|
|
||||||
#ifndef ALPHA_ONE
|
|
||||||
svfloat32_t oc00, oc01, oc10, oc11, oc20, oc21, oc30, oc31;
|
|
||||||
#endif
|
|
||||||
svbool_t pg16 = svptrue_b16();
|
|
||||||
svbool_t pg16_low = svdupq_b16(1, 1, 1, 1, 0, 0, 0, 0);
|
|
||||||
svbool_t pg32 = svptrue_b32();
|
|
||||||
svbool_t pg32_low = svdupq_b32(1, 1, 0, 0);
|
|
||||||
svbool_t pg32_even = svdupq_b32(1, 0, 1, 0);
|
|
||||||
svbool_t pg32_first = svdupq_b32(1, 0, 0, 0);
|
|
||||||
svfloat32_t svalpha = svdup_f32(alpha);
|
|
||||||
bfloat16 tmp = 0;
|
|
||||||
bfloat16_t zero = *((bfloat16_t *)&tmp);
|
|
||||||
BLASLONG krest = k & 3;
|
|
||||||
|
|
||||||
// 00 01 10 11
|
|
||||||
svuint32_t off_vc = svdupq_u32(0, (uint32_t)ldc, 1, (uint32_t)ldc + 1);
|
|
||||||
|
|
||||||
for (BLASLONG j = 0; j < n / 4; j++) {
|
for (BLASLONG j = 0; j < n / 4; j++) {
|
||||||
ptr_c00 = ptr_c;
|
ptr_c0 = ptr_c;
|
||||||
ptr_c01 = ptr_c + 2 * ldc;
|
ptr_c1 = ptr_c0 + ldc;
|
||||||
|
ptr_c2 = ptr_c1 + ldc;
|
||||||
|
ptr_c3 = ptr_c2 + ldc;
|
||||||
ptr_c += 4 * ldc;
|
ptr_c += 4 * ldc;
|
||||||
|
|
||||||
ptr_a = (bfloat16_t *)A;
|
ptr_a = (bfloat16_t *)A;
|
||||||
|
|
||||||
for (BLASLONG i = 0; i < m / 8; i++) {
|
for (BLASLONG i = 0; i < m / 8; i++) {
|
||||||
ptr_a0 = ptr_a;
|
ptr_a0 = ptr_a;
|
||||||
ptr_a1 = ptr_a0 + 2 * k;
|
ptr_a += 8 * pad_k;
|
||||||
ptr_a2 = ptr_a1 + 2 * k;
|
|
||||||
ptr_a3 = ptr_a2 + 2 * k;
|
|
||||||
ptr_a += 8 * k;
|
|
||||||
|
|
||||||
ptr_b0 = ptr_b;
|
ptr_b0 = ptr_b;
|
||||||
ptr_b1 = ptr_b0 + 2 * k;
|
|
||||||
|
|
||||||
LOAD_C(0, 0); LOAD_C(0, 1);
|
INIT_C_8x4;
|
||||||
LOAD_C(1, 0); LOAD_C(1, 1);
|
|
||||||
LOAD_C(2, 0); LOAD_C(2, 1);
|
|
||||||
LOAD_C(3, 0); LOAD_C(3, 1);
|
|
||||||
|
|
||||||
for (BLASLONG p = 0; p < k / 4; p++) {
|
for (BLASLONG p = 0; p < pad_k; p += 4) {
|
||||||
LOAD_A(0); LOAD_A(1); LOAD_A(2); LOAD_A(3);
|
ma0 = svld1_bf16(pg16, ptr_a0);
|
||||||
LOAD_B(0); LOAD_B(1);
|
ma1 = svld1_bf16(pg16, ptr_a0 + 8);
|
||||||
|
ma2 = svld1_bf16(pg16, ptr_a0 + 16);
|
||||||
|
ma3 = svld1_bf16(pg16, ptr_a0 + 24);
|
||||||
|
|
||||||
|
mb0 = svld1_bf16(pg16, ptr_b0);
|
||||||
|
mb1 = svld1_bf16(pg16, ptr_b0 + 8);
|
||||||
|
|
||||||
MATMUL(0, 0); MATMUL(0, 1);
|
MATMUL(0, 0); MATMUL(0, 1);
|
||||||
MATMUL(1, 0); MATMUL(1, 1);
|
MATMUL(1, 0); MATMUL(1, 1);
|
||||||
MATMUL(2, 0); MATMUL(2, 1);
|
MATMUL(2, 0); MATMUL(2, 1);
|
||||||
MATMUL(3, 0); MATMUL(3, 1);
|
MATMUL(3, 0); MATMUL(3, 1);
|
||||||
|
|
||||||
ptr_a0 += 8; ptr_a1 += 8; ptr_a2 += 8; ptr_a3 += 8;
|
ptr_a0 += 32;
|
||||||
ptr_b0 += 8; ptr_b1 += 8;
|
ptr_b0 += 16;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (krest) {
|
vc0 = svuzp1(mc00, mc10);
|
||||||
if (krest == 1) {
|
vc1 = svuzp1(mc20, mc30);
|
||||||
LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1);
|
vc2 = svuzp2(mc00, mc10);
|
||||||
LOAD_KREST_1(a, 2); LOAD_KREST_1(a, 3);
|
vc3 = svuzp2(mc20, mc30);
|
||||||
LOAD_KREST_1(b, 0); LOAD_KREST_1(b, 1);
|
vc4 = svuzp1(mc01, mc11);
|
||||||
} else if (krest == 2) {
|
vc5 = svuzp1(mc21, mc31);
|
||||||
LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1);
|
vc6 = svuzp2(mc01, mc11);
|
||||||
LOAD_KREST_2(a, 2); LOAD_KREST_2(a, 3);
|
vc7 = svuzp2(mc21, mc31);
|
||||||
LOAD_KREST_2(b, 0); LOAD_KREST_2(b, 1);
|
|
||||||
} else if (krest == 3) {
|
|
||||||
LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1);
|
|
||||||
LOAD_KREST_3(a, 2); LOAD_KREST_3(a, 3);
|
|
||||||
LOAD_KREST_3(b, 0); LOAD_KREST_3(b, 1);
|
|
||||||
}
|
|
||||||
MATMUL(0, 0); MATMUL(0, 1);
|
|
||||||
MATMUL(1, 0); MATMUL(1, 1);
|
|
||||||
MATMUL(2, 0); MATMUL(2, 1);
|
|
||||||
MATMUL(3, 0); MATMUL(3, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
STORE_C(0, 0); STORE_C(0, 1);
|
UPDATE_C(pg32, ptr_c0, oc0, vc0);
|
||||||
STORE_C(1, 0); STORE_C(1, 1);
|
UPDATE_C(pg32, ptr_c0+4, oc1, vc1);
|
||||||
STORE_C(2, 0); STORE_C(2, 1);
|
UPDATE_C(pg32, ptr_c1, oc2, vc2);
|
||||||
STORE_C(3, 0); STORE_C(3, 1);
|
UPDATE_C(pg32, ptr_c1+4, oc3, vc3);
|
||||||
|
UPDATE_C(pg32, ptr_c2, oc4, vc4)
|
||||||
|
UPDATE_C(pg32, ptr_c2+4, oc5, vc5);
|
||||||
|
UPDATE_C(pg32, ptr_c3, oc6, vc6)
|
||||||
|
UPDATE_C(pg32, ptr_c3+4, oc7, vc7);
|
||||||
|
|
||||||
ptr_c00 += 8; ptr_c01 += 8;
|
ptr_c0 += 8;
|
||||||
|
ptr_c1 += 8;
|
||||||
|
ptr_c2 += 8;
|
||||||
|
ptr_c3 += 8;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (m & 4) {
|
if (m & 4) {
|
||||||
ptr_a0 = ptr_a;
|
ptr_a0 = ptr_a;
|
||||||
ptr_a1 = ptr_a0 + 2 * k;
|
ptr_a += 4 * pad_k;
|
||||||
ptr_a += 4 * k;
|
|
||||||
|
|
||||||
ptr_b0 = ptr_b;
|
ptr_b0 = ptr_b;
|
||||||
ptr_b1 = ptr_b0 + 2 * k;
|
|
||||||
|
|
||||||
LOAD_C(0, 0); LOAD_C(0, 1);
|
INIT_C(0, 0); INIT_C(0, 1);
|
||||||
LOAD_C(1, 0); LOAD_C(1, 1);
|
INIT_C(1, 0); INIT_C(1, 1);
|
||||||
|
|
||||||
for (BLASLONG p = 0; p < k / 4; p++) {
|
for (BLASLONG p = 0; p < pad_k; p += 4) {
|
||||||
LOAD_A(0); LOAD_A(1);
|
ma0 = svld1_bf16(pg16, ptr_a0);
|
||||||
LOAD_B(0); LOAD_B(1);
|
ma1 = svld1_bf16(pg16, ptr_a0 + 8);
|
||||||
|
mb0 = svld1_bf16(pg16, ptr_b0);
|
||||||
|
mb1 = svld1_bf16(pg16, ptr_b0 + 8);
|
||||||
|
|
||||||
MATMUL(0, 0); MATMUL(0, 1);
|
MATMUL(0, 0); MATMUL(0, 1);
|
||||||
MATMUL(1, 0); MATMUL(1, 1);
|
MATMUL(1, 0); MATMUL(1, 1);
|
||||||
|
|
||||||
ptr_a0 += 8; ptr_a1 += 8;
|
ptr_a0 += 16;
|
||||||
ptr_b0 += 8; ptr_b1 += 8;
|
ptr_b0 += 16;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (krest) {
|
vc0 = svuzp1(mc00, mc10);
|
||||||
if (krest == 1) {
|
vc1 = svuzp2(mc00, mc10);
|
||||||
LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1);
|
vc2 = svuzp1(mc01, mc11);
|
||||||
LOAD_KREST_1(b, 0); LOAD_KREST_1(b, 1);
|
vc3 = svuzp2(mc01, mc11);
|
||||||
} else if (krest == 2) {
|
|
||||||
LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1);
|
|
||||||
LOAD_KREST_2(b, 0); LOAD_KREST_2(b, 1);
|
|
||||||
} else if (krest == 3) {
|
|
||||||
LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1);
|
|
||||||
LOAD_KREST_3(b, 0); LOAD_KREST_3(b, 1);
|
|
||||||
}
|
|
||||||
MATMUL(0, 0); MATMUL(0, 1);
|
|
||||||
MATMUL(1, 0); MATMUL(1, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
STORE_C(0, 0); STORE_C(0, 1);
|
UPDATE_C(pg32, ptr_c0, oc0, vc0);
|
||||||
STORE_C(1, 0); STORE_C(1, 1);
|
UPDATE_C(pg32, ptr_c1, oc1, vc1);
|
||||||
|
UPDATE_C(pg32, ptr_c2, oc2, vc2);
|
||||||
|
UPDATE_C(pg32, ptr_c3, oc3, vc3);
|
||||||
|
|
||||||
ptr_c00 += 4; ptr_c01 += 4;
|
ptr_c0 += 4;
|
||||||
|
ptr_c1 += 4;
|
||||||
|
ptr_c2 += 4;
|
||||||
|
ptr_c3 += 4;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (m & 2) {
|
if (m & 2) {
|
||||||
ptr_a0 = ptr_a;
|
ptr_a0 = ptr_a;
|
||||||
ptr_a += 2 * k;
|
ptr_a += 2 * pad_k;
|
||||||
|
|
||||||
ptr_b0 = ptr_b;
|
ptr_b0 = ptr_b;
|
||||||
ptr_b1 = ptr_b0 + 2 * k;
|
|
||||||
|
|
||||||
LOAD_C(0, 0); LOAD_C(0, 1);
|
INIT_C(0, 0); INIT_C(0, 1);
|
||||||
|
for (BLASLONG p = 0; p < pad_k; p += 4) {
|
||||||
for (BLASLONG p = 0; p < k / 4; p++) {
|
ma0 = svld1_bf16(pg16, ptr_a0);
|
||||||
LOAD_A(0);
|
mb0 = svld1_bf16(pg16, ptr_b0);
|
||||||
LOAD_B(0); LOAD_B(1);
|
mb1 = svld1_bf16(pg16, ptr_b0 + 8);
|
||||||
|
|
||||||
MATMUL(0, 0); MATMUL(0, 1);
|
MATMUL(0, 0); MATMUL(0, 1);
|
||||||
|
|
||||||
ptr_a0 += 8;
|
ptr_a0 += 8;
|
||||||
ptr_b0 += 8; ptr_b1 += 8;
|
ptr_b0 += 16;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (krest) {
|
vc0 = svuzp1(mc00, mc00);
|
||||||
if (krest == 1) {
|
vc1 = svuzp2(mc00, mc00);
|
||||||
LOAD_KREST_1(a, 0);
|
vc2 = svuzp1(mc01, mc01);
|
||||||
LOAD_KREST_1(b, 0); LOAD_KREST_1(b, 1);
|
vc3 = svuzp2(mc01, mc01);
|
||||||
} else if (krest == 2) {
|
|
||||||
LOAD_KREST_2(a, 0);
|
UPDATE_C(pg32_low, ptr_c0, oc0, vc0);
|
||||||
LOAD_KREST_2(b, 0); LOAD_KREST_2(b, 1);
|
UPDATE_C(pg32_low, ptr_c1, oc1, vc1);
|
||||||
} else if (krest == 3) {
|
UPDATE_C(pg32_low, ptr_c2, oc2, vc2);
|
||||||
LOAD_KREST_3(a, 0);
|
UPDATE_C(pg32_low, ptr_c3, oc3, vc3);
|
||||||
LOAD_KREST_3(b, 0); LOAD_KREST_3(b, 1);
|
|
||||||
}
|
ptr_c0 += 2;
|
||||||
MATMUL(0, 0); MATMUL(0, 1);
|
ptr_c1 += 2;
|
||||||
}
|
ptr_c2 += 2;
|
||||||
STORE_C(0, 0); STORE_C(0, 1);
|
ptr_c3 += 2;
|
||||||
ptr_c00 += 2; ptr_c01 += 2;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (m & 1) {
|
if (m & 1) {
|
||||||
ptr_a0 = ptr_a;
|
ptr_a0 = ptr_a;
|
||||||
|
|
||||||
ptr_b0 = ptr_b;
|
ptr_b0 = ptr_b;
|
||||||
ptr_b1 = ptr_b0 + 2 * k;
|
|
||||||
|
|
||||||
LOAD_C_LOW(0, 0); LOAD_C_LOW(0, 1);
|
INIT_C(0, 0); INIT_C(0, 1);
|
||||||
|
for (BLASLONG p = 0; p < pad_k; p += 4) {
|
||||||
for (BLASLONG p = 0; p < k / 4; p++) {
|
|
||||||
ma0 = svld1_bf16(pg16_low, ptr_a0);
|
ma0 = svld1_bf16(pg16_low, ptr_a0);
|
||||||
LOAD_B(0); LOAD_B(1);
|
mb0 = svld1_bf16(pg16, ptr_b0);
|
||||||
|
mb1 = svld1_bf16(pg16, ptr_b0 + 8);
|
||||||
|
|
||||||
MATMUL(0, 0); MATMUL(0, 1);
|
MATMUL(0, 0); MATMUL(0, 1);
|
||||||
|
|
||||||
ptr_a0 += 4;
|
ptr_a0 += 4;
|
||||||
ptr_b0 += 8;
|
ptr_b0 += 16;
|
||||||
ptr_b1 += 8;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (krest) {
|
vc1 = svuzp2(mc00, mc00);
|
||||||
if (krest == 1) {
|
vc3 = svuzp2(mc01, mc01);
|
||||||
LOAD_KREST_1_LOW(a, 0);
|
|
||||||
LOAD_KREST_1(b, 0); LOAD_KREST_1(b, 1);
|
UPDATE_C(pg32_first, ptr_c0, oc0, mc00);
|
||||||
} else if (krest == 2) {
|
UPDATE_C(pg32_first, ptr_c1, oc1, vc1);
|
||||||
LOAD_KREST_2_LOW(a, 0);
|
UPDATE_C(pg32_first, ptr_c2, oc2, mc01);
|
||||||
LOAD_KREST_2(b, 0); LOAD_KREST_2(b, 1);
|
UPDATE_C(pg32_first, ptr_c3, oc3, vc3);
|
||||||
} else if (krest == 3) {
|
|
||||||
LOAD_KREST_3_LOW(a, 0);
|
|
||||||
LOAD_KREST_3(b, 0); LOAD_KREST_3(b, 1);
|
|
||||||
}
|
|
||||||
MATMUL(0, 0); MATMUL(0, 1);
|
|
||||||
}
|
|
||||||
STORE_C_LOW(0, 0); STORE_C_LOW(0, 1);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ptr_b += 4 * k;
|
ptr_b += 4 * pad_k;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (n & 2) {
|
if (n & 2) {
|
||||||
ptr_c00 = ptr_c;
|
ptr_c0 = ptr_c;
|
||||||
|
ptr_c1 = ptr_c0 + ldc;
|
||||||
ptr_c += 2 * ldc;
|
ptr_c += 2 * ldc;
|
||||||
|
|
||||||
ptr_a = (bfloat16_t *)A;
|
ptr_a = (bfloat16_t *)A;
|
||||||
|
|
||||||
for (BLASLONG i = 0; i < m / 8; i++) {
|
for (BLASLONG i = 0; i < m / 8; i++) {
|
||||||
ptr_a0 = ptr_a;
|
ptr_a0 = ptr_a;
|
||||||
ptr_a1 = ptr_a0 + 2 * k;
|
ptr_a += 8 * pad_k;
|
||||||
ptr_a2 = ptr_a1 + 2 * k;
|
|
||||||
ptr_a3 = ptr_a2 + 2 * k;
|
|
||||||
ptr_a += 8 * k;
|
|
||||||
|
|
||||||
ptr_b0 = ptr_b;
|
ptr_b0 = ptr_b;
|
||||||
|
|
||||||
LOAD_C(0, 0);
|
INIT_C(0, 0);
|
||||||
LOAD_C(1, 0);
|
INIT_C(1, 0);
|
||||||
LOAD_C(2, 0);
|
INIT_C(2, 0);
|
||||||
LOAD_C(3, 0);
|
INIT_C(3, 0);
|
||||||
|
|
||||||
for (BLASLONG p = 0; p < k / 4; p++) {
|
for (BLASLONG p = 0; p < pad_k; p += 4) {
|
||||||
LOAD_A(0); LOAD_A(1); LOAD_A(2); LOAD_A(3);
|
ma0 = svld1_bf16(pg16, ptr_a0);
|
||||||
LOAD_B(0);
|
ma1 = svld1_bf16(pg16, ptr_a0 + 8);
|
||||||
|
ma2 = svld1_bf16(pg16, ptr_a0 + 16);
|
||||||
|
ma3 = svld1_bf16(pg16, ptr_a0 + 24);
|
||||||
|
|
||||||
|
mb0 = svld1_bf16(pg16, ptr_b0);
|
||||||
|
|
||||||
MATMUL(0, 0);
|
MATMUL(0, 0);
|
||||||
MATMUL(1, 0);
|
MATMUL(1, 0);
|
||||||
MATMUL(2, 0);
|
MATMUL(2, 0);
|
||||||
MATMUL(3, 0);
|
MATMUL(3, 0);
|
||||||
|
|
||||||
ptr_a0 += 8; ptr_a1 += 8; ptr_a2 += 8; ptr_a3 += 8;
|
ptr_a0 += 32;
|
||||||
ptr_b0 += 8;
|
ptr_b0 += 8;
|
||||||
}
|
}
|
||||||
if (krest) {
|
|
||||||
if (krest == 1) {
|
|
||||||
LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1);
|
|
||||||
LOAD_KREST_1(a, 2); LOAD_KREST_1(a, 3);
|
|
||||||
LOAD_KREST_1(b, 0);
|
|
||||||
} else if (krest == 2) {
|
|
||||||
LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1);
|
|
||||||
LOAD_KREST_2(a, 2); LOAD_KREST_2(a, 3);
|
|
||||||
LOAD_KREST_2(b, 0);
|
|
||||||
} else if (krest == 3) {
|
|
||||||
LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1);
|
|
||||||
LOAD_KREST_3(a, 2); LOAD_KREST_3(a, 3);
|
|
||||||
LOAD_KREST_3(b, 0);
|
|
||||||
}
|
|
||||||
MATMUL(0, 0);
|
|
||||||
MATMUL(1, 0);
|
|
||||||
MATMUL(2, 0);
|
|
||||||
MATMUL(3, 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
STORE_C(0, 0);
|
vc0 = svuzp1(mc00, mc10);
|
||||||
STORE_C(1, 0);
|
vc1 = svuzp1(mc20, mc30);
|
||||||
STORE_C(2, 0);
|
vc2 = svuzp2(mc00, mc10);
|
||||||
STORE_C(3, 0);
|
vc3 = svuzp2(mc20, mc30);
|
||||||
|
|
||||||
ptr_c00 += 8;
|
UPDATE_C(pg32, ptr_c0, oc0, vc0);
|
||||||
|
UPDATE_C(pg32, ptr_c0 + 4, oc1, vc1);
|
||||||
|
UPDATE_C(pg32, ptr_c1, oc2, vc2);
|
||||||
|
UPDATE_C(pg32, ptr_c1 + 4, oc3, vc3);
|
||||||
|
|
||||||
|
ptr_c0 += 8;
|
||||||
|
ptr_c1 += 8;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (m & 4) {
|
if (m & 4) {
|
||||||
ptr_a0 = ptr_a;
|
ptr_a0 = ptr_a;
|
||||||
ptr_a1 = ptr_a0 + 2 * k;
|
ptr_a += 4 * pad_k;
|
||||||
ptr_a += 4 * k;
|
|
||||||
|
|
||||||
ptr_b0 = ptr_b;
|
ptr_b0 = ptr_b;
|
||||||
|
|
||||||
LOAD_C(0, 0);
|
INIT_C(0, 0);
|
||||||
LOAD_C(1, 0);
|
INIT_C(1, 0);
|
||||||
|
|
||||||
for (BLASLONG p = 0; p < k / 4; p++) {
|
|
||||||
LOAD_A(0); LOAD_A(1);
|
|
||||||
LOAD_B(0);
|
|
||||||
|
|
||||||
|
for (BLASLONG p = 0; p < pad_k; p += 4) {
|
||||||
|
ma0 = svld1_bf16(pg16, ptr_a0);
|
||||||
|
ma1 = svld1_bf16(pg16, ptr_a0 + 8);
|
||||||
|
mb0 = svld1_bf16(pg16, ptr_b0);
|
||||||
MATMUL(0, 0);
|
MATMUL(0, 0);
|
||||||
MATMUL(1, 0);
|
MATMUL(1, 0);
|
||||||
|
ptr_a0 += 16;
|
||||||
ptr_a0 += 8; ptr_a1 += 8;
|
|
||||||
ptr_b0 += 8;
|
ptr_b0 += 8;
|
||||||
}
|
}
|
||||||
if (krest) {
|
|
||||||
if (krest == 1) {
|
|
||||||
LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1);
|
|
||||||
LOAD_KREST_1(b, 0);
|
|
||||||
} else if (krest == 2) {
|
|
||||||
LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1);
|
|
||||||
LOAD_KREST_2(b, 0);
|
|
||||||
} else if (krest == 3) {
|
|
||||||
LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1);
|
|
||||||
LOAD_KREST_3(b, 0);
|
|
||||||
}
|
|
||||||
MATMUL(0, 0);
|
|
||||||
MATMUL(1, 0);
|
|
||||||
}
|
|
||||||
STORE_C(0, 0)
|
|
||||||
STORE_C(1, 0)
|
|
||||||
|
|
||||||
ptr_c00 += 4;
|
vc0 = svuzp1(mc00, mc10);
|
||||||
|
vc1 = svuzp2(mc00, mc10);
|
||||||
|
|
||||||
|
UPDATE_C(pg32, ptr_c0, oc0, vc0);
|
||||||
|
UPDATE_C(pg32, ptr_c1, oc1, vc1);
|
||||||
|
|
||||||
|
ptr_c0 += 4;
|
||||||
|
ptr_c1 += 4;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (m & 2) {
|
if (m & 2) {
|
||||||
ptr_a0 = ptr_a;
|
ptr_a0 = ptr_a;
|
||||||
ptr_a += 2 * k;
|
ptr_a += 2 * pad_k;
|
||||||
ptr_b0 = ptr_b;
|
ptr_b0 = ptr_b;
|
||||||
|
|
||||||
LOAD_C(0, 0);
|
INIT_C(0, 0);
|
||||||
for (BLASLONG p = 0; p < k / 4; p++) {
|
|
||||||
LOAD_A(0);
|
for (BLASLONG p = 0; p < pad_k; p += 4) {
|
||||||
LOAD_B(0);
|
ma0 = svld1_bf16(pg16, ptr_a0);
|
||||||
|
mb0 = svld1_bf16(pg16, ptr_b0);
|
||||||
|
|
||||||
MATMUL(0, 0);
|
MATMUL(0, 0);
|
||||||
|
|
||||||
ptr_a0 += 8;
|
ptr_a0 += 8;
|
||||||
ptr_b0 += 8;
|
ptr_b0 += 8;
|
||||||
}
|
}
|
||||||
if (krest) {
|
|
||||||
if (krest == 1) {
|
vc0 = svuzp1(mc00, mc00);
|
||||||
LOAD_KREST_1(a, 0);
|
vc1 = svuzp2(mc00, mc00);
|
||||||
LOAD_KREST_1(b, 0);
|
UPDATE_C(pg32_low, ptr_c0, oc0, vc0);
|
||||||
} else if (krest == 2) {
|
UPDATE_C(pg32_low, ptr_c1, oc1, vc1);
|
||||||
LOAD_KREST_2(a, 0);
|
|
||||||
LOAD_KREST_2(b, 0);
|
ptr_c0 += 2;
|
||||||
} else if (krest == 3) {
|
ptr_c1 += 2;
|
||||||
LOAD_KREST_3(a, 0);
|
|
||||||
LOAD_KREST_3(b, 0);
|
|
||||||
}
|
|
||||||
MATMUL(0, 0);
|
|
||||||
}
|
|
||||||
STORE_C(0, 0);
|
|
||||||
ptr_c00 += 2;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (m & 1) {
|
if (m & 1) {
|
||||||
ptr_a0 = ptr_a;
|
ptr_a0 = ptr_a;
|
||||||
|
|
||||||
ptr_b0 = ptr_b;
|
ptr_b0 = ptr_b;
|
||||||
|
INIT_C(0, 0);
|
||||||
LOAD_C(0, 0);
|
for (BLASLONG p = 0; p < pad_k; p += 4) {
|
||||||
|
|
||||||
for (BLASLONG p = 0; p < k / 4; p++) {
|
|
||||||
ma0 = svld1_bf16(pg16_low, ptr_a0);
|
ma0 = svld1_bf16(pg16_low, ptr_a0);
|
||||||
LOAD_B(0);
|
mb0 = svld1_bf16(pg16, ptr_b0);
|
||||||
MATMUL(0, 0);
|
MATMUL(0, 0);
|
||||||
ptr_a0 += 4;
|
ptr_a0 += 4;
|
||||||
ptr_b0 += 8;
|
ptr_b0 += 8;
|
||||||
}
|
}
|
||||||
if (krest) {
|
vc1 = svuzp2(mc00, mc00);
|
||||||
if (krest == 1) {
|
|
||||||
LOAD_KREST_1_LOW(a, 0);
|
UPDATE_C(pg32_first, ptr_c0, oc0, mc00);
|
||||||
LOAD_KREST_1(b, 0);
|
UPDATE_C(pg32_first, ptr_c1, oc1, vc1);
|
||||||
} else if (krest == 2) {
|
|
||||||
LOAD_KREST_2_LOW(a, 0);
|
|
||||||
LOAD_KREST_2(b, 0);
|
|
||||||
} else if (krest == 3) {
|
|
||||||
LOAD_KREST_3_LOW(a, 0);
|
|
||||||
LOAD_KREST_3(b, 0);
|
|
||||||
}
|
|
||||||
MATMUL(0, 0);
|
|
||||||
}
|
|
||||||
STORE_C_LOW(0, 0);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ptr_b += 2 * k;
|
ptr_b += 2 * pad_k;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (n & 1) {
|
if (n & 1) {
|
||||||
ptr_c00 = ptr_c;
|
ptr_c0 = ptr_c;
|
||||||
ptr_a = (bfloat16_t *)A;
|
ptr_a = (bfloat16_t *)A;
|
||||||
|
|
||||||
for (BLASLONG i = 0; i < m / 8; i++) {
|
for (BLASLONG i = 0; i < m / 8; i++) {
|
||||||
ptr_a0 = ptr_a;
|
ptr_a0 = ptr_a;
|
||||||
ptr_a1 = ptr_a0 + 2 * k;
|
ptr_a += 8 * pad_k;
|
||||||
ptr_a2 = ptr_a1 + 2 * k;
|
|
||||||
ptr_a3 = ptr_a2 + 2 * k;
|
|
||||||
ptr_a += 8 * k;
|
|
||||||
|
|
||||||
ptr_b0 = ptr_b;
|
ptr_b0 = ptr_b;
|
||||||
|
|
||||||
LOAD_C_EVEN(0, 0);
|
INIT_C(0, 0);
|
||||||
LOAD_C_EVEN(1, 0);
|
INIT_C(1, 0);
|
||||||
LOAD_C_EVEN(2, 0);
|
INIT_C(2, 0);
|
||||||
LOAD_C_EVEN(3, 0);
|
INIT_C(3, 0);
|
||||||
|
|
||||||
|
for (BLASLONG p = 0; p < pad_k; p += 4) {
|
||||||
|
ma0 = svld1_bf16(pg16, ptr_a0);
|
||||||
|
ma1 = svld1_bf16(pg16, ptr_a0 + 8);
|
||||||
|
ma2 = svld1_bf16(pg16, ptr_a0 + 16);
|
||||||
|
ma3 = svld1_bf16(pg16, ptr_a0 + 24);
|
||||||
|
|
||||||
for (BLASLONG p = 0; p < k / 4; p++) {
|
|
||||||
LOAD_A(0); LOAD_A(1); LOAD_A(2); LOAD_A(3);
|
|
||||||
mb0 = svld1_bf16(pg16_low, ptr_b0);
|
mb0 = svld1_bf16(pg16_low, ptr_b0);
|
||||||
|
|
||||||
MATMUL(0, 0);
|
MATMUL(0, 0);
|
||||||
@@ -528,86 +398,48 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
MATMUL(2, 0);
|
MATMUL(2, 0);
|
||||||
MATMUL(3, 0);
|
MATMUL(3, 0);
|
||||||
|
|
||||||
ptr_a0 += 8; ptr_a1 += 8; ptr_a2 += 8; ptr_a3 += 8;
|
ptr_a0 += 32;
|
||||||
ptr_b0 += 4;
|
ptr_b0 += 4;
|
||||||
}
|
}
|
||||||
if (krest) {
|
|
||||||
if (krest == 1) {
|
|
||||||
LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1);
|
|
||||||
LOAD_KREST_1(a, 2); LOAD_KREST_1(a, 3);
|
|
||||||
LOAD_KREST_1_LOW(b, 0);
|
|
||||||
} else if (krest == 2) {
|
|
||||||
LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1);
|
|
||||||
LOAD_KREST_2(a, 2); LOAD_KREST_2(a, 3);
|
|
||||||
LOAD_KREST_2_LOW(b, 0);
|
|
||||||
} else if (krest == 3) {
|
|
||||||
LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1);
|
|
||||||
LOAD_KREST_3(a, 2); LOAD_KREST_3(a, 3);
|
|
||||||
LOAD_KREST_3_LOW(b, 0);
|
|
||||||
}
|
|
||||||
MATMUL(0, 0);
|
|
||||||
MATMUL(1, 0);
|
|
||||||
MATMUL(2, 0);
|
|
||||||
MATMUL(3, 0);
|
|
||||||
}
|
|
||||||
STORE_C_EVEN(0, 0)
|
|
||||||
STORE_C_EVEN(1, 0);
|
|
||||||
STORE_C_EVEN(2, 0);
|
|
||||||
STORE_C_EVEN(3, 0);
|
|
||||||
|
|
||||||
ptr_c00 += 8;
|
vc0 = svuzp1(mc00, mc10);
|
||||||
|
vc1 = svuzp1(mc20, mc30);
|
||||||
|
|
||||||
|
UPDATE_C(pg32, ptr_c0, oc0, vc0);
|
||||||
|
UPDATE_C(pg32, ptr_c0 + 4, oc1, vc1);
|
||||||
|
|
||||||
|
ptr_c0 += 8;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (m & 4) {
|
if (m & 4) {
|
||||||
ptr_a0 = ptr_a;
|
ptr_a0 = ptr_a;
|
||||||
ptr_a1 = ptr_a0 + 2 * k;
|
ptr_a += 4 * pad_k;
|
||||||
ptr_a += 4 * k;
|
|
||||||
|
|
||||||
ptr_b0 = ptr_b;
|
ptr_b0 = ptr_b;
|
||||||
|
INIT_C(0, 0);
|
||||||
LOAD_C_EVEN(0, 0);
|
INIT_C(1, 0);
|
||||||
LOAD_C_EVEN(1, 0);
|
for (BLASLONG p = 0; p < pad_k; p += 4) {
|
||||||
|
ma0 = svld1_bf16(pg16, ptr_a0);
|
||||||
for (BLASLONG p = 0; p < k / 4; p++) {
|
ma1 = svld1_bf16(pg16, ptr_a0 + 8);
|
||||||
LOAD_A(0); LOAD_A(1);
|
|
||||||
mb0 = svld1_bf16(pg16_low, ptr_b0);
|
mb0 = svld1_bf16(pg16_low, ptr_b0);
|
||||||
|
|
||||||
MATMUL(0, 0);
|
MATMUL(0, 0);
|
||||||
MATMUL(1, 0);
|
MATMUL(1, 0);
|
||||||
|
ptr_a0 += 16;
|
||||||
ptr_a0 += 8; ptr_a1 += 8;
|
|
||||||
ptr_b0 += 4;
|
ptr_b0 += 4;
|
||||||
}
|
}
|
||||||
if (krest) {
|
vc0 = svuzp1(mc00, mc10);
|
||||||
if (krest == 1) {
|
UPDATE_C(pg32, ptr_c0, oc0, vc0);
|
||||||
LOAD_KREST_1(a, 0); LOAD_KREST_1(a, 1);
|
ptr_c0 += 4;
|
||||||
LOAD_KREST_1_LOW(b, 0);
|
|
||||||
} else if (krest == 2) {
|
|
||||||
LOAD_KREST_2(a, 0); LOAD_KREST_2(a, 1);
|
|
||||||
LOAD_KREST_2_LOW(b, 0);
|
|
||||||
} else if (krest == 3) {
|
|
||||||
LOAD_KREST_3(a, 0); LOAD_KREST_3(a, 1);
|
|
||||||
LOAD_KREST_3_LOW(b, 0);
|
|
||||||
}
|
|
||||||
MATMUL(0, 0);
|
|
||||||
MATMUL(1, 0);
|
|
||||||
}
|
|
||||||
STORE_C_EVEN(0, 0)
|
|
||||||
STORE_C_EVEN(1, 0)
|
|
||||||
|
|
||||||
ptr_c00 += 4;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (m & 2) {
|
if (m & 2) {
|
||||||
ptr_a0 = ptr_a;
|
ptr_a0 = ptr_a;
|
||||||
ptr_a += 2 * k;
|
ptr_a += 2 * pad_k;
|
||||||
|
|
||||||
ptr_b0 = ptr_b;
|
ptr_b0 = ptr_b;
|
||||||
|
|
||||||
LOAD_C_EVEN(0, 0);
|
INIT_C(0, 0);
|
||||||
|
|
||||||
for (BLASLONG p = 0; p < k / 4; p++) {
|
for (BLASLONG p = 0; p < pad_k; p += 4) {
|
||||||
LOAD_A(0);
|
ma0 = svld1_bf16(pg16, ptr_a0);
|
||||||
mb0 = svld1_bf16(pg16_low, ptr_b0);
|
mb0 = svld1_bf16(pg16_low, ptr_b0);
|
||||||
|
|
||||||
MATMUL(0, 0);
|
MATMUL(0, 0);
|
||||||
@@ -615,49 +447,23 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
ptr_a0 += 8;
|
ptr_a0 += 8;
|
||||||
ptr_b0 += 4;
|
ptr_b0 += 4;
|
||||||
}
|
}
|
||||||
if (krest) {
|
vc0 = svuzp1(mc00, mc00);
|
||||||
if (krest == 1) {
|
UPDATE_C(pg32_low, ptr_c0, oc0, vc0);
|
||||||
LOAD_KREST_1(a, 0);
|
ptr_c0 += 2;
|
||||||
LOAD_KREST_1_LOW(b, 0);
|
|
||||||
} else if (krest == 2) {
|
|
||||||
LOAD_KREST_2(a, 0);
|
|
||||||
LOAD_KREST_2_LOW(b, 0);
|
|
||||||
} else if (krest == 3) {
|
|
||||||
LOAD_KREST_3(a, 0);
|
|
||||||
LOAD_KREST_3_LOW(b, 0);
|
|
||||||
}
|
|
||||||
MATMUL(0, 0);
|
|
||||||
}
|
|
||||||
STORE_C_EVEN(0, 0);
|
|
||||||
ptr_c00 += 2;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (m & 1) {
|
if (m & 1) {
|
||||||
ptr_a0 = ptr_a;
|
ptr_a0 = ptr_a;
|
||||||
ptr_b0 = ptr_b;
|
ptr_b0 = ptr_b;
|
||||||
LOAD_C_FIRST(0, 0);
|
INIT_C(0, 0);
|
||||||
for (BLASLONG p = 0; p < k / 4; p++) {
|
for (BLASLONG p = 0; p < pad_k; p += 4) {
|
||||||
ma0 = svld1_bf16(pg16_low, ptr_a0);
|
ma0 = svld1_bf16(pg16_low, ptr_a0);
|
||||||
mb0 = svld1_bf16(pg16_low, ptr_b0);
|
mb0 = svld1_bf16(pg16_low, ptr_b0);
|
||||||
|
|
||||||
MATMUL(0, 0);
|
MATMUL(0, 0);
|
||||||
|
|
||||||
ptr_a0 += 4;
|
ptr_a0 += 4;
|
||||||
ptr_b0 += 4;
|
ptr_b0 += 4;
|
||||||
}
|
}
|
||||||
if (krest) {
|
UPDATE_C(pg32_first, ptr_c0, oc0, mc00);
|
||||||
if (krest == 1) {
|
|
||||||
LOAD_KREST_1_LOW(a, 0);
|
|
||||||
LOAD_KREST_1_LOW(b, 0);
|
|
||||||
} else if (krest == 2) {
|
|
||||||
LOAD_KREST_2_LOW(a, 0);
|
|
||||||
LOAD_KREST_2_LOW(b, 0);
|
|
||||||
} else if (krest == 3) {
|
|
||||||
LOAD_KREST_3_LOW(a, 0);
|
|
||||||
LOAD_KREST_3_LOW(b, 0);
|
|
||||||
}
|
|
||||||
MATMUL(0, 0);
|
|
||||||
}
|
|
||||||
STORE_C_FIRST(0, 0);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -0,0 +1,126 @@
|
/***************************************************************************
|
||||||
|
* Copyright (c) 2022, The OpenBLAS Project
|
||||||
|
* All rights reserved.
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
* 1. Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* 2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer in
|
||||||
|
* the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* 3. Neither the name of the OpenBLAS project nor the names of
|
||||||
|
* its contributors may be used to endorse or promote products
|
||||||
|
* derived from this software without specific prior written permission.
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
||||||
|
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||||
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||||
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||||
|
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||||
|
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||||
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||||
|
* POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
* *****************************************************************************/
|
||||||
|
|
||||||
|
#include <arm_sve.h>
|
||||||
|
|
||||||
|
#include "common.h"
|
||||||
|
|
||||||
|
int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) {
|
||||||
|
IFLOAT *a_offset;
|
||||||
|
IFLOAT *a_offsetx[4];
|
||||||
|
IFLOAT *b_offset;
|
||||||
|
a_offset = a;
|
||||||
|
b_offset = b;
|
||||||
|
|
||||||
|
svbool_t pg16 = svdupq_b16(1, 1, 1, 1, 0, 0, 0, 0);
|
||||||
|
svbfloat16_t v0, v1, v2, v3;
|
||||||
|
|
||||||
|
for (BLASLONG j = 0; j < n / 4; j++) {
|
||||||
|
a_offsetx[0] = a_offset;
|
||||||
|
a_offsetx[1] = a_offsetx[0] + lda;
|
||||||
|
a_offsetx[2] = a_offsetx[1] + lda;
|
||||||
|
a_offsetx[3] = a_offsetx[2] + lda;
|
||||||
|
a_offset += 4 * lda;
|
||||||
|
|
||||||
|
for (BLASLONG i = 0; i < m / 4; i++) {
|
||||||
|
v0 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[0]);
|
||||||
|
v1 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[1]);
|
||||||
|
v2 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[2]);
|
||||||
|
v3 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[3]);
|
||||||
|
|
||||||
|
svst1_bf16(pg16, (bfloat16_t *)b_offset, v0);
|
||||||
|
svst1_bf16(pg16, (bfloat16_t *)b_offset + 4, v1);
|
||||||
|
svst1_bf16(pg16, (bfloat16_t *)b_offset + 8, v2);
|
||||||
|
svst1_bf16(pg16, (bfloat16_t *)b_offset + 12, v3);
|
||||||
|
|
||||||
|
b_offset += 16;
|
||||||
|
a_offsetx[0] += 4;
|
||||||
|
a_offsetx[1] += 4;
|
||||||
|
a_offsetx[2] += 4;
|
||||||
|
a_offsetx[3] += 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (m & 3) {
|
||||||
|
BLASLONG rest = m & 3;
|
||||||
|
for (BLASLONG col = 0; col < 4; col++) {
|
||||||
|
b_offset[4 * col] = a_offsetx[col][0];
|
||||||
|
b_offset[4 * col + 1] = rest == 1 ? 0 : a_offsetx[col][1];
|
||||||
|
b_offset[4 * col + 2] = rest <= 2 ? 0 : a_offsetx[col][2];
|
||||||
|
b_offset[4 * col + 3] = rest <= 3 ? 0 : a_offsetx[col][3];
|
||||||
|
}
|
||||||
|
b_offset += 16;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (n & 2) {
|
||||||
|
a_offsetx[0] = a_offset;
|
||||||
|
a_offsetx[1] = a_offsetx[0] + lda;
|
||||||
|
a_offset += 2 * lda;
|
||||||
|
|
||||||
|
for (BLASLONG i = 0; i < m / 4; i++) {
|
||||||
|
v0 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[0]);
|
||||||
|
v1 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[1]);
|
||||||
|
svst1_bf16(pg16, (bfloat16_t *)b_offset, v0);
|
||||||
|
svst1_bf16(pg16, (bfloat16_t *)b_offset + 4, v1);
|
||||||
|
|
||||||
|
b_offset += 8;
|
||||||
|
a_offsetx[0] += 4;
|
||||||
|
a_offsetx[1] += 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (m & 3) {
|
||||||
|
BLASLONG rest = m & 3;
|
||||||
|
for (BLASLONG col = 0; col < 2; col++) {
|
||||||
|
b_offset[4 * col] = a_offsetx[col][0];
|
||||||
|
b_offset[4 * col + 1] = rest == 1 ? 0 : a_offsetx[col][1];
|
||||||
|
b_offset[4 * col + 2] = rest <= 2 ? 0 : a_offsetx[col][2];
|
||||||
|
b_offset[4 * col + 3] = rest <= 3 ? 0 : a_offsetx[col][3];
|
||||||
|
}
|
||||||
|
b_offset += 8;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (n & 1) {
|
||||||
|
a_offsetx[0] = a_offset;
|
||||||
|
for (BLASLONG i = 0; i < m / 4; i++) {
|
||||||
|
v0 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[0]);
|
||||||
|
svst1_bf16(pg16, (bfloat16_t *)b_offset, v0);
|
||||||
|
b_offset += 4;
|
||||||
|
a_offsetx[0] += 4;
|
||||||
|
}
|
||||||
|
if (m & 3) {
|
||||||
|
BLASLONG rest = m & 3;
|
||||||
|
b_offset[0] = a_offsetx[0][0];
|
||||||
|
b_offset[1] = rest == 1 ? 0 : a_offsetx[0][1];
|
||||||
|
b_offset[2] = rest <= 2 ? 0 : a_offsetx[0][2];
|
||||||
|
b_offset[3] = rest <= 3 ? 0 : a_offsetx[0][3];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
|
@@ -1,101 +0,0 @@
/***************************************************************************
|
|
||||||
* Copyright (c) 2022, The OpenBLAS Project
|
|
||||||
* All rights reserved.
|
|
||||||
* Redistribution and use in source and binary forms, with or without
|
|
||||||
* modification, are permitted provided that the following conditions are
|
|
||||||
* met:
|
|
||||||
* 1. Redistributions of source code must retain the above copyright
|
|
||||||
* notice, this list of conditions and the following disclaimer.
|
|
||||||
* 2. Redistributions in binary form must reproduce the above copyright
|
|
||||||
* notice, this list of conditions and the following disclaimer in
|
|
||||||
* the documentation and/or other materials provided with the
|
|
||||||
* distribution.
|
|
||||||
* 3. Neither the name of the OpenBLAS project nor the names of
|
|
||||||
* its contributors may be used to endorse or promote products
|
|
||||||
* derived from this software without specific prior written permission.
|
|
||||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
||||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
||||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
||||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
|
||||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
||||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
||||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
||||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
||||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
||||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
||||||
* POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
* *****************************************************************************/
|
|
||||||
|
|
||||||
#include "common.h"
|
|
||||||
|
|
||||||
int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) {
|
|
||||||
IFLOAT *a_offset, *a_offset1, *a_offset2;
|
|
||||||
IFLOAT *b_offset;
|
|
||||||
|
|
||||||
a_offset = a;
|
|
||||||
b_offset = b;
|
|
||||||
|
|
||||||
for (BLASLONG j = 0; j < n / 2; j++) {
|
|
||||||
a_offset1 = a_offset;
|
|
||||||
a_offset2 = a_offset1 + lda;
|
|
||||||
a_offset += 2 * lda;
|
|
||||||
for (BLASLONG i = 0; i < m / 4; i++) {
|
|
||||||
*(b_offset + 0) = *(a_offset1 + 0);
|
|
||||||
*(b_offset + 1) = *(a_offset1 + 1);
|
|
||||||
*(b_offset + 2) = *(a_offset1 + 2);
|
|
||||||
*(b_offset + 3) = *(a_offset1 + 3);
|
|
||||||
*(b_offset + 4) = *(a_offset2 + 0);
|
|
||||||
*(b_offset + 5) = *(a_offset2 + 1);
|
|
||||||
*(b_offset + 6) = *(a_offset2 + 2);
|
|
||||||
*(b_offset + 7) = *(a_offset2 + 3);
|
|
||||||
|
|
||||||
a_offset1 += 4;
|
|
||||||
a_offset2 += 4;
|
|
||||||
b_offset += 8;
|
|
||||||
}
|
|
||||||
BLASLONG rest = m & 3;
|
|
||||||
if (rest == 3) {
|
|
||||||
*(b_offset + 0) = *(a_offset1 + 0);
|
|
||||||
*(b_offset + 1) = *(a_offset1 + 1);
|
|
||||||
*(b_offset + 2) = *(a_offset1 + 2);
|
|
||||||
*(b_offset + 3) = *(a_offset2 + 0);
|
|
||||||
*(b_offset + 4) = *(a_offset2 + 1);
|
|
||||||
*(b_offset + 5) = *(a_offset2 + 2);
|
|
||||||
b_offset += 6;
|
|
||||||
} else if (rest == 2) {
|
|
||||||
*(b_offset + 0) = *(a_offset1 + 0);
|
|
||||||
*(b_offset + 1) = *(a_offset1 + 1);
|
|
||||||
*(b_offset + 2) = *(a_offset2 + 0);
|
|
||||||
*(b_offset + 3) = *(a_offset2 + 1);
|
|
||||||
b_offset += 4;
|
|
||||||
} else if (rest == 1) {
|
|
||||||
*(b_offset + 0) = *(a_offset1 + 0);
|
|
||||||
*(b_offset + 1) = *(a_offset2 + 0);
|
|
||||||
b_offset += 2;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (n & 1) {
|
|
||||||
for (BLASLONG i = 0; i < m / 4; i++) {
|
|
||||||
*(b_offset + 0) = *(a_offset + 0);
|
|
||||||
*(b_offset + 1) = *(a_offset + 1);
|
|
||||||
*(b_offset + 2) = *(a_offset + 2);
|
|
||||||
*(b_offset + 3) = *(a_offset + 3);
|
|
||||||
|
|
||||||
b_offset += 4;
|
|
||||||
a_offset += 4;
|
|
||||||
}
|
|
||||||
BLASLONG rest = m & 3;
|
|
||||||
if (rest == 3) {
|
|
||||||
*(b_offset + 0) = *(a_offset + 0);
|
|
||||||
*(b_offset + 1) = *(a_offset + 1);
|
|
||||||
*(b_offset + 2) = *(a_offset + 2);
|
|
||||||
} else if (rest == 2) {
|
|
||||||
*(b_offset + 0) = *(a_offset + 0);
|
|
||||||
*(b_offset + 1) = *(a_offset + 1);
|
|
||||||
} else if (rest == 1) {
|
|
||||||
*(b_offset + 0) = *(a_offset + 0);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
|
@@ -0,0 +1,165 @@
|
/***************************************************************************
|
||||||
|
* Copyright (c) 2022, The OpenBLAS Project
|
||||||
|
* All rights reserved.
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions are
|
||||||
|
* met:
|
||||||
|
* 1. Redistributions of source code must retain the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer.
|
||||||
|
* 2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
* notice, this list of conditions and the following disclaimer in
|
||||||
|
* the documentation and/or other materials provided with the
|
||||||
|
* distribution.
|
||||||
|
* 3. Neither the name of the OpenBLAS project nor the names of
|
||||||
|
* its contributors may be used to endorse or promote products
|
||||||
|
* derived from this software without specific prior written permission.
|
||||||
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
||||||
|
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||||
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||||
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||||
|
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||||
|
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||||
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||||
|
* POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
* *****************************************************************************/
|
||||||
|
|
||||||
|
#include "common.h"
|
||||||
|
|
||||||
|
int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) {
|
||||||
|
IFLOAT *a_offset, *a_offset0, *a_offset1, *a_offset2, *a_offset3;
|
||||||
|
IFLOAT *b_offset;
|
||||||
|
a_offset = a;
|
||||||
|
b_offset = b;
|
||||||
|
|
||||||
|
for (BLASLONG j = 0; j < n / 8; j++) {
|
||||||
|
a_offset0 = a_offset;
|
||||||
|
a_offset1 = a_offset0 + lda;
|
||||||
|
a_offset2 = a_offset1 + lda;
|
||||||
|
a_offset3 = a_offset2 + lda;
|
||||||
|
a_offset += 8;
|
||||||
|
|
||||||
|
for (BLASLONG i = 0; i < m / 4; i++) {
|
||||||
|
for (BLASLONG line = 0; line < 8; line++) {
|
||||||
|
b_offset[line * 4] = a_offset0[line];
|
||||||
|
b_offset[line * 4 + 1] = a_offset1[line];
|
||||||
|
b_offset[line * 4 + 2] = a_offset2[line];
|
||||||
|
b_offset[line * 4 + 3] = a_offset3[line];
|
||||||
|
}
|
||||||
|
|
||||||
|
b_offset += 32;
|
||||||
|
a_offset0 += 4 * lda;
|
||||||
|
a_offset1 += 4 * lda;
|
||||||
|
a_offset2 += 4 * lda;
|
||||||
|
a_offset3 += 4 * lda;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (m & 3) {
|
||||||
|
BLASLONG rest = m & 3;
|
||||||
|
for (BLASLONG line = 0; line < 8; line++) {
|
||||||
|
b_offset[line * 4] = a_offset0[line];
|
||||||
|
b_offset[line * 4 + 1] = rest == 1 ? 0 : a_offset1[line];
|
||||||
|
b_offset[line * 4 + 2] = rest <= 2 ? 0 : a_offset2[line];
|
||||||
|
b_offset[line * 4 + 3] = rest <= 3 ? 0 : a_offset3[line];
|
||||||
|
}
|
||||||
|
b_offset += 32;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (n & 4) {
|
||||||
|
a_offset0 = a_offset;
|
||||||
|
a_offset1 = a_offset0 + lda;
|
||||||
|
a_offset2 = a_offset1 + lda;
|
||||||
|
a_offset3 = a_offset2 + lda;
|
||||||
|
a_offset += 4;
|
||||||
|
|
||||||
|
for (BLASLONG i = 0; i < m / 4; i++) {
|
||||||
|
for (BLASLONG line = 0; line < 4; line++) {
|
||||||
|
b_offset[line * 4] = a_offset0[line];
|
||||||
|
b_offset[line * 4 + 1] = a_offset1[line];
|
||||||
|
b_offset[line * 4 + 2] = a_offset2[line];
|
||||||
|
b_offset[line * 4 + 3] = a_offset3[line];
|
||||||
|
}
|
||||||
|
|
||||||
|
b_offset += 16;
|
||||||
|
a_offset0 += 4 * lda;
|
||||||
|
a_offset1 += 4 * lda;
|
||||||
|
a_offset2 += 4 * lda;
|
||||||
|
a_offset3 += 4 * lda;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (m & 3) {
|
||||||
|
BLASLONG rest = m & 3;
|
||||||
|
for (BLASLONG line = 0; line < 4; line++) {
|
||||||
|
b_offset[line * 4] = a_offset0[line];
|
||||||
|
b_offset[line * 4 + 1] = rest == 1 ? 0 : a_offset1[line];
|
||||||
|
b_offset[line * 4 + 2] = rest <= 2 ? 0 : a_offset2[line];
|
||||||
|
b_offset[line * 4 + 3] = rest <= 3 ? 0 : a_offset3[line];
|
||||||
|
}
|
||||||
|
b_offset += 16;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (n & 2) {
|
||||||
|
a_offset0 = a_offset;
|
||||||
|
a_offset1 = a_offset0 + lda;
|
||||||
|
a_offset2 = a_offset1 + lda;
|
||||||
|
a_offset3 = a_offset2 + lda;
|
||||||
|
a_offset += 2;
|
||||||
|
|
||||||
|
for (BLASLONG i = 0; i < m / 4; i++) {
|
||||||
|
for (BLASLONG line = 0; line < 2; line++) {
|
||||||
|
b_offset[line * 4] = a_offset0[line];
|
||||||
|
b_offset[line * 4 + 1] = a_offset1[line];
|
||||||
|
b_offset[line * 4 + 2] = a_offset2[line];
|
||||||
|
b_offset[line * 4 + 3] = a_offset3[line];
|
||||||
|
}
|
||||||
|
b_offset += 8;
|
||||||
|
a_offset0 += 4 * lda;
|
||||||
|
a_offset1 += 4 * lda;
|
||||||
|
a_offset2 += 4 * lda;
|
||||||
|
a_offset3 += 4 * lda;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (m & 3) {
|
||||||
|
BLASLONG rest = m & 3;
|
||||||
|
for (BLASLONG line = 0; line < 2; line++) {
|
||||||
|
b_offset[line * 4] = a_offset0[line];
|
||||||
|
b_offset[line * 4 + 1] = rest == 1 ? 0 : a_offset1[line];
|
||||||
|
b_offset[line * 4 + 2] = rest <= 2 ? 0 : a_offset2[line];
|
||||||
|
b_offset[line * 4 + 3] = rest <= 3 ? 0 : a_offset3[line];
|
||||||
|
}
|
||||||
|
b_offset += 8;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (n & 1) {
|
||||||
|
a_offset0 = a_offset;
|
||||||
|
a_offset1 = a_offset0 + lda;
|
||||||
|
a_offset2 = a_offset1 + lda;
|
||||||
|
a_offset3 = a_offset2 + lda;
|
||||||
|
|
||||||
|
for (BLASLONG i = 0; i < m / 4; i++) {
|
||||||
|
b_offset[0] = *a_offset0;
|
||||||
|
b_offset[1] = *a_offset1;
|
||||||
|
b_offset[2] = *a_offset2;
|
||||||
|
b_offset[3] = *a_offset3;
|
||||||
|
b_offset += 4;
|
||||||
|
a_offset0 += 4 * lda;
|
||||||
|
a_offset1 += 4 * lda;
|
||||||
|
a_offset2 += 4 * lda;
|
||||||
|
a_offset3 += 4 * lda;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (m & 3) {
|
||||||
|
BLASLONG rest = m & 3;
|
||||||
|
b_offset[0] = *a_offset0;
|
||||||
|
b_offset[1] = rest == 1 ? 0 : *a_offset1;
|
||||||
|
b_offset[2] = rest <= 2 ? 0 : *a_offset2;
|
||||||
|
b_offset[3] = rest <= 3 ? 0 : *a_offset3;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
|
@@ -1,109 +0,0 @@
/***************************************************************************
|
|
||||||
* Copyright (c) 2022, The OpenBLAS Project
|
|
||||||
* All rights reserved.
|
|
||||||
* Redistribution and use in source and binary forms, with or without
|
|
||||||
* modification, are permitted provided that the following conditions are
|
|
||||||
* met:
|
|
||||||
* 1. Redistributions of source code must retain the above copyright
|
|
||||||
* notice, this list of conditions and the following disclaimer.
|
|
||||||
* 2. Redistributions in binary form must reproduce the above copyright
|
|
||||||
* notice, this list of conditions and the following disclaimer in
|
|
||||||
* the documentation and/or other materials provided with the
|
|
||||||
* distribution.
|
|
||||||
* 3. Neither the name of the OpenBLAS project nor the names of
|
|
||||||
* its contributors may be used to endorse or promote products
|
|
||||||
* derived from this software without specific prior written permission.
|
|
||||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
||||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
||||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
||||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
|
||||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
||||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
||||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
||||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
||||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
||||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
||||||
* POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
* *****************************************************************************/
|
|
||||||
|
|
||||||
#include "common.h"
|
|
||||||
|
|
||||||
|
|
||||||
int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) {
|
|
||||||
IFLOAT *a_offset, *a_offset1, *a_offset2, *a_offset3, *a_offset4;
|
|
||||||
IFLOAT *b_offset;
|
|
||||||
a_offset = a;
|
|
||||||
b_offset = b;
|
|
||||||
|
|
||||||
for (BLASLONG j = 0; j < n / 2; j++) {
|
|
||||||
a_offset1 = a_offset;
|
|
||||||
a_offset2 = a_offset1 + lda;
|
|
||||||
a_offset3 = a_offset2 + lda;
|
|
||||||
a_offset4 = a_offset3 + lda;
|
|
||||||
a_offset += 2;
|
|
||||||
|
|
||||||
for (BLASLONG i = 0; i < m / 4; i++) {
|
|
||||||
*(b_offset + 0) = *(a_offset1 + 0);
|
|
||||||
*(b_offset + 1) = *(a_offset2 + 0);
|
|
||||||
*(b_offset + 2) = *(a_offset3 + 0);
|
|
||||||
*(b_offset + 3) = *(a_offset4 + 0);
|
|
||||||
*(b_offset + 4) = *(a_offset1 + 1);
|
|
||||||
*(b_offset + 5) = *(a_offset2 + 1);
|
|
||||||
*(b_offset + 6) = *(a_offset3 + 1);
|
|
||||||
*(b_offset + 7) = *(a_offset4 + 1);
|
|
||||||
|
|
||||||
b_offset += 8;
|
|
||||||
a_offset1 += 4 * lda;
|
|
||||||
a_offset2 += 4 * lda;
|
|
||||||
a_offset3 += 4 * lda;
|
|
||||||
a_offset4 += 4 * lda;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (m & 3) {
|
|
||||||
BLASLONG rest = m & 3;
|
|
||||||
if (rest == 3) {
|
|
||||||
*(b_offset + 0) = *(a_offset1 + 0);
|
|
||||||
*(b_offset + 1) = *(a_offset2 + 0);
|
|
||||||
*(b_offset + 2) = *(a_offset3 + 0);
|
|
||||||
*(b_offset + 3) = *(a_offset1 + 1);
|
|
||||||
*(b_offset + 4) = *(a_offset2 + 1);
|
|
||||||
*(b_offset + 5) = *(a_offset3 + 1);
|
|
||||||
b_offset += 6;
|
|
||||||
} else if (rest == 2) {
|
|
||||||
*(b_offset + 0) = *(a_offset1 + 0);
|
|
||||||
*(b_offset + 1) = *(a_offset2 + 0);
|
|
||||||
*(b_offset + 2) = *(a_offset1 + 1);
|
|
||||||
*(b_offset + 3) = *(a_offset2 + 1);
|
|
||||||
b_offset += 4;
|
|
||||||
} else if (rest == 1) {
|
|
||||||
*(b_offset + 0) = *(a_offset1 + 0);
|
|
||||||
*(b_offset + 1) = *(a_offset1 + 1);
|
|
||||||
b_offset += 2;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (n & 1) {
|
|
||||||
for (BLASLONG i = 0; i < m / 4; i++) {
|
|
||||||
*(b_offset + 0) = *(a_offset);
|
|
||||||
*(b_offset + 1) = *(a_offset + lda);
|
|
||||||
*(b_offset + 2) = *(a_offset + lda * 2);
|
|
||||||
*(b_offset + 3) = *(a_offset + lda * 3);
|
|
||||||
|
|
||||||
b_offset += 4;
|
|
||||||
a_offset += 4 * lda;
|
|
||||||
}
|
|
||||||
BLASLONG rest = m & 3;
|
|
||||||
if (rest == 3) {
|
|
||||||
*(b_offset + 0) = *(a_offset);
|
|
||||||
*(b_offset + 1) = *(a_offset + lda);
|
|
||||||
*(b_offset + 2) = *(a_offset + lda * 2);
|
|
||||||
} else if (rest == 2) {
|
|
||||||
*(b_offset + 0) = *(a_offset);
|
|
||||||
*(b_offset + 1) = *(a_offset + lda);
|
|
||||||
} else if (rest == 1) {
|
|
||||||
*(b_offset + 0) = *(a_offset);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
|
@@ -62,6 +62,8 @@ gotoblas_t TABLE_NAME = {
 MAX(SBGEMM_DEFAULT_UNROLL_M, SBGEMM_DEFAULT_UNROLL_N),
 #endif

+SBGEMM_ALIGN_K,
+
 sbstobf16_kTS, sbdtobf16_kTS, sbf16tos_kTS, dbf16tod_kTS,

 samax_kTS, samin_kTS, smax_kTS, smin_kTS,
@@ -866,8 +868,9 @@ gotoblas_t TABLE_NAME = {
 cgeadd_kTS,
 #endif
 #if BUILD_COMPLEX16==1
-zgeadd_kTS
+zgeadd_kTS,
 #endif
+1, // align_k
 };

 #if (ARCH_ARM64)
param.h
@@ -79,6 +79,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define SBGEMM_DEFAULT_P 256
 #define SBGEMM_DEFAULT_R 256
 #define SBGEMM_DEFAULT_Q 256
+#define SBGEMM_ALIGN_K 1 // must be 2^x
+
 #ifdef OPTERON

 #define SNUMOPT 4
@@ -3394,6 +3396,9 @@ is a big desktop or server with abundant cache rather than a phone or embedded d

 #elif defined(NEOVERSEN2)

+#undef SBGEMM_ALIGN_K
+#define SBGEMM_ALIGN_K 4
+
 #undef SBGEMM_DEFAULT_UNROLL_M
 #undef SBGEMM_DEFAULT_UNROLL_N
 #define SBGEMM_DEFAULT_UNROLL_M 8