Merge pull request #3364 from guowangy/bf16-cooperlake
Add SBGEMM kernel for Cooperlake
commit da5bd8b5e3
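SBGEMM is the BLAS-style GEMM with bfloat16 inputs and float32 accumulation (C = alpha*A*B + beta*C). The Cooperlake kernel added here is built around the AVX512-BF16 dot-product FMA; the sketch below (not from the patch; assumes a compiler with AVX512-BF16 support) shows the primitive that the kernel's FMA macro chains:

#include <immintrin.h>

/* One vdpbf16ps step: for every 32-bit lane i of the fp32 accumulator,
 *   acc[i] += a[2i] * b[2i] + a[2i+1] * b[2i+1]
 * where a and b hold pairs of bfloat16 values.  This is why the copy
 * kernels in this commit interleave two consecutive K values per lane. */
static inline __m512 bf16_dot_step(__m512 acc, __m512i a_pairs, __m512i b_pairs)
{
    return _mm512_dpbf16_ps(acc, (__m512bh) a_pairs, (__m512bh) b_pairs);
}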
@@ -524,6 +524,9 @@ void blas_set_parameter(void){
   xgemm_p = ((xgemm_p + XGEMM_UNROLL_M - 1)/XGEMM_UNROLL_M) * XGEMM_UNROLL_M;
 #endif
 
+#ifdef BUILD_BFLOAT16
+  sbgemm_r = (((BUFFER_SIZE - ((SBGEMM_P * SBGEMM_Q * 4 + GEMM_OFFSET_A + GEMM_ALIGN) & ~GEMM_ALIGN)) / (SBGEMM_Q * 4)) - 15) & ~15;
+#endif
   sgemm_r = (((BUFFER_SIZE - ((SGEMM_P * SGEMM_Q * 4 + GEMM_OFFSET_A + GEMM_ALIGN) & ~GEMM_ALIGN)) / (SGEMM_Q * 4)) - 15) & ~15;
   dgemm_r = (((BUFFER_SIZE - ((DGEMM_P * DGEMM_Q * 8 + GEMM_OFFSET_A + GEMM_ALIGN) & ~GEMM_ALIGN)) / (DGEMM_Q * 8)) - 15) & ~15;
   cgemm_r = (((BUFFER_SIZE - ((CGEMM_P * CGEMM_Q * 8 + GEMM_OFFSET_A + GEMM_ALIGN) & ~GEMM_ALIGN)) / (CGEMM_Q * 8)) - 15) & ~15;
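The new sbgemm_r line mirrors the existing sgemm_r logic: reserve space for the packed A panel (aligned up by GEMM_ALIGN), divide the remaining buffer by the footprint of one packed column of B, and round down to a multiple of 16. A stand-alone illustration with made-up parameter values (the real BUFFER_SIZE, SBGEMM_P/Q, GEMM_OFFSET_A and GEMM_ALIGN come from the build configuration, not from this sketch):

#include <stdio.h>

int main(void) {
    /* assumed values, for illustration only */
    long BUFFER_SIZE   = 32L << 20;       /* 32 MiB per-thread buffer */
    long SBGEMM_P = 256, SBGEMM_Q = 1024;
    long GEMM_OFFSET_A = 0, GEMM_ALIGN = 0x3fffL;

    long a_panel  = (SBGEMM_P * SBGEMM_Q * 4 + GEMM_OFFSET_A + GEMM_ALIGN) & ~GEMM_ALIGN;
    long sbgemm_r = (((BUFFER_SIZE - a_panel) / (SBGEMM_Q * 4)) - 15) & ~15L;

    printf("sbgemm_r = %ld columns of B per pass\n", sbgemm_r);   /* 7920 with these numbers */
    return 0;
}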
@@ -629,7 +632,9 @@ void blas_set_parameter(void){
   xgemm_p = 16 * (size + 1);
 #endif
 
+#ifdef BUILD_BFLOAT16
+  sbgemm_r = (((BUFFER_SIZE - ((SBGEMM_P * SBGEMM_Q * 4 + GEMM_OFFSET_A + GEMM_ALIGN) & ~GEMM_ALIGN)) / (SBGEMM_Q * 4)) - 15) & ~15;
+#endif
   sgemm_r = (((BUFFER_SIZE - ((SGEMM_P * SGEMM_Q * 4 + GEMM_OFFSET_A + GEMM_ALIGN) & ~GEMM_ALIGN)) / (SGEMM_Q * 4)) - 15) & ~15;
   dgemm_r = (((BUFFER_SIZE - ((DGEMM_P * DGEMM_Q * 8 + GEMM_OFFSET_A + GEMM_ALIGN) & ~GEMM_ALIGN)) / (DGEMM_Q * 8)) - 15) & ~15;
   cgemm_r = (((BUFFER_SIZE - ((CGEMM_P * CGEMM_Q * 8 + GEMM_OFFSET_A + GEMM_ALIGN) & ~GEMM_ALIGN)) / (CGEMM_Q * 8)) - 15) & ~15;
@@ -326,7 +326,7 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS
   PRINT_DEBUG_CNAME;
 
-#if !defined(COMPLEX) && !defined(DOUBLE) && defined(USE_SGEMM_KERNEL_DIRECT)
+#if !defined(COMPLEX) && !defined(DOUBLE) && !defined(BFLOAT16) && defined(USE_SGEMM_KERNEL_DIRECT)
 #ifdef DYNAMIC_ARCH
   if (support_avx512() )
 #endif
@@ -9,3 +9,14 @@ SBGEMM_SMALL_K_TN = sbgemm_small_kernel_tn_cooperlake.c
 SBGEMM_SMALL_K_B0_TN = sbgemm_small_kernel_tn_cooperlake.c
 SBGEMM_SMALL_K_TT = sbgemm_small_kernel_tt_cooperlake.c
 SBGEMM_SMALL_K_B0_TT = sbgemm_small_kernel_tt_cooperlake.c
+
+SBGEMM_BETA = sgemm_beta_skylakex.c
+SBGEMMKERNEL = sbgemm_kernel_16x4_cooperlake.c
+SBGEMMINCOPY = sbgemm_ncopy_16_cooperlake.c
+SBGEMMITCOPY = sbgemm_tcopy_16_cooperlake.c
+SBGEMMONCOPY = sbgemm_ncopy_4_cooperlake.c
+SBGEMMOTCOPY = sbgemm_tcopy_4_cooperlake.c
+SBGEMMINCOPYOBJ = sbgemm_incopy$(TSUFFIX).$(SUFFIX)
+SBGEMMITCOPYOBJ = sbgemm_itcopy$(TSUFFIX).$(SUFFIX)
+SBGEMMONCOPYOBJ = sbgemm_oncopy$(TSUFFIX).$(SUFFIX)
+SBGEMMOTCOPYOBJ = sbgemm_otcopy$(TSUFFIX).$(SUFFIX)
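With the copy and compute kernels wired into the Cooperlake kernel list above, the new path can be exercised end-to-end through the SBGEMM interface. A hedged usage sketch (assumes OpenBLAS built with BUILD_BFLOAT16=1 and the cblas_sbgemm prototype / bfloat16 typedef from OpenBLAS's cblas.h; link with -lopenblas):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <cblas.h>

/* naive float32 -> bfloat16 conversion by truncation, good enough for a demo */
static bfloat16 to_bf16(float x) {
    uint32_t bits;
    memcpy(&bits, &x, sizeof(bits));
    return (bfloat16)(bits >> 16);
}

int main(void) {
    enum { M = 4, N = 4, K = 8 };
    bfloat16 A[M * K], B[K * N];
    float C[M * N] = {0};

    for (int i = 0; i < M * K; i++) A[i] = to_bf16(1.0f);
    for (int i = 0; i < K * N; i++) B[i] = to_bf16(2.0f);

    /* C = 1.0 * A * B + 0.0 * C, row-major */
    cblas_sbgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                 M, N, K, 1.0f, A, K, B, N, 0.0f, C, N);

    printf("C[0] = %.1f (expected %d)\n", C[0], 2 * K);
    return 0;
}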
@@ -0,0 +1,499 @@
|
|||
/***************************************************************************
|
||||
Copyright (c) 2021, The OpenBLAS Project
|
||||
All rights reserved.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
3. Neither the name of the OpenBLAS project nor the names of
|
||||
its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written permission.
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*****************************************************************************/
|
||||
|
||||
#include <immintrin.h>
|
||||
#include "common.h"
|
||||
|
||||
#define VMOVLDUP(addr, zmm) asm("vmovsldup (%1), %0": "=v"(zmm): "r"(addr))
|
||||
#define VMOVHDUP(addr, zmm) asm("vmovshdup (%1), %0": "=v"(zmm): "r"(addr))
|
||||
#define BROADCAST64(base, step, n, offset, zmm) \
|
||||
if (n == 0) asm("vbroadcastsd %c2(%1), %0": "=v"(zmm): "r"(base), "n"(offset*2)); \
|
||||
else asm("vbroadcastsd %c4(%1, %2, %c3), %0": "=v"(zmm): "r"(base), "r"(step), "n"(n*2), "n"(offset*2))
|
||||
|
||||
#define DECLARE_A_PAIR(A) \
|
||||
__m512i A_lo_##A; __m512i A_hi_##A;
|
||||
|
||||
#define LOAD_A_PAIR(A) \
|
||||
VMOVLDUP(ptr_a##A, A_lo_##A); \
|
||||
VMOVHDUP(ptr_a##A, A_hi_##A);
|
||||
|
||||
#define MASK_LOAD_A_PAIR(A) { \
|
||||
__m512 tmp = _mm512_maskz_loadu_ps(mmask, ptr_a##A); \
|
||||
A_lo_##A = (__m512i) _mm512_moveldup_ps(tmp); \
|
||||
A_hi_##A = (__m512i) _mm512_movehdup_ps(tmp); \
|
||||
}
|
||||
|
||||
#define LOAD_A_PAIR_TAIL(A) { \
|
||||
__m256i ymm = _mm256_loadu_si256((void *)ptr_a##A); \
|
||||
__m512 zmm = (__m512) _mm512_cvtepu16_epi32(ymm); \
|
||||
A_lo_##A = (__m512i) _mm512_moveldup_ps(zmm); \
|
||||
A_hi_##A = (__m512i) _mm512_movehdup_ps(zmm); \
|
||||
}
|
||||
|
||||
#define MASK_LOAD_A_PAIR_TAIL(A) { \
|
||||
__m256i ymm = _mm256_maskz_loadu_epi16(mmask, ptr_a##A); \
|
||||
__m512 zmm = (__m512) _mm512_cvtepu16_epi32(ymm); \
|
||||
A_lo_##A = (__m512i) _mm512_moveldup_ps(zmm); \
|
||||
A_hi_##A = (__m512i) _mm512_movehdup_ps(zmm); \
|
||||
}
|
||||
|
||||
#define DECLARE_B_PAIR() \
|
||||
__m512i B_lo; __m512i B_hi;
|
||||
|
||||
#define PREFETCH_B_STEP 32
|
||||
#define PREFETCH_B(Bx, By) \
|
||||
if (By == 0) asm("prefetcht0 %c1(%0)": : "r"(ptr_b##Bx), "n"(PREFETCH_B_STEP * 2)); \
|
||||
else asm("prefetcht0 %c3(%0, %1, %c2)": : "r"(ptr_b##Bx), "r"(n_blksize), "n"(By*2), "n"(PREFETCH_B_STEP * 2))
|
||||
|
||||
#define BROADCAST_B_PAIR(Bx, By) \
|
||||
BROADCAST64(ptr_b##Bx, n_blksize, By, 0, B_lo); \
|
||||
BROADCAST64(ptr_b##Bx, n_blksize, By, 4, B_hi);
|
||||
|
||||
#define MASK_BROADCAST_B_PAIR(Bx, x) {\
|
||||
__m128 xmm = _mm_maskz_loadu_ps(nmask, ptr_b##Bx); \
|
||||
B_lo = (__m512i) _mm512_broadcastsd_pd((__m128d) xmm); \
|
||||
B_hi = (__m512i) _mm512_broadcastsd_pd(_mm_permute_pd((__m128d) xmm, 0x1)); \
|
||||
}
|
||||
|
||||
#define BROADCAST_B_PAIR_TAIL(Bx, By) {\
|
||||
__m128i xmm = (__m128i) _mm_load_sd((double *)(ptr_b##Bx + n_blksize * By)); \
|
||||
xmm = _mm_cvtepu16_epi32(xmm); \
|
||||
B_lo = _mm512_broadcast_i32x2(xmm); \
|
||||
B_hi = _mm512_broadcast_i32x2((__m128i) _mm_permute_pd((__m128d) xmm, 0x1)); \
|
||||
}
|
||||
|
||||
#define MASK_BROADCAST_B_PAIR_TAIL(Bx, By) {\
|
||||
__m128i xmm = _mm_maskz_loadu_epi16(nmask, ptr_b##Bx + n_blksize * By); \
|
||||
xmm = _mm_cvtepu16_epi32(xmm); \
|
||||
B_lo = _mm512_broadcast_i32x2(xmm); \
|
||||
B_hi = _mm512_broadcast_i32x2((__m128i) _mm_permute_pd((__m128d) xmm, 0x1)); \
|
||||
}
|
||||
|
||||
#define DECLARE_RESULT_4X(A, Bx, By) \
|
||||
__m512 result_00_##A##Bx##By = _mm512_setzero_ps(); \
|
||||
__m512 result_01_##A##Bx##By = _mm512_setzero_ps(); \
|
||||
__m512 result_10_##A##Bx##By = _mm512_setzero_ps(); \
|
||||
__m512 result_11_##A##Bx##By = _mm512_setzero_ps();
|
||||
|
||||
#define FMA(a, b, r) r = _mm512_dpbf16_ps(r, (__m512bh)a, (__m512bh)b)
|
||||
|
||||
#define MATMUL_4X(A, Bx, By) \
|
||||
FMA(A_lo_##A, B_lo, result_00_##A##Bx##By); \
|
||||
FMA(A_hi_##A, B_lo, result_01_##A##Bx##By); \
|
||||
FMA(A_lo_##A, B_hi, result_10_##A##Bx##By); \
|
||||
FMA(A_hi_##A, B_hi, result_11_##A##Bx##By);
|
||||
|
||||
#define _STORE_C_2nx16(addr, val0, val1) \
|
||||
asm("vfmadd213ps (%1), %2, %0": "+v"(val0) : "r"(addr), "v"(alpha_512)); \
|
||||
asm("vfmadd213ps (%1, %3, 4), %2, %0": "+v"(val1) : "r"(addr), "v"(alpha_512), "r"(ldc)); \
|
||||
asm("vmovups %0, (%1)": : "v"(val0), "r"(addr)); \
|
||||
asm("vmovups %0, (%1, %2, 4)": : "v"(val1), "r"(addr), "r"(ldc))
|
||||
|
||||
#define _MASK_STORE_C_2nx16(addr, val0, val1) \
|
||||
asm("vfmadd213ps (%1), %2, %0 %{%3%} ": "+v"(val0) : "r"(addr), "v"(alpha_512), "k"(mmask)); \
|
||||
asm("vfmadd213ps (%1, %3, 4), %2, %0 %{%4%}": "+v"(val1) : "r"(addr), "v"(alpha_512), "r"(ldc), "k"(mmask)); \
|
||||
asm("vmovups %0, (%1) %{%2%}": : "v"(val0), "r"(addr), "k"(mmask)); \
|
||||
asm("vmovups %0, (%1, %2, 4) %{%3%}": : "v"(val1), "r"(addr), "r"(ldc), "k"(mmask))
|
||||
|
||||
#define _REORDER_C_2X(result_0, result_1) { \
|
||||
__m512 tmp0, tmp1; \
|
||||
tmp0 = _mm512_unpacklo_ps(result_0, result_1); \
|
||||
tmp1 = _mm512_unpackhi_ps(result_0, result_1); \
|
||||
result_0 = (__m512) _mm512_unpacklo_pd((__m512d) tmp0, (__m512d) tmp1); \
|
||||
result_1 = (__m512) _mm512_unpackhi_pd((__m512d) tmp0, (__m512d) tmp1); \
|
||||
}
|
||||
|
||||
#define _STORE_2X(ptr_c, result_0, result_1) {\
|
||||
_REORDER_C_2X(result_0, result_1) \
|
||||
_STORE_C_2nx16(ptr_c, result_0, result_1); \
|
||||
ptr_c += ldc * 2; \
|
||||
}
|
||||
|
||||
#define _MASK_STORE_2X(ptr_c, result_0, result_1) {\
|
||||
_REORDER_C_2X(result_0, result_1) \
|
||||
_MASK_STORE_C_2nx16(ptr_c, result_0, result_1); \
|
||||
ptr_c += ldc * 2; \
|
||||
}
|
||||
|
||||
#define STORE_4X(A, Bx, By) { \
|
||||
_STORE_2X(ptr_c##A, result_00_##A##Bx##By, result_01_##A##Bx##By); \
|
||||
_STORE_2X(ptr_c##A, result_10_##A##Bx##By, result_11_##A##Bx##By); \
|
||||
}
|
||||
|
||||
#define MASK_STORE_4X(A, Bx, By) { \
|
||||
_MASK_STORE_2X(ptr_c##A, result_00_##A##Bx##By, result_01_##A##Bx##By); \
|
||||
_MASK_STORE_2X(ptr_c##A, result_10_##A##Bx##By, result_11_##A##Bx##By); \
|
||||
}
|
||||
|
||||
#define _STORE_C_16(addr, val0) \
|
||||
asm("vfmadd213ps (%1), %2, %0": "+v"(val0) : "r"(addr), "v"(alpha_512)); \
|
||||
asm("vmovups %0, (%1)": : "v"(val0), "r"(addr));
|
||||
|
||||
#define _MASK_STORE_C_16(addr, val0) \
|
||||
asm("vfmadd213ps (%1), %2, %0 %{%3%} ": "+v"(val0) : "r"(addr), "v"(alpha_512), "k"(mmask)); \
|
||||
asm("vmovups %0, (%1) %{%2%}": : "v"(val0), "r"(addr), "k"(mmask));
|
||||
|
||||
#define N_STORE_4X(A, Bx, By) { \
|
||||
_REORDER_C_2X(result_00_##A##Bx##By, result_01_##A##Bx##By); \
|
||||
_REORDER_C_2X(result_10_##A##Bx##By, result_11_##A##Bx##By); \
|
||||
switch(n_count) { \
|
||||
case 3: _STORE_C_16(ptr_c + ldc * 2, result_10_##A##Bx##By); \
|
||||
case 2: _STORE_C_16(ptr_c + ldc * 1, result_01_##A##Bx##By); \
|
||||
case 1: _STORE_C_16(ptr_c + ldc * 0, result_00_##A##Bx##By); \
|
||||
} \
|
||||
ptr_c##A += ldc * n_count; \
|
||||
}
|
||||
|
||||
#define N_MASK_STORE_4X(A, Bx, By) { \
|
||||
_REORDER_C_2X(result_00_##A##Bx##By, result_01_##A##Bx##By); \
|
||||
_REORDER_C_2X(result_10_##A##Bx##By, result_11_##A##Bx##By); \
|
||||
switch(n_count) { \
|
||||
case 3: _MASK_STORE_C_16(ptr_c + ldc * 2, result_10_##A##Bx##By); \
|
||||
case 2: _MASK_STORE_C_16(ptr_c + ldc * 1, result_01_##A##Bx##By); \
|
||||
case 1: _MASK_STORE_C_16(ptr_c + ldc * 0, result_00_##A##Bx##By); \
|
||||
} \
|
||||
ptr_c##A += ldc * n_count; \
|
||||
}
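/*
 * Summary of the kernel below: N is processed in panels of 24, 12 and 4
 * columns plus a masked tail; M in blocks of 16 rows (32 in the 12-column
 * panel) plus a masked tail; K in steps of two packed bf16 values (unrolled
 * by four in the widest panel) with a single-value tail.  The copy kernels
 * interleave two consecutive K values per 32-bit lane, which is what
 * _mm512_dpbf16_ps consumes, and the store macros fold alpha into C with
 * vfmadd213ps.
 */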
|
||||
|
||||
|
||||
int CNAME (BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT * B, FLOAT * C, BLASLONG ldc)
|
||||
{
|
||||
IFLOAT *ptr_a = A, *ptr_b = B;
|
||||
IFLOAT *ptr_b0, *ptr_b1;
|
||||
IFLOAT *ptr_a0, *ptr_a1;
|
||||
FLOAT *ptr_c = C;
|
||||
FLOAT *ptr_c0, *ptr_c1;
|
||||
BLASLONG n_count = n;
|
||||
BLASLONG m_count, k_count;
|
||||
BLASLONG n_blksize = 4 * k;
|
||||
BLASLONG cn_offset = 0;
|
||||
__m512 alpha_512 = _mm512_broadcastss_ps(_mm_load_ss(&alpha));
|
||||
|
||||
for (; n_count > 23; n_count -= 24) {
|
||||
IFLOAT *ptr_b00 = ptr_b;
|
||||
IFLOAT *ptr_b10 = ptr_b + n_blksize * 3;
|
||||
ptr_a0 = ptr_a;
|
||||
ptr_c = C + cn_offset * ldc;
|
||||
m_count = m;
|
||||
for (; m_count > 15; m_count -= 16) {
|
||||
ptr_b0 = ptr_b00;
|
||||
ptr_b1 = ptr_b10;
|
||||
DECLARE_A_PAIR(0);
|
||||
DECLARE_B_PAIR();
|
||||
DECLARE_RESULT_4X(0, 0, 0); DECLARE_RESULT_4X(0, 0, 1); DECLARE_RESULT_4X(0, 0, 2);
|
||||
DECLARE_RESULT_4X(0, 1, 0); DECLARE_RESULT_4X(0, 1, 1); DECLARE_RESULT_4X(0, 1, 2);
|
||||
k_count = k;
|
||||
for (; k_count > 3; k_count -=4) {
|
||||
LOAD_A_PAIR(0);
|
||||
_mm_prefetch(ptr_a0 + 128, _MM_HINT_T0);
|
||||
ptr_a0 += 16 * 2;
|
||||
BROADCAST_B_PAIR(0, 0); PREFETCH_B(0, 0); MATMUL_4X(0, 0, 0);
|
||||
BROADCAST_B_PAIR(0, 1); PREFETCH_B(0, 1); MATMUL_4X(0, 0, 1);
|
||||
BROADCAST_B_PAIR(0, 2); PREFETCH_B(0, 2); MATMUL_4X(0, 0, 2);
|
||||
ptr_b0 += 4 * 2;
|
||||
BROADCAST_B_PAIR(1, 0); PREFETCH_B(1, 0); MATMUL_4X(0, 1, 0);
|
||||
BROADCAST_B_PAIR(1, 1); PREFETCH_B(1, 1); MATMUL_4X(0, 1, 1);
|
||||
BROADCAST_B_PAIR(1, 2); PREFETCH_B(1, 2); MATMUL_4X(0, 1, 2);
|
||||
ptr_b1 += 4 * 2;
|
||||
|
||||
LOAD_A_PAIR(0);
|
||||
_mm_prefetch(ptr_a0 + 128, _MM_HINT_T0);
|
||||
ptr_a0 += 16 * 2;
|
||||
BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0);
|
||||
BROADCAST_B_PAIR(0, 1); MATMUL_4X(0, 0, 1);
|
||||
BROADCAST_B_PAIR(0, 2); MATMUL_4X(0, 0, 2);
|
||||
ptr_b0 += 4 * 2;
|
||||
BROADCAST_B_PAIR(1, 0); MATMUL_4X(0, 1, 0);
|
||||
BROADCAST_B_PAIR(1, 1); MATMUL_4X(0, 1, 1);
|
||||
BROADCAST_B_PAIR(1, 2); MATMUL_4X(0, 1, 2);
|
||||
ptr_b1 += 4 * 2;
|
||||
}
|
||||
for (; k_count > 1; k_count -=2) {
|
||||
LOAD_A_PAIR(0);
|
||||
ptr_a0 += 16 * 2;
|
||||
BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0);
|
||||
BROADCAST_B_PAIR(0, 1); MATMUL_4X(0, 0, 1);
|
||||
BROADCAST_B_PAIR(0, 2); MATMUL_4X(0, 0, 2);
|
||||
ptr_b0 += 4 * 2;
|
||||
BROADCAST_B_PAIR(1, 0); MATMUL_4X(0, 1, 0);
|
||||
BROADCAST_B_PAIR(1, 1); MATMUL_4X(0, 1, 1);
|
||||
BROADCAST_B_PAIR(1, 2); MATMUL_4X(0, 1, 2);
|
||||
ptr_b1 += 4 * 2;
|
||||
}
|
||||
if (k_count > 0) {
|
||||
LOAD_A_PAIR_TAIL(0);
|
||||
ptr_a0 += 16;
|
||||
BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0);
|
||||
BROADCAST_B_PAIR_TAIL(0, 1); MATMUL_4X(0, 0, 1);
|
||||
BROADCAST_B_PAIR_TAIL(0, 2); MATMUL_4X(0, 0, 2);
|
||||
ptr_b0 += 4;
|
||||
BROADCAST_B_PAIR_TAIL(1, 0); MATMUL_4X(0, 1, 0);
|
||||
BROADCAST_B_PAIR_TAIL(1, 1); MATMUL_4X(0, 1, 1);
|
||||
BROADCAST_B_PAIR_TAIL(1, 2); MATMUL_4X(0, 1, 2);
|
||||
ptr_b1 += 4;
|
||||
}
|
||||
ptr_c0 = ptr_c;
|
||||
STORE_4X(0, 0, 0); STORE_4X(0, 0, 1); STORE_4X(0, 0, 2);
|
||||
STORE_4X(0, 1, 0); STORE_4X(0, 1, 1); STORE_4X(0, 1, 2);
|
||||
ptr_c += 16;
|
||||
}
|
||||
if (m_count > 0) {
|
||||
__mmask16 mmask = (1UL << m_count) - 1;
|
||||
ptr_b0 = ptr_b00;
|
||||
ptr_b1 = ptr_b10;
|
||||
DECLARE_A_PAIR(0);
|
||||
DECLARE_B_PAIR();
|
||||
DECLARE_RESULT_4X(0, 0, 0); DECLARE_RESULT_4X(0, 0, 1); DECLARE_RESULT_4X(0, 0, 2);
|
||||
DECLARE_RESULT_4X(0, 1, 0); DECLARE_RESULT_4X(0, 1, 1); DECLARE_RESULT_4X(0, 1, 2);
|
||||
for (k_count = k; k_count > 1; k_count -=2) {
|
||||
MASK_LOAD_A_PAIR(0);
|
||||
ptr_a0 += m_count * 2;
|
||||
BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0);
|
||||
BROADCAST_B_PAIR(0, 1); MATMUL_4X(0, 0, 1);
|
||||
BROADCAST_B_PAIR(0, 2); MATMUL_4X(0, 0, 2);
|
||||
ptr_b0 += 4 * 2;
|
||||
BROADCAST_B_PAIR(1, 0); MATMUL_4X(0, 1, 0);
|
||||
BROADCAST_B_PAIR(1, 1); MATMUL_4X(0, 1, 1);
|
||||
BROADCAST_B_PAIR(1, 2); MATMUL_4X(0, 1, 2);
|
||||
ptr_b1 += 4 * 2;
|
||||
}
|
||||
if (k_count > 0) {
|
||||
MASK_LOAD_A_PAIR_TAIL(0);
|
||||
ptr_a0 += m_count;
|
||||
BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0);
|
||||
BROADCAST_B_PAIR_TAIL(0, 1); MATMUL_4X(0, 0, 1);
|
||||
BROADCAST_B_PAIR_TAIL(0, 2); MATMUL_4X(0, 0, 2);
|
||||
ptr_b0 += 4;
|
||||
BROADCAST_B_PAIR_TAIL(1, 0); MATMUL_4X(0, 1, 0);
|
||||
BROADCAST_B_PAIR_TAIL(1, 1); MATMUL_4X(0, 1, 1);
|
||||
BROADCAST_B_PAIR_TAIL(1, 2); MATMUL_4X(0, 1, 2);
|
||||
ptr_b1 += 4;
|
||||
}
|
||||
ptr_c0 = ptr_c;
|
||||
MASK_STORE_4X(0, 0, 0); MASK_STORE_4X(0, 0, 1); MASK_STORE_4X(0, 0, 2);
|
||||
MASK_STORE_4X(0, 1, 0); MASK_STORE_4X(0, 1, 1); MASK_STORE_4X(0, 1, 2);
|
||||
ptr_c += m_count;
|
||||
}
|
||||
ptr_b += 24 * k;
|
||||
cn_offset += 24;
|
||||
}
|
||||
for (; n_count > 11; n_count -= 12) {
|
||||
IFLOAT *ptr_b00 = ptr_b;
|
||||
ptr_a0 = ptr_a;
|
||||
ptr_a1 = ptr_a + 16 * k;
|
||||
ptr_c = C + cn_offset * ldc;
|
||||
m_count = m;
|
||||
for (; m_count > 31; m_count -= 32) {
|
||||
ptr_b0 = ptr_b00;
|
||||
DECLARE_A_PAIR(0); DECLARE_A_PAIR(1);
|
||||
DECLARE_B_PAIR();
|
||||
DECLARE_RESULT_4X(0, 0, 0); DECLARE_RESULT_4X(0, 0, 1); DECLARE_RESULT_4X(0, 0, 2);
|
||||
DECLARE_RESULT_4X(1, 0, 0); DECLARE_RESULT_4X(1, 0, 1); DECLARE_RESULT_4X(1, 0, 2);
|
||||
for (k_count = k; k_count > 1; k_count -=2) {
|
||||
LOAD_A_PAIR(0); LOAD_A_PAIR(1);
|
||||
ptr_a0 += 16 * 2;
|
||||
ptr_a1 += 16 * 2;
|
||||
BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0); MATMUL_4X(1, 0, 0);
|
||||
BROADCAST_B_PAIR(0, 1); MATMUL_4X(0, 0, 1); MATMUL_4X(1, 0, 1);
|
||||
BROADCAST_B_PAIR(0, 2); MATMUL_4X(0, 0, 2); MATMUL_4X(1, 0, 2);
|
||||
ptr_b0 += 4 * 2;
|
||||
}
|
||||
if (k_count > 0) {
|
||||
LOAD_A_PAIR_TAIL(0); LOAD_A_PAIR_TAIL(1);
|
||||
ptr_a0 += 16;
|
||||
ptr_a1 += 16;
|
||||
BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0); MATMUL_4X(1, 0, 0);
|
||||
BROADCAST_B_PAIR_TAIL(0, 1); MATMUL_4X(0, 0, 1); MATMUL_4X(1, 0, 1);
|
||||
BROADCAST_B_PAIR_TAIL(0, 2); MATMUL_4X(0, 0, 2); MATMUL_4X(1, 0, 2);
|
||||
ptr_b0 += 4;
|
||||
}
|
||||
ptr_c0 = ptr_c;
|
||||
ptr_c1 = ptr_c + 16;
|
||||
STORE_4X(0, 0, 0); STORE_4X(1, 0, 0);
|
||||
STORE_4X(0, 0, 1); STORE_4X(1, 0, 1);
|
||||
STORE_4X(0, 0, 2); STORE_4X(1, 0, 2);
|
||||
ptr_c += 16 * 2;
|
||||
ptr_a0 = ptr_a1;
|
||||
ptr_a1 = ptr_a0 + 16 * k;
|
||||
}
|
||||
for (; m_count > 15; m_count -= 16) {
|
||||
ptr_b0 = ptr_b00;
|
||||
DECLARE_A_PAIR(0);
|
||||
DECLARE_B_PAIR();
|
||||
DECLARE_RESULT_4X(0, 0, 0); DECLARE_RESULT_4X(0, 0, 1); DECLARE_RESULT_4X(0, 0, 2);
|
||||
for (k_count = k; k_count > 1; k_count -=2) {
|
||||
LOAD_A_PAIR(0);
|
||||
ptr_a0 += 16 * 2;
|
||||
BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0);
|
||||
BROADCAST_B_PAIR(0, 1); MATMUL_4X(0, 0, 1);
|
||||
BROADCAST_B_PAIR(0, 2); MATMUL_4X(0, 0, 2);
|
||||
ptr_b0 += 4 * 2;
|
||||
}
|
||||
if (k_count > 0) {
|
||||
LOAD_A_PAIR_TAIL(0);
|
||||
ptr_a0 += 16;
|
||||
BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0);
|
||||
BROADCAST_B_PAIR_TAIL(0, 1); MATMUL_4X(0, 0, 1);
|
||||
BROADCAST_B_PAIR_TAIL(0, 2); MATMUL_4X(0, 0, 2);
|
||||
ptr_b0 += 4;
|
||||
}
|
||||
ptr_c0 = ptr_c;
|
||||
STORE_4X(0, 0, 0); STORE_4X(0, 0, 1); STORE_4X(0, 0, 2);
|
||||
ptr_c += 16;
|
||||
}
|
||||
if (m_count > 0) {
|
||||
__mmask16 mmask = (1UL << m_count) - 1;
|
||||
ptr_b0 = ptr_b00;
|
||||
DECLARE_A_PAIR(0);
|
||||
DECLARE_B_PAIR();
|
||||
DECLARE_RESULT_4X(0, 0, 0); DECLARE_RESULT_4X(0, 0, 1); DECLARE_RESULT_4X(0, 0, 2);
|
||||
for (k_count = k; k_count > 1; k_count -=2) {
|
||||
MASK_LOAD_A_PAIR(0);
|
||||
ptr_a0 += m_count * 2;
|
||||
BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0);
|
||||
BROADCAST_B_PAIR(0, 1); MATMUL_4X(0, 0, 1);
|
||||
BROADCAST_B_PAIR(0, 2); MATMUL_4X(0, 0, 2);
|
||||
ptr_b0 += 4 * 2;
|
||||
}
|
||||
if (k_count > 0) {
|
||||
MASK_LOAD_A_PAIR_TAIL(0);
|
||||
ptr_a0 += m_count;
|
||||
BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0);
|
||||
BROADCAST_B_PAIR_TAIL(0, 1); MATMUL_4X(0, 0, 1);
|
||||
BROADCAST_B_PAIR_TAIL(0, 2); MATMUL_4X(0, 0, 2);
|
||||
ptr_b0 += 4;
|
||||
}
|
||||
ptr_c0 = ptr_c;
|
||||
MASK_STORE_4X(0, 0, 0); MASK_STORE_4X(0, 0, 1); MASK_STORE_4X(0, 0, 2);
|
||||
ptr_c += m_count;
|
||||
}
|
||||
ptr_b += 12 * k;
|
||||
cn_offset += 12;
|
||||
}
|
||||
for (; n_count > 3; n_count -= 4) {
|
||||
IFLOAT *ptr_b00 = ptr_b;
|
||||
ptr_a0 = ptr_a;
|
||||
ptr_c = C + cn_offset * ldc;
|
||||
m_count = m;
|
||||
for (; m_count > 15; m_count -= 16) {
|
||||
ptr_b0 = ptr_b00;
|
||||
DECLARE_A_PAIR(0);
|
||||
DECLARE_B_PAIR();
|
||||
DECLARE_RESULT_4X(0, 0, 0);
|
||||
for (k_count = k; k_count > 1; k_count -=2) {
|
||||
LOAD_A_PAIR(0);
|
||||
BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0);
|
||||
ptr_b0 += 4 * 2;
|
||||
ptr_a0 += 16 * 2;
|
||||
}
|
||||
if (k_count > 0) {
|
||||
LOAD_A_PAIR_TAIL(0);
|
||||
BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0);
|
||||
ptr_b0 += 4;
|
||||
ptr_a0 += 16;
|
||||
}
|
||||
ptr_c0 = ptr_c;
|
||||
STORE_4X(0, 0, 0);
|
||||
ptr_c += 16;
|
||||
}
|
||||
if (m_count > 0) {
|
||||
__mmask16 mmask = (1UL << m_count) - 1;
|
||||
ptr_b0 = ptr_b00;
|
||||
DECLARE_A_PAIR(0);
|
||||
DECLARE_B_PAIR();
|
||||
DECLARE_RESULT_4X(0, 0, 0);
|
||||
for (k_count = k; k_count > 1; k_count -=2) {
|
||||
MASK_LOAD_A_PAIR(0);
|
||||
BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0);
|
||||
ptr_b0 += 4 * 2;
|
||||
ptr_a0 += m_count * 2;
|
||||
}
|
||||
if (k_count > 0) {
|
||||
MASK_LOAD_A_PAIR_TAIL(0);
|
||||
BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0);
|
||||
ptr_b0 += 4;
|
||||
ptr_a0 += m_count;
|
||||
}
|
||||
ptr_c0 = ptr_c;
|
||||
MASK_STORE_4X(0, 0, 0);
|
||||
ptr_c += m_count;
|
||||
}
|
||||
ptr_b += 4 * k;
|
||||
cn_offset += 4;
|
||||
}
|
||||
if (n_count > 0) {
|
||||
__mmask8 nmask = (1UL << n_count) - 1;
|
||||
IFLOAT *ptr_b00 = ptr_b;
|
||||
ptr_a0 = ptr_a;
|
||||
ptr_c = C + cn_offset * ldc;
|
||||
m_count = m;
|
||||
for (; m_count > 15; m_count -= 16) {
|
||||
ptr_b0 = ptr_b00;
|
||||
DECLARE_A_PAIR(0);
|
||||
DECLARE_B_PAIR();
|
||||
DECLARE_RESULT_4X(0, 0, 0);
|
||||
for (k_count = k; k_count > 1; k_count -=2) {
|
||||
LOAD_A_PAIR(0);
|
||||
MASK_BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0);
|
||||
ptr_b0 += n_count * 2;
|
||||
ptr_a0 += 16 * 2;
|
||||
}
|
||||
if (k_count > 0) {
|
||||
LOAD_A_PAIR_TAIL(0);
|
||||
MASK_BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0);
|
||||
ptr_b0 += n_count;
|
||||
ptr_a0 += 16;
|
||||
}
|
||||
ptr_c0 = ptr_c;
|
||||
N_STORE_4X(0, 0, 0);
|
||||
ptr_c += 16;
|
||||
}
|
||||
if (m_count > 0) {
|
||||
__mmask16 mmask = (1UL << m_count) - 1;
|
||||
ptr_b0 = ptr_b00;
|
||||
DECLARE_A_PAIR(0);
|
||||
DECLARE_B_PAIR();
|
||||
DECLARE_RESULT_4X(0, 0, 0);
|
||||
for (k_count = k; k_count > 1; k_count -=2) {
|
||||
MASK_LOAD_A_PAIR(0);
|
||||
MASK_BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0);
|
||||
ptr_b0 += n_count * 2;
|
||||
ptr_a0 += m_count * 2;
|
||||
}
|
||||
if (k_count > 0) {
|
||||
MASK_LOAD_A_PAIR_TAIL(0);
|
||||
MASK_BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0);
|
||||
ptr_b0 += n_count;
|
||||
ptr_a0 += m_count;
|
||||
}
|
||||
ptr_c0 = ptr_c;
|
||||
N_MASK_STORE_4X(0, 0, 0);
|
||||
ptr_c += m_count;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
|
@@ -0,0 +1,353 @@
|
|||
/***************************************************************************
|
||||
Copyright (c) 2021, The OpenBLAS Project
|
||||
All rights reserved.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
3. Neither the name of the OpenBLAS project nor the names of
|
||||
its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written permission.
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*****************************************************************************/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <immintrin.h>
|
||||
#include "common.h"
|
||||
|
||||
#define _MM512_SHUFFLE_i32(result, in1, in2, imm8) \
|
||||
asm("vshufps %3, %2, %1, %0": "=v"(result): "v"(in1), "v"(in2), "N"(imm8))
|
||||
|
||||
#define REORDER_8x32(t0, t1, t2, t3, t4, t5, t6, t7) { \
|
||||
__m512i v; \
|
||||
t0 = _mm512_unpacklo_epi32(r0, r1); \
|
||||
t1 = _mm512_unpackhi_epi32(r0, r1); \
|
||||
t2 = _mm512_unpacklo_epi32(r2, r3); \
|
||||
t3 = _mm512_unpackhi_epi32(r2, r3); \
|
||||
t4 = _mm512_unpacklo_epi32(r4, r5); \
|
||||
t5 = _mm512_unpackhi_epi32(r4, r5); \
|
||||
t6 = _mm512_unpacklo_epi32(r6, r7); \
|
||||
t7 = _mm512_unpackhi_epi32(r6, r7); \
|
||||
_MM512_SHUFFLE_i32(v, t0, t2, 0x4E); \
|
||||
r0 = _mm512_mask_blend_epi32(kc, t0, v); \
|
||||
r1 = _mm512_mask_blend_epi32(k3, t2, v); \
|
||||
_MM512_SHUFFLE_i32(v, t1, t3, 0x4E); \
|
||||
r2 = _mm512_mask_blend_epi32(kc, t1, v); \
|
||||
r3 = _mm512_mask_blend_epi32(k3, t3, v); \
|
||||
_MM512_SHUFFLE_i32(v, t4, t6, 0x4E); \
|
||||
r4 = _mm512_mask_blend_epi32(kc, t4, v); \
|
||||
r5 = _mm512_mask_blend_epi32(k3, t6, v); \
|
||||
_MM512_SHUFFLE_i32(v, t5, t7, 0x4E); \
|
||||
r6 = _mm512_mask_blend_epi32(kc, t5, v); \
|
||||
r7 = _mm512_mask_blend_epi32(k3, t7, v); \
|
||||
t0 = _mm512_permutex2var_epi32(r0, idx_lo, r4); \
|
||||
t1 = _mm512_permutex2var_epi32(r1, idx_lo, r5); \
|
||||
t2 = _mm512_permutex2var_epi32(r2, idx_lo, r6); \
|
||||
t3 = _mm512_permutex2var_epi32(r3, idx_lo, r7); \
|
||||
t4 = _mm512_permutex2var_epi32(r0, idx_hi, r4); \
|
||||
t5 = _mm512_permutex2var_epi32(r1, idx_hi, r5); \
|
||||
t6 = _mm512_permutex2var_epi32(r2, idx_hi, r6); \
|
||||
t7 = _mm512_permutex2var_epi32(r3, idx_hi, r7); \
|
||||
}
|
||||
|
||||
#define STORE_512_LO(x) \
|
||||
v = _mm512_permutex2var_epi64(t0##x, idx_lo2, t1##x); \
|
||||
_mm512_storeu_si512(boffset0 + x*32, v);
|
||||
|
||||
#define STORE_512_HI(x) \
|
||||
v = _mm512_permutex2var_epi64(t0##x, idx_hi2, t1##x); \
|
||||
_mm512_storeu_si512(boffset0 + (x + 8)*32, v);
|
||||
|
||||
#define MASK_STORE_512_LO(x) \
|
||||
v = _mm512_permutex2var_epi64(t0##x, idx_lo2, t1##x); \
|
||||
_mm512_mask_storeu_epi32(boffset0 + 2*x*remain_n, nmask, v);
|
||||
|
||||
#define MASK_STORE_512_HI(x) \
|
||||
v = _mm512_permutex2var_epi64(t0##x, idx_hi2, t1##x); \
|
||||
_mm512_mask_storeu_epi32(boffset0 + 2*(x + 8)*remain_n, nmask, v);
|
||||
|
||||
#define STORE_512(x, y) {\
|
||||
__m512i v; \
|
||||
if (x == 0) { STORE_512_LO(y); } \
|
||||
else { STORE_512_HI(y); } \
|
||||
}
|
||||
|
||||
#define MASK_STORE_512(x, y) {\
|
||||
__m512i v; \
|
||||
if (x == 0) { MASK_STORE_512_LO(y); } \
|
||||
else { MASK_STORE_512_HI(y); } \
|
||||
}
|
||||
|
||||
#define SET_TAIL(y, x) {\
|
||||
if (y == 0) tail = _mm512_permutex2var_epi64(t0##x, idx_lo2, t1##x); \
|
||||
else tail = _mm512_permutex2var_epi64(t0##x, idx_hi2, t1##x); \
|
||||
}
|
||||
|
||||
#define GET_TAIL() \
|
||||
switch (n_store + 1) { \
|
||||
case 16: SET_TAIL(1, 7); break; \
|
||||
case 15: SET_TAIL(1, 6); break; \
|
||||
case 14: SET_TAIL(1, 5); break; \
|
||||
case 13: SET_TAIL(1, 4); break; \
|
||||
case 12: SET_TAIL(1, 3); break; \
|
||||
case 11: SET_TAIL(1, 2); break; \
|
||||
case 10: SET_TAIL(1, 1); break; \
|
||||
case 9: SET_TAIL(1, 0); break; \
|
||||
case 8: SET_TAIL(0, 7); break; \
|
||||
case 7: SET_TAIL(0, 6); break; \
|
||||
case 6: SET_TAIL(0, 5); break; \
|
||||
case 5: SET_TAIL(0, 4); break; \
|
||||
case 4: SET_TAIL(0, 3); break; \
|
||||
case 3: SET_TAIL(0, 2); break; \
|
||||
case 2: SET_TAIL(0, 1); break; \
|
||||
case 1: SET_TAIL(0, 0); break; \
|
||||
}
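/*
 * This copy routine packs 16 lda-strided rows per pass: two groups of eight
 * rows are loaded 32 bf16 elements at a time, transposed with REORDER_8x32,
 * and written out through the 64-bit permutes in STORE_512.  Masked loads
 * cover the m tail, and an odd trailing element is compressed from dwords
 * back to bf16 via GET_TAIL and _mm512_cvtepi32_epi16.
 */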
|
||||
|
||||
|
||||
int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){
|
||||
BLASLONG i, j;
|
||||
|
||||
IFLOAT *boffset0;
|
||||
IFLOAT *aoffset;
|
||||
IFLOAT *aoffset00, *aoffset01, *aoffset02, *aoffset03, *aoffset04, *aoffset05, *aoffset06, *aoffset07;
|
||||
IFLOAT *aoffset10, *aoffset11, *aoffset12, *aoffset13, *aoffset14, *aoffset15, *aoffset16, *aoffset17;
|
||||
aoffset = a;
|
||||
boffset0 = b;
|
||||
|
||||
BLASLONG n16 = n & ~15;
|
||||
BLASLONG m32 = m & ~31;
|
||||
|
||||
int permute_table[] = {
|
||||
0x0, 0x1, 0x2, 0x3, 0x10, 0x11, 0x12, 0x13, 0x8, 0x9, 0xa, 0xb, 0x18, 0x19, 0x1a, 0x1b,
|
||||
0x4, 0x5, 0x6, 0x7, 0x14, 0x15, 0x16, 0x17, 0xc, 0xd, 0xe, 0xf, 0x1c, 0x1d, 0x1e, 0x1f,
|
||||
};
|
||||
u_int64_t permute_table2[] = {
|
||||
0x00, 0x01, 0x02, 0x03, 8|0x0, 8|0x1, 8|0x2, 8|0x3,
|
||||
0x04, 0x05, 0x06, 0x07, 8|0x4, 8|0x5, 8|0x6, 8|0x7,
|
||||
};
|
||||
__m512i idx_lo = _mm512_loadu_si512(permute_table);
|
||||
__m512i idx_hi = _mm512_loadu_si512(permute_table + 16);
|
||||
__m512i idx_lo2 = _mm512_loadu_si512(permute_table2);
|
||||
__m512i idx_hi2 = _mm512_loadu_si512(permute_table2 + 8);
|
||||
__mmask16 kc = 0xcccc;
|
||||
__mmask16 k3 = 0x3333;
|
||||
__m512i r0, r1, r2, r3, r4, r5, r6, r7;
|
||||
__m512i t00, t01, t02, t03, t04, t05, t06, t07;
|
||||
__m512i t10, t11, t12, t13, t14, t15, t16, t17;
|
||||
|
||||
for (j = 0; j < n16; j += 16) {
|
||||
aoffset00 = aoffset;
|
||||
aoffset01 = aoffset00 + lda;
|
||||
aoffset02 = aoffset01 + lda;
|
||||
aoffset03 = aoffset02 + lda;
|
||||
aoffset04 = aoffset03 + lda;
|
||||
aoffset05 = aoffset04 + lda;
|
||||
aoffset06 = aoffset05 + lda;
|
||||
aoffset07 = aoffset06 + lda;
|
||||
aoffset10 = aoffset07 + lda;
|
||||
aoffset11 = aoffset10 + lda;
|
||||
aoffset12 = aoffset11 + lda;
|
||||
aoffset13 = aoffset12 + lda;
|
||||
aoffset14 = aoffset13 + lda;
|
||||
aoffset15 = aoffset14 + lda;
|
||||
aoffset16 = aoffset15 + lda;
|
||||
aoffset17 = aoffset16 + lda;
|
||||
aoffset += 16 * lda;
|
||||
for (i = 0; i < m32; i += 32) {
|
||||
r0 = _mm512_loadu_si512(aoffset00 + i);
|
||||
r1 = _mm512_loadu_si512(aoffset01 + i);
|
||||
r2 = _mm512_loadu_si512(aoffset02 + i);
|
||||
r3 = _mm512_loadu_si512(aoffset03 + i);
|
||||
r4 = _mm512_loadu_si512(aoffset04 + i);
|
||||
r5 = _mm512_loadu_si512(aoffset05 + i);
|
||||
r6 = _mm512_loadu_si512(aoffset06 + i);
|
||||
r7 = _mm512_loadu_si512(aoffset07 + i);
|
||||
REORDER_8x32(t00, t01, t02, t03, t04, t05, t06, t07);
|
||||
r0 = _mm512_loadu_si512(aoffset10 + i);
|
||||
r1 = _mm512_loadu_si512(aoffset11 + i);
|
||||
r2 = _mm512_loadu_si512(aoffset12 + i);
|
||||
r3 = _mm512_loadu_si512(aoffset13 + i);
|
||||
r4 = _mm512_loadu_si512(aoffset14 + i);
|
||||
r5 = _mm512_loadu_si512(aoffset15 + i);
|
||||
r6 = _mm512_loadu_si512(aoffset16 + i);
|
||||
r7 = _mm512_loadu_si512(aoffset17 + i);
|
||||
REORDER_8x32(t10, t11, t12, t13, t14, t15, t16, t17);
|
||||
STORE_512(0, 0); STORE_512(0, 1); STORE_512(0, 2); STORE_512(0, 3);
|
||||
STORE_512(0, 4); STORE_512(0, 5); STORE_512(0, 6); STORE_512(0, 7);
|
||||
STORE_512(1, 0); STORE_512(1, 1); STORE_512(1, 2); STORE_512(1, 3);
|
||||
STORE_512(1, 4); STORE_512(1, 5); STORE_512(1, 6); STORE_512(1, 7);
|
||||
boffset0 += 16 * 32;
|
||||
}
|
||||
if (i < m) {
|
||||
int remain_m = m - i;
|
||||
__mmask32 mmask = (1UL << remain_m) - 1;
|
||||
r0 = _mm512_maskz_loadu_epi16(mmask, aoffset00 + i);
|
||||
r1 = _mm512_maskz_loadu_epi16(mmask, aoffset01 + i);
|
||||
r2 = _mm512_maskz_loadu_epi16(mmask, aoffset02 + i);
|
||||
r3 = _mm512_maskz_loadu_epi16(mmask, aoffset03 + i);
|
||||
r4 = _mm512_maskz_loadu_epi16(mmask, aoffset04 + i);
|
||||
r5 = _mm512_maskz_loadu_epi16(mmask, aoffset05 + i);
|
||||
r6 = _mm512_maskz_loadu_epi16(mmask, aoffset06 + i);
|
||||
r7 = _mm512_maskz_loadu_epi16(mmask, aoffset07 + i);
|
||||
REORDER_8x32(t00, t01, t02, t03, t04, t05, t06, t07);
|
||||
r0 = _mm512_maskz_loadu_epi16(mmask, aoffset10 + i);
|
||||
r1 = _mm512_maskz_loadu_epi16(mmask, aoffset11 + i);
|
||||
r2 = _mm512_maskz_loadu_epi16(mmask, aoffset12 + i);
|
||||
r3 = _mm512_maskz_loadu_epi16(mmask, aoffset13 + i);
|
||||
r4 = _mm512_maskz_loadu_epi16(mmask, aoffset14 + i);
|
||||
r5 = _mm512_maskz_loadu_epi16(mmask, aoffset15 + i);
|
||||
r6 = _mm512_maskz_loadu_epi16(mmask, aoffset16 + i);
|
||||
r7 = _mm512_maskz_loadu_epi16(mmask, aoffset17 + i);
|
||||
REORDER_8x32(t10, t11, t12, t13, t14, t15, t16, t17);
|
||||
int n_store = remain_m/2;
|
||||
switch (n_store) {
|
||||
case 15: STORE_512(1, 6);
|
||||
case 14: STORE_512(1, 5);
|
||||
case 13: STORE_512(1, 4);
|
||||
case 12: STORE_512(1, 3);
|
||||
case 11: STORE_512(1, 2);
|
||||
case 10: STORE_512(1, 1);
|
||||
case 9: STORE_512(1, 0);
|
||||
case 8: STORE_512(0, 7);
|
||||
case 7: STORE_512(0, 6);
|
||||
case 6: STORE_512(0, 5);
|
||||
case 5: STORE_512(0, 4);
|
||||
case 4: STORE_512(0, 3);
|
||||
case 3: STORE_512(0, 2);
|
||||
case 2: STORE_512(0, 1);
|
||||
case 1: STORE_512(0, 0);
|
||||
}
|
||||
boffset0 += n_store * 32;
|
||||
if (m & 0x1) {
|
||||
__m512i tail;
|
||||
GET_TAIL();
|
||||
_mm256_storeu_si256((void *)boffset0, _mm512_cvtepi32_epi16(tail));
|
||||
boffset0 += 16;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
if (j < n) {
|
||||
int remain_n = n - j;
|
||||
__mmask16 nmask = (1UL << remain_n) - 1;
|
||||
int load0, load1;
|
||||
if (remain_n > 8) {
|
||||
load0 = 8;
|
||||
load1 = remain_n - 8;
|
||||
} else {
|
||||
load0 = remain_n;
|
||||
load1 = 0;
|
||||
}
|
||||
aoffset00 = aoffset;
|
||||
aoffset01 = aoffset00 + lda;
|
||||
aoffset02 = aoffset01 + lda;
|
||||
aoffset03 = aoffset02 + lda;
|
||||
aoffset04 = aoffset03 + lda;
|
||||
aoffset05 = aoffset04 + lda;
|
||||
aoffset06 = aoffset05 + lda;
|
||||
aoffset07 = aoffset06 + lda;
|
||||
aoffset10 = aoffset07 + lda;
|
||||
aoffset11 = aoffset10 + lda;
|
||||
aoffset12 = aoffset11 + lda;
|
||||
aoffset13 = aoffset12 + lda;
|
||||
aoffset14 = aoffset13 + lda;
|
||||
aoffset15 = aoffset14 + lda;
|
||||
aoffset16 = aoffset15 + lda;
|
||||
aoffset17 = aoffset16 + lda;
|
||||
aoffset += 16 * lda;
|
||||
for (i = 0; i < m32; i += 32) {
|
||||
switch (load0) {
|
||||
case 8: r7 = _mm512_loadu_si512(aoffset07 + i);
|
||||
case 7: r6 = _mm512_loadu_si512(aoffset06 + i);
|
||||
case 6: r5 = _mm512_loadu_si512(aoffset05 + i);
|
||||
case 5: r4 = _mm512_loadu_si512(aoffset04 + i);
|
||||
case 4: r3 = _mm512_loadu_si512(aoffset03 + i);
|
||||
case 3: r2 = _mm512_loadu_si512(aoffset02 + i);
|
||||
case 2: r1 = _mm512_loadu_si512(aoffset01 + i);
|
||||
case 1: r0 = _mm512_loadu_si512(aoffset00 + i);
|
||||
}
|
||||
REORDER_8x32(t00, t01, t02, t03, t04, t05, t06, t07);
|
||||
switch (load1) {
|
||||
case 8: r7 = _mm512_loadu_si512(aoffset17 + i);
|
||||
case 7: r6 = _mm512_loadu_si512(aoffset16 + i);
|
||||
case 6: r5 = _mm512_loadu_si512(aoffset15 + i);
|
||||
case 5: r4 = _mm512_loadu_si512(aoffset14 + i);
|
||||
case 4: r3 = _mm512_loadu_si512(aoffset13 + i);
|
||||
case 3: r2 = _mm512_loadu_si512(aoffset12 + i);
|
||||
case 2: r1 = _mm512_loadu_si512(aoffset11 + i);
|
||||
case 1: r0 = _mm512_loadu_si512(aoffset10 + i);
|
||||
}
|
||||
REORDER_8x32(t10, t11, t12, t13, t14, t15, t16, t17);
|
||||
MASK_STORE_512(0, 0); MASK_STORE_512(0, 1); MASK_STORE_512(0, 2); MASK_STORE_512(0, 3);
|
||||
MASK_STORE_512(0, 4); MASK_STORE_512(0, 5); MASK_STORE_512(0, 6); MASK_STORE_512(0, 7);
|
||||
MASK_STORE_512(1, 0); MASK_STORE_512(1, 1); MASK_STORE_512(1, 2); MASK_STORE_512(1, 3);
|
||||
MASK_STORE_512(1, 4); MASK_STORE_512(1, 5); MASK_STORE_512(1, 6); MASK_STORE_512(1, 7);
|
||||
boffset0 += remain_n * 32;
|
||||
}
|
||||
if (i < m) {
|
||||
int remain_m = m - i;
|
||||
__mmask32 mmask = (1UL << remain_m) - 1;
|
||||
switch (load0) {
|
||||
case 8: r7 = _mm512_maskz_loadu_epi16(mmask, aoffset07 + i);
|
||||
case 7: r6 = _mm512_maskz_loadu_epi16(mmask, aoffset06 + i);
|
||||
case 6: r5 = _mm512_maskz_loadu_epi16(mmask, aoffset05 + i);
|
||||
case 5: r4 = _mm512_maskz_loadu_epi16(mmask, aoffset04 + i);
|
||||
case 4: r3 = _mm512_maskz_loadu_epi16(mmask, aoffset03 + i);
|
||||
case 3: r2 = _mm512_maskz_loadu_epi16(mmask, aoffset02 + i);
|
||||
case 2: r1 = _mm512_maskz_loadu_epi16(mmask, aoffset01 + i);
|
||||
case 1: r0 = _mm512_maskz_loadu_epi16(mmask, aoffset00 + i);
|
||||
}
|
||||
REORDER_8x32(t00, t01, t02, t03, t04, t05, t06, t07);
|
||||
switch (load1) {
|
||||
case 8: r7 = _mm512_maskz_loadu_epi16(mmask, aoffset17 + i);
|
||||
case 7: r6 = _mm512_maskz_loadu_epi16(mmask, aoffset16 + i);
|
||||
case 6: r5 = _mm512_maskz_loadu_epi16(mmask, aoffset15 + i);
|
||||
case 5: r4 = _mm512_maskz_loadu_epi16(mmask, aoffset14 + i);
|
||||
case 4: r3 = _mm512_maskz_loadu_epi16(mmask, aoffset13 + i);
|
||||
case 3: r2 = _mm512_maskz_loadu_epi16(mmask, aoffset12 + i);
|
||||
case 2: r1 = _mm512_maskz_loadu_epi16(mmask, aoffset11 + i);
|
||||
case 1: r0 = _mm512_maskz_loadu_epi16(mmask, aoffset10 + i);
|
||||
}
|
||||
REORDER_8x32(t10, t11, t12, t13, t14, t15, t16, t17);
|
||||
int n_store = remain_m/2;
|
||||
switch (n_store) {
|
||||
case 15: MASK_STORE_512(1, 6);
|
||||
case 14: MASK_STORE_512(1, 5);
|
||||
case 13: MASK_STORE_512(1, 4);
|
||||
case 12: MASK_STORE_512(1, 3);
|
||||
case 11: MASK_STORE_512(1, 2);
|
||||
case 10: MASK_STORE_512(1, 1);
|
||||
case 9: MASK_STORE_512(1, 0);
|
||||
case 8: MASK_STORE_512(0, 7);
|
||||
case 7: MASK_STORE_512(0, 6);
|
||||
case 6: MASK_STORE_512(0, 5);
|
||||
case 5: MASK_STORE_512(0, 4);
|
||||
case 4: MASK_STORE_512(0, 3);
|
||||
case 3: MASK_STORE_512(0, 2);
|
||||
case 2: MASK_STORE_512(0, 1);
|
||||
case 1: MASK_STORE_512(0, 0);
|
||||
}
|
||||
boffset0 += n_store * remain_n * 2;
|
||||
if (m & 0x1) {
|
||||
__m512i tail;
|
||||
GET_TAIL();
|
||||
_mm256_mask_storeu_epi16((void *)boffset0, nmask, _mm512_cvtepi32_epi16(tail));
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
|
@@ -0,0 +1,208 @@
|
|||
/***************************************************************************
|
||||
Copyright (c) 2021, The OpenBLAS Project
|
||||
All rights reserved.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
3. Neither the name of the OpenBLAS project nor the names of
|
||||
its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written permission.
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*****************************************************************************/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <immintrin.h>
|
||||
#include "common.h"
|
||||
|
||||
#define REORDER_4x32(r0, r1, r2, r3) {\
|
||||
__m512i t0, t1, t2, t3; \
|
||||
t0 = _mm512_unpacklo_epi32(r0, r1); \
|
||||
t1 = _mm512_unpackhi_epi32(r0, r1); \
|
||||
t2 = _mm512_unpacklo_epi32(r2, r3); \
|
||||
t3 = _mm512_unpackhi_epi32(r2, r3); \
|
||||
r0 = _mm512_unpacklo_epi64(t0, t2); \
|
||||
r1 = _mm512_unpackhi_epi64(t0, t2); \
|
||||
r2 = _mm512_unpacklo_epi64(t1, t3); \
|
||||
r3 = _mm512_unpackhi_epi64(t1, t3); \
|
||||
t0 = _mm512_permutex2var_epi32(r0, idx_lo_128, r1); \
|
||||
t1 = _mm512_permutex2var_epi32(r0, idx_hi_128, r1); \
|
||||
t2 = _mm512_permutex2var_epi32(r2, idx_lo_128, r3); \
|
||||
t3 = _mm512_permutex2var_epi32(r2, idx_hi_128, r3); \
|
||||
r0 = _mm512_permutex2var_epi32(t0, idx_lo_256, t2); \
|
||||
r1 = _mm512_permutex2var_epi32(t1, idx_lo_256, t3); \
|
||||
r2 = _mm512_permutex2var_epi32(t0, idx_hi_256, t2); \
|
||||
r3 = _mm512_permutex2var_epi32(t1, idx_hi_256, t3); \
|
||||
}
|
||||
|
||||
#define REORDER_4x8(r0, r1, r2, r3) {\
|
||||
__m128i t0, t1, t2, t3; \
|
||||
t0 = _mm_unpacklo_epi32(r0, r1); \
|
||||
t1 = _mm_unpackhi_epi32(r0, r1); \
|
||||
t2 = _mm_unpacklo_epi32(r2, r3); \
|
||||
t3 = _mm_unpackhi_epi32(r2, r3); \
|
||||
r0 = _mm_unpacklo_epi64(t0, t2); \
|
||||
r1 = _mm_unpackhi_epi64(t0, t2); \
|
||||
r2 = _mm_unpacklo_epi64(t1, t3); \
|
||||
r3 = _mm_unpackhi_epi64(t1, t3); \
|
||||
}
|
||||
|
||||
#define GET_TAIL(tail, remain_m) \
|
||||
switch((remain_m + 1)/2) { \
|
||||
case 1: tail = r0; break; \
|
||||
case 2: tail = r1; break; \
|
||||
case 3: tail = r2; break; \
|
||||
case 4: tail = r3; break; \
|
||||
}
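/*
 * This copy routine packs four lda-strided rows at a time: 32- and 8-element
 * blocks are transposed with REORDER_4x32 / REORDER_4x8, the tail uses masked
 * loads, and when m is odd the unpaired last entries are compressed from
 * zero-padded dwords back to bf16 before being stored.
 */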
|
||||
|
||||
int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){
|
||||
BLASLONG i, j;
|
||||
IFLOAT *aoffset;
|
||||
IFLOAT *aoffset0, *aoffset1, *aoffset2, *aoffset3;
|
||||
|
||||
IFLOAT *boffset;
|
||||
|
||||
aoffset = a;
|
||||
boffset = b;
|
||||
|
||||
BLASLONG m32 = m & ~31;
|
||||
BLASLONG m8 = m & ~7;
|
||||
BLASLONG n4 = n & ~3;
|
||||
|
||||
int permute_table[] = {
|
||||
0x0, 0x1, 0x2, 0x3, 0x10, 0x11, 0x12, 0x13, 0x8, 0x9, 0xa, 0xb, 0x18, 0x19, 0x1a, 0x1b,
|
||||
0x4, 0x5, 0x6, 0x7, 0x14, 0x15, 0x16, 0x17, 0xc, 0xd, 0xe, 0xf, 0x1c, 0x1d, 0x1e, 0x1f,
|
||||
0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
|
||||
0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
|
||||
};
|
||||
__m512i idx_lo_128 = _mm512_loadu_si512(permute_table);
|
||||
__m512i idx_hi_128 = _mm512_loadu_si512(permute_table + 16);
|
||||
__m512i idx_lo_256 = _mm512_loadu_si512(permute_table + 32);
|
||||
__m512i idx_hi_256 = _mm512_loadu_si512(permute_table + 48);
|
||||
|
||||
for (j = 0; j < n4; j += 4) {
|
||||
aoffset0 = aoffset;
|
||||
aoffset1 = aoffset0 + lda;
|
||||
aoffset2 = aoffset1 + lda;
|
||||
aoffset3 = aoffset2 + lda;
|
||||
aoffset += 4 * lda;
|
||||
|
||||
for (i = 0; i < m32; i += 32) {
|
||||
__m512i r0, r1, r2, r3;
|
||||
r0 = _mm512_loadu_si512(aoffset0 + i);
|
||||
r1 = _mm512_loadu_si512(aoffset1 + i);
|
||||
r2 = _mm512_loadu_si512(aoffset2 + i);
|
||||
r3 = _mm512_loadu_si512(aoffset3 + i);
|
||||
REORDER_4x32(r0, r1, r2, r3);
|
||||
_mm512_storeu_si512(boffset + 32*0, r0);
|
||||
_mm512_storeu_si512(boffset + 32*1, r1);
|
||||
_mm512_storeu_si512(boffset + 32*2, r2);
|
||||
_mm512_storeu_si512(boffset + 32*3, r3);
|
||||
boffset += 32 * 4;
|
||||
}
|
||||
for (; i < m8; i += 8) {
|
||||
__m128i r0 = _mm_loadu_si128((void *)(aoffset0 + i));
|
||||
__m128i r1 = _mm_loadu_si128((void *)(aoffset1 + i));
|
||||
__m128i r2 = _mm_loadu_si128((void *)(aoffset2 + i));
|
||||
__m128i r3 = _mm_loadu_si128((void *)(aoffset3 + i));
|
||||
REORDER_4x8(r0, r1, r2, r3);
|
||||
_mm_storeu_si128((void *)(boffset + 8*0), r0);
|
||||
_mm_storeu_si128((void *)(boffset + 8*1), r1);
|
||||
_mm_storeu_si128((void *)(boffset + 8*2), r2);
|
||||
_mm_storeu_si128((void *)(boffset + 8*3), r3);
|
||||
boffset += 8 * 4;
|
||||
}
|
||||
if (i < m) {
|
||||
int remain_m = m - i;
|
||||
__mmask8 r_mask = (1UL << remain_m) - 1;
|
||||
__m128i r0 = _mm_maskz_loadu_epi16(r_mask, aoffset0 + i);
|
||||
__m128i r1 = _mm_maskz_loadu_epi16(r_mask, aoffset1 + i);
|
||||
__m128i r2 = _mm_maskz_loadu_epi16(r_mask, aoffset2 + i);
|
||||
__m128i r3 = _mm_maskz_loadu_epi16(r_mask, aoffset3 + i);
|
||||
REORDER_4x8(r0, r1, r2, r3);
|
||||
|
||||
      // the store must skip the trailing odd row
|
||||
int num_store = remain_m/2;
|
||||
switch(num_store) {
|
||||
case 3: _mm_storeu_si128((void *)(boffset + 8*2), r2);
|
||||
case 2: _mm_storeu_si128((void *)(boffset + 8*1), r1);
|
||||
case 1: _mm_storeu_si128((void *)(boffset + 8*0), r0);
|
||||
}
|
||||
boffset += 8 * num_store;
|
||||
|
||||
if (m & 0x1) { // handling the tail
|
||||
__m128i tail;
|
||||
GET_TAIL(tail, remain_m);
|
||||
        /* the tail vector is filled with zeros like:
         * a, 0, b, 0, c, 0, d, 0
         * so extract the low word of each dword and store it
         */
|
||||
tail = _mm_cvtepi32_epi16(tail);
|
||||
        _mm_store_sd((double *)boffset, (__m128d) tail); // only the lower 4 bfloat16 values are valid
|
||||
boffset += 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (j < n) {
|
||||
int remain_n = n - j;
|
||||
__mmask8 nmask = (1UL << remain_n) - 1;
|
||||
aoffset0 = aoffset;
|
||||
aoffset1 = aoffset0 + lda;
|
||||
aoffset2 = aoffset1 + lda;
|
||||
aoffset3 = aoffset2 + lda;
|
||||
__m128i r0, r1, r2, r3;
|
||||
for (i = 0; i < m8; i += 8) {
|
||||
switch (remain_n) {
|
||||
case 3: r2 = _mm_loadu_si128((void *)(aoffset2 + i));
|
||||
case 2: r1 = _mm_loadu_si128((void *)(aoffset1 + i));
|
||||
case 1: r0 = _mm_loadu_si128((void *)(aoffset0 + i));
|
||||
}
|
||||
REORDER_4x8(r0, r1, r2, r3);
|
||||
_mm_mask_storeu_epi32(boffset + remain_n * 0, nmask, r0);
|
||||
_mm_mask_storeu_epi32(boffset + remain_n * 2, nmask, r1);
|
||||
_mm_mask_storeu_epi32(boffset + remain_n * 4, nmask, r2);
|
||||
_mm_mask_storeu_epi32(boffset + remain_n * 6, nmask, r3);
|
||||
boffset += 8 * remain_n;
|
||||
}
|
||||
if (i < m) {
|
||||
int remain_m = m - i;
|
||||
__mmask8 mmask = (1UL << remain_m) - 1;
|
||||
switch (remain_n) {
|
||||
case 3: r2 = _mm_maskz_loadu_epi16(mmask, aoffset2 + i);
|
||||
case 2: r1 = _mm_maskz_loadu_epi16(mmask, aoffset1 + i);
|
||||
case 1: r0 = _mm_maskz_loadu_epi16(mmask, aoffset0 + i);
|
||||
}
|
||||
REORDER_4x8(r0, r1, r2, r3);
|
||||
|
||||
int num_store = remain_m/2;
|
||||
switch (num_store) {
|
||||
case 3: _mm_mask_storeu_epi32(boffset + remain_n * 4, nmask, r2);
|
||||
case 2: _mm_mask_storeu_epi32(boffset + remain_n * 2, nmask, r1);
|
||||
case 1: _mm_mask_storeu_epi32(boffset + remain_n * 0, nmask, r0);
|
||||
}
|
||||
boffset += 2 * num_store * remain_n;
|
||||
|
||||
if (m & 0x1) {
|
||||
__m128i tail;
|
||||
GET_TAIL(tail, remain_m);
|
||||
tail = _mm_cvtepi32_epi16(tail);
|
||||
_mm_mask_storeu_epi16(boffset, nmask, tail);
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
|
@@ -38,5 +38,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 int CNAME(int transa, int transb, BLASLONG M, BLASLONG N, BLASLONG K, FLOAT alpha, FLOAT beta)
 {
-  return 1;
+  double MNK = (double) M * (double) N * (double) K;
+  if (MNK > 256.0*256.0*256.0) // disable for large matrices
+    return 0;
+  /* the small-matrix kernel works well for N = 8, 16, 32 */
+  if (N == 8 || N == 16 || N == 32)
+    return 1;
+  return 0;
 }
@@ -0,0 +1,164 @@
|
|||
/***************************************************************************
|
||||
Copyright (c) 2021, The OpenBLAS Project
|
||||
All rights reserved.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
3. Neither the name of the OpenBLAS project nor the names of
|
||||
its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written permission.
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*****************************************************************************/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <immintrin.h>
|
||||
#include "common.h"
|
||||
|
||||
|
||||
int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){
|
||||
BLASLONG i, j;
|
||||
|
||||
IFLOAT *boffset0, *boffset1;
|
||||
|
||||
boffset0 = b;
|
||||
|
||||
BLASLONG n32 = n & ~31;
|
||||
BLASLONG m4 = m & ~3;
|
||||
BLASLONG m2 = m & ~1;
|
||||
|
||||
uint32_t permute_table[] = {
|
||||
0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17,
|
||||
0x08, 0x09, 0x0a, 0x0b, 0x18, 0x19, 0x1a, 0x1b, 0x0c, 0x0d, 0x0e, 0x0f, 0x1c, 0x1d, 0x1e, 0x1f,
|
||||
};
|
||||
|
||||
__m512i idx_lo = _mm512_loadu_si512(permute_table);
|
||||
__m512i idx_hi = _mm512_loadu_si512(permute_table + 16);
|
||||
|
||||
for (j = 0; j < n32; j += 32) {
|
||||
    /* process two 16-wide column panels of n at the same time */
|
||||
boffset1 = boffset0 + m * 16;
|
||||
for (i = 0; i < m4; i += 4) {
|
||||
      /* the bf16 FMA needs a special memory layout:
       * for a source laid out as
       *   a00, a01, a02, a03, a04, a05 ....
       *   a10, a11, a12, a13, a14, a15 ....
       * the packed copy must interleave the two rows as
       *   a00, a10, a01, a11, a02, a12, a03, a13, ...
       */
|
||||
__m512i a0 = _mm512_loadu_si512(&a[(i + 0)*lda + j]);
|
||||
__m512i a1 = _mm512_loadu_si512(&a[(i + 1)*lda + j]);
|
||||
__m512i a2 = _mm512_loadu_si512(&a[(i + 2)*lda + j]);
|
||||
__m512i a3 = _mm512_loadu_si512(&a[(i + 3)*lda + j]);
|
||||
|
||||
__m512i a00 = _mm512_unpacklo_epi16(a0, a1);
|
||||
__m512i a01 = _mm512_unpackhi_epi16(a0, a1);
|
||||
__m512i a10 = _mm512_unpacklo_epi16(a2, a3);
|
||||
__m512i a11 = _mm512_unpackhi_epi16(a2, a3);
|
||||
|
||||
a0 = _mm512_permutex2var_epi32(a00, idx_lo, a01);
|
||||
a1 = _mm512_permutex2var_epi32(a00, idx_hi, a01);
|
||||
a2 = _mm512_permutex2var_epi32(a10, idx_lo, a11);
|
||||
a3 = _mm512_permutex2var_epi32(a10, idx_hi, a11);
|
||||
|
||||
_mm512_storeu_si512(boffset0, a0);
|
||||
_mm512_storeu_si512(boffset1, a1);
|
||||
_mm512_storeu_si512(boffset0 + 32, a2);
|
||||
_mm512_storeu_si512(boffset1 + 32, a3);
|
||||
boffset0 += 64;
|
||||
boffset1 += 64;
|
||||
}
|
||||
for (; i < m2; i += 2) {
|
||||
__m512i a0 = _mm512_loadu_si512(&a[(i + 0)*lda + j]);
|
||||
__m512i a1 = _mm512_loadu_si512(&a[(i + 1)*lda + j]);
|
||||
|
||||
__m512i a00 = _mm512_unpacklo_epi16(a0, a1);
|
||||
__m512i a01 = _mm512_unpackhi_epi16(a0, a1);
|
||||
|
||||
a0 = _mm512_permutex2var_epi32(a00, idx_lo, a01);
|
||||
a1 = _mm512_permutex2var_epi32(a00, idx_hi, a01);
|
||||
|
||||
_mm512_storeu_si512(boffset0, a0);
|
||||
_mm512_storeu_si512(boffset1, a1);
|
||||
boffset0 += 32;
|
||||
boffset1 += 32;
|
||||
}
|
||||
for (; i < m; i++) {
|
||||
      /* just copy the single remaining row */
|
||||
__m256i a0 = _mm256_loadu_si256((void *)&a[(i + 0)*lda + j]);
|
||||
__m256i a1 = _mm256_loadu_si256((void *)&a[(i + 0)*lda + j + 16]);
|
||||
_mm256_storeu_si256((void *)boffset0, a0);
|
||||
_mm256_storeu_si256((void *)boffset1, a1);
|
||||
boffset0 += 16;
|
||||
boffset1 += 16;
|
||||
}
|
||||
boffset0 = boffset1;
|
||||
}
|
||||
if (j < n) {
|
||||
uint32_t remains = n - j;
|
||||
__mmask32 r_mask = (1UL << remains) - 1;
|
||||
if (remains > 16) {
|
||||
boffset1 = boffset0 + m * 16;
|
||||
uint32_t tail1 = remains - 16;
|
||||
__mmask16 w_mask1 = (1UL << tail1) - 1;
|
||||
for (i = 0; i < m2; i += 2) {
|
||||
__m512i a0 = _mm512_maskz_loadu_epi16(r_mask, &a[(i + 0)*lda + j]);
|
||||
__m512i a1 = _mm512_maskz_loadu_epi16(r_mask, &a[(i + 1)*lda + j]);
|
||||
|
||||
__m512i a00 = _mm512_unpacklo_epi16(a0, a1);
|
||||
__m512i a01 = _mm512_unpackhi_epi16(a0, a1);
|
||||
|
||||
a0 = _mm512_permutex2var_epi32(a00, idx_lo, a01);
|
||||
a1 = _mm512_permutex2var_epi32(a00, idx_hi, a01);
|
||||
|
||||
_mm512_storeu_si512(boffset0, a0);
|
||||
_mm512_mask_storeu_epi32(boffset1, w_mask1, a1);
|
||||
|
||||
boffset0 += 32;
|
||||
boffset1 += 2 * tail1;
|
||||
}
|
||||
for (; i < m; i++) {
|
||||
__m256i a0 = _mm256_loadu_si256((void *)&a[(i + 0)*lda + j]);
|
||||
__m256i a1 = _mm256_maskz_loadu_epi16(w_mask1, (void *)&a[(i + 0)*lda + j + 16]);
|
||||
_mm256_storeu_si256((void *)boffset0, a0);
|
||||
_mm256_mask_storeu_epi16((void *)boffset1, w_mask1, a1);
|
||||
boffset0 += 16;
|
||||
boffset1 += tail1;
|
||||
}
|
||||
} else {
|
||||
__mmask16 w_mask = (1UL << remains ) - 1;
|
||||
for (i = 0; i < m2; i += 2) {
|
||||
__m512i a0 = _mm512_maskz_loadu_epi16(r_mask, &a[(i + 0)*lda + j]);
|
||||
__m512i a1 = _mm512_maskz_loadu_epi16(r_mask, &a[(i + 1)*lda + j]);
|
||||
|
||||
__m512i a00 = _mm512_unpacklo_epi16(a0, a1);
|
||||
__m512i a01 = _mm512_unpackhi_epi16(a0, a1);
|
||||
|
||||
a0 = _mm512_permutex2var_epi32(a00, idx_lo, a01);
|
||||
|
||||
_mm512_mask_storeu_epi32(boffset0, w_mask, a0);
|
||||
boffset0 += 2 * remains;
|
||||
}
|
||||
for (; i < m; i++) {
|
||||
__m256i a0 = _mm256_maskz_loadu_epi16(w_mask, &a[(i + 0)*lda + j]);
|
||||
_mm256_mask_storeu_epi16(boffset0, w_mask, a0);
|
||||
boffset0 += remains;
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
|
@@ -0,0 +1,216 @@
|
|||
/***************************************************************************
|
||||
Copyright (c) 2021, The OpenBLAS Project
|
||||
All rights reserved.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
3. Neither the name of the OpenBLAS project nor the names of
|
||||
its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written permission.
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*****************************************************************************/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <immintrin.h>
|
||||
#include "common.h"
|
||||
|
||||
#define STORE_VEC(Bx, By, vec) \
|
||||
if (By == 0) asm("vmovdqu16 %0, (%1)": : "v"(vec), "r"(boffset##Bx)); \
|
||||
else asm("vmovdqu16 %0, (%1, %2, %c3)": : "v"(vec), "r"(boffset##Bx), "r"(blk_size), "n"(By * 2));
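/*
 * This copy routine walks n in panels of 24 columns (the widest panel used by
 * the compute kernel): eight rows at a time are mask-loaded, transposed by a
 * three-stage unpack/permute network, and stored as 4-column sub-blocks spaced
 * blk_size elements apart via STORE_VEC.  Narrower n panels and odd m rows are
 * handled by the paths that follow.
 */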

int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){
  BLASLONG i, j;

  IFLOAT *boffset0, *boffset1;

  boffset0 = b;

  BLASLONG n24 = n - (n % 24);
  BLASLONG n8 = n & ~7;
  BLASLONG m8 = m & ~7;
  BLASLONG m4 = m & ~3;
  BLASLONG m2 = m & ~1;

  int permute_table[] = {
    0x0, 0x1, 0x2, 0x3, 0x10, 0x11, 0x12, 0x13, 0x8, 0x9, 0xa, 0xb, 0x18, 0x19, 0x1a, 0x1b,
    0x4, 0x5, 0x6, 0x7, 0x14, 0x15, 0x16, 0x17, 0xc, 0xd, 0xe, 0xf, 0x1c, 0x1d, 0x1e, 0x1f,
    0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
    0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
  };
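  /*
   * Dword indices for _mm512_permutex2var_epi32: the first two rows gather
   * the even / odd 128-bit lanes of two source vectors, the last two rows
   * gather their low / high 256-bit halves.  They drive the second and third
   * shuffle stage of the 8-row transpose below.
   */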

  j = 0;
  if (n > 23) {
    /* n = 24 is the max width in current blocking setting */
    __m512i idx_lo_128 = _mm512_loadu_si512(permute_table);
    __m512i idx_hi_128 = _mm512_loadu_si512(permute_table + 16);
    __m512i idx_lo_256 = _mm512_loadu_si512(permute_table + 32);
    __m512i idx_hi_256 = _mm512_loadu_si512(permute_table + 48);
    __mmask32 mask24 = (1UL << 24) - 1;
    BLASLONG blk_size = m * 4;
    BLASLONG stride = blk_size * 3;

    for (; j < n24; j += 24) {
      boffset1 = boffset0 + stride;
      for (i = 0; i < m8; i += 8) {
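        /*
         * Eight full rows of a 24-column strip: a 16-bit unpack of row pairs
         * followed by two permutex2var stages regroups the 8x24 tile into six
         * 8-row x 4-column panels (t0..t5), each written with one vmovdqu16.
         */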
        __m512i r0, r1, r2, r3, r4, r5, r6, r7;
        __m512i t0, t1, t2, t3, t4, t5, t6, t7;
        r0 = _mm512_maskz_loadu_epi16(mask24, &a[(i + 0)*lda + j]);
        r1 = _mm512_maskz_loadu_epi16(mask24, &a[(i + 1)*lda + j]);
        r2 = _mm512_maskz_loadu_epi16(mask24, &a[(i + 2)*lda + j]);
        r3 = _mm512_maskz_loadu_epi16(mask24, &a[(i + 3)*lda + j]);
        r4 = _mm512_maskz_loadu_epi16(mask24, &a[(i + 4)*lda + j]);
        r5 = _mm512_maskz_loadu_epi16(mask24, &a[(i + 5)*lda + j]);
        r6 = _mm512_maskz_loadu_epi16(mask24, &a[(i + 6)*lda + j]);
        r7 = _mm512_maskz_loadu_epi16(mask24, &a[(i + 7)*lda + j]);

        t0 = _mm512_unpacklo_epi16(r0, r1);
        t1 = _mm512_unpackhi_epi16(r0, r1);
        t2 = _mm512_unpacklo_epi16(r2, r3);
        t3 = _mm512_unpackhi_epi16(r2, r3);
        t4 = _mm512_unpacklo_epi16(r4, r5);
        t5 = _mm512_unpackhi_epi16(r4, r5);
        t6 = _mm512_unpacklo_epi16(r6, r7);
        t7 = _mm512_unpackhi_epi16(r6, r7);

        r0 = _mm512_permutex2var_epi32(t0, idx_lo_128, t2);
        r1 = _mm512_permutex2var_epi32(t1, idx_lo_128, t3);
        r2 = _mm512_permutex2var_epi32(t4, idx_lo_128, t6);
        r3 = _mm512_permutex2var_epi32(t5, idx_lo_128, t7);
        r4 = _mm512_permutex2var_epi32(t0, idx_hi_128, t2);
        r5 = _mm512_permutex2var_epi32(t1, idx_hi_128, t3);
        r6 = _mm512_permutex2var_epi32(t4, idx_hi_128, t6);
        r7 = _mm512_permutex2var_epi32(t5, idx_hi_128, t7);

        t0 = _mm512_permutex2var_epi32(r0, idx_lo_256, r2);
        t1 = _mm512_permutex2var_epi32(r1, idx_lo_256, r3);
        t2 = _mm512_permutex2var_epi32(r4, idx_lo_256, r6);
        t3 = _mm512_permutex2var_epi32(r5, idx_lo_256, r7);
        t4 = _mm512_permutex2var_epi32(r0, idx_hi_256, r2);
        t5 = _mm512_permutex2var_epi32(r1, idx_hi_256, r3);

        STORE_VEC(0, 0, t0); STORE_VEC(0, 1, t1); STORE_VEC(0, 2, t2);
        STORE_VEC(1, 0, t3); STORE_VEC(1, 1, t4); STORE_VEC(1, 2, t5);
        boffset0 += 32;
        boffset1 += 32;
      }
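      /*
       * One leftover pair of rows: interleave the two 24-element rows and
       * scatter six 2x4 sub-blocks via 128-bit extracts of the unpack result.
       */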
      for (; i < m2; i += 2) {
        __m512i r0, r1, t0, t1;
        r0 = _mm512_maskz_loadu_epi16(mask24, &a[(i + 0)*lda + j]);
        r1 = _mm512_maskz_loadu_epi16(mask24, &a[(i + 1)*lda + j]);
        t0 = _mm512_unpacklo_epi16(r0, r1);
        t1 = _mm512_unpackhi_epi16(r0, r1);
        STORE_VEC(0, 0, _mm512_extracti32x4_epi32(t0, 0));
        STORE_VEC(0, 1, _mm512_extracti32x4_epi32(t1, 0));
        STORE_VEC(0, 2, _mm512_extracti32x4_epi32(t0, 1));
        STORE_VEC(1, 0, _mm512_extracti32x4_epi32(t1, 1));
        STORE_VEC(1, 1, _mm512_extracti32x4_epi32(t0, 2));
        STORE_VEC(1, 2, _mm512_extracti32x4_epi32(t1, 2));
        boffset0 += 8;
        boffset1 += 8;
      }
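      /*
       * A final odd row: each 4-column group is 8 bytes of bf16, so plain
       * 64-bit scalar copies place it into the six output panels.
       */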
      for (; i < m; i++) {
        *(uint64_t *)(boffset0 + blk_size * 0) = *(uint64_t *)&a[i * lda + j + 0];
        *(uint64_t *)(boffset0 + blk_size * 1) = *(uint64_t *)&a[i * lda + j + 4];
        *(uint64_t *)(boffset0 + blk_size * 2) = *(uint64_t *)&a[i * lda + j + 8];
        *(uint64_t *)(boffset1 + blk_size * 0) = *(uint64_t *)&a[i * lda + j + 12];
        *(uint64_t *)(boffset1 + blk_size * 1) = *(uint64_t *)&a[i * lda + j + 16];
        *(uint64_t *)(boffset1 + blk_size * 2) = *(uint64_t *)&a[i * lda + j + 20];
        boffset0 += 4;
        boffset1 += 4;
      }
      boffset0 += stride * 2;
    }
  }

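  /*
   * Columns left after the 24-wide pass are handled 8 at a time: four rows at
   * a time are interleaved with SSE unpacks and split across two m x 4 output
   * panels (boffset0 and boffset1).
   */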
  for (; j < n8; j += 8) {
    boffset1 = boffset0 + m * 4;
    for (i = 0; i < m4; i += 4) {
      __m128i a0 = _mm_loadu_si128((void *)&a[(i + 0)*lda + j]);
      __m128i a1 = _mm_loadu_si128((void *)&a[(i + 1)*lda + j]);
      __m128i a2 = _mm_loadu_si128((void *)&a[(i + 2)*lda + j]);
      __m128i a3 = _mm_loadu_si128((void *)&a[(i + 3)*lda + j]);
      __m128i a00 = _mm_unpacklo_epi16(a0, a1);
      __m128i a01 = _mm_unpackhi_epi16(a0, a1);
      __m128i a10 = _mm_unpacklo_epi16(a2, a3);
      __m128i a11 = _mm_unpackhi_epi16(a2, a3);
      _mm_storeu_si128((void *)(boffset0 + 0), a00);
      _mm_storeu_si128((void *)(boffset0 + 8), a10);
      _mm_storeu_si128((void *)(boffset1 + 0), a01);
      _mm_storeu_si128((void *)(boffset1 + 8), a11);
      boffset0 += 16;
      boffset1 += 16;
    }
    for (; i < m2; i += 2) {
      __m128i a0 = _mm_loadu_si128((void *)&a[(i + 0)*lda + j]);
      __m128i a1 = _mm_loadu_si128((void *)&a[(i + 1)*lda + j]);
      __m128i a00 = _mm_unpacklo_epi16(a0, a1);
      __m128i a01 = _mm_unpackhi_epi16(a0, a1);
      _mm_storeu_si128((void *)(boffset0 + 0), a00);
      _mm_storeu_si128((void *)(boffset1 + 0), a01);
      boffset0 += 8;
      boffset1 += 8;
    }
    for (; i < m; i++) {
      __m128d a0 = _mm_loadu_pd((void *)&a[(i + 0)*lda + j]);
      _mm_store_sd((void *)boffset0, a0);
      _mm_store_sd((void *)boffset1, _mm_permute_pd(a0, 0x1));
      boffset0 += 4;
      boffset1 += 4;
    }
    boffset0 = boffset1;
  }
  if (j < n) {
    uint32_t remains = n - j;
    __mmask8 r_mask = (1UL << remains) - 1;
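    /*
     * Fewer than 8 columns remain.  r_mask selects them on load; when more
     * than 4 are left they are written as one full 4-column panel plus a
     * (remains - 4)-wide panel, otherwise as a single remains-wide panel.
     */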
    if (remains > 4) {
      boffset1 = boffset0 + m * 4;
      uint32_t tail1 = remains - 4;
      __mmask8 w_mask1 = (1UL << tail1) - 1;
      for (i = 0; i < m2; i += 2) {
        __m128i a0 = _mm_maskz_loadu_epi16(r_mask, &a[(i + 0)*lda + j]);
        __m128i a1 = _mm_maskz_loadu_epi16(r_mask, &a[(i + 1)*lda + j]);
        __m128i a00 = _mm_unpacklo_epi16(a0, a1);
        __m128i a01 = _mm_unpackhi_epi16(a0, a1);
        _mm_storeu_si128((void *)boffset0, a00);
        _mm_mask_storeu_epi32((void *)boffset1, w_mask1, a01);
        boffset0 += 8;
        boffset1 += 2 * tail1;
      }
      for (; i < m; i++) {
        __m128i a0 = _mm_maskz_loadu_epi16(r_mask, &a[(i + 0)*lda + j]);
        _mm_store_sd((void *)boffset0, (__m128d) a0);
        _mm_mask_storeu_epi16((void *)boffset1, w_mask1, (__m128i) _mm_permute_pd((__m128d) a0, 0x1));
        boffset0 += 4;
        boffset1 += tail1;
      }
    } else {
      for (i = 0; i < m2; i += 2) {
        __m128i a0 = _mm_maskz_loadu_epi16(r_mask, &a[(i + 0)*lda + j]);
        __m128i a1 = _mm_maskz_loadu_epi16(r_mask, &a[(i + 1)*lda + j]);
        __m128i a00 = _mm_unpacklo_epi16(a0, a1);
        _mm_mask_storeu_epi32((void *)boffset0, r_mask, a00);
        boffset0 += 2 * remains;
      }
      for (; i < m; i++) {
        __m128i a0 = _mm_maskz_loadu_epi16(r_mask, &a[(i + 0)*lda + j]);
        _mm_mask_storeu_epi16((void *)boffset0, r_mask, a0);
      }
    }
  }
  return 0;
}

@@ -41,7 +41,7 @@
#include <immintrin.h>

int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT beta,
          FLOAT *dummy2, BLASLONG dummy3, FLOAT *dummy4, BLASLONG dummy5,
          IFLOAT *dummy2, BLASLONG dummy3, IFLOAT *dummy4, BLASLONG dummy5,
          FLOAT *c, BLASLONG ldc){
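  /*
   * Note: the unused input-pointer parameters change from FLOAT to IFLOAT in
   * this hunk.  Under a bfloat16 build those types differ (IFLOAT being the
   * input data type), which is presumably what lets this beta kernel source
   * also build for SBGEMM; the output/compute type FLOAT is unchanged.
   */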

  BLASLONG i, j;

param.h
@@ -1771,6 +1771,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endif
#define USE_SGEMM_KERNEL_DIRECT 1

#undef SBGEMM_DEFAULT_UNROLL_N
#undef SBGEMM_DEFAULT_UNROLL_M
#undef SBGEMM_DEFAULT_P
#undef SBGEMM_DEFAULT_R
#undef SBGEMM_DEFAULT_Q
#define SBGEMM_DEFAULT_UNROLL_N 4
#define SBGEMM_DEFAULT_UNROLL_M 16
#define SBGEMM_DEFAULT_P 384
#define SBGEMM_DEFAULT_Q 768
#define SBGEMM_DEFAULT_R sbgemm_r
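/*
 * Cooperlake SBGEMM blocking as set in this hunk: a 16x4 micro-kernel tile
 * with P = 384 and Q = 768; R is left as the runtime variable sbgemm_r rather
 * than a compile-time constant.
 */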

#ifdef ARCH_X86

#define SGEMM_DEFAULT_UNROLL_M 4