sbgemm: cooperlake: kernel works for NN

Wangyang Guo 2021-08-16 19:39:24 +08:00
parent 2ec9f3a8aa
commit 9df0953cde
3 changed files with 496 additions and 151 deletions

View File

@ -31,8 +31,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define VMOVLDUP(addr, zmm) asm("vmovsldup (%1), %0": "=v"(zmm): "r"(addr))
#define VMOVHDUP(addr, zmm) asm("vmovshdup (%1), %0": "=v"(zmm): "r"(addr))
#define BROADCAST64(base, step, n, offset, zmm) \
if (n == 0) asm("vbroadcastsd %2(%1), %0": "=v"(zmm): "r"(base), "n"(offset*2)); \
else asm("vbroadcastsd %4(%1, %2, %3), %0": "=v"(zmm): "r"(base), "r"(step), "n"(n*2), "n"(offset*2))
if (n == 0) asm("vbroadcastsd %c2(%1), %0": "=v"(zmm): "r"(base), "n"(offset*2)); \
else asm("vbroadcastsd %c4(%1, %2, %c3), %0": "=v"(zmm): "r"(base), "r"(step), "n"(n*2), "n"(offset*2))
#define DECLARE_A_PAIR(A) \
__m512i A_lo_##A; __m512i A_hi_##A;
@ -41,8 +41,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
VMOVLDUP(ptr_a##A, A_lo_##A); \
VMOVHDUP(ptr_a##A, A_hi_##A);
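The lo/hi duplication exists because the FMA macro applied in MATMUL_4X presumably wraps vdpbf16ps, which multiplies adjacent bf16 pairs and accumulates into fp32 lanes: vmovsldup keeps the even-k copies of A, vmovshdup the odd-k copies, and the *_TAIL variants below zero-extend a lone k value so the odd slot of each pair multiplies as zero. A sketch of one accumulation step under that assumption (requires AVX512-BF16):

    #include <immintrin.h>
    /* One k-pair step: each fp32 lane of acc gains
     * a[2i]*b[2i] + a[2i+1]*b[2i+1]. */
    static inline __m512 bf16_fma_step(__m512i a_pair, __m512i b_pair, __m512 acc)
    {
        return _mm512_dpbf16_ps(acc, (__m512bh) a_pair, (__m512bh) b_pair);
    }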
#define MASK_LOAD_A_PAIR(A) { \
__m512 tmp = _mm512_maskz_loadu_ps(mmask, ptr_a##A); \
A_lo_##A = (__m512i) _mm512_moveldup_ps(tmp); \
A_hi_##A = (__m512i) _mm512_movehdup_ps(tmp); \
}
#define LOAD_A_PAIR_TAIL(A) { \
__m256i ymm = _mm256_loadu_si256((void *)ptr_a##A); \
__m512 zmm = (__m512) _mm512_cvtepu16_epi32(ymm); \
A_lo_##A = (__m512i) _mm512_moveldup_ps(zmm); \
A_hi_##A = (__m512i) _mm512_movehdup_ps(zmm); \
}
#define MASK_LOAD_A_PAIR_TAIL(A) { \
__m256i ymm = _mm256_maskz_loadu_epi16(mmask, ptr_a##A); \
__m512 zmm = (__m512) _mm512_cvtepu16_epi32(ymm); \
A_lo_##A = (__m512i) _mm512_moveldup_ps(zmm); \
A_hi_##A = (__m512i) _mm512_movehdup_ps(zmm); \
@ -53,13 +66,26 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define BROADCAST_B_PAIR(Bx, By) \
BROADCAST64(ptr_b##Bx, n_blksize, By, 0, B_lo); \
BROADCAST64(ptr_b##Bx, n_blksize, By, 4, B_hi);
#define MASK_BROADCAST_B_PAIR(Bx, x) {\
__m128 xmm = _mm_maskz_loadu_ps(nmask, ptr_b##Bx); \
B_lo = (__m512i) _mm512_broadcastsd_pd((__m128d) xmm); \
B_hi = (__m512i) _mm512_broadcastsd_pd(_mm_permute_pd((__m128d) xmm, 0x1)); \
}
#define BROADCAST_B_PAIR_TAIL(Bx, By) {\
__m128i xmm = (__m128i) _mm_load_sd((double *)(ptr_b##Bx + n_blksize * By)); \
xmm = _mm_cvtepu16_epi32(xmm); \
B_lo = _mm512_broadcast_i32x2(xmm); \
B_hi = _mm512_broadcast_i32x2((__m128i) _mm_permute_pd((__m128d) xmm, 0x1)); \
}
#define MASK_BROADCAST_B_PAIR_TAIL(Bx, By) {\
__m128i xmm = _mm_maskz_loadu_epi16(nmask, ptr_b##Bx + n_blksize * By); \
xmm = _mm_cvtepu16_epi32(xmm); \
B_lo = _mm512_broadcast_i32x2(xmm); \
B_hi = _mm512_broadcast_i32x2((__m128i) _mm_permute_pd((__m128d) xmm, 0x1)); \
}
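Two fixes land in these B macros: _mm_load_sd now gets its double* cast, and the tail broadcast switches from _mm512_broadcastd_epi32, which repeats a single dword, to _mm512_broadcast_i32x2, which repeats the low two dwords, i.e. one full (k, k+1) pair. A standalone check of the difference (made-up values; _mm512_broadcast_i32x2 needs AVX512DQ):

    #include <immintrin.h>
    #include <stdint.h>
    #include <stdio.h>
    int main(void)
    {
        uint32_t out[16];
        __m128i xmm = _mm_set_epi32(4, 3, 2, 1);            /* dwords {1,2,3,4} */
        _mm512_storeu_si512(out, _mm512_broadcast_i32x2(xmm));
        for (int i = 0; i < 4; i++) printf("%u ", out[i]);  /* prints: 1 2 1 2 */
        printf("\n");
        return 0;
    }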
#define DECLARE_RESULT_4X(A, Bx, By) \
@ -76,25 +102,103 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMA(A_lo_##A, B_hi, result_10_##A##Bx##By); \
FMA(A_hi_##A, B_hi, result_11_##A##Bx##By);
#define _STORE_C_2nx16(addr, val0, val1) \
asm("vfmadd213ps (%1), %2, %0": "+v"(val0) : "r"(addr), "v"(alpha_512)); \
asm("vfmadd213ps (%1, %3, 4), %2, %0": "+v"(val1) : "r"(addr), "v"(alpha_512), "r"(ldc)); \
asm("vmovups %0, (%1)": : "v"(val0), "r"(addr)); \
asm("vmovups %0, (%1, %2, 4)": : "v"(val1), "r"(addr), "r"(ldc))
#define _MASK_STORE_C_2nx16(addr, val0, val1) \
asm("vfmadd213ps (%1), %2, %0 %{%3%} ": "+v"(val0) : "r"(addr), "v"(alpha_512), "k"(mmask)); \
asm("vfmadd213ps (%1, %3, 4), %2, %0 %{%4%}": "+v"(val1) : "r"(addr), "v"(alpha_512), "r"(ldc), "k"(mmask)); \
asm("vmovups %0, (%1) %{%2%}": : "v"(val0), "r"(addr), "k"(mmask)); \
asm("vmovups %0, (%1, %2, 4) %{%3%}": : "v"(val1), "r"(addr), "r"(ldc), "k"(mmask))
#define _REORDER_C_2X(result_0, result_1) { \
__m512 tmp0, tmp1; \
tmp0 = _mm512_unpacklo_ps(result_0, result_1); \
tmp1 = _mm512_unpackhi_ps(result_0, result_1); \
result_0 = (__m512) _mm512_unpacklo_pd((__m512d) tmp0, (__m512d) tmp1); \
result_1 = (__m512) _mm512_unpackhi_pd((__m512d) tmp0, (__m512d) tmp1); \
}
#define _STORE_2X(ptr_c, result_0, result_1) {\
_REORDER_C_2X(result_0, result_1) \
_STORE_C_2nx16(ptr_c, result_0, result_1); \
ptr_c += ldc * 2; \
}
#define _MASK_STORE_2X(ptr_c, result_0, result_1) {\
_REORDER_C_2X(result_0, result_1) \
_MASK_STORE_C_2nx16(ptr_c, result_0, result_1); \
ptr_c += ldc * 2; \
}
#define STORE_4X(A, Bx, By) { \
_STORE_2X(ptr_c##A, result_00_##A##Bx##By, result_01_##A##Bx##By); \
_STORE_2X(ptr_c##A, result_10_##A##Bx##By, result_11_##A##Bx##By); \
}
#define MASK_STORE_4X(A, Bx, By) { \
_MASK_STORE_2X(ptr_c##A, result_00_##A##Bx##By, result_01_##A##Bx##By); \
_MASK_STORE_2X(ptr_c##A, result_10_##A##Bx##By, result_11_##A##Bx##By); \
}
#define _STORE_C_16(addr, val0) \
asm("vfmadd213ps (%1), %2, %0": "+v"(val0) : "r"(addr), "v"(alpha_512)); \
asm("vmovups %0, (%1)": : "v"(val0), "r"(addr));
#define _MASK_STORE_C_16(addr, val0) \
asm("vfmadd213ps (%1), %2, %0 %{%3%} ": "+v"(val0) : "r"(addr), "v"(alpha_512), "k"(mmask)); \
asm("vmovups %0, (%1) %{%2%}": : "v"(val0), "r"(addr), "k"(mmask));
#define N_STORE_4X(A, Bx, By) { \
_REORDER_C_2X(result_00_##A##Bx##By, result_01_##A##Bx##By); \
_REORDER_C_2X(result_10_##A##Bx##By, result_11_##A##Bx##By); \
switch(n_count) { \
case 3: _STORE_C_16(ptr_c + ldc * 2, result_10_##A##Bx##By); \
case 2: _STORE_C_16(ptr_c + ldc * 1, result_01_##A##Bx##By); \
case 1: _STORE_C_16(ptr_c + ldc * 0, result_00_##A##Bx##By); \
} \
ptr_c##A += ldc * n_count; \
}
#define N_MASK_STORE_4X(A, Bx, By) { \
_REORDER_C_2X(result_00_##A##Bx##By, result_01_##A##Bx##By); \
_REORDER_C_2X(result_10_##A##Bx##By, result_11_##A##Bx##By); \
switch(n_count) { \
case 3: _MASK_STORE_C_16(ptr_c + ldc * 2, result_10_##A##Bx##By); \
case 2: _MASK_STORE_C_16(ptr_c + ldc * 1, result_01_##A##Bx##By); \
case 1: _MASK_STORE_C_16(ptr_c + ldc * 0, result_00_##A##Bx##By); \
} \
ptr_c##A += ldc * n_count; \
}
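The case labels in N_STORE_4X and N_MASK_STORE_4X fall through on purpose: entering at case n_count writes exactly n_count strips of 16 C values. A scalar analogue (illustrative helper, not part of the patch):

    #include <string.h>
    static void store_n_strips(float *c, long ldc, int n_count, const float rows[3][16])
    {
        switch (n_count) {                                      /* deliberate fallthrough */
        case 3: memcpy(c + 2 * ldc, rows[2], sizeof(rows[2]));  /* fall through */
        case 2: memcpy(c + 1 * ldc, rows[1], sizeof(rows[1]));  /* fall through */
        case 1: memcpy(c + 0 * ldc, rows[0], sizeof(rows[0]));
        }
    }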
int CNAME (BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT * B, FLOAT * C, BLASLONG ldc)
{
IFLOAT *ptr_a = A, *ptr_b = B;
IFLOAT *ptr_b0, *ptr_b1;
IFLOAT *ptr_a0, *ptr_a1;
FLOAT *ptr_c = C;
FLOAT *ptr_c0, *ptr_c1;
BLASLONG n_count = n;
BLASLONG m_count, k_count;
BLASLONG n_blksize = 4 * k;
BLASLONG cn_offset = 0;
__m512 alpha_512 = _mm512_broadcastss_ps(_mm_load_ss(&alpha));
for (; n_count > 23; n_count -= 24) {
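/* n panel of 24: B is packed in 4-column blocks of n_blksize = 4 * k elements; ptr_b00 and ptr_b10 mark the two 12-column halves */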
IFLOAT *ptr_b00 = ptr_b;
IFLOAT *ptr_b10 = ptr_b + n_blksize * 3;
ptr_a0 = ptr_a;
ptr_c = C + cn_offset * ldc;
m_count = m;
for (; m_count > 15; m_count -= 16) {
ptr_b0 = ptr_b00;
ptr_b1 = ptr_b10;
DECLARE_A_PAIR(0);
DECLARE_B_PAIR();
DECLARE_RESULT_4X(0, 0, 0); DECLARE_RESULT_4X(0, 0, 1); DECLARE_RESULT_4X(0, 0, 2);
DECLARE_RESULT_4X(0, 1, 0); DECLARE_RESULT_4X(0, 1, 1); DECLARE_RESULT_4X(0, 1, 2);
for (k_count = k; k_count > 1; k_count -=2) {
@ -105,8 +209,8 @@ int CNAME (BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT *
BROADCAST_B_PAIR(1, 0); MATMUL_4X(0, 1, 0);
BROADCAST_B_PAIR(1, 1); MATMUL_4X(0, 1, 1);
BROADCAST_B_PAIR(1, 2); MATMUL_4X(0, 1, 2);
ptr_b0 += 4 * 2;
ptr_b1 += 4 * 2;
ptr_a0 += 16 * 2;
}
if (k_count > 0) {
@ -117,10 +221,249 @@ int CNAME (BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT *
BROADCAST_B_PAIR_TAIL(1, 0); MATMUL_4X(0, 1, 0);
BROADCAST_B_PAIR_TAIL(1, 1); MATMUL_4X(0, 1, 1);
BROADCAST_B_PAIR_TAIL(1, 2); MATMUL_4X(0, 1, 2);
ptr_b0 += 4;
ptr_b1 += 4;
ptr_a0 += 16;
}
ptr_c0 = ptr_c;
STORE_4X(0, 0, 0); STORE_4X(0, 0, 1); STORE_4X(0, 0, 2);
STORE_4X(0, 1, 0); STORE_4X(0, 1, 1); STORE_4X(0, 1, 2);
ptr_c += 16;
}
if (m_count > 0) {
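/* m tail (1..15 rows): same k loop, with A loads and C stores guarded by mmask */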
__mmask16 mmask = (1UL << m_count) - 1;
ptr_b0 = ptr_b00;
ptr_b1 = ptr_b10;
DECLARE_A_PAIR(0);
DECLARE_B_PAIR();
DECLARE_RESULT_4X(0, 0, 0); DECLARE_RESULT_4X(0, 0, 1); DECLARE_RESULT_4X(0, 0, 2);
DECLARE_RESULT_4X(0, 1, 0); DECLARE_RESULT_4X(0, 1, 1); DECLARE_RESULT_4X(0, 1, 2);
for (k_count = k; k_count > 1; k_count -=2) {
MASK_LOAD_A_PAIR(0);
BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0);
BROADCAST_B_PAIR(0, 1); MATMUL_4X(0, 0, 1);
BROADCAST_B_PAIR(0, 2); MATMUL_4X(0, 0, 2);
BROADCAST_B_PAIR(1, 0); MATMUL_4X(0, 1, 0);
BROADCAST_B_PAIR(1, 1); MATMUL_4X(0, 1, 1);
BROADCAST_B_PAIR(1, 2); MATMUL_4X(0, 1, 2);
ptr_b0 += 4 * 2;
ptr_b1 += 4 * 2;
ptr_a0 += m_count * 2;
}
if (k_count > 0) {
MASK_LOAD_A_PAIR_TAIL(0);
BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0);
BROADCAST_B_PAIR_TAIL(0, 1); MATMUL_4X(0, 0, 1);
BROADCAST_B_PAIR_TAIL(0, 2); MATMUL_4X(0, 0, 2);
BROADCAST_B_PAIR_TAIL(1, 0); MATMUL_4X(0, 1, 0);
BROADCAST_B_PAIR_TAIL(1, 1); MATMUL_4X(0, 1, 1);
BROADCAST_B_PAIR_TAIL(1, 2); MATMUL_4X(0, 1, 2);
ptr_b0 += 4;
ptr_b1 += 4;
ptr_a0 += m_count;
}
ptr_c0 = ptr_c;
MASK_STORE_4X(0, 0, 0); MASK_STORE_4X(0, 0, 1); MASK_STORE_4X(0, 0, 2);
MASK_STORE_4X(0, 1, 0); MASK_STORE_4X(0, 1, 1); MASK_STORE_4X(0, 1, 2);
ptr_c += m_count;
}
ptr_b += 24 * k;
cn_offset += 24;
}
for (; n_count > 11; n_count -= 12) {
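/* n panel of 12: m is consumed 32 rows at a time with two A pointers, then 16 rows, then a masked tail */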
IFLOAT *ptr_b00 = ptr_b;
ptr_a0 = ptr_a;
ptr_a1 = ptr_a + 16 * k;
ptr_c = C + cn_offset * ldc;
m_count = m;
for (; m_count > 31; m_count -= 32) {
ptr_b0 = ptr_b00;
DECLARE_A_PAIR(0); DECLARE_A_PAIR(1);
DECLARE_B_PAIR();
DECLARE_RESULT_4X(0, 0, 0); DECLARE_RESULT_4X(0, 0, 1); DECLARE_RESULT_4X(0, 0, 2);
DECLARE_RESULT_4X(1, 0, 0); DECLARE_RESULT_4X(1, 0, 1); DECLARE_RESULT_4X(1, 0, 2);
for (k_count = k; k_count > 1; k_count -=2) {
LOAD_A_PAIR(0); LOAD_A_PAIR(1);
BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0); MATMUL_4X(1, 0, 0);
BROADCAST_B_PAIR(0, 1); MATMUL_4X(0, 0, 1); MATMUL_4X(1, 0, 1);
BROADCAST_B_PAIR(0, 2); MATMUL_4X(0, 0, 2); MATMUL_4X(1, 0, 2);
ptr_b0 += 4 * 2;
ptr_a0 += 16 * 2;
ptr_a1 += 16 * 2;
}
if (k_count > 0) {
LOAD_A_PAIR_TAIL(0); LOAD_A_PAIR_TAIL(1);
BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0); MATMUL_4X(1, 0, 0);
BROADCAST_B_PAIR_TAIL(0, 1); MATMUL_4X(0, 0, 1); MATMUL_4X(1, 0, 1);
BROADCAST_B_PAIR_TAIL(0, 2); MATMUL_4X(0, 0, 2); MATMUL_4X(1, 0, 2);
ptr_b0 += 4;
ptr_a0 += 16;
ptr_a1 += 16;
}
ptr_c0 = ptr_c;
ptr_c1 = ptr_c + 16;
STORE_4X(0, 0, 0); STORE_4X(1, 0, 0);
STORE_4X(0, 0, 1); STORE_4X(1, 0, 1);
STORE_4X(0, 0, 2); STORE_4X(1, 0, 2);
ptr_c += 16 * 2;
}
if (m > 31) {
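/* the 32-wide loop advanced ptr_a1 past the last block it read; continue from there */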
ptr_a0 = ptr_a1;
}
for (; m_count > 15; m_count -= 16) {
ptr_b0 = ptr_b00;
DECLARE_A_PAIR(0);
DECLARE_B_PAIR();
DECLARE_RESULT_4X(0, 0, 0); DECLARE_RESULT_4X(0, 0, 1); DECLARE_RESULT_4X(0, 0, 2);
for (k_count = k; k_count > 1; k_count -=2) {
LOAD_A_PAIR(0);
BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0);
BROADCAST_B_PAIR(0, 1); MATMUL_4X(0, 0, 1);
BROADCAST_B_PAIR(0, 2); MATMUL_4X(0, 0, 2);
ptr_b0 += 4 * 2;
ptr_a0 += 16 * 2;
}
if (k_count > 0) {
LOAD_A_PAIR_TAIL(0);
BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0);
BROADCAST_B_PAIR_TAIL(0, 1); MATMUL_4X(0, 0, 1);
BROADCAST_B_PAIR_TAIL(0, 2); MATMUL_4X(0, 0, 2);
ptr_b0 += 4;
ptr_a0 += 16;
}
ptr_c0 = ptr_c;
STORE_4X(0, 0, 0); STORE_4X(0, 0, 1); STORE_4X(0, 0, 2);
ptr_c += 16;
}
if (m_count > 0) {
__mmask16 mmask = (1UL << m_count) - 1;
ptr_b0 = ptr_b00;
DECLARE_A_PAIR(0);
DECLARE_B_PAIR();
DECLARE_RESULT_4X(0, 0, 0); DECLARE_RESULT_4X(0, 0, 1); DECLARE_RESULT_4X(0, 0, 2);
for (k_count = k; k_count > 1; k_count -=2) {
MASK_LOAD_A_PAIR(0);
BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0);
BROADCAST_B_PAIR(0, 1); MATMUL_4X(0, 0, 1);
BROADCAST_B_PAIR(0, 2); MATMUL_4X(0, 0, 2);
ptr_b0 += 4 * 2;
ptr_a0 += m_count * 2;
}
if (k_count > 0) {
MASK_LOAD_A_PAIR_TAIL(0);
BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0);
BROADCAST_B_PAIR_TAIL(0, 1); MATMUL_4X(0, 0, 1);
BROADCAST_B_PAIR_TAIL(0, 2); MATMUL_4X(0, 0, 2);
ptr_b0 += 4;
ptr_a0 += m_count;
}
ptr_c0 = ptr_c;
MASK_STORE_4X(0, 0, 0); MASK_STORE_4X(0, 0, 1); MASK_STORE_4X(0, 0, 2);
ptr_c += m_count;
}
ptr_b += 12 * k;
cn_offset += 12;
}
for (; n_count > 3; n_count -= 4) {
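/* n panel of 4: one packed 4-column B block per iteration */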
IFLOAT *ptr_b00 = ptr_b;
ptr_a0 = ptr_a;
ptr_c = C + cn_offset * ldc;
m_count = m;
for (; m_count > 15; m_count -= 16) {
ptr_b0 = ptr_b00;
DECLARE_A_PAIR(0);
DECLARE_B_PAIR();
DECLARE_RESULT_4X(0, 0, 0);
for (k_count = k; k_count > 1; k_count -=2) {
LOAD_A_PAIR(0);
BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0);
ptr_b0 += 4 * 2;
ptr_a0 += 16 * 2;
}
if (k_count > 0) {
LOAD_A_PAIR_TAIL(0);
BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0);
ptr_b0 += 4;
ptr_a0 += 16;
}
ptr_c0 = ptr_c;
STORE_4X(0, 0, 0);
ptr_c += 16;
}
if (m_count > 0) {
__mmask16 mmask = (1UL << m_count) - 1;
ptr_b0 = ptr_b00;
DECLARE_A_PAIR(0);
DECLARE_B_PAIR();
DECLARE_RESULT_4X(0, 0, 0);
for (k_count = k; k_count > 1; k_count -=2) {
MASK_LOAD_A_PAIR(0);
BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0);
ptr_b0 += 4 * 2;
ptr_a0 += m_count * 2;
}
if (k_count > 0) {
MASK_LOAD_A_PAIR_TAIL(0);
BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0);
ptr_b0 += 4;
ptr_a0 += m_count;
}
ptr_c0 = ptr_c;
MASK_STORE_4X(0, 0, 0);
ptr_c += m_count;
}
ptr_b += 4 * k;
cn_offset += 4;
}
if (n_count > 0) {
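/* n tail (1..3 columns): nmask guards the B broadcasts, and the N_*STORE_4X macros write n_count strips of C */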
__mmask8 nmask = (1UL << n_count) - 1;
IFLOAT *ptr_b00 = ptr_b;
ptr_a0 = ptr_a;
ptr_c = C + cn_offset * ldc;
m_count = m;
for (; m_count > 15; m_count -= 16) {
ptr_b0 = ptr_b00;
DECLARE_A_PAIR(0);
DECLARE_B_PAIR();
DECLARE_RESULT_4X(0, 0, 0);
for (k_count = k; k_count > 1; k_count -=2) {
LOAD_A_PAIR(0);
MASK_BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0);
ptr_b0 += n_count * 2;
ptr_a0 += 16 * 2;
}
if (k_count > 0) {
LOAD_A_PAIR_TAIL(0);
MASK_BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0);
ptr_b0 += n_count;
ptr_a0 += 16;
}
ptr_c0 = ptr_c;
N_STORE_4X(0, 0, 0);
ptr_c += 16;
}
if (m_count > 0) {
__mmask16 mmask = (1UL << m_count) - 1;
ptr_b0 = ptr_b00;
DECLARE_A_PAIR(0);
DECLARE_B_PAIR();
DECLARE_RESULT_4X(0, 0, 0);
for (k_count = k; k_count > 1; k_count -=2) {
MASK_LOAD_A_PAIR(0);
MASK_BROADCAST_B_PAIR(0, 0); MATMUL_4X(0, 0, 0);
ptr_b0 += n_count * 2;
ptr_a0 += m_count * 2;
}
if (k_count > 0) {
MASK_LOAD_A_PAIR_TAIL(0);
MASK_BROADCAST_B_PAIR_TAIL(0, 0); MATMUL_4X(0, 0, 0);
ptr_b0 += n_count;
ptr_a0 += m_count;
}
ptr_c0 = ptr_c;
N_MASK_STORE_4X(0, 0, 0);
ptr_c += m_count;
}
}
return 0;
}
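The driver itself is plain blocking: n is consumed in panels of 24, 12, and 4 columns plus a masked tail, m in strips of 32 or 16 rows plus a masked tail, and k two at a time with a zero-padded odd step. The masking idiom used for every tail, as a self-contained sketch (assumes AVX-512F):

    #include <immintrin.h>
    /* Process m_count (< 16) floats without touching memory past the tail. */
    static void scale_tail(float *dst, const float *src, int m_count, float alpha)
    {
        __mmask16 mmask = (__mmask16) ((1u << m_count) - 1);
        __m512 v = _mm512_maskz_loadu_ps(mmask, src);   /* masked-off lanes read as 0 */
        v = _mm512_mul_ps(v, _mm512_set1_ps(alpha));
        _mm512_mask_storeu_ps(dst, mmask, v);           /* masked-off lanes untouched */
    }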

View File

@ -79,8 +79,8 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){
aoffset = a;
boffset = b;
BLASLONG m32 = m & ~31;
BLASLONG m8 = m & ~7;
BLASLONG n4 = n & ~3;
int permute_table[] = {
@ -115,15 +115,15 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){
boffset += 32 * 4;
}
for (; i < m8; i += 8) {
__m128i r0 = _mm_loadu_si128((void *)(aoffset0 + i));
__m128i r1 = _mm_loadu_si128((void *)(aoffset1 + i));
__m128i r2 = _mm_loadu_si128((void *)(aoffset2 + i));
__m128i r3 = _mm_loadu_si128((void *)(aoffset3 + i));
REORDER_4x8(r0, r1, r2, r3);
_mm_storeu_si128((void *)(boffset + 8*0), r0);
_mm_storeu_si128((void *)(boffset + 8*1), r1);
_mm_storeu_si128((void *)(boffset + 8*2), r2);
_mm_storeu_si128((void *)(boffset + 8*3), r3);
boffset += 8 * 4;
}
if (i < m) {
@ -138,9 +138,9 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){
// the store must skip the odd tail row; it is handled below
int num_store = remain_m/2;
switch(num_store) {
case 3: _mm_storeu_si128((void *)(boffset + 8*2), r2);
case 2: _mm_storeu_si128((void *)(boffset + 8*1), r1);
case 1: _mm_storeu_si128((void *)(boffset + 8*0), r0);
}
boffset += 8 * num_store;
@ -152,7 +152,7 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){
* need to extract the low words of the data and store them
*/
tail = _mm_cvtepi32_epi16(tail);
_mm_store_sd((double *)boffset, (__m128d) tail); // only lower 4 bfloat valid
boffset += 4;
}
}
@ -167,16 +167,16 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){
__m128i r0, r1, r2, r3;
for (i = 0; i < m8; i += 8) {
switch (remain_n) {
case 3: r2 = _mm_loadu_si128((void *)(aoffset2 + i));
case 2: r1 = _mm_loadu_si128((void *)(aoffset1 + i));
case 1: r0 = _mm_loadu_si128((void *)(aoffset0 + i));
}
REORDER_4x8(r0, r1, r2, r3);
_mm_mask_storeu_epi32(boffset + remain_n * 0, nmask, r0);
_mm_mask_storeu_epi32(boffset + remain_n * 2, nmask, r1);
_mm_mask_storeu_epi32(boffset + remain_n * 4, nmask, r2);
_mm_mask_storeu_epi32(boffset + remain_n * 6, nmask, r3);
boffset += 8 * remain_n;
}
if (i < m) {
int remain_m = m - i;
@ -190,9 +190,9 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){
int num_store = remain_m/2;
switch (num_store) {
case 3: _mm_mask_storeu_epi32(boffset + remain_n * 4, nmask, r2);
case 2: _mm_mask_storeu_epi32(boffset + remain_n * 2, nmask, r1);
case 1: _mm_mask_storeu_epi32(boffset + remain_n * 0, nmask, r0);
}
boffset += 2 * num_store * remain_n;
@ -204,4 +204,5 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){
}
}
}
return 0;
}
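For reference, a scalar model of the pair-interleaved layout the 4x8 reorder above emits; this is an illustration inferred from the 8-element stores (four source lines r0..r3, k extent len), not code from the commit:

    #include <stdint.h>
    static void pack4_ref(const uint16_t *r0, const uint16_t *r1,
                          const uint16_t *r2, const uint16_t *r3,
                          int len, uint16_t *out)
    {
        const uint16_t *r[4] = { r0, r1, r2, r3 };
        for (int i = 0; i + 1 < len; i += 2)        /* (k, k+1) pairs */
            for (int line = 0; line < 4; line++) {
                *out++ = r[line][i];
                *out++ = r[line][i + 1];
            }
        if (len & 1)                                /* odd-k tail, stored unpaired */
            for (int line = 0; line < 4; line++)
                *out++ = r[line][len - 1];
    }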

View File

@ -29,134 +29,135 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <immintrin.h>
#include "common.h"
int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){
BLASLONG i, j;
IFLOAT *boffset0, *boffset1;
boffset0 = b;
BLASLONG n32 = n & ~31;
BLASLONG m4 = m & ~3;
BLASLONG m2 = m & ~1;
uint32_t permute_table[] = {
0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13, 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17,
0x08, 0x09, 0x0a, 0x0b, 0x18, 0x19, 0x1a, 0x1b, 0x0c, 0x0d, 0x0e, 0x0f, 0x1c, 0x1d, 0x1e, 0x1f,
};
__m512i idx_lo = _mm512_loadu_si512(permute_table);
__m512i idx_hi = _mm512_loadu_si512(permute_table + 16);
for (j = 0; j < n32; j += 32) {
/* process 2x16 n at the same time */
boffset1 = boffset0 + m * 16;
for (i = 0; i < m4; i += 4) {
/* bf16 fma needs a special memory layout:
* for a source layout like:
* a00, a01, a02, a03, a04, a05 ....
* a10, a11, a12, a13, a14, a15 ....
* it must be copied as:
* a00, a10, a01, a11, a02, a12, a03, a13, ...
*/
__m512i a0 = _mm512_loadu_si512(&a[(i + 0)*lda + j]);
__m512i a1 = _mm512_loadu_si512(&a[(i + 1)*lda + j]);
__m512i a2 = _mm512_loadu_si512(&a[(i + 2)*lda + j]);
__m512i a3 = _mm512_loadu_si512(&a[(i + 3)*lda + j]);
__m512i a00 = _mm512_unpacklo_epi16(a0, a1);
__m512i a01 = _mm512_unpackhi_epi16(a0, a1);
__m512i a10 = _mm512_unpacklo_epi16(a2, a3);
__m512i a11 = _mm512_unpackhi_epi16(a2, a3);
a0 = _mm512_permutex2var_epi32(a00, idx_lo, a01);
a1 = _mm512_permutex2var_epi32(a00, idx_hi, a01);
a2 = _mm512_permutex2var_epi32(a10, idx_lo, a11);
a3 = _mm512_permutex2var_epi32(a10, idx_hi, a11);
_mm512_storeu_si512(boffset0, a0);
_mm512_storeu_si512(boffset1, a1);
_mm512_storeu_si512(boffset0 + 32, a2);
_mm512_storeu_si512(boffset1 + 32, a3);
boffset0 += 64;
boffset1 += 64;
}
for (; i < m2; i += 2) {
__m512i a0 = _mm512_loadu_si512(&a[(i + 0)*lda + j]);
__m512i a1 = _mm512_loadu_si512(&a[(i + 1)*lda + j]);
__m512i a00 = _mm512_unpacklo_epi16(a0, a1);
__m512i a01 = _mm512_unpackhi_epi16(a0, a1);
a0 = _mm512_permutex2var_epi32(a00, idx_lo, a01);
a1 = _mm512_permutex2var_epi32(a00, idx_hi, a01);
_mm512_storeu_si512(boffset0, a0);
_mm512_storeu_si512(boffset1, a1);
boffset0 += 32;
boffset1 += 32;
}
for (; i < m; i++) {
/* just copy the single remaining row */
__m256i a0 = _mm256_loadu_si256((void *)&a[(i + 0)*lda + j]);
__m256i a1 = _mm256_loadu_si256((void *)&a[(i + 0)*lda + j + 16]);
_mm256_storeu_si256((void *)boffset0, a0);
_mm256_storeu_si256((void *)boffset1, a1);
boffset0 += 16;
boffset1 += 16;
}
boffset0 = boffset1;
}
if (j < n) {
uint32_t remains = n - j;
__mmask32 r_mask = (1UL << remains) - 1;
if (remains > 16) {
boffset1 = boffset0 + m * 16;
uint32_t tail1 = remains - 16;
__mmask16 w_mask1 = (1UL << tail1) - 1;
for (i = 0; i < m2; i += 2) {
__m512i a0 = _mm512_maskz_loadu_epi16(r_mask, &a[(i + 0)*lda + j]);
__m512i a1 = _mm512_maskz_loadu_epi16(r_mask, &a[(i + 1)*lda + j]);
__m512i a00 = _mm512_unpacklo_epi16(a0, a1);
__m512i a01 = _mm512_unpackhi_epi16(a0, a1);
a0 = _mm512_permutex2var_epi32(a00, idx_lo, a01);
a1 = _mm512_permutex2var_epi32(a00, idx_hi, a01);
_mm512_storeu_si512(boffset0, a0);
_mm512_mask_storeu_epi32(boffset1, w_mask1, a1);
boffset0 += 32;
boffset1 += 2 * tail1;
}
for (; i < m; i++) {
__m256i a0 = _mm256_loadu_si256((void *)&a[(i + 0)*lda + j]);
__m256i a1 = _mm256_maskz_loadu_epi16(w_mask1, (void *)&a[(i + 0)*lda + j + 16]);
_mm256_storeu_si256((void *)boffset0, a0);
_mm256_mask_storeu_epi16((void *)boffset1, w_mask1, a1);
boffset0 += 16;
boffset1 += tail1;
}
} else {
__mmask16 w_mask = (1UL << remains ) - 1;
for (i = 0; i < m2; i += 2) {
__m512i a0 = _mm512_maskz_loadu_epi16(r_mask, &a[(i + 0)*lda + j]);
__m512i a1 = _mm512_maskz_loadu_epi16(r_mask, &a[(i + 1)*lda + j]);
__m512i a00 = _mm512_unpacklo_epi16(a0, a1);
__m512i a01 = _mm512_unpackhi_epi16(a0, a1);
a0 = _mm512_permutex2var_epi32(a00, idx_lo, a01);
_mm512_mask_storeu_epi32(boffset0, w_mask, a0);
boffset0 += 2 * remains;
}
for (; i < m; i++) {
__m256i a0 = _mm256_maskz_loadu_epi16(w_mask, &a[(i + 0)*lda + j]);
_mm256_mask_storeu_epi16(boffset0, w_mask, a0);
boffset0 += remains;
}
}
}
}
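The rewritten permute_table feeds the two _mm512_permutex2var_epi32 steps that stitch the unpacklo/unpackhi halves back into whole-register interleaves of two rows. A standalone check of that index pattern (illustrative; the 16-bit unpacks need AVX512BW):

    #include <immintrin.h>
    #include <stdint.h>
    #include <stdio.h>
    int main(void)
    {
        uint16_t r0[32], r1[32], out[64];
        for (int i = 0; i < 32; i++) { r0[i] = i; r1[i] = 100 + i; }
        uint32_t permute_table[] = {
            0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
            0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17,
            0x08, 0x09, 0x0a, 0x0b, 0x18, 0x19, 0x1a, 0x1b,
            0x0c, 0x0d, 0x0e, 0x0f, 0x1c, 0x1d, 0x1e, 0x1f,
        };
        __m512i idx_lo = _mm512_loadu_si512(permute_table);
        __m512i idx_hi = _mm512_loadu_si512(permute_table + 16);
        __m512i a0 = _mm512_loadu_si512(r0);
        __m512i a1 = _mm512_loadu_si512(r1);
        __m512i lo = _mm512_unpacklo_epi16(a0, a1);
        __m512i hi = _mm512_unpackhi_epi16(a0, a1);
        _mm512_storeu_si512(out,      _mm512_permutex2var_epi32(lo, idx_lo, hi));
        _mm512_storeu_si512(out + 32, _mm512_permutex2var_epi32(lo, idx_hi, hi));
        for (int i = 0; i < 8; i++) printf("%d ", out[i]);  /* 0 100 1 101 2 102 3 103 */
        printf("\n");
        return 0;
    }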