Small Matrix: skylakex: sgemm nn: clean up unused code

Wangyang Guo 2021-05-11 10:33:07 +00:00
parent 49b61a3f30
commit 3d8c6d9607
1 changed file with 0 additions and 222 deletions


@@ -63,48 +63,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define STORE_REDUCE(M, N) C[(j+N)*ldc + i + M] = alpha * _mm512_reduce_add_ps(result##M##N) + beta * C[(j+N)*ldc + i + M];
#endif
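(For context: STORE_REDUCE above is a context line from the 512-bit path this commit keeps. Each 512-bit accumulator holds 16 partial products, and _mm512_reduce_add_ps horizontally sums its lanes, so every (M, N) pair collapses to a single scalar alpha/beta update of C.)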
#define DECLARE_RESULT_256(M, N) __m256 result##M##N = _mm256_setzero_ps()
#define LOAD_A_256(M, N) __m256 Aval##M = _mm256_loadu_ps(&A[lda * k + i + (M*8)])
#define BROADCAST_LOAD_B_256(M, N) __m256 Bval##N = _mm256_broadcastss_ps(_mm_load_ss(&B[k + ldb * (j+N)]))
#define MATMUL_256(M, N) result##M##N = _mm256_fmadd_ps(Aval##M, Bval##N, result##M##N)
#if defined(B0)
#define STORE_256(M, N) result##M##N = _mm256_mul_ps(result##M##N, alpha_256); \
	_mm256_storeu_ps(&C[(j+N)*ldc + i + (M*8)], result##M##N)
#else
#define STORE_256(M, N) \
	BLASLONG offset##M##N = (j+N)*ldc + i + (M*8); \
	result##M##N = _mm256_mul_ps(result##M##N, alpha_256); \
	asm("vfmadd231ps (%1, %2, 4), %3, %0": "+v"(result##M##N):"r"(&C[0]), "r"(offset##M##N), "v"(beta_256)); \
	_mm256_storeu_ps(&C[offset##M##N], result##M##N)
#endif
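(For readers decoding the AT&T-syntax asm above: vfmadd231ps with a memory operand folds the load of the old C values into the FMA, so after the alpha multiply it computes result = beta * C[offset] + result before the store. A plain-intrinsics sketch of the same beta path, for illustration only; STORE_256_EQUIV is a hypothetical name, not part of the removed code:

#define STORE_256_EQUIV(M, N) \
	BLASLONG offset##M##N = (j+N)*ldc + i + (M*8); \
	result##M##N = _mm256_mul_ps(result##M##N, alpha_256); \
	/* same math as the asm: result = beta * C_old + alpha * result */ \
	result##M##N = _mm256_fmadd_ps(beta_256, _mm256_loadu_ps(&C[offset##M##N]), result##M##N); \
	_mm256_storeu_ps(&C[offset##M##N], result##M##N)

The asm form saves a named load and lets the compiler keep more accumulators in registers, which is presumably why it was preferred here.)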
#define DECLARE_RESULT_128(M, N) __m128 result##M##N; asm("vpxorq %0, %0, %0": "+v"(result##M##N):)
#define LOAD_A_128(M, N) __m128 Aval##M = _mm_maskz_loadu_ps(mask, &A[lda * k + i + (M*4)])
#define BROADCAST_LOAD_B_128(M, N) __m128 Bval##N = _mm_broadcastss_ps(_mm_load_ss(&B[k + ldb * (j+N)]))
#define MATMUL_128(M, N) result##M##N = _mm_fmadd_ps(Aval##M, Bval##N, result##M##N)
#if defined(B0)
#define STORE_128(M, N) result##M##N = _mm_maskz_mul_ps(mask, result##M##N, alpha_128); \
	_mm_mask_storeu_ps(&C[(j+N)*ldc + i + (M*4)], mask, result##M##N)
#else
#define STORE_128(M, N) \
	BLASLONG offset##M##N = (j+N)*ldc + i + (M*4); \
	result##M##N = _mm_maskz_mul_ps(mask, result##M##N, alpha_128); \
	asm("vfmadd231ps (%1, %2, 4), %3, %0": "+v"(result##M##N):"r"(&C[0]), "r"(offset##M##N), "v"(beta_128)); \
	_mm_mask_storeu_ps(&C[offset##M##N], mask, result##M##N)
#endif
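(The masked 128-bit beta path works the same way. An intrinsics-only sketch, again with a hypothetical name and assuming mask is the existing tail mask: a zero-masked load leaves inactive lanes at zero, so they add nothing to result, and the final masked store never writes them anyway:

#define STORE_128_EQUIV(M, N) \
	BLASLONG offset##M##N = (j+N)*ldc + i + (M*4); \
	result##M##N = _mm_maskz_mul_ps(mask, result##M##N, alpha_128); \
	/* beta * C_old + alpha * result, valid lanes only */ \
	result##M##N = _mm_fmadd_ps(beta_128, _mm_maskz_loadu_ps(mask, &C[offset##M##N]), result##M##N); \
	_mm_mask_storeu_ps(&C[offset##M##N], mask, result##M##N))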
#define DECLARE_RESULT_S(M, N) float result##M##N = 0;
#define LOAD_A_S(M, N) float Aval##M = A[lda * k + i + M]
#define BROADCAST_LOAD_B_S(M, N) float Bval##N = B[k + ldb * (j+N)]
#define MATMUL_S(M, N) result##M##N += Aval##M * Bval##N
#if defined(B0)
#define STORE_S(M, N) C[(j+N)*ldc + i + M] = result##M##N * alpha
#else
#define STORE_S(M, N) C[(j+N)*ldc + i + M] = result##M##N * alpha + C[(j+N)*ldc + i + M] * beta
#endif
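(As a concrete illustration of how these macro families compose, expanding the scalar set for one element, M = 0 and N = 0 in the non-B0 case, gives the plain C below; this is just the macro expansion written out:

float result00 = 0;                        /* DECLARE_RESULT_S(0, 0) */
for (k = 0; k < K; k++) {
	float Aval0 = A[lda * k + i + 0];      /* LOAD_A_S(0, x) */
	float Bval0 = B[k + ldb * (j+0)];      /* BROADCAST_LOAD_B_S(x, 0) */
	result00 += Aval0 * Bval0;             /* MATMUL_S(0, 0) */
}
C[(j+0)*ldc + i + 0] = result00 * alpha + C[(j+0)*ldc + i + 0] * beta;  /* STORE_S(0, 0) */

The vector variants follow the same DECLARE / LOAD / BROADCAST / MATMUL / STORE pattern, only with 8 or 4 rows of C per accumulator.)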
#if defined(B0)
int CNAME(BLASLONG M, BLASLONG N, BLASLONG K, FLOAT * A, BLASLONG lda, FLOAT alpha, FLOAT * B, BLASLONG ldb, FLOAT * C, BLASLONG ldc)
#else
@@ -594,184 +552,4 @@ int CNAME(BLASLONG M, BLASLONG N, BLASLONG K, FLOAT * A, BLASLONG lda, FLOAT alp
		free(mbuf);
		return;
	}
	__m256 alpha_256 = _mm256_broadcastss_ps(_mm_load_ss(&alpha));
#if !defined(B0)
	__m256 beta_256 = _mm256_broadcastss_ps(_mm_load_ss(&beta));
#endif
	for (; i < m8; i += 8) {
		for (j = 0; j < n4; j += 4) {
			DECLARE_RESULT_256(0, 0);
			DECLARE_RESULT_256(0, 1);
			DECLARE_RESULT_256(0, 2);
			DECLARE_RESULT_256(0, 3);
			for (k = 0; k < K; k++) {
				LOAD_A_256(0, x);
				BROADCAST_LOAD_B_256(x, 0); BROADCAST_LOAD_B_256(x, 1);
				BROADCAST_LOAD_B_256(x, 2); BROADCAST_LOAD_B_256(x, 3);
				MATMUL_256(0, 0);
				MATMUL_256(0, 1);
				MATMUL_256(0, 2);
				MATMUL_256(0, 3);
			}
			STORE_256(0, 0);
			STORE_256(0, 1);
			STORE_256(0, 2);
			STORE_256(0, 3);
		}
		for (; j < n2; j += 2) {
			DECLARE_RESULT_256(0, 0);
			DECLARE_RESULT_256(0, 1);
			for (k = 0; k < K; k++) {
				LOAD_A_256(0, x);
				BROADCAST_LOAD_B_256(x, 0); BROADCAST_LOAD_B_256(x, 1);
				MATMUL_256(0, 0);
				MATMUL_256(0, 1);
			}
			STORE_256(0, 0);
			STORE_256(0, 1);
		}
		for (; j < N; j++) {
			DECLARE_RESULT_256(0, 0);
			for (k = 0; k < K; k++) {
				LOAD_A_256(0, x);
				BROADCAST_LOAD_B_256(x, 0);
				MATMUL_256(0, 0);
			}
			STORE_256(0, 0);
		}
	}
	__m128 alpha_128 = _mm_broadcastss_ps(_mm_load_ss(&alpha));
#if !defined(B0)
	__m128 beta_128 = _mm_broadcastss_ps(_mm_load_ss(&beta));
#endif
	for (; i < m4; i += 4) {
		for (j = 0; j < n4; j += 4) {
			DECLARE_RESULT_128(0, 0);
			DECLARE_RESULT_128(0, 1);
			DECLARE_RESULT_128(0, 2);
			DECLARE_RESULT_128(0, 3);
			for (k = 0; k < K; k++) {
				LOAD_A_128(0, x);
				BROADCAST_LOAD_B_128(x, 0); BROADCAST_LOAD_B_128(x, 1);
				BROADCAST_LOAD_B_128(x, 2); BROADCAST_LOAD_B_128(x, 3);
				MATMUL_128(0, 0);
				MATMUL_128(0, 1);
				MATMUL_128(0, 2);
				MATMUL_128(0, 3);
			}
			STORE_128(0, 0);
			STORE_128(0, 1);
			STORE_128(0, 2);
			STORE_128(0, 3);
		}
		for (; j < n2; j += 2) {
			DECLARE_RESULT_128(0, 0);
			DECLARE_RESULT_128(0, 1);
			for (k = 0; k < K; k++) {
				LOAD_A_128(0, x);
				BROADCAST_LOAD_B_128(x, 0); BROADCAST_LOAD_B_128(x, 1);
				MATMUL_128(0, 0);
				MATMUL_128(0, 1);
			}
			STORE_128(0, 0);
			STORE_128(0, 1);
		}
		for (; j < N; j++) {
			DECLARE_RESULT_128(0, 0);
			for (k = 0; k < K; k++) {
				LOAD_A_128(0, x);
				BROADCAST_LOAD_B_128(x, 0);
				MATMUL_128(0, 0);
			}
			STORE_128(0, 0);
		}
	}
	for (; i < m2; i += 2) {
		for (j = 0; j < n4; j += 4) {
			DECLARE_RESULT_S(0, 0); DECLARE_RESULT_S(1, 0);
			DECLARE_RESULT_S(0, 1); DECLARE_RESULT_S(1, 1);
			DECLARE_RESULT_S(0, 2); DECLARE_RESULT_S(1, 2);
			DECLARE_RESULT_S(0, 3); DECLARE_RESULT_S(1, 3);
			for (k = 0; k < K; k++) {
				LOAD_A_S(0, x); LOAD_A_S(1, x);
				BROADCAST_LOAD_B_S(x, 0); BROADCAST_LOAD_B_S(x, 1);
				BROADCAST_LOAD_B_S(x, 2); BROADCAST_LOAD_B_S(x, 3);
				MATMUL_S(0, 0); MATMUL_S(1, 0);
				MATMUL_S(0, 1); MATMUL_S(1, 1);
				MATMUL_S(0, 2); MATMUL_S(1, 2);
				MATMUL_S(0, 3); MATMUL_S(1, 3);
			}
			STORE_S(0, 0); STORE_S(1, 0);
			STORE_S(0, 1); STORE_S(1, 1);
			STORE_S(0, 2); STORE_S(1, 2);
			STORE_S(0, 3); STORE_S(1, 3);
		}
		for (; j < n2; j += 2) {
			DECLARE_RESULT_S(0, 0); DECLARE_RESULT_S(1, 0);
			DECLARE_RESULT_S(0, 1); DECLARE_RESULT_S(1, 1);
			for (k = 0; k < K; k++) {
				LOAD_A_S(0, x); LOAD_A_S(1, x);
				BROADCAST_LOAD_B_S(x, 0); BROADCAST_LOAD_B_S(x, 1);
				MATMUL_S(0, 0); MATMUL_S(1, 0);
				MATMUL_S(0, 1); MATMUL_S(1, 1);
			}
			STORE_S(0, 0); STORE_S(1, 0);
			STORE_S(0, 1); STORE_S(1, 1);
		}
		for (; j < N; j++) {
			DECLARE_RESULT_S(0, 0); DECLARE_RESULT_S(1, 0);
			for (k = 0; k < K; k++) {
				LOAD_A_S(0, x); LOAD_A_S(1, x);
				BROADCAST_LOAD_B_S(x, 0);
				MATMUL_S(0, 0); MATMUL_S(1, 0);
			}
			STORE_S(0, 0); STORE_S(1, 0);
		}
	}
	for (; i < M; i += 1) {
		for (j = 0; j < n4; j += 4) {
			DECLARE_RESULT_S(0, 0);
			DECLARE_RESULT_S(0, 1);
			DECLARE_RESULT_S(0, 2);
			DECLARE_RESULT_S(0, 3);
			for (k = 0; k < K; k++) {
				LOAD_A_S(0, x);
				BROADCAST_LOAD_B_S(x, 0); BROADCAST_LOAD_B_S(x, 1);
				BROADCAST_LOAD_B_S(x, 2); BROADCAST_LOAD_B_S(x, 3);
				MATMUL_S(0, 0);
				MATMUL_S(0, 1);
				MATMUL_S(0, 2);
				MATMUL_S(0, 3);
			}
			STORE_S(0, 0);
			STORE_S(0, 1);
			STORE_S(0, 2);
			STORE_S(0, 3);
		}
		for (; j < n2; j += 2) {
			DECLARE_RESULT_S(0, 0);
			DECLARE_RESULT_S(0, 1);
			for (k = 0; k < K; k++) {
				LOAD_A_S(0, x);
				BROADCAST_LOAD_B_S(x, 0); BROADCAST_LOAD_B_S(x, 1);
				MATMUL_S(0, 0);
				MATMUL_S(0, 1);
			}
			STORE_S(0, 0);
			STORE_S(0, 1);
		}
		for (; j < N; j++) {
			DECLARE_RESULT_S(0, 0);
			for (k = 0; k < K; k++) {
				LOAD_A_S(0, x);
				BROADCAST_LOAD_B_S(x, 0);
				MATMUL_S(0, 0);
			}
			STORE_S(0, 0);
		}
	}
}
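(Taken together, the removed code was a row-band cascade: assuming m8, m4, and m2 round M down to multiples of 8, 4, and 2, with definitions outside these hunks, e.g. something like m8 = M & ~7, a call with M = 15 would have covered rows 0-7 with the 256-bit path, rows 8-11 with the masked 128-bit path, rows 12-13 with the two-row scalar path, and row 14 with the single-row scalar loop, applying the same n4/n2/N cascade over columns inside each band. The commit message indicates these paths were already unreachable, so the kernel presumably serves those shapes with its remaining 512-bit masked code.)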