Merge pull request #1793 from fenrus75/ncopy

Add optimized *copy versions for skylakex

This adds SkylakeX-specific dgemm ncopy/tcopy and beta kernels, and reworks the dgemm kernel to use fused multiply-add for the alpha * result + C stores.
commit 667f0cc1cb, merged by Martin Kroeker on 2018-10-09 08:19:14 +02:00 (committed by GitHub)
5 changed files with 1049 additions and 138 deletions

View File: kernel/x86_64/KERNEL.SKYLAKEX

@@ -4,10 +4,10 @@ SGEMMKERNEL = sgemm_kernel_16x4_skylakex.S
DGEMMKERNEL = dgemm_kernel_4x8_skylakex.c
-DGEMMINCOPY = ../generic/gemm_ncopy_8.c
-DGEMMITCOPY = ../generic/gemm_tcopy_8.c
-DGEMMONCOPY = ../generic/gemm_ncopy_8.c
-DGEMMOTCOPY = ../generic/gemm_tcopy_8.c
+DGEMMINCOPY = dgemm_ncopy_8_skylakex.c
+DGEMMITCOPY = dgemm_tcopy_8_skylakex.c
+DGEMMONCOPY = dgemm_ncopy_8_skylakex.c
+DGEMMOTCOPY = dgemm_tcopy_8_skylakex.c
SGEMM_BETA = ../generic/gemm_beta.c
-DGEMM_BETA = ../generic/gemm_beta.c
+DGEMM_BETA = dgemm_beta_skylakex.c
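
For reference, the copy kernels named above all implement the standard OpenBLAS packing interface; the signature below is taken from the new sources (BLASLONG and FLOAT come from common.h), and CNAME expands to the exported kernel name (e.g. dgemm_oncopy) at build time:

int CNAME(BLASLONG m, BLASLONG n, FLOAT * __restrict a, BLASLONG lda, FLOAT * __restrict b);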

View File: kernel/x86_64/dgemm_beta_skylakex.c (new file)

@@ -0,0 +1,150 @@
/*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin. */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or */
/* without modification, are permitted provided that the following */
/* conditions are met: */
/* */
/* 1. Redistributions of source code must retain the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer in the documentation and/or other materials */
/* provided with the distribution. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
/* */
/* The views and conclusions contained in the software and */
/* documentation are those of the authors and should not be */
/* interpreted as representing official policies, either expressed */
/* or implied, of The University of Texas at Austin. */
/*********************************************************************/
#include "common.h"
#include <immintrin.h>
int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT beta,
FLOAT *dummy2, BLASLONG dummy3, FLOAT *dummy4, BLASLONG dummy5,
FLOAT *c, BLASLONG ldc){
BLASLONG i, j;
FLOAT *c_offset1, *c_offset;
FLOAT ctemp1, ctemp2, ctemp3, ctemp4;
FLOAT ctemp5, ctemp6, ctemp7, ctemp8;
/* fast path: beta == 0 and the rows are contiguous (m == ldc), so just zero the whole matrix */
if (m == ldc && beta == ZERO) {
memset(c, 0, m * n * sizeof(FLOAT));
return 0;
}
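/* beta == 0: C is only written, never read, so scaling degenerates to
   storing zeros.  The loop below issues four 512-bit stores per
   iteration (32 doubles), then single vector stores, then scalar
   stores for the tail of each column. */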
c_offset = c;
if (beta == ZERO){
__m512d z_zero;
z_zero = _mm512_setzero_pd();
j = n;
do {
c_offset1 = c_offset;
c_offset += ldc;
i = m;
while (i >= 32) {
_mm512_storeu_pd(c_offset1, z_zero);
_mm512_storeu_pd(c_offset1 + 8, z_zero);
_mm512_storeu_pd(c_offset1 + 16, z_zero);
_mm512_storeu_pd(c_offset1 + 24 , z_zero);
c_offset1 += 32;
i -= 32;
}
while (i >= 8) {
_mm512_storeu_pd(c_offset1, z_zero);
c_offset1 += 8;
i -= 8;
}
while (i > 0) {
*c_offset1 = ZERO;
c_offset1 ++;
i --;
}
j --;
} while (j > 0);
} else {
j = n;
do {
c_offset1 = c_offset;
c_offset += ldc;
i = (m >> 3);
if (i > 0){
do {
ctemp1 = *(c_offset1 + 0);
ctemp2 = *(c_offset1 + 1);
ctemp3 = *(c_offset1 + 2);
ctemp4 = *(c_offset1 + 3);
ctemp5 = *(c_offset1 + 4);
ctemp6 = *(c_offset1 + 5);
ctemp7 = *(c_offset1 + 6);
ctemp8 = *(c_offset1 + 7);
ctemp1 *= beta;
ctemp2 *= beta;
ctemp3 *= beta;
ctemp4 *= beta;
ctemp5 *= beta;
ctemp6 *= beta;
ctemp7 *= beta;
ctemp8 *= beta;
*(c_offset1 + 0) = ctemp1;
*(c_offset1 + 1) = ctemp2;
*(c_offset1 + 2) = ctemp3;
*(c_offset1 + 3) = ctemp4;
*(c_offset1 + 4) = ctemp5;
*(c_offset1 + 5) = ctemp6;
*(c_offset1 + 6) = ctemp7;
*(c_offset1 + 7) = ctemp8;
c_offset1 += 8;
i --;
} while (i > 0);
}
i = (m & 7);
if (i > 0){
do {
ctemp1 = *c_offset1;
ctemp1 *= beta;
*c_offset1 = ctemp1;
c_offset1 ++;
i --;
} while (i > 0);
}
j --;
} while (j > 0);
}
return 0;
}
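
A minimal standalone check of the kernel above (a sketch: outside the OpenBLAS build CNAME, BLASLONG and FLOAT are not defined, so the name dgemm_beta and the long/double types used here are assumptions):

#include <stdio.h>

/* hypothetical prototype matching the common gemm_beta calling
   convention; the dummy arguments are unused by this kernel */
extern int dgemm_beta(long m, long n, long dummy1, double beta,
                      double *dummy2, long dummy3, double *dummy4,
                      long dummy5, double *c, long ldc);

int main(void) {
    double c[4 * 4];
    for (int i = 0; i < 16; i++) c[i] = 2.0;
    dgemm_beta(4, 4, 0, 0.5, NULL, 0, NULL, 0, c, 4);  /* C *= 0.5 */
    printf("c[0] = %f\n", c[0]);                       /* prints 1.0 */
    return 0;
}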

View File: kernel/x86_64/dgemm_kernel_4x8_skylakex.c

@@ -333,17 +333,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define KERNEL4x4_SUB() \
ymm0 = _mm256_loadu_pd(AO - 16); \
-ymm1 = _mm256_loadu_pd(BO - 12); \
+ymm1 = _mm256_broadcastsd_pd(_mm_load_sd(BO - 12)); \
\
ymm4 += ymm0 * ymm1; \
\
-ymm0 = _mm256_permute4x64_pd(ymm0, 0xb1); \
+ymm1 = _mm256_broadcastsd_pd(_mm_load_sd(BO - 11)); \
ymm5 += ymm0 * ymm1; \
\
-ymm0 = _mm256_permute4x64_pd(ymm0, 0x1b); \
+ymm1 = _mm256_broadcastsd_pd(_mm_load_sd(BO - 10)); \
ymm6 += ymm0 * ymm1; \
\
-ymm0 = _mm256_permute4x64_pd(ymm0, 0xb1); \
+ymm1 = _mm256_broadcastsd_pd(_mm_load_sd(BO - 9)); \
ymm7 += ymm0 * ymm1; \
AO += 4; \
BO += 4;
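
The rewritten KERNEL4x4_SUB broadcasts one element of B at a time instead of permuting the lanes of A, so each ymm accumulator ends up holding one complete row of the 4x4 tile in natural lane order. A scalar sketch of what one k-step now computes (names are illustrative, not from the source):

/* acc[j] models ymm4+j; a[] is the 4-entry chunk of packed A,
   b[] the matching 4 entries of packed B */
for (int j = 0; j < 4; j++)            /* b[j] is broadcast to all lanes */
    for (int lane = 0; lane < 4; lane++)
        acc[j][lane] += a[lane] * b[j];

Because the accumulators are already row-ordered, the blend/permute fix-up the old code needed before storing to C (deleted in the next hunk) is no longer required.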
@@ -356,24 +356,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
ymm6 *= ymm0; \
ymm7 *= ymm0; \
\
-ymm5 = _mm256_permute4x64_pd(ymm5, 0xb1); \
-ymm7 = _mm256_permute4x64_pd(ymm7, 0xb1); \
-\
-ymm0 = _mm256_blend_pd(ymm4, ymm5, 0x0a); \
-ymm1 = _mm256_blend_pd(ymm4, ymm5, 0x05); \
-ymm2 = _mm256_blend_pd(ymm6, ymm7, 0x0a); \
-ymm3 = _mm256_blend_pd(ymm6, ymm7, 0x05); \
-\
-ymm2 = _mm256_permute4x64_pd(ymm2, 0x1b); \
-ymm3 = _mm256_permute4x64_pd(ymm3, 0x1b); \
-ymm2 = _mm256_permute4x64_pd(ymm2, 0xb1); \
-ymm3 = _mm256_permute4x64_pd(ymm3, 0xb1); \
-\
-ymm4 = _mm256_blend_pd(ymm2, ymm0, 0x03); \
-ymm5 = _mm256_blend_pd(ymm3, ymm1, 0x03); \
-ymm6 = _mm256_blend_pd(ymm0, ymm2, 0x03); \
-ymm7 = _mm256_blend_pd(ymm1, ymm3, 0x03); \
-\
ymm4 += _mm256_loadu_pd(CO1 + (0 * ldc)); \
ymm5 += _mm256_loadu_pd(CO1 + (1 * ldc)); \
ymm6 += _mm256_loadu_pd(CO1 + (2 * ldc)); \
@@ -647,11 +629,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define SAVE2x2(ALPHA) \
if (ALPHA != 1.0) { \
xmm0 = _mm_set1_pd(ALPHA); \
xmm4 *= xmm0; \
xmm6 *= xmm0; \
} \
\
xmm4 += _mm_loadu_pd(CO1); \
xmm6 += _mm_loadu_pd(CO1 + ldc); \
@@ -947,39 +927,15 @@ CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A,
"jg .label24\n"
/* multiply the result by alpha */
"vbroadcastsd (%[alpha]), %%zmm9\n"
"vmulpd %%zmm9, %%zmm1, %%zmm1\n"
"vmulpd %%zmm9, %%zmm2, %%zmm2\n"
"vmulpd %%zmm9, %%zmm3, %%zmm3\n"
"vmulpd %%zmm9, %%zmm4, %%zmm4\n"
"vmulpd %%zmm9, %%zmm5, %%zmm5\n"
"vmulpd %%zmm9, %%zmm6, %%zmm6\n"
"vmulpd %%zmm9, %%zmm7, %%zmm7\n"
"vmulpd %%zmm9, %%zmm8, %%zmm8\n"
"vmulpd %%zmm9, %%zmm11, %%zmm11\n"
"vmulpd %%zmm9, %%zmm12, %%zmm12\n"
"vmulpd %%zmm9, %%zmm13, %%zmm13\n"
"vmulpd %%zmm9, %%zmm14, %%zmm14\n"
"vmulpd %%zmm9, %%zmm15, %%zmm15\n"
"vmulpd %%zmm9, %%zmm16, %%zmm16\n"
"vmulpd %%zmm9, %%zmm17, %%zmm17\n"
"vmulpd %%zmm9, %%zmm18, %%zmm18\n"
"vmulpd %%zmm9, %%zmm21, %%zmm21\n"
"vmulpd %%zmm9, %%zmm22, %%zmm22\n"
"vmulpd %%zmm9, %%zmm23, %%zmm23\n"
"vmulpd %%zmm9, %%zmm24, %%zmm24\n"
"vmulpd %%zmm9, %%zmm25, %%zmm25\n"
"vmulpd %%zmm9, %%zmm26, %%zmm26\n"
"vmulpd %%zmm9, %%zmm27, %%zmm27\n"
"vmulpd %%zmm9, %%zmm28, %%zmm28\n"
/* And store additively in C */
"vaddpd (%[C0]), %%zmm1, %%zmm1\n"
"vaddpd (%[C1]), %%zmm2, %%zmm2\n"
"vaddpd (%[C2]), %%zmm3, %%zmm3\n"
"vaddpd (%[C3]), %%zmm4, %%zmm4\n"
"vaddpd (%[C4]), %%zmm5, %%zmm5\n"
"vaddpd (%[C5]), %%zmm6, %%zmm6\n"
"vaddpd (%[C6]), %%zmm7, %%zmm7\n"
"vaddpd (%[C7]), %%zmm8, %%zmm8\n"
"vfmadd213pd (%[C0]), %%zmm9, %%zmm1\n"
"vfmadd213pd (%[C1]), %%zmm9, %%zmm2\n"
"vfmadd213pd (%[C2]), %%zmm9, %%zmm3\n"
"vfmadd213pd (%[C3]), %%zmm9, %%zmm4\n"
"vfmadd213pd (%[C4]), %%zmm9, %%zmm5\n"
"vfmadd213pd (%[C5]), %%zmm9, %%zmm6\n"
"vfmadd213pd (%[C6]), %%zmm9, %%zmm7\n"
"vfmadd213pd (%[C7]), %%zmm9, %%zmm8\n"
"vmovupd %%zmm1, (%[C0])\n"
"vmovupd %%zmm2, (%[C1])\n"
"vmovupd %%zmm3, (%[C2])\n"
@@ -989,14 +945,14 @@ CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A,
"vmovupd %%zmm7, (%[C6])\n"
"vmovupd %%zmm8, (%[C7])\n"
"vaddpd 64(%[C0]), %%zmm11, %%zmm11\n"
"vaddpd 64(%[C1]), %%zmm12, %%zmm12\n"
"vaddpd 64(%[C2]), %%zmm13, %%zmm13\n"
"vaddpd 64(%[C3]), %%zmm14, %%zmm14\n"
"vaddpd 64(%[C4]), %%zmm15, %%zmm15\n"
"vaddpd 64(%[C5]), %%zmm16, %%zmm16\n"
"vaddpd 64(%[C6]), %%zmm17, %%zmm17\n"
"vaddpd 64(%[C7]), %%zmm18, %%zmm18\n"
"vfmadd213pd 64(%[C0]), %%zmm9, %%zmm11\n"
"vfmadd213pd 64(%[C1]), %%zmm9, %%zmm12\n"
"vfmadd213pd 64(%[C2]), %%zmm9, %%zmm13\n"
"vfmadd213pd 64(%[C3]), %%zmm9, %%zmm14\n"
"vfmadd213pd 64(%[C4]), %%zmm9, %%zmm15\n"
"vfmadd213pd 64(%[C5]), %%zmm9, %%zmm16\n"
"vfmadd213pd 64(%[C6]), %%zmm9, %%zmm17\n"
"vfmadd213pd 64(%[C7]), %%zmm9, %%zmm18\n"
"vmovupd %%zmm11, 64(%[C0])\n"
"vmovupd %%zmm12, 64(%[C1])\n"
"vmovupd %%zmm13, 64(%[C2])\n"
@@ -1006,14 +962,14 @@ CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A,
"vmovupd %%zmm17, 64(%[C6])\n"
"vmovupd %%zmm18, 64(%[C7])\n"
"vaddpd 128(%[C0]), %%zmm21, %%zmm21\n"
"vaddpd 128(%[C1]), %%zmm22, %%zmm22\n"
"vaddpd 128(%[C2]), %%zmm23, %%zmm23\n"
"vaddpd 128(%[C3]), %%zmm24, %%zmm24\n"
"vaddpd 128(%[C4]), %%zmm25, %%zmm25\n"
"vaddpd 128(%[C5]), %%zmm26, %%zmm26\n"
"vaddpd 128(%[C6]), %%zmm27, %%zmm27\n"
"vaddpd 128(%[C7]), %%zmm28, %%zmm28\n"
"vfmadd213pd 128(%[C0]), %%zmm9, %%zmm21\n"
"vfmadd213pd 128(%[C1]), %%zmm9, %%zmm22\n"
"vfmadd213pd 128(%[C2]), %%zmm9, %%zmm23\n"
"vfmadd213pd 128(%[C3]), %%zmm9, %%zmm24\n"
"vfmadd213pd 128(%[C4]), %%zmm9, %%zmm25\n"
"vfmadd213pd 128(%[C5]), %%zmm9, %%zmm26\n"
"vfmadd213pd 128(%[C6]), %%zmm9, %%zmm27\n"
"vfmadd213pd 128(%[C7]), %%zmm9, %%zmm28\n"
"vmovupd %%zmm21, 128(%[C0])\n"
"vmovupd %%zmm22, 128(%[C1])\n"
"vmovupd %%zmm23, 128(%[C2])\n"
@@ -1128,31 +1084,15 @@ CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A,
"jg .label16\n"
/* multiply the result by alpha */
"vbroadcastsd (%[alpha]), %%zmm9\n"
"vmulpd %%zmm9, %%zmm1, %%zmm1\n"
"vmulpd %%zmm9, %%zmm2, %%zmm2\n"
"vmulpd %%zmm9, %%zmm3, %%zmm3\n"
"vmulpd %%zmm9, %%zmm4, %%zmm4\n"
"vmulpd %%zmm9, %%zmm5, %%zmm5\n"
"vmulpd %%zmm9, %%zmm6, %%zmm6\n"
"vmulpd %%zmm9, %%zmm7, %%zmm7\n"
"vmulpd %%zmm9, %%zmm8, %%zmm8\n"
"vmulpd %%zmm9, %%zmm11, %%zmm11\n"
"vmulpd %%zmm9, %%zmm12, %%zmm12\n"
"vmulpd %%zmm9, %%zmm13, %%zmm13\n"
"vmulpd %%zmm9, %%zmm14, %%zmm14\n"
"vmulpd %%zmm9, %%zmm15, %%zmm15\n"
"vmulpd %%zmm9, %%zmm16, %%zmm16\n"
"vmulpd %%zmm9, %%zmm17, %%zmm17\n"
"vmulpd %%zmm9, %%zmm18, %%zmm18\n"
/* And store additively in C */
"vaddpd (%[C0]), %%zmm1, %%zmm1\n"
"vaddpd (%[C1]), %%zmm2, %%zmm2\n"
"vaddpd (%[C2]), %%zmm3, %%zmm3\n"
"vaddpd (%[C3]), %%zmm4, %%zmm4\n"
"vaddpd (%[C4]), %%zmm5, %%zmm5\n"
"vaddpd (%[C5]), %%zmm6, %%zmm6\n"
"vaddpd (%[C6]), %%zmm7, %%zmm7\n"
"vaddpd (%[C7]), %%zmm8, %%zmm8\n"
"vfmadd213pd (%[C0]), %%zmm9, %%zmm1\n"
"vfmadd213pd (%[C1]), %%zmm9, %%zmm2\n"
"vfmadd213pd (%[C2]), %%zmm9, %%zmm3\n"
"vfmadd213pd (%[C3]), %%zmm9, %%zmm4\n"
"vfmadd213pd (%[C4]), %%zmm9, %%zmm5\n"
"vfmadd213pd (%[C5]), %%zmm9, %%zmm6\n"
"vfmadd213pd (%[C6]), %%zmm9, %%zmm7\n"
"vfmadd213pd (%[C7]), %%zmm9, %%zmm8\n"
"vmovupd %%zmm1, (%[C0])\n"
"vmovupd %%zmm2, (%[C1])\n"
"vmovupd %%zmm3, (%[C2])\n"
@@ -1162,14 +1102,14 @@ CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A,
"vmovupd %%zmm7, (%[C6])\n"
"vmovupd %%zmm8, (%[C7])\n"
"vaddpd 64(%[C0]), %%zmm11, %%zmm11\n"
"vaddpd 64(%[C1]), %%zmm12, %%zmm12\n"
"vaddpd 64(%[C2]), %%zmm13, %%zmm13\n"
"vaddpd 64(%[C3]), %%zmm14, %%zmm14\n"
"vaddpd 64(%[C4]), %%zmm15, %%zmm15\n"
"vaddpd 64(%[C5]), %%zmm16, %%zmm16\n"
"vaddpd 64(%[C6]), %%zmm17, %%zmm17\n"
"vaddpd 64(%[C7]), %%zmm18, %%zmm18\n"
"vfmadd213pd 64(%[C0]), %%zmm9, %%zmm11\n"
"vfmadd213pd 64(%[C1]), %%zmm9, %%zmm12\n"
"vfmadd213pd 64(%[C2]), %%zmm9, %%zmm13\n"
"vfmadd213pd 64(%[C3]), %%zmm9, %%zmm14\n"
"vfmadd213pd 64(%[C4]), %%zmm9, %%zmm15\n"
"vfmadd213pd 64(%[C5]), %%zmm9, %%zmm16\n"
"vfmadd213pd 64(%[C6]), %%zmm9, %%zmm17\n"
"vfmadd213pd 64(%[C7]), %%zmm9, %%zmm18\n"
"vmovupd %%zmm11, 64(%[C0])\n"
"vmovupd %%zmm12, 64(%[C1])\n"
"vmovupd %%zmm13, 64(%[C2])\n"
@@ -1241,24 +1181,15 @@ CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A,
"add $64, %[BO]\n"
"subl $1, %[kloop]\n"
"jg .label1\n"
-/* multiply the result by alpha */
-"vmulpd %%zmm9, %%zmm1, %%zmm1\n"
-"vmulpd %%zmm9, %%zmm2, %%zmm2\n"
-"vmulpd %%zmm9, %%zmm3, %%zmm3\n"
-"vmulpd %%zmm9, %%zmm4, %%zmm4\n"
-"vmulpd %%zmm9, %%zmm5, %%zmm5\n"
-"vmulpd %%zmm9, %%zmm6, %%zmm6\n"
-"vmulpd %%zmm9, %%zmm7, %%zmm7\n"
-"vmulpd %%zmm9, %%zmm8, %%zmm8\n"
-/* And store additively in C */
-"vaddpd (%[C0]), %%zmm1, %%zmm1\n"
-"vaddpd (%[C1]), %%zmm2, %%zmm2\n"
-"vaddpd (%[C2]), %%zmm3, %%zmm3\n"
-"vaddpd (%[C3]), %%zmm4, %%zmm4\n"
-"vaddpd (%[C4]), %%zmm5, %%zmm5\n"
-"vaddpd (%[C5]), %%zmm6, %%zmm6\n"
-"vaddpd (%[C6]), %%zmm7, %%zmm7\n"
-"vaddpd (%[C7]), %%zmm8, %%zmm8\n"
+/* multiply the result by alpha and add to the memory */
+"vfmadd213pd (%[C0]), %%zmm9, %%zmm1\n"
+"vfmadd213pd (%[C1]), %%zmm9, %%zmm2\n"
+"vfmadd213pd (%[C2]), %%zmm9, %%zmm3\n"
+"vfmadd213pd (%[C3]), %%zmm9, %%zmm4\n"
+"vfmadd213pd (%[C4]), %%zmm9, %%zmm5\n"
+"vfmadd213pd (%[C5]), %%zmm9, %%zmm6\n"
+"vfmadd213pd (%[C6]), %%zmm9, %%zmm7\n"
+"vfmadd213pd (%[C7]), %%zmm9, %%zmm8\n"
"vmovupd %%zmm1, (%[C0])\n"
"vmovupd %%zmm2, (%[C1])\n"
"vmovupd %%zmm3, (%[C2])\n"
@@ -1267,14 +1198,6 @@ CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A,
"vmovupd %%zmm6, (%[C5])\n"
"vmovupd %%zmm7, (%[C6])\n"
"vmovupd %%zmm8, (%[C7])\n"
"prefetchw 64(%[C0])\n"
"prefetchw 64(%[C1])\n"
"prefetchw 64(%[C2])\n"
"prefetchw 64(%[C3])\n"
"prefetchw 64(%[C4])\n"
"prefetchw 64(%[C5])\n"
"prefetchw 64(%[C6])\n"
"prefetchw 64(%[C7])\n"
:
[AO] "+r" (AO),
[BO] "+r" (BO),

View File: kernel/x86_64/dgemm_ncopy_8_skylakex.c (new file)

@@ -0,0 +1,421 @@
/*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin. */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or */
/* without modification, are permitted provided that the following */
/* conditions are met: */
/* */
/* 1. Redistributions of source code must retain the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer in the documentation and/or other materials */
/* provided with the distribution. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
/* */
/* The views and conclusions contained in the software and */
/* documentation are those of the authors and should not be */
/* interpreted as representing official policies, either expressed */
/* or implied, of The University of Texas at Austin. */
/*********************************************************************/
#include <stdio.h>
#include "common.h"
#include <immintrin.h>
int CNAME(BLASLONG m, BLASLONG n, FLOAT * __restrict a, BLASLONG lda, FLOAT * __restrict b){
BLASLONG i, j;
FLOAT *aoffset;
FLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4;
FLOAT *aoffset5, *aoffset6, *aoffset7, *aoffset8;
FLOAT *boffset;
FLOAT ctemp01, ctemp02, ctemp03, ctemp04;
FLOAT ctemp05, ctemp06, ctemp07, ctemp08;
FLOAT ctemp09, ctemp10, ctemp11, ctemp12;
FLOAT ctemp13, ctemp14, ctemp15, ctemp16;
FLOAT ctemp17, ctemp18, ctemp19, ctemp20;
FLOAT ctemp21, ctemp22, ctemp23, ctemp24;
FLOAT ctemp25, ctemp26, ctemp27, ctemp28;
FLOAT ctemp29, ctemp30, ctemp31, ctemp32;
FLOAT ctemp33, ctemp34, ctemp35, ctemp36;
FLOAT ctemp37, ctemp38, ctemp39, ctemp40;
FLOAT ctemp41, ctemp42, ctemp43, ctemp44;
FLOAT ctemp45, ctemp46, ctemp47, ctemp48;
FLOAT ctemp49, ctemp50, ctemp51, ctemp52;
FLOAT ctemp53, ctemp54, ctemp55, ctemp56;
FLOAT ctemp57, ctemp58, ctemp59, ctemp60;
FLOAT ctemp61, ctemp62, ctemp63, ctemp64;
aoffset = a;
boffset = b;
j = (n >> 3);
if (j > 0){
do{
aoffset1 = aoffset;
aoffset2 = aoffset1 + lda;
aoffset3 = aoffset2 + lda;
aoffset4 = aoffset3 + lda;
aoffset5 = aoffset4 + lda;
aoffset6 = aoffset5 + lda;
aoffset7 = aoffset6 + lda;
aoffset8 = aoffset7 + lda;
aoffset += 8 * lda;
i = (m >> 3);
if (i > 0){
do{
__m128d xmm0, xmm1;
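/* transpose trick: _mm_load_pd1 fills both lanes with the second
   column's element, then _mm_loadl_pd overwrites the low lane with the
   first column's element, yielding the pair { aoffset1[k], aoffset2[k] }
   ready to store contiguously into b */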
xmm0 = _mm_load_pd1(aoffset2 + 0);
xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 0);
_mm_storeu_pd(boffset + 0, xmm0);
ctemp07 = *(aoffset1 + 6);
ctemp08 = *(aoffset1 + 7);
xmm1 = _mm_load_pd1(aoffset4 + 0);
xmm1 = _mm_loadl_pd(xmm1, aoffset3 + 0);
_mm_storeu_pd(boffset + 2, xmm1);
xmm0 = _mm_load_pd1(aoffset6 + 0);
xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 0);
_mm_storeu_pd(boffset + 4, xmm0);
xmm0 = _mm_load_pd1(aoffset8 + 0);
xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 0);
_mm_storeu_pd(boffset + 6, xmm0);
ctemp15 = *(aoffset2 + 6);
ctemp16 = *(aoffset2 + 7);
xmm0 = _mm_load_pd1(aoffset2 + 1);
xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 1);
_mm_storeu_pd(boffset + 8, xmm0);
xmm0 = _mm_load_pd1(aoffset4 + 1);
xmm0 = _mm_loadl_pd(xmm0, aoffset3 + 1);
_mm_storeu_pd(boffset + 10, xmm0);
xmm0 = _mm_load_pd1(aoffset6 + 1);
xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 1);
_mm_storeu_pd(boffset + 12, xmm0);
xmm0 = _mm_load_pd1(aoffset8 + 1);
xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 1);
_mm_storeu_pd(boffset + 14, xmm0);
xmm0 = _mm_load_pd1(aoffset2 + 2);
xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 2);
_mm_storeu_pd(boffset + 16, xmm0);
xmm0 = _mm_load_pd1(aoffset4 + 2);
xmm0 = _mm_loadl_pd(xmm0, aoffset3 + 2);
_mm_storeu_pd(boffset + 18, xmm0);
xmm0 = _mm_load_pd1(aoffset6 + 2);
xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 2);
_mm_storeu_pd(boffset + 20, xmm0);
xmm0 = _mm_load_pd1(aoffset8 + 2);
xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 2);
_mm_storeu_pd(boffset + 22, xmm0);
ctemp23 = *(aoffset3 + 6);
ctemp24 = *(aoffset3 + 7);
xmm0 = _mm_load_pd1(aoffset2 + 3);
xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 3);
_mm_storeu_pd(boffset + 24, xmm0);
xmm0 = _mm_load_pd1(aoffset4 + 3);
xmm0 = _mm_loadl_pd(xmm0, aoffset3 + 3);
_mm_storeu_pd(boffset + 26, xmm0);
xmm0 = _mm_load_pd1(aoffset6 + 3);
xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 3);
_mm_storeu_pd(boffset + 28, xmm0);
xmm0 = _mm_load_pd1(aoffset8 + 3);
xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 3);
_mm_storeu_pd(boffset + 30, xmm0);
ctemp31 = *(aoffset4 + 6);
ctemp32 = *(aoffset4 + 7);
xmm0 = _mm_load_pd1(aoffset2 + 4);
xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 4);
_mm_storeu_pd(boffset + 32, xmm0);
xmm0 = _mm_load_pd1(aoffset4 + 4);
xmm0 = _mm_loadl_pd(xmm0, aoffset3 + 4);
_mm_storeu_pd(boffset + 34, xmm0);
xmm0 = _mm_load_pd1(aoffset6 + 4);
xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 4);
_mm_storeu_pd(boffset + 36, xmm0);
xmm0 = _mm_load_pd1(aoffset8 + 4);
xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 4);
_mm_storeu_pd(boffset + 38, xmm0);
ctemp39 = *(aoffset5 + 6);
ctemp40 = *(aoffset5 + 7);
xmm0 = _mm_load_pd1(aoffset2 + 5);
xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 5);
_mm_storeu_pd(boffset + 40, xmm0);
xmm0 = _mm_load_pd1(aoffset4 + 5);
xmm0 = _mm_loadl_pd(xmm0, aoffset3 + 5);
_mm_storeu_pd(boffset + 42, xmm0);
xmm0 = _mm_load_pd1(aoffset6 + 5);
xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 5);
_mm_storeu_pd(boffset + 44, xmm0);
xmm0 = _mm_load_pd1(aoffset8 + 5);
xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 5);
_mm_storeu_pd(boffset + 46, xmm0);
ctemp47 = *(aoffset6 + 6);
ctemp48 = *(aoffset6 + 7);
ctemp55 = *(aoffset7 + 6);
ctemp56 = *(aoffset7 + 7);
ctemp63 = *(aoffset8 + 6);
ctemp64 = *(aoffset8 + 7);
*(boffset + 48) = ctemp07;
*(boffset + 49) = ctemp15;
*(boffset + 50) = ctemp23;
*(boffset + 51) = ctemp31;
*(boffset + 52) = ctemp39;
*(boffset + 53) = ctemp47;
*(boffset + 54) = ctemp55;
*(boffset + 55) = ctemp63;
*(boffset + 56) = ctemp08;
*(boffset + 57) = ctemp16;
*(boffset + 58) = ctemp24;
*(boffset + 59) = ctemp32;
*(boffset + 60) = ctemp40;
*(boffset + 61) = ctemp48;
*(boffset + 62) = ctemp56;
*(boffset + 63) = ctemp64;
aoffset1 += 8;
aoffset2 += 8;
aoffset3 += 8;
aoffset4 += 8;
aoffset5 += 8;
aoffset6 += 8;
aoffset7 += 8;
aoffset8 += 8;
boffset += 64;
i --;
}while(i > 0);
}
i = (m & 7);
if (i > 0){
do{
ctemp01 = *(aoffset1 + 0);
ctemp09 = *(aoffset2 + 0);
ctemp17 = *(aoffset3 + 0);
ctemp25 = *(aoffset4 + 0);
ctemp33 = *(aoffset5 + 0);
ctemp41 = *(aoffset6 + 0);
ctemp49 = *(aoffset7 + 0);
ctemp57 = *(aoffset8 + 0);
*(boffset + 0) = ctemp01;
*(boffset + 1) = ctemp09;
*(boffset + 2) = ctemp17;
*(boffset + 3) = ctemp25;
*(boffset + 4) = ctemp33;
*(boffset + 5) = ctemp41;
*(boffset + 6) = ctemp49;
*(boffset + 7) = ctemp57;
aoffset1 ++;
aoffset2 ++;
aoffset3 ++;
aoffset4 ++;
aoffset5 ++;
aoffset6 ++;
aoffset7 ++;
aoffset8 ++;
boffset += 8;
i --;
}while(i > 0);
}
j--;
}while(j > 0);
} /* end of if(j > 0) */
if (n & 4){
aoffset1 = aoffset;
aoffset2 = aoffset1 + lda;
aoffset3 = aoffset2 + lda;
aoffset4 = aoffset3 + lda;
aoffset += 4 * lda;
i = (m >> 2);
if (i > 0){
do{
ctemp01 = *(aoffset1 + 0);
ctemp02 = *(aoffset1 + 1);
ctemp03 = *(aoffset1 + 2);
ctemp04 = *(aoffset1 + 3);
ctemp05 = *(aoffset2 + 0);
ctemp06 = *(aoffset2 + 1);
ctemp07 = *(aoffset2 + 2);
ctemp08 = *(aoffset2 + 3);
ctemp09 = *(aoffset3 + 0);
ctemp10 = *(aoffset3 + 1);
ctemp11 = *(aoffset3 + 2);
ctemp12 = *(aoffset3 + 3);
ctemp13 = *(aoffset4 + 0);
ctemp14 = *(aoffset4 + 1);
ctemp15 = *(aoffset4 + 2);
ctemp16 = *(aoffset4 + 3);
*(boffset + 0) = ctemp01;
*(boffset + 1) = ctemp05;
*(boffset + 2) = ctemp09;
*(boffset + 3) = ctemp13;
*(boffset + 4) = ctemp02;
*(boffset + 5) = ctemp06;
*(boffset + 6) = ctemp10;
*(boffset + 7) = ctemp14;
*(boffset + 8) = ctemp03;
*(boffset + 9) = ctemp07;
*(boffset + 10) = ctemp11;
*(boffset + 11) = ctemp15;
*(boffset + 12) = ctemp04;
*(boffset + 13) = ctemp08;
*(boffset + 14) = ctemp12;
*(boffset + 15) = ctemp16;
aoffset1 += 4;
aoffset2 += 4;
aoffset3 += 4;
aoffset4 += 4;
boffset += 16;
i --;
}while(i > 0);
}
i = (m & 3);
if (i > 0){
do{
ctemp01 = *(aoffset1 + 0);
ctemp02 = *(aoffset2 + 0);
ctemp03 = *(aoffset3 + 0);
ctemp04 = *(aoffset4 + 0);
*(boffset + 0) = ctemp01;
*(boffset + 1) = ctemp02;
*(boffset + 2) = ctemp03;
*(boffset + 3) = ctemp04;
aoffset1 ++;
aoffset2 ++;
aoffset3 ++;
aoffset4 ++;
boffset += 4;
i --;
}while(i > 0);
}
} /* end of if(n & 4) */
if (n & 2){
aoffset1 = aoffset;
aoffset2 = aoffset1 + lda;
aoffset += 2 * lda;
i = (m >> 1);
if (i > 0){
do{
ctemp01 = *(aoffset1 + 0);
ctemp02 = *(aoffset1 + 1);
ctemp03 = *(aoffset2 + 0);
ctemp04 = *(aoffset2 + 1);
*(boffset + 0) = ctemp01;
*(boffset + 1) = ctemp03;
*(boffset + 2) = ctemp02;
*(boffset + 3) = ctemp04;
aoffset1 += 2;
aoffset2 += 2;
boffset += 4;
i --;
}while(i > 0);
}
if (m & 1){
ctemp01 = *(aoffset1 + 0);
ctemp02 = *(aoffset2 + 0);
*(boffset + 0) = ctemp01;
*(boffset + 1) = ctemp02;
aoffset1 ++;
aoffset2 ++;
boffset += 2;
}
} /* end of if(n & 2) */
if (n & 1){
aoffset1 = aoffset;
i = m;
if (i > 0){
do{
ctemp01 = *(aoffset1 + 0);
*(boffset + 0) = ctemp01;
aoffset1 ++;
boffset ++;
i --;
}while(i > 0);
}
} /* end of if(n & 1) */
return 0;
}
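
In scalar terms, the packing above writes each (up to) 8-column panel of A row by row: within a panel, b[8*i + j] receives column j's element of row i. A non-SIMD sketch of the full-panel case (illustrative; the real code also handles the 4-, 2- and 1-column tails):

/* pack one 8-column panel of column-major A (leading dimension lda)
   into b, row-major within the panel */
static void ncopy8_panel(long m, long lda, const double *a, double *b)
{
    for (long i = 0; i < m; i++)          /* row             */
        for (long j = 0; j < 8; j++)      /* column in panel */
            *b++ = a[j * lda + i];
}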

View File: kernel/x86_64/dgemm_tcopy_8_skylakex.c (new file)

@@ -0,0 +1,417 @@
/*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin. */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or */
/* without modification, are permitted provided that the following */
/* conditions are met: */
/* */
/* 1. Redistributions of source code must retain the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer in the documentation and/or other materials */
/* provided with the distribution. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
/* */
/* The views and conclusions contained in the software and */
/* documentation are those of the authors and should not be */
/* interpreted as representing official policies, either expressed */
/* or implied, of The University of Texas at Austin. */
/*********************************************************************/
#include <stdio.h>
#include "common.h"
#include <immintrin.h>
int CNAME(BLASLONG m, BLASLONG n, FLOAT * __restrict a, BLASLONG lda, FLOAT * __restrict b){
BLASLONG i, j;
FLOAT *aoffset;
FLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4;
FLOAT *aoffset5, *aoffset6, *aoffset7, *aoffset8;
FLOAT *boffset, *boffset1, *boffset2, *boffset3, *boffset4;
FLOAT ctemp01, ctemp02, ctemp03, ctemp04;
FLOAT ctemp05, ctemp06, ctemp07, ctemp08;
aoffset = a;
boffset = b;
#if 0
fprintf(stderr, "M = %ld N = %ld\n", m, n);
#endif
boffset2 = b + m * (n & ~7);
boffset3 = b + m * (n & ~3);
boffset4 = b + m * (n & ~1);
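/* b is carved into consecutive regions: the full 8-column blocks of
   every row panel come first, then the 4-, 2- and 1-column tails;
   boffset2..4 are running pointers into those tail regions */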
j = (m >> 3);
if (j > 0){
do{
aoffset1 = aoffset;
aoffset2 = aoffset1 + lda;
aoffset3 = aoffset2 + lda;
aoffset4 = aoffset3 + lda;
aoffset5 = aoffset4 + lda;
aoffset6 = aoffset5 + lda;
aoffset7 = aoffset6 + lda;
aoffset8 = aoffset7 + lda;
aoffset += 8 * lda;
boffset1 = boffset;
boffset += 64;
i = (n >> 3);
if (i > 0){
do{
__m512d row1, row2, row3, row4, row5, row6, row7, row8;
row1 = _mm512_loadu_pd(aoffset1);
aoffset1 += 8;
row2 = _mm512_loadu_pd(aoffset2);
aoffset2 += 8;
row3 = _mm512_loadu_pd(aoffset3);
aoffset3 += 8;
row4 = _mm512_loadu_pd(aoffset4);
aoffset4 += 8;
row5 = _mm512_loadu_pd(aoffset5);
aoffset5 += 8;
row6 = _mm512_loadu_pd(aoffset6);
aoffset6 += 8;
row7 = _mm512_loadu_pd(aoffset7);
aoffset7 += 8;
row8 = _mm512_loadu_pd(aoffset8);
aoffset8 += 8;
_mm512_storeu_pd(boffset1 + 0, row1);
_mm512_storeu_pd(boffset1 + 8, row2);
_mm512_storeu_pd(boffset1 + 16, row3);
_mm512_storeu_pd(boffset1 + 24, row4);
_mm512_storeu_pd(boffset1 + 32, row5);
_mm512_storeu_pd(boffset1 + 40, row6);
_mm512_storeu_pd(boffset1 + 48, row7);
_mm512_storeu_pd(boffset1 + 56, row8);
boffset1 += m * 8;
i --;
}while(i > 0);
}
if (n & 4){
__m256d row1, row2, row3, row4, row5, row6, row7, row8;
row1 = _mm256_loadu_pd(aoffset1);
aoffset1 += 4;
row2 = _mm256_loadu_pd(aoffset2);
aoffset2 += 4;
row3 = _mm256_loadu_pd(aoffset3);
aoffset3 += 4;
row4 = _mm256_loadu_pd(aoffset4);
aoffset4 += 4;
row5 = _mm256_loadu_pd(aoffset5);
aoffset5 += 4;
row6 = _mm256_loadu_pd(aoffset6);
aoffset6 += 4;
row7 = _mm256_loadu_pd(aoffset7);
aoffset7 += 4;
row8 = _mm256_loadu_pd(aoffset8);
aoffset8 += 4;
_mm256_storeu_pd(boffset2 + 0, row1);
_mm256_storeu_pd(boffset2 + 4, row2);
_mm256_storeu_pd(boffset2 + 8, row3);
_mm256_storeu_pd(boffset2 + 12, row4);
_mm256_storeu_pd(boffset2 + 16, row5);
_mm256_storeu_pd(boffset2 + 20, row6);
_mm256_storeu_pd(boffset2 + 24, row7);
_mm256_storeu_pd(boffset2 + 28, row8);
boffset2 += 32;
}
if (n & 2){
__m128d row1, row2, row3, row4, row5, row6, row7, row8;
row1 = _mm_loadu_pd(aoffset1);
aoffset1 += 2;
row2 = _mm_loadu_pd(aoffset2);
aoffset2 += 2;
row3 = _mm_loadu_pd(aoffset3);
aoffset3 += 2;
row4 = _mm_loadu_pd(aoffset4);
aoffset4 += 2;
row5 = _mm_loadu_pd(aoffset5);
aoffset5 += 2;
row6 = _mm_loadu_pd(aoffset6);
aoffset6 += 2;
row7 = _mm_loadu_pd(aoffset7);
aoffset7 += 2;
row8 = _mm_loadu_pd(aoffset8);
aoffset8 += 2;
_mm_storeu_pd(boffset3 + 0, row1);
_mm_storeu_pd(boffset3 + 2, row2);
_mm_storeu_pd(boffset3 + 4, row3);
_mm_storeu_pd(boffset3 + 6, row4);
_mm_storeu_pd(boffset3 + 8, row5);
_mm_storeu_pd(boffset3 + 10, row6);
_mm_storeu_pd(boffset3 + 12, row7);
_mm_storeu_pd(boffset3 + 14, row8);
boffset3 += 16;
}
if (n & 1){
ctemp01 = *(aoffset1 + 0);
aoffset1 ++;
ctemp02 = *(aoffset2 + 0);
aoffset2 ++;
ctemp03 = *(aoffset3 + 0);
aoffset3 ++;
ctemp04 = *(aoffset4 + 0);
aoffset4 ++;
ctemp05 = *(aoffset5 + 0);
aoffset5 ++;
ctemp06 = *(aoffset6 + 0);
aoffset6 ++;
ctemp07 = *(aoffset7 + 0);
aoffset7 ++;
ctemp08 = *(aoffset8 + 0);
aoffset8 ++;
*(boffset4 + 0) = ctemp01;
*(boffset4 + 1) = ctemp02;
*(boffset4 + 2) = ctemp03;
*(boffset4 + 3) = ctemp04;
*(boffset4 + 4) = ctemp05;
*(boffset4 + 5) = ctemp06;
*(boffset4 + 6) = ctemp07;
*(boffset4 + 7) = ctemp08;
boffset4 += 8;
}
j--;
}while(j > 0);
}
if (m & 4){
aoffset1 = aoffset;
aoffset2 = aoffset1 + lda;
aoffset3 = aoffset2 + lda;
aoffset4 = aoffset3 + lda;
aoffset += 4 * lda;
boffset1 = boffset;
boffset += 32;
i = (n >> 3);
if (i > 0){
do{
__m512d row1, row2, row3, row4;
row1 = _mm512_loadu_pd(aoffset1);
aoffset1 += 8;
row2 = _mm512_loadu_pd(aoffset2);
aoffset2 += 8;
row3 = _mm512_loadu_pd(aoffset3);
aoffset3 += 8;
row4 = _mm512_loadu_pd(aoffset4);
aoffset4 += 8;
_mm512_storeu_pd(boffset1 + 0, row1);
_mm512_storeu_pd(boffset1 + 8, row2);
_mm512_storeu_pd(boffset1 + 16, row3);
_mm512_storeu_pd(boffset1 + 24, row4);
boffset1 += 8 * m;
i --;
}while(i > 0);
}
if (n & 4) {
__m256d row1, row2, row3, row4;
row1 = _mm256_loadu_pd(aoffset1);
aoffset1 += 4;
row2 = _mm256_loadu_pd(aoffset2);
aoffset2 += 4;
row3 = _mm256_loadu_pd(aoffset3);
aoffset3 += 4;
row4 = _mm256_loadu_pd(aoffset4);
aoffset4 += 4;
_mm256_storeu_pd(boffset2 + 0, row1);
_mm256_storeu_pd(boffset2 + 4, row2);
_mm256_storeu_pd(boffset2 + 8, row3);
_mm256_storeu_pd(boffset2 + 12, row4);
boffset2 += 16;
}
if (n & 2){
__m128d row1, row2, row3, row4;
row1 = _mm_loadu_pd(aoffset1);
aoffset1 += 2;
row2 = _mm_loadu_pd(aoffset2);
aoffset2 += 2;
row3 = _mm_loadu_pd(aoffset3);
aoffset3 += 2;
row4 = _mm_loadu_pd(aoffset4);
aoffset4 += 2;
_mm_storeu_pd(boffset3 + 0, row1);
_mm_storeu_pd(boffset3 + 2, row2);
_mm_storeu_pd(boffset3 + 4, row3);
_mm_storeu_pd(boffset3 + 6, row4);
boffset3 += 8;
}
if (n & 1){
ctemp01 = *(aoffset1 + 0);
aoffset1 ++;
ctemp02 = *(aoffset2 + 0);
aoffset2 ++;
ctemp03 = *(aoffset3 + 0);
aoffset3 ++;
ctemp04 = *(aoffset4 + 0);
aoffset4 ++;
*(boffset4 + 0) = ctemp01;
*(boffset4 + 1) = ctemp02;
*(boffset4 + 2) = ctemp03;
*(boffset4 + 3) = ctemp04;
boffset4 += 4;
}
}
if (m & 2){
aoffset1 = aoffset;
aoffset2 = aoffset1 + lda;
aoffset += 2 * lda;
boffset1 = boffset;
boffset += 16;
i = (n >> 3);
if (i > 0){
do{
__m512d row1, row2;
row1 = _mm512_loadu_pd(aoffset1);
aoffset1 += 8;
row2 = _mm512_loadu_pd(aoffset2);
aoffset2 += 8;
_mm512_storeu_pd(boffset1 + 0, row1);
_mm512_storeu_pd(boffset1 + 8, row2);
boffset1 += 8 * m;
i --;
}while(i > 0);
}
if (n & 4){
__m256d row1, row2;
row1 = _mm256_loadu_pd(aoffset1);
aoffset1 += 4;
row2 = _mm256_loadu_pd(aoffset2);
aoffset2 += 4;
_mm256_storeu_pd(boffset2 + 0, row1);
_mm256_storeu_pd(boffset2 + 4, row2);
boffset2 += 8;
}
if (n & 2){
__m128d row1, row2;
row1 = _mm_loadu_pd(aoffset1);
aoffset1 += 2;
row2 = _mm_loadu_pd(aoffset2);
aoffset2 += 2;
_mm_storeu_pd(boffset3 + 0, row1);
_mm_storeu_pd(boffset3 + 2, row2);
boffset3 += 4;
}
if (n & 1){
ctemp01 = *(aoffset1 + 0);
aoffset1 ++;
ctemp02 = *(aoffset2 + 0);
aoffset2 ++;
*(boffset4 + 0) = ctemp01;
*(boffset4 + 1) = ctemp02;
boffset4 += 2;
}
}
if (m & 1){
aoffset1 = aoffset;
// aoffset += lda;
boffset1 = boffset;
// boffset += 8;
i = (n >> 3);
if (i > 0){
do{
__m512d row1;
row1 = _mm512_loadu_pd(aoffset1);
aoffset1 += 8;
_mm512_storeu_pd(boffset1 + 0, row1);
boffset1 += 8 * m;
i --;
}while(i > 0);
}
if (n & 4){
__m256d row1;
row1 = _mm256_loadu_pd(aoffset1);
aoffset1 += 4;
_mm256_storeu_pd(boffset2 + 0, row1);
// boffset2 += 4;
}
if (n & 2){
__m128d row1;
row1 = _mm_loadu_pd(aoffset1);
aoffset1 += 2;
_mm_storeu_pd(boffset3 + 0, row1);
// boffset3 += 2;
}
if (n & 1){
ctemp01 = *(aoffset1 + 0);
aoffset1 ++;
*(boffset4 + 0) = ctemp01;
boffset4 ++;
}
}
return 0;
}
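
The tail-region bases computed at the top of the function (boffset2..4) place each narrower column tail after all wider ones, across every 8-row panel of the output. A small sketch checking that arithmetic for an odd-sized matrix (values worked out by hand):

#include <assert.h>

int main(void)
{
    long m = 11, n = 15;
    long base2 = m * (n & ~7L);   /* after the 8-wide blocks:  88  */
    long base3 = m * (n & ~3L);   /* after the 4-wide tail:    132 */
    long base4 = m * (n & ~1L);   /* after the 2-wide tail:    154 */
    assert(base2 == 88 && base3 == 132 && base4 == 154);
    return 0;
}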