Improve the performance of zasum and casum with AVX512 intrinsic

Gengxin Xie 2020-12-01 16:49:26 +08:00
parent ff16329cb7
commit b766c1e9bb
5 changed files with 980 additions and 0 deletions
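For context, the complex absolute-sum kernels added here sit behind the standard BLAS ?asum interface. Below is a minimal, illustrative sketch of how they are reached from user code, assuming an OpenBLAS build with the CBLAS interface enabled (cblas_scasum for single precision, cblas_dzasum for double precision):

#include <stdio.h>
#include <cblas.h>

int main(void) {
    /* four single-precision complex numbers, stored as interleaved re/im pairs */
    float x[8] = {1.0f, -2.0f, 3.0f, -4.0f, -5.0f, 6.0f, -7.0f, 8.0f};
    /* scasum returns the sum of |re| + |im| over n complex elements: 36 here */
    float s = cblas_scasum(4, x, 1);
    printf("scasum = %f\n", s);
    return 0;
}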


kernel/x86_64/KERNEL.SKYLAKEX
@@ -27,3 +27,6 @@ ZGEMMKERNEL = zgemm_kernel_4x2_skylakex.c
CSCALKERNEL = ../arm/zscal.c
ZSCALKERNEL = ../arm/zscal.c
CASUMKERNEL = casum.c
ZASUMKERNEL = zasum.c

kernel/x86_64/casum.c (new file, 144 lines)

@@ -0,0 +1,144 @@
#include "common.h"
#ifndef ABS_K
#define ABS_K(a) ((a) > 0 ? (a) : (-(a)))
#endif
#if defined(SKYLAKEX)
#include "casum_microk_skylakex-2.c"
#endif
#ifndef HAVE_CASUM_KERNEL
static FLOAT casum_kernel(BLASLONG n, FLOAT *x1)
{
BLASLONG i=0;
BLASLONG n_8 = n & -8;
FLOAT *x = x1;
FLOAT temp0, temp1, temp2, temp3;
FLOAT temp4, temp5, temp6, temp7;
FLOAT sum0 = 0.0;
FLOAT sum1 = 0.0;
FLOAT sum2 = 0.0;
FLOAT sum3 = 0.0;
FLOAT sum4 = 0.0;
while (i < n_8) {
temp0 = ABS_K(x[0]);
temp1 = ABS_K(x[1]);
temp2 = ABS_K(x[2]);
temp3 = ABS_K(x[3]);
temp4 = ABS_K(x[4]);
temp5 = ABS_K(x[5]);
temp6 = ABS_K(x[6]);
temp7 = ABS_K(x[7]);
sum0 += temp0;
sum1 += temp1;
sum2 += temp2;
sum3 += temp3;
sum0 += temp4;
sum1 += temp5;
sum2 += temp6;
sum3 += temp7;
x+=8;
i+=4;
}
while (i < n) {
sum4 += (ABS_K(x[0]) + ABS_K(x[1]));
x += 2;
i++;
}
return sum0+sum1+sum2+sum3+sum4;
}
#endif
static FLOAT asum_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x)
{
BLASLONG i = 0;
BLASLONG ip = 0;
BLASLONG inc_x2;
FLOAT sumf = 0.0;
if (n <= 0 || inc_x <= 0) return(sumf);
if (inc_x == 1) {
sumf = casum_kernel(n, x);
}
else {
inc_x2 = 2 * inc_x;
while (i < n) {
sumf += ABS_K(x[ip]) + ABS_K(x[ip + 1]);
ip += inc_x2;
i++;
}
}
return(sumf);
}
#if defined(SMP)
static int asum_thread_function(BLASLONG n,
BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy2,
FLOAT *x, BLASLONG inc_x,
FLOAT * dummy3, BLASLONG dummy4,
FLOAT * result, BLASLONG dummy5)
{
*(FLOAT *) result = asum_compute(n, x, inc_x);
return 0;
}
extern int blas_level1_thread_with_return_value(int mode,
BLASLONG m, BLASLONG n, BLASLONG k, void * alpha,
void *a, BLASLONG lda,
void *b, BLASLONG ldb,
void *c, BLASLONG ldc,
int (*function)(),
int nthread);
#endif
FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
{
#if defined(SMP)
int nthreads;
FLOAT dummy_alpha[2];
#endif
FLOAT sumf = 0.0;
#if defined(SMP)
int num_cpu = num_cpu_avail(1);
if (n <= 10000 || inc_x <= 0)
nthreads = 1;
else
nthreads = num_cpu < n/10000 ? num_cpu : n/10000;
if (nthreads == 1) {
sumf = asum_compute(n, x, inc_x);
}
else {
int mode, i;
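// per-thread partial results; each slot is padded to the size of one double-precision complex value (2 * sizeof(double))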
char result[MAX_CPU_NUMBER * sizeof(double) *2];
FLOAT *ptr;
#if !defined(DOUBLE)
mode = BLAS_SINGLE | BLAS_COMPLEX;
#else
mode = BLAS_DOUBLE | BLAS_COMPLEX;
#endif
blas_level1_thread_with_return_value(mode, n, 0, 0, dummy_alpha, x, inc_x,
NULL, 0, result, 0, (void *)asum_thread_function, nthreads);
ptr = (FLOAT *)result;
for (i = 0; i < nthreads; i++) {
sumf += (*ptr);
ptr = (FLOAT *)(((char *)ptr) + sizeof(double) *2);
}
}
#else
sumf = asum_compute(n, x, inc_x);
#endif
return(sumf);
}

kernel/x86_64/casum_microk_skylakex-2.c (new file, 349 lines)

@@ -0,0 +1,349 @@
/* need a new enough GCC or Clang for AVX512 support */
#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX512CD__)) || (defined(__clang__) && __clang_major__ >= 9))
#define HAVE_CASUM_KERNEL 1
#include <immintrin.h>
#include <stdint.h>
static FLOAT casum_kernel(BLASLONG n, FLOAT *x)
{
FLOAT *x1 = x;
FLOAT sumf=0.0;
BLASLONG n2 = n + n;
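// n2 counts scalar floats: one real and one imaginary part per complex element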
if (n2 < 64) {
__m128 accum_10, accum_11, accum_12, accum_13;
__m128 abs_mask1;
accum_10 = _mm_setzero_ps();
accum_11 = _mm_setzero_ps();
accum_12 = _mm_setzero_ps();
accum_13 = _mm_setzero_ps();
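// build 0x7fffffff in every lane: integer compare-equal of a register with itself
// yields all ones (the initial contents of abs_mask1 are irrelevant), and a logical
// right shift by one then clears each sign bit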
abs_mask1 = (__m128)_mm_cmpeq_epi8((__m128i) abs_mask1, (__m128i) abs_mask1);
abs_mask1 = (__m128)_mm_srli_epi32((__m128i) abs_mask1, 1);
_mm_prefetch(&x1[0], _MM_HINT_T0);
if (n2 >= 32){
__m128 x00 = _mm_loadu_ps(&x1[ 0]);
__m128 x01 = _mm_loadu_ps(&x1[ 4]);
__m128 x02 = _mm_loadu_ps(&x1[ 8]);
__m128 x03 = _mm_loadu_ps(&x1[12]);
_mm_prefetch(&x1[16], _MM_HINT_T0);
__m128 x04 = _mm_loadu_ps(&x1[16]);
__m128 x05 = _mm_loadu_ps(&x1[20]);
__m128 x06 = _mm_loadu_ps(&x1[24]);
__m128 x07 = _mm_loadu_ps(&x1[28]);
x00 = _mm_and_ps(x00, abs_mask1);
x01 = _mm_and_ps(x01, abs_mask1);
x02 = _mm_and_ps(x02, abs_mask1);
x03 = _mm_and_ps(x03, abs_mask1);
accum_10 = _mm_add_ps(accum_10, x00);
accum_11 = _mm_add_ps(accum_11, x01);
accum_12 = _mm_add_ps(accum_12, x02);
accum_13 = _mm_add_ps(accum_13, x03);
x04 = _mm_and_ps(x04, abs_mask1);
x05 = _mm_and_ps(x05, abs_mask1);
x06 = _mm_and_ps(x06, abs_mask1);
x07 = _mm_and_ps(x07, abs_mask1);
accum_10 = _mm_add_ps(accum_10, x04);
accum_11 = _mm_add_ps(accum_11, x05);
accum_12 = _mm_add_ps(accum_12, x06);
accum_13 = _mm_add_ps(accum_13, x07);
n2 -= 32;
x1 += 32;
}
if (n2 >= 16) {
__m128 x00 = _mm_loadu_ps(&x1[ 0]);
__m128 x01 = _mm_loadu_ps(&x1[ 4]);
__m128 x02 = _mm_loadu_ps(&x1[ 8]);
__m128 x03 = _mm_loadu_ps(&x1[12]);
x00 = _mm_and_ps(x00, abs_mask1);
x01 = _mm_and_ps(x01, abs_mask1);
x02 = _mm_and_ps(x02, abs_mask1);
x03 = _mm_and_ps(x03, abs_mask1);
accum_10 = _mm_add_ps(accum_10, x00);
accum_11 = _mm_add_ps(accum_11, x01);
accum_12 = _mm_add_ps(accum_12, x02);
accum_13 = _mm_add_ps(accum_13, x03);
n2 -= 16;
x1 += 16;
}
if (n2 >= 8) {
__m128 x00 = _mm_loadu_ps(&x1[ 0]);
__m128 x01 = _mm_loadu_ps(&x1[ 4]);
x00 = _mm_and_ps(x00, abs_mask1);
x01 = _mm_and_ps(x01, abs_mask1);
accum_10 = _mm_add_ps(accum_10, x00);
accum_11 = _mm_add_ps(accum_11, x01);
n2 -= 8;
x1 += 8;
}
if (n2 >= 4) {
__m128 x00 = _mm_loadu_ps(&x1[ 0]);
x00 = _mm_and_ps(x00, abs_mask1);
accum_10 = _mm_add_ps(accum_10, x00);
n2 -= 4;
x1 += 4;
}
if (n2) {
sumf += (ABS_K(x1[0]) + ABS_K(x1[1]));
}
accum_10 = _mm_add_ps(accum_10, accum_11);
accum_12 = _mm_add_ps(accum_12, accum_13);
accum_10 = _mm_add_ps(accum_10, accum_12);
accum_10 = _mm_hadd_ps(accum_10, accum_10);
accum_10 = _mm_hadd_ps(accum_10, accum_10);
sumf += accum_10[0];
}
else {
__m512 accum_0, accum_1, accum_2, accum_3;
__m512 x00, x01, x02, x03, x04, x05, x06, x07;
__m512 abs_mask = (__m512)_mm512_set1_epi32(0x7fffffff);
accum_0 = _mm512_setzero_ps();
accum_1 = _mm512_setzero_ps();
accum_2 = _mm512_setzero_ps();
accum_3 = _mm512_setzero_ps();
// alignment has side-effect when the size of input array is not large enough
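// for short vectors, the masked prologue needed to reach a 64-byte boundary costs
// more than the aligned loads save, so the unaligned-load path below is used instead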
if (n2 < 256) {
if (n2 >= 128) {
x00 = _mm512_loadu_ps(&x1[ 0]);
x01 = _mm512_loadu_ps(&x1[ 16]);
x02 = _mm512_loadu_ps(&x1[ 32]);
x03 = _mm512_loadu_ps(&x1[ 48]);
x04 = _mm512_loadu_ps(&x1[ 64]);
x05 = _mm512_loadu_ps(&x1[ 80]);
x06 = _mm512_loadu_ps(&x1[ 96]);
x07 = _mm512_loadu_ps(&x1[112]);
x00 = _mm512_and_ps(x00, abs_mask);
x01 = _mm512_and_ps(x01, abs_mask);
x02 = _mm512_and_ps(x02, abs_mask);
x03 = _mm512_and_ps(x03, abs_mask);
accum_0 = _mm512_add_ps(accum_0, x00);
accum_1 = _mm512_add_ps(accum_1, x01);
accum_2 = _mm512_add_ps(accum_2, x02);
accum_3 = _mm512_add_ps(accum_3, x03);
x04 = _mm512_and_ps(x04, abs_mask);
x05 = _mm512_and_ps(x05, abs_mask);
x06 = _mm512_and_ps(x06, abs_mask);
x07 = _mm512_and_ps(x07, abs_mask);
accum_0 = _mm512_add_ps(accum_0, x04);
accum_1 = _mm512_add_ps(accum_1, x05);
accum_2 = _mm512_add_ps(accum_2, x06);
accum_3 = _mm512_add_ps(accum_3, x07);
n2 -= 128;
x1 += 128;
}
if (n2 >= 64) {
x00 = _mm512_loadu_ps(&x1[ 0]);
x01 = _mm512_loadu_ps(&x1[16]);
x02 = _mm512_loadu_ps(&x1[32]);
x03 = _mm512_loadu_ps(&x1[48]);
x00 = _mm512_and_ps(x00, abs_mask);
x01 = _mm512_and_ps(x01, abs_mask);
x02 = _mm512_and_ps(x02, abs_mask);
x03 = _mm512_and_ps(x03, abs_mask);
accum_0 = _mm512_add_ps(accum_0, x00);
accum_1 = _mm512_add_ps(accum_1, x01);
accum_2 = _mm512_add_ps(accum_2, x02);
accum_3 = _mm512_add_ps(accum_3, x03);
n2 -= 64;
x1 += 64;
}
if (n2 >= 32) {
x00 = _mm512_loadu_ps(&x1[ 0]);
x01 = _mm512_loadu_ps(&x1[16]);
x00 = _mm512_and_ps(x00, abs_mask);
x01 = _mm512_and_ps(x01, abs_mask);
accum_0 = _mm512_add_ps(accum_0, x00);
accum_1 = _mm512_add_ps(accum_1, x01);
n2 -= 32;
x1 += 32;
}
if (n2 >= 16) {
x00 = _mm512_loadu_ps(&x1[ 0]);
x00 = _mm512_and_ps(x00, abs_mask);
accum_0 = _mm512_add_ps(accum_0, x00);
n2 -= 16;
x1 += 16;
}
if (n2) {
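// 2..14 floats remain (n2 is even); the masked load zeroes the unused lanes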
uint16_t tail_mask16 = (((uint16_t) 0xffff) >> (16 - n2));
x00 = _mm512_maskz_loadu_ps(*((__mmask16*) &tail_mask16), &x1[ 0]);
x00 = _mm512_and_ps(x00, abs_mask);
accum_0 = _mm512_add_ps(accum_0, x00);
}
accum_0 = _mm512_add_ps(accum_0, accum_1);
accum_2 = _mm512_add_ps(accum_2, accum_3);
accum_0 = _mm512_add_ps(accum_0, accum_2);
sumf = _mm512_reduce_add_ps(accum_0);
}
// n2 >= 256, doing alignment
else {
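// number of floats between x1 and the next 64-byte boundary: (64 - (addr mod 64)) / 4, masked to 0..15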
int align_header = ((64 - ((uintptr_t)x1 & (uintptr_t)0x3f)) >> 2) & 0xf;
if (0 != align_header) {
uint16_t align_mask16 = (((uint16_t)0xffff) >> (16 - align_header));
x00 = _mm512_maskz_loadu_ps(*((__mmask16*) &align_mask16), &x1[0]);
x00 = _mm512_and_ps(x00, abs_mask);
accum_0 = _mm512_add_ps(accum_0, x00);
n2 -= align_header;
x1 += align_header;
}
x00 = _mm512_load_ps(&x1[ 0]);
x01 = _mm512_load_ps(&x1[ 16]);
x02 = _mm512_load_ps(&x1[ 32]);
x03 = _mm512_load_ps(&x1[ 48]);
x04 = _mm512_load_ps(&x1[ 64]);
x05 = _mm512_load_ps(&x1[ 80]);
x06 = _mm512_load_ps(&x1[ 96]);
x07 = _mm512_load_ps(&x1[112]);
n2 -= 128;
x1 += 128;
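// software-pipelined main loop: the eight vectors loaded above (or at the end of the
// previous iteration) are masked and accumulated while the next 128 floats are loaded,
// hiding load latency behind the arithmetic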
while (n2 >= 128) {
x00 = _mm512_and_ps(x00, abs_mask);
x01 = _mm512_and_ps(x01, abs_mask);
x02 = _mm512_and_ps(x02, abs_mask);
x03 = _mm512_and_ps(x03, abs_mask);
accum_0 = _mm512_add_ps(accum_0, x00);
x00 = _mm512_load_ps(&x1[ 0]);
accum_1 = _mm512_add_ps(accum_1, x01);
x01 = _mm512_load_ps(&x1[ 16]);
accum_2 = _mm512_add_ps(accum_2, x02);
x02 = _mm512_load_ps(&x1[ 32]);
accum_3 = _mm512_add_ps(accum_3, x03);
x03 = _mm512_load_ps(&x1[ 48]);
x04 = _mm512_and_ps(x04, abs_mask);
x05 = _mm512_and_ps(x05, abs_mask);
x06 = _mm512_and_ps(x06, abs_mask);
x07 = _mm512_and_ps(x07, abs_mask);
accum_0 = _mm512_add_ps(accum_0, x04);
x04 = _mm512_load_ps(&x1[ 64]);
accum_1 = _mm512_add_ps(accum_1, x05);
x05 = _mm512_load_ps(&x1[ 80]);
accum_2 = _mm512_add_ps(accum_2, x06);
x06 = _mm512_load_ps(&x1[ 96]);
accum_3 = _mm512_add_ps(accum_3, x07);
x07 = _mm512_load_ps(&x1[112]);
n2 -= 128;
x1 += 128;
}
x00 = _mm512_and_ps(x00, abs_mask);
x01 = _mm512_and_ps(x01, abs_mask);
x02 = _mm512_and_ps(x02, abs_mask);
x03 = _mm512_and_ps(x03, abs_mask);
accum_0 = _mm512_add_ps(accum_0, x00);
accum_1 = _mm512_add_ps(accum_1, x01);
accum_2 = _mm512_add_ps(accum_2, x02);
accum_3 = _mm512_add_ps(accum_3, x03);
x04 = _mm512_and_ps(x04, abs_mask);
x05 = _mm512_and_ps(x05, abs_mask);
x06 = _mm512_and_ps(x06, abs_mask);
x07 = _mm512_and_ps(x07, abs_mask);
accum_0 = _mm512_add_ps(accum_0, x04);
accum_1 = _mm512_add_ps(accum_1, x05);
accum_2 = _mm512_add_ps(accum_2, x06);
accum_3 = _mm512_add_ps(accum_3, x07);
if (n2 >= 64) {
x00 = _mm512_load_ps(&x1[ 0]);
x01 = _mm512_load_ps(&x1[16]);
x02 = _mm512_load_ps(&x1[32]);
x03 = _mm512_load_ps(&x1[48]);
x00 = _mm512_and_ps(x00, abs_mask);
x01 = _mm512_and_ps(x01, abs_mask);
x02 = _mm512_and_ps(x02, abs_mask);
x03 = _mm512_and_ps(x03, abs_mask);
accum_0 = _mm512_add_ps(accum_0, x00);
accum_1 = _mm512_add_ps(accum_1, x01);
accum_2 = _mm512_add_ps(accum_2, x02);
accum_3 = _mm512_add_ps(accum_3, x03);
n2 -= 64;
x1 += 64;
}
if (n2 >= 32) {
x00 = _mm512_load_ps(&x1[ 0]);
x01 = _mm512_load_ps(&x1[16]);
x00 = _mm512_and_ps(x00, abs_mask);
x01 = _mm512_and_ps(x01, abs_mask);
accum_0 = _mm512_add_ps(accum_0, x00);
accum_1 = _mm512_add_ps(accum_1, x01);
n2 -= 32;
x1 += 32;
}
if (n2 >= 16) {
x00 = _mm512_load_ps(&x1[ 0]);
x00 = _mm512_and_ps(x00, abs_mask);
accum_0 = _mm512_add_ps(accum_0, x00);
n2 -= 16;
x1 += 16;
}
if (n2) {
uint16_t tail_mask16 = (((uint16_t) 0xffff) >> (16 - n2));
x00 = _mm512_maskz_load_ps(*((__mmask16*) &tail_mask16), &x1[ 0]);
x00 = _mm512_and_ps(x00, abs_mask);
accum_0 = _mm512_add_ps(accum_0, x00);
}
accum_0 = _mm512_add_ps(accum_0, accum_1);
accum_2 = _mm512_add_ps(accum_2, accum_3);
accum_0 = _mm512_add_ps(accum_0, accum_2);
sumf = _mm512_reduce_add_ps(accum_0);
}
}
return sumf;
}
#endif

kernel/x86_64/zasum.c (new file, 144 lines)

@@ -0,0 +1,144 @@
#include "common.h"
#ifndef ABS_K
#define ABS_K(a) ((a) > 0 ? (a) : (-(a)))
#endif
#if defined(SKYLAKEX)
#include "zasum_microk_skylakex-2.c"
#endif
#ifndef HAVE_ZASUM_KERNEL
static FLOAT zasum_kernel(BLASLONG n, FLOAT *x)
{
BLASLONG i=0;
BLASLONG n_8 = n & -8;
FLOAT *x1 = x;
FLOAT temp0, temp1, temp2, temp3;
FLOAT temp4, temp5, temp6, temp7;
FLOAT sum0 = 0.0;
FLOAT sum1 = 0.0;
FLOAT sum2 = 0.0;
FLOAT sum3 = 0.0;
FLOAT sum4 = 0.0;
while (i < n_8) {
temp0 = ABS_K(x1[0]);
temp1 = ABS_K(x1[1]);
temp2 = ABS_K(x1[2]);
temp3 = ABS_K(x1[3]);
temp4 = ABS_K(x1[4]);
temp5 = ABS_K(x1[5]);
temp6 = ABS_K(x1[6]);
temp7 = ABS_K(x1[7]);
sum0 += temp0;
sum1 += temp1;
sum2 += temp2;
sum3 += temp3;
sum0 += temp4;
sum1 += temp5;
sum2 += temp6;
sum3 += temp7;
x1+=8;
i+=4;
}
while (i < n) {
sum4 += ABS_K(x1[0]) + ABS_K(x1[1]);
x1 += 2;
i++;
}
return sum0+sum1+sum2+sum3+sum4;
}
#endif
static FLOAT asum_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x)
{
BLASLONG i = 0;
BLASLONG ip = 0;
BLASLONG inc_x2;
FLOAT sumf = 0.0;
if (n <= 0 || inc_x <= 0) return(sumf);
if (inc_x == 1) {
sumf = zasum_kernel(n, x);
}
else {
inc_x2 = 2 * inc_x;
while (i < n) {
sumf += ABS_K(x[ip]) + ABS_K(x[ip + 1]);
ip += inc_x2;
i++;
}
}
return(sumf);
}
#if defined(SMP)
static int asum_thread_function(BLASLONG n,
BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy2,
FLOAT *x, BLASLONG inc_x,
FLOAT * dummy3, BLASLONG dummy4,
FLOAT * result, BLASLONG dummy5)
{
*(FLOAT *) result = asum_compute(n, x, inc_x);
return 0;
}
extern int blas_level1_thread_with_return_value(int mode,
BLASLONG m, BLASLONG n, BLASLONG k, void * alpha,
void *a, BLASLONG lda,
void *b, BLASLONG ldb,
void *c, BLASLONG ldc,
int (*function)(),
int nthread);
#endif
FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
{
#if defined(SMP)
int nthreads;
FLOAT dummy_alpha[2];
#endif
FLOAT sumf = 0.0;
#if defined(SMP)
int num_cpu = num_cpu_avail(1);
if (n <= 10000 || inc_x <= 0)
nthreads = 1;
else
nthreads = num_cpu < n/10000 ? num_cpu : n/10000;
if (nthreads == 1) {
sumf = asum_compute(n, x, inc_x);
}
else {
int mode, i;
char result[MAX_CPU_NUMBER * sizeof(double) *2];
FLOAT *ptr;
#if !defined(DOUBLE)
mode = BLAS_SINGLE | BLAS_COMPLEX;
#else
mode = BLAS_DOUBLE | BLAS_COMPLEX;
#endif
blas_level1_thread_with_return_value(mode, n, 0, 0, dummy_alpha, x, inc_x,
NULL, 0, result, 0, (void *)asum_thread_function, nthreads);
ptr = (FLOAT *)result;
for (i = 0; i < nthreads; i++) {
sumf += (*ptr);
ptr = (FLOAT *)(((char *)ptr) + sizeof(double) *2);
}
}
#else
sumf = asum_compute(n, x, inc_x);
#endif
return(sumf);
}

kernel/x86_64/zasum_microk_skylakex-2.c (new file, 340 lines)

@@ -0,0 +1,340 @@
/* need a new enough GCC or Clang for AVX512 support */
#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX512CD__)) || (defined(__clang__) && __clang_major__ >= 9))
#define HAVE_ZASUM_KERNEL 1
#include <immintrin.h>
#include <stdint.h>
static FLOAT zasum_kernel(BLASLONG n, FLOAT *x)
{
FLOAT *x1 = x;
FLOAT sumf=0.0;
BLASLONG n2 = n + n;
if (n2 < 32) {
__m128d accum_10, accum_11, accum_12, accum_13;
__m128d abs_mask1;
accum_10 = _mm_setzero_pd();
accum_11 = _mm_setzero_pd();
accum_12 = _mm_setzero_pd();
accum_13 = _mm_setzero_pd();
// abs_mask1 = (__m128d)_mm_set1_epi64x(0x7fffffffffffffff);
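// equivalent to the set1_epi64x line above: self-compare gives all ones, the shift clears the sign bit of each double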
abs_mask1 = (__m128d)_mm_cmpeq_epi8((__m128i) abs_mask1, (__m128i) abs_mask1);
abs_mask1 = (__m128d)_mm_srli_epi64((__m128i) abs_mask1, 1);
_mm_prefetch(&x1[0], _MM_HINT_T0);
if (n2 >= 16){
__m128d x00 = _mm_loadu_pd(&x1[ 0]);
__m128d x01 = _mm_loadu_pd(&x1[ 2]);
__m128d x02 = _mm_loadu_pd(&x1[ 4]);
__m128d x03 = _mm_loadu_pd(&x1[ 6]);
_mm_prefetch(&x1[8], _MM_HINT_T0);
__m128d x04 = _mm_loadu_pd(&x1[ 8]);
__m128d x05 = _mm_loadu_pd(&x1[10]);
__m128d x06 = _mm_loadu_pd(&x1[12]);
__m128d x07 = _mm_loadu_pd(&x1[14]);
x00 = _mm_and_pd(x00, abs_mask1);
x01 = _mm_and_pd(x01, abs_mask1);
x02 = _mm_and_pd(x02, abs_mask1);
x03 = _mm_and_pd(x03, abs_mask1);
accum_10 = _mm_add_pd(accum_10, x00);
accum_11 = _mm_add_pd(accum_11, x01);
accum_12 = _mm_add_pd(accum_12, x02);
accum_13 = _mm_add_pd(accum_13, x03);
x04 = _mm_and_pd(x04, abs_mask1);
x05 = _mm_and_pd(x05, abs_mask1);
x06 = _mm_and_pd(x06, abs_mask1);
x07 = _mm_and_pd(x07, abs_mask1);
accum_10 = _mm_add_pd(accum_10, x04);
accum_11 = _mm_add_pd(accum_11, x05);
accum_12 = _mm_add_pd(accum_12, x06);
accum_13 = _mm_add_pd(accum_13, x07);
x1 += 16;
n2 -= 16;
}
if (n2 >= 8) {
__m128d x00 = _mm_loadu_pd(&x1[ 0]);
__m128d x01 = _mm_loadu_pd(&x1[ 2]);
__m128d x02 = _mm_loadu_pd(&x1[ 4]);
__m128d x03 = _mm_loadu_pd(&x1[ 6]);
x00 = _mm_and_pd(x00, abs_mask1);
x01 = _mm_and_pd(x01, abs_mask1);
x02 = _mm_and_pd(x02, abs_mask1);
x03 = _mm_and_pd(x03, abs_mask1);
accum_10 = _mm_add_pd(accum_10, x00);
accum_11 = _mm_add_pd(accum_11, x01);
accum_12 = _mm_add_pd(accum_12, x02);
accum_13 = _mm_add_pd(accum_13, x03);
n2 -= 8;
x1 += 8;
}
if (n2 >= 4) {
__m128d x00 = _mm_loadu_pd(&x1[ 0]);
__m128d x01 = _mm_loadu_pd(&x1[ 2]);
x00 = _mm_and_pd(x00, abs_mask1);
x01 = _mm_and_pd(x01, abs_mask1);
accum_10 = _mm_add_pd(accum_10, x00);
accum_11 = _mm_add_pd(accum_11, x01);
n2 -= 4;
x1 += 4;
}
if (n2) {
__m128d x00 = _mm_loadu_pd(&x1[ 0]);
x00 = _mm_and_pd(x00, abs_mask1);
accum_10 = _mm_add_pd(accum_10, x00);
}
accum_10 = _mm_add_pd(accum_10, accum_11);
accum_12 = _mm_add_pd(accum_12, accum_13);
accum_10 = _mm_add_pd(accum_10, accum_12);
accum_10 = _mm_hadd_pd(accum_10, accum_10);
sumf = accum_10[0];
}
else {
__m512d accum_0, accum_1, accum_2, accum_3;
__m512d x00, x01, x02, x03, x04, x05, x06, x07;
__m512d abs_mask = (__m512d)_mm512_set1_epi64(0x7fffffffffffffff);
accum_0 = _mm512_setzero_pd();
accum_1 = _mm512_setzero_pd();
accum_2 = _mm512_setzero_pd();
accum_3 = _mm512_setzero_pd();
// alignment has side-effect when the size of input array is not large enough
if (n2 < 128) {
if (n2 >= 64) {
x00 = _mm512_loadu_pd(&x1[ 0]);
x01 = _mm512_loadu_pd(&x1[ 8]);
x02 = _mm512_loadu_pd(&x1[16]);
x03 = _mm512_loadu_pd(&x1[24]);
x04 = _mm512_loadu_pd(&x1[32]);
x05 = _mm512_loadu_pd(&x1[40]);
x06 = _mm512_loadu_pd(&x1[48]);
x07 = _mm512_loadu_pd(&x1[56]);
x00 = _mm512_and_pd(x00, abs_mask);
x01 = _mm512_and_pd(x01, abs_mask);
x02 = _mm512_and_pd(x02, abs_mask);
x03 = _mm512_and_pd(x03, abs_mask);
accum_0 = _mm512_add_pd(accum_0, x00);
accum_1 = _mm512_add_pd(accum_1, x01);
accum_2 = _mm512_add_pd(accum_2, x02);
accum_3 = _mm512_add_pd(accum_3, x03);
x04 = _mm512_and_pd(x04, abs_mask);
x05 = _mm512_and_pd(x05, abs_mask);
x06 = _mm512_and_pd(x06, abs_mask);
x07 = _mm512_and_pd(x07, abs_mask);
accum_0 = _mm512_add_pd(accum_0, x04);
accum_1 = _mm512_add_pd(accum_1, x05);
accum_2 = _mm512_add_pd(accum_2, x06);
accum_3 = _mm512_add_pd(accum_3, x07);
n2 -= 64;
x1 += 64;
}
if (n2 >= 32) {
x00 = _mm512_loadu_pd(&x1[ 0]);
x01 = _mm512_loadu_pd(&x1[ 8]);
x02 = _mm512_loadu_pd(&x1[16]);
x03 = _mm512_loadu_pd(&x1[24]);
x00 = _mm512_and_pd(x00, abs_mask);
x01 = _mm512_and_pd(x01, abs_mask);
x02 = _mm512_and_pd(x02, abs_mask);
x03 = _mm512_and_pd(x03, abs_mask);
accum_0 = _mm512_add_pd(accum_0, x00);
accum_1 = _mm512_add_pd(accum_1, x01);
accum_2 = _mm512_add_pd(accum_2, x02);
accum_3 = _mm512_add_pd(accum_3, x03);
n2 -= 32;
x1 += 32;
}
if (n2 >= 16) {
x00 = _mm512_loadu_pd(&x1[ 0]);
x01 = _mm512_loadu_pd(&x1[ 8]);
x00 = _mm512_and_pd(x00, abs_mask);
x01 = _mm512_and_pd(x01, abs_mask);
accum_0 = _mm512_add_pd(accum_0, x00);
accum_1 = _mm512_add_pd(accum_1, x01);
n2 -= 16;
x1 += 16;
}
if (n2 >= 8) {
x00 = _mm512_loadu_pd(&x1[ 0]);
x00 = _mm512_and_pd(x00, abs_mask);
accum_0 = _mm512_add_pd(accum_0, x00);
n2 -= 8;
x1 += 8;
}
if (n2) {
unsigned char tail_mask8 = (((unsigned char) 0xff) >> (8 - n2));
x00 = _mm512_maskz_loadu_pd(*((__mmask8*) &tail_mask8), &x1[ 0]);
x00 = _mm512_and_pd(x00, abs_mask);
accum_0 = _mm512_add_pd(accum_0, x00);
}
accum_0 = _mm512_add_pd(accum_0, accum_1);
accum_2 = _mm512_add_pd(accum_2, accum_3);
accum_0 = _mm512_add_pd(accum_0, accum_2);
sumf = _mm512_reduce_add_pd(accum_0);
}
// n2 >= 128, doing alignment
else {
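// number of doubles between x1 and the next 64-byte boundary: (64 - (addr mod 64)) / 8, masked to 0..7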
int align_header = ((64 - ((uintptr_t)x1 & (uintptr_t)0x3f)) >> 3) & 0x7;
if (0 != align_header) {
unsigned char align_mask8 = (((unsigned char)0xff) >> (8 - align_header));
x00 = _mm512_maskz_loadu_pd(*((__mmask8*) &align_mask8), &x1[0]);
x00 = _mm512_and_pd(x00, abs_mask);
accum_0 = _mm512_add_pd(accum_0, x00);
n2 -= align_header;
x1 += align_header;
}
x00 = _mm512_load_pd(&x1[ 0]);
x01 = _mm512_load_pd(&x1[ 8]);
x02 = _mm512_load_pd(&x1[16]);
x03 = _mm512_load_pd(&x1[24]);
x04 = _mm512_load_pd(&x1[32]);
x05 = _mm512_load_pd(&x1[40]);
x06 = _mm512_load_pd(&x1[48]);
x07 = _mm512_load_pd(&x1[56]);
n2 -= 64;
x1 += 64;
while (n2 >= 64) {
x00 = _mm512_and_pd(x00, abs_mask);
x01 = _mm512_and_pd(x01, abs_mask);
x02 = _mm512_and_pd(x02, abs_mask);
x03 = _mm512_and_pd(x03, abs_mask);
accum_0 = _mm512_add_pd(accum_0, x00);
x00 = _mm512_load_pd(&x1[ 0]);
accum_1 = _mm512_add_pd(accum_1, x01);
x01 = _mm512_load_pd(&x1[ 8]);
accum_2 = _mm512_add_pd(accum_2, x02);
x02 = _mm512_load_pd(&x1[16]);
accum_3 = _mm512_add_pd(accum_3, x03);
x03 = _mm512_load_pd(&x1[24]);
x04 = _mm512_and_pd(x04, abs_mask);
x05 = _mm512_and_pd(x05, abs_mask);
x06 = _mm512_and_pd(x06, abs_mask);
x07 = _mm512_and_pd(x07, abs_mask);
accum_0 = _mm512_add_pd(accum_0, x04);
x04 = _mm512_load_pd(&x1[32]);
accum_1 = _mm512_add_pd(accum_1, x05);
x05 = _mm512_load_pd(&x1[40]);
accum_2 = _mm512_add_pd(accum_2, x06);
x06 = _mm512_load_pd(&x1[48]);
accum_3 = _mm512_add_pd(accum_3, x07);
x07 = _mm512_load_pd(&x1[56]);
n2 -= 64;
x1 += 64;
}
x00 = _mm512_and_pd(x00, abs_mask);
x01 = _mm512_and_pd(x01, abs_mask);
x02 = _mm512_and_pd(x02, abs_mask);
x03 = _mm512_and_pd(x03, abs_mask);
accum_0 = _mm512_add_pd(accum_0, x00);
accum_1 = _mm512_add_pd(accum_1, x01);
accum_2 = _mm512_add_pd(accum_2, x02);
accum_3 = _mm512_add_pd(accum_3, x03);
x04 = _mm512_and_pd(x04, abs_mask);
x05 = _mm512_and_pd(x05, abs_mask);
x06 = _mm512_and_pd(x06, abs_mask);
x07 = _mm512_and_pd(x07, abs_mask);
accum_0 = _mm512_add_pd(accum_0, x04);
accum_1 = _mm512_add_pd(accum_1, x05);
accum_2 = _mm512_add_pd(accum_2, x06);
accum_3 = _mm512_add_pd(accum_3, x07);
if (n2 >= 32) {
x00 = _mm512_load_pd(&x1[ 0]);
x01 = _mm512_load_pd(&x1[ 8]);
x02 = _mm512_load_pd(&x1[16]);
x03 = _mm512_load_pd(&x1[24]);
x00 = _mm512_and_pd(x00, abs_mask);
x01 = _mm512_and_pd(x01, abs_mask);
x02 = _mm512_and_pd(x02, abs_mask);
x03 = _mm512_and_pd(x03, abs_mask);
accum_0 = _mm512_add_pd(accum_0, x00);
accum_1 = _mm512_add_pd(accum_1, x01);
accum_2 = _mm512_add_pd(accum_2, x02);
accum_3 = _mm512_add_pd(accum_3, x03);
n2 -= 32;
x1 += 32;
}
if (n2 >= 16) {
x00 = _mm512_load_pd(&x1[ 0]);
x01 = _mm512_load_pd(&x1[ 8]);
x00 = _mm512_and_pd(x00, abs_mask);
x01 = _mm512_and_pd(x01, abs_mask);
accum_0 = _mm512_add_pd(accum_0, x00);
accum_1 = _mm512_add_pd(accum_1, x01);
n2 -= 16;
x1 += 16;
}
if (n2 >= 8) {
x00 = _mm512_load_pd(&x1[ 0]);
x00 = _mm512_and_pd(x00, abs_mask);
accum_0 = _mm512_add_pd(accum_0, x00);
n2 -= 8;
x1 += 8;
}
if (n2) {
unsigned char tail_mask8 = (((unsigned char) 0xff) >> (8 - n2));
x00 = _mm512_maskz_load_pd(*((__mmask8*) &tail_mask8), &x1[ 0]);
x00 = _mm512_and_pd(x00, abs_mask);
accum_0 = _mm512_add_pd(accum_0, x00);
}
accum_0 = _mm512_add_pd(accum_0, accum_1);
accum_2 = _mm512_add_pd(accum_2, accum_3);
accum_0 = _mm512_add_pd(accum_0, accum_2);
sumf = _mm512_reduce_add_pd(accum_0);
}
}
return sumf;
}
#endif