Optimize the performance of daxpy by using universal intrinsics

Qiyu8 2020-09-22 10:38:35 +08:00
parent 0f112077e6
commit 325b539c26
5 changed files with 131 additions and 16 deletions

kernel/simd/intrin.h Normal file

@@ -0,0 +1,51 @@
#ifndef _INTRIN_H_
#define _INTRIN_H_
#ifdef __cplusplus
extern "C" {
#endif
// include intrinsic headers, gated by feature level
/** SSE **/
#ifdef HAVE_SSE
#include <xmmintrin.h>
#endif
/** SSE2 **/
#ifdef HAVE_SSE2
#include <emmintrin.h>
#endif
/** SSE3 **/
#ifdef HAVE_SSE3
#include <pmmintrin.h>
#endif
/** SSSE3 **/
#ifdef HAVE_SSSE3
#include <tmmintrin.h>
#endif
/** SSE41 **/
#ifdef HAVE_SSE4_1
#include <smmintrin.h>
#endif
/** AVX **/
#ifdef HAVE_AVX
#include <immintrin.h>
#endif
// dispatch: pick the widest enabled implementation
#if defined(HAVE_AVX512VL) || defined(HAVE_AVX512BF16)
#include "intrin_avx512.h"
#elif defined(HAVE_AVX2)
#include "intrin_avx.h"
#elif defined(HAVE_SSE2)
#include "intrin_sse.h"
#endif
#ifndef V_SIMD
#define V_SIMD 0
#define V_SIMD_F64 0
#endif
#ifdef __cplusplus
}
#endif
#endif // _INTRIN_H_
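
The dispatch block above resolves the generic v_* API to the widest instruction set the build enables, so a kernel is written once and simply recompiled per target. A minimal sketch of a consumer, assuming the build system supplies the HAVE_* macros (scale_f32 is a hypothetical example, not part of this commit):

#include "intrin.h"

/* Hypothetical consumer: one source path per build. When no SIMD header
   matched, V_SIMD is 0 and the scalar branch is compiled instead. */
static void scale_f32(int n, float *x, float s)
{
#if V_SIMD
    v_f32 vs = v_setall_f32(s);
    /* processes v_nlanes_f32 elements per iteration (4, 8, or 16);
       assumes n is a multiple of the lane count */
    for (int i = 0; i < n; i += v_nlanes_f32)
        v_storeu_f32(x + i, v_mul_f32(v_loadu_f32(x + i), vs));
#else
    for (int i = 0; i < n; i++)
        x[i] *= s;
#endif
}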

kernel/simd/intrin_avx.h Normal file

@@ -0,0 +1,19 @@
#define V_SIMD 256
#define V_SIMD_F64 1
/*
Data Type
*/
typedef __m256 v_f32;
#define v_nlanes_f32 8
/*
arithmetic
*/
#define v_add_f32 _mm256_add_ps
#define v_mul_f32 _mm256_mul_ps
/*
memory
*/
// unaligned load
#define v_loadu_f32 _mm256_loadu_ps
#define v_storeu_f32 _mm256_storeu_ps
#define v_setall_f32(VAL) _mm256_set1_ps(VAL)
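
The AVX header keeps the add and the multiply as separate instructions; on FMA-capable hardware the add-of-a-product at the heart of AXPY can be fused. A hedged sketch of such an extension (v_muladd_f32 and the HAVE_FMA3 guard are assumptions, not defined by this commit):

/* hypothetical addition to intrin_avx.h: fused multiply-add A*B + C,
   with a mul+add fallback when FMA3 is not enabled */
#ifdef HAVE_FMA3
#define v_muladd_f32 _mm256_fmadd_ps
#else
#define v_muladd_f32(A, B, C) _mm256_add_ps(_mm256_mul_ps(A, B), C)
#endif

With this, the kernel body below could be written as v_muladd_f32(__alpha, v_loadu_f32(x + i), v_loadu_f32(y + i)).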

kernel/simd/intrin_avx512.h Normal file

@@ -0,0 +1,19 @@
#define V_SIMD 512
#define V_SIMD_F64 1
/*
Data Type
*/
typedef __m512 v_f32;
#define v_nlanes_f32 16
/*
arithmetic
*/
#define v_add_f32 _mm512_add_ps
#define v_mul_f32 _mm512_mul_ps
/*
memory
*/
// unaligned load
#define v_loadu_f32 _mm512_loadu_ps
#define v_storeu_f32 _mm512_storeu_ps
#define v_setall_f32(VAL) _mm512_set1_ps(VAL)

kernel/simd/intrin_sse.h Normal file

@@ -0,0 +1,19 @@
#define V_SIMD 128
#define V_SIMD_F64 1
/*
Data Type
*/
typedef __m128 v_f32;
#define v_nlanes_f32 4
/*
arithmetic
*/
#define v_add_f32 _mm_add_ps
#define v_mul_f32 _mm_mul_ps
/*
memory
*/
// unaligned load
#define v_loadu_f32 _mm_loadu_ps
#define v_storeu_f32 _mm_storeu_ps
#define v_setall_f32(VAL) _mm_set1_ps(VAL)
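
All three width-specific headers expose the same surface (v_f32, v_nlanes_f32, v_add_f32, v_mul_f32, v_loadu_f32, v_storeu_f32, v_setall_f32); only the lane count and the underlying intrinsics differ. A quick probe to confirm which header a given set of HAVE_* flags selected (a hypothetical test program, not part of this commit):

#include <stdio.h>
#include "intrin.h"

int main(void)
{
#if V_SIMD
    /* 128 -> SSE, 256 -> AVX, 512 -> AVX512 */
    printf("V_SIMD = %d bits, v_nlanes_f32 = %d\n", V_SIMD, v_nlanes_f32);
#else
    printf("scalar build (V_SIMD = 0)\n");
#endif
    return 0;
}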

kernel/x86_64/daxpy.c

@@ -45,14 +45,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "daxpy_microk_sandy-2.c"
#endif
#ifndef HAVE_KERNEL_8
#include"../simd/intrin.h"
static void daxpy_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
void daxpy_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
{
BLASLONG register i = 0;
FLOAT a = *alpha;
#if V_SIMD
v_f32 __alpha, tmp;
__alpha = v_setall_f32(*alpha);
const int vstep = v_nlanes_f32;
for (; i < n; i += vstep) {
tmp = v_add_f32(v_loadu_f32(y + i), v_mul_f32(__alpha, v_loadu_f32(x + i)));
v_storeu_f32(y + i, tmp);
}
#else
while(i < n)
{
y[i] += a * x[i];
@@ -64,9 +72,8 @@ static void daxpy_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
y[i+6] += a * x[i+6];
y[i+7] += a * x[i+7];
i+=8 ;
}
#endif
}
#endif
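
The vector loop steps i by v_nlanes_f32 with no remainder handling, so it relies on n being a multiple of the lane count, while the scalar path only needs a multiple of 8. A hedged sketch of the same body with an explicit scalar tail, written for single precision since this commit's headers only define f32 operations (axpy_f32_sketch is illustrative, not part of the commit):

#include "../simd/intrin.h"

/* illustrative AXPY with remainder handling: y[i] += alpha * x[i] */
static void axpy_f32_sketch(long n, const float *x, float *y, float alpha)
{
    long i = 0;
#if V_SIMD
    v_f32 valpha = v_setall_f32(alpha);
    /* vector body runs only while a full vector of elements remains */
    for (; i <= n - v_nlanes_f32; i += v_nlanes_f32)
        v_storeu_f32(y + i, v_add_f32(v_loadu_f32(y + i),
                                      v_mul_f32(valpha, v_loadu_f32(x + i))));
#endif
    /* scalar tail; also the full loop when V_SIMD is 0 */
    for (; i < n; i++)
        y[i] += alpha * x[i];
}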