From c4c591ac5afc10b5619d1c58b10d5095dc82a2ff Mon Sep 17 00:00:00 2001
From: Qiyu8
Date: Tue, 10 Nov 2020 16:16:38 +0800
Subject: [PATCH 01/10] fix sum optimization issues

---
 kernel/arm/sum.c | 39 ++++++++++++++++++++-------------------
 1 file changed, 20 insertions(+), 19 deletions(-)

diff --git a/kernel/arm/sum.c b/kernel/arm/sum.c
index 63584b95c..a486a1868 100644
--- a/kernel/arm/sum.c
+++ b/kernel/arm/sum.c
@@ -42,24 +42,27 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
 	n *= inc_x;
 	if (inc_x == 1) {
-#if V_SIMD
+#if V_SIMD && (!defined(DOUBLE) || (defined(DOUBLE) && V_SIMD_F64 && V_SIMD > 128))
 #ifdef DOUBLE
         const int vstep = v_nlanes_f64;
-        const int unrollx2 = n & (-vstep * 2);
+        const int unrollx4 = n & (-vstep * 4);
         const int unrollx = n & -vstep;
         v_f64 vsum0 = v_zero_f64();
         v_f64 vsum1 = v_zero_f64();
-        while (i < unrollx2)
-        {
-            vsum0 = v_add_f64(vsum0, v_loadu_f64(x));
-            vsum1 = v_add_f64(vsum1, v_loadu_f64(x + vstep));
-            i += vstep * 2;
-        }
-        vsum0 = v_add_f64(vsum0, vsum1);
-        while (i < unrollx)
+        v_f64 vsum2 = v_zero_f64();
+        v_f64 vsum3 = v_zero_f64();
+        for (; i < unrollx4; i += vstep * 4)
+        {
+            vsum0 = v_add_f64(vsum0, v_loadu_f64(x + i));
+            vsum1 = v_add_f64(vsum1, v_loadu_f64(x + i + vstep));
+            vsum2 = v_add_f64(vsum2, v_loadu_f64(x + i + vstep * 2));
+            vsum3 = v_add_f64(vsum3, v_loadu_f64(x + i + vstep * 3));
+        }
+        vsum0 = v_add_f64(
+            v_add_f64(vsum0, vsum1), v_add_f64(vsum2, vsum3));
+        for (; i < unrollx; i += vstep)
         {
             vsum0 = v_add_f64(vsum0, v_loadu_f64(x + i));
-            i += vstep;
         }
         sumf = v_sum_f64(vsum0);
 #else
@@ -70,20 +73,18 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
         v_f32 vsum1 = v_zero_f32();
         v_f32 vsum2 = v_zero_f32();
         v_f32 vsum3 = v_zero_f32();
-        while (i < unrollx4)
+        for (; i < unrollx4; i += vstep * 4)
         {
-            vsum0 = v_add_f32(vsum0, v_loadu_f32(x));
-            vsum1 = v_add_f32(vsum1, v_loadu_f32(x + vstep));
-            vsum2 = v_add_f32(vsum2, v_loadu_f32(x + vstep * 2));
-            vsum3 = v_add_f32(vsum3, v_loadu_f32(x + vstep * 3));
-            i += vstep * 4;
+            vsum0 = v_add_f32(vsum0, v_loadu_f32(x + i));
+            vsum1 = v_add_f32(vsum1, v_loadu_f32(x + i + vstep));
+            vsum2 = v_add_f32(vsum2, v_loadu_f32(x + i + vstep * 2));
+            vsum3 = v_add_f32(vsum3, v_loadu_f32(x + i + vstep * 3));
         }
         vsum0 = v_add_f32(
            v_add_f32(vsum0, vsum1), v_add_f32(vsum2, vsum3));
-        while (i < unrollx)
+        for (; i < unrollx; i += vstep)
         {
             vsum0 = v_add_f32(vsum0, v_loadu_f32(x + i));
-            i += vstep;
         }
         sumf = v_sum_f32(vsum0);
 #endif
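The unroll change in PATCH 01 is about instruction-level parallelism: summing into one register makes every vector add wait on the previous one, while four independent accumulators let the adds overlap. A minimal scalar sketch of the same idea (hypothetical helper, not part of the patch):

    #include <stddef.h>

    /* Four partial sums break the loop-carried dependency on a single
     * accumulator, so the CPU can keep several additions in flight.
     * The masked bound n & ~3 plays the role of unrollx4 above. */
    static double sum_unrolled4(const double *x, size_t n)
    {
        double s0 = 0.0, s1 = 0.0, s2 = 0.0, s3 = 0.0;
        size_t i = 0, n4 = n & ~(size_t)3;
        for (; i < n4; i += 4) {
            s0 += x[i];
            s1 += x[i + 1];
            s2 += x[i + 2];
            s3 += x[i + 3];
        }
        double s = (s0 + s1) + (s2 + s3);
        for (; i < n; i++)
            s += x[i];   /* scalar tail */
        return s;
    }

Note that the new guard also disables the double-precision path unless V_SIMD_F64 is available and the vector is wider than 128 bits, since two-lane vectors gain little over scalar code.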
From 8c0b206d4cf9909017a52919a41406ee303f472e Mon Sep 17 00:00:00 2001
From: Qiyu8
Date: Wed, 11 Nov 2020 14:33:12 +0800
Subject: [PATCH 02/10] Optimize the performance of rot by using universal intrinsics

---
 kernel/simd/intrin_avx.h    | 10 ++++++
 kernel/simd/intrin_avx512.h |  5 +++
 kernel/simd/intrin_neon.h   | 10 ++++++
 kernel/simd/intrin_sse.h    | 13 +++++++
 kernel/x86_64/drot.c        | 68 ++++++++++++++++++++++++++++++++++-
 kernel/x86_64/srot.c        | 70 +++++++++++++++++++++++++++++++++++-
 6 files changed, 174 insertions(+), 2 deletions(-)

diff --git a/kernel/simd/intrin_avx.h b/kernel/simd/intrin_avx.h
index 3f79646e0..fbe531417 100644
--- a/kernel/simd/intrin_avx.h
+++ b/kernel/simd/intrin_avx.h
@@ -12,6 +12,8 @@ typedef __m256d v_f64;
 ***************************/
 #define v_add_f32 _mm256_add_ps
 #define v_add_f64 _mm256_add_pd
+#define v_sub_f32 _mm256_sub_ps
+#define v_sub_f64 _mm256_sub_pd
 #define v_mul_f32 _mm256_mul_ps
 #define v_mul_f64 _mm256_mul_pd
 
@@ -19,12 +21,20 @@ typedef __m256d v_f64;
     // multiply and add, a*b + c
     #define v_muladd_f32 _mm256_fmadd_ps
     #define v_muladd_f64 _mm256_fmadd_pd
+    // multiply and subtract, a*b - c
+    #define v_mulsub_f32 _mm256_fmsub_ps
+    #define v_mulsub_f64 _mm256_fmsub_pd
 #else
     // multiply and add, a*b + c
     BLAS_FINLINE v_f32 v_muladd_f32(v_f32 a, v_f32 b, v_f32 c)
     { return v_add_f32(v_mul_f32(a, b), c); }
     BLAS_FINLINE v_f64 v_muladd_f64(v_f64 a, v_f64 b, v_f64 c)
     { return v_add_f64(v_mul_f64(a, b), c); }
+    // multiply and subtract, a*b - c
+    BLAS_FINLINE v_f32 v_mulsub_f32(v_f32 a, v_f32 b, v_f32 c)
+    { return v_sub_f32(v_mul_f32(a, b), c); }
+    BLAS_FINLINE v_f64 v_mulsub_f64(v_f64 a, v_f64 b, v_f64 c)
+    { return v_sub_f64(v_mul_f64(a, b), c); }
 #endif // !HAVE_FMA3
 
 // Horizontal add: Calculates the sum of all vector elements.
diff --git a/kernel/simd/intrin_avx512.h b/kernel/simd/intrin_avx512.h
index f00af53e9..8f38eedd9 100644
--- a/kernel/simd/intrin_avx512.h
+++ b/kernel/simd/intrin_avx512.h
@@ -12,11 +12,16 @@ typedef __m512d v_f64;
 ***************************/
 #define v_add_f32 _mm512_add_ps
 #define v_add_f64 _mm512_add_pd
+#define v_sub_f32 _mm512_sub_ps
+#define v_sub_f64 _mm512_sub_pd
 #define v_mul_f32 _mm512_mul_ps
 #define v_mul_f64 _mm512_mul_pd
 // multiply and add, a*b + c
 #define v_muladd_f32 _mm512_fmadd_ps
 #define v_muladd_f64 _mm512_fmadd_pd
+// multiply and subtract, a*b - c
+#define v_mulsub_f32 _mm512_fmsub_ps
+#define v_mulsub_f64 _mm512_fmsub_pd
 BLAS_FINLINE float v_sum_f32(v_f32 a)
 {
     __m512 h64 = _mm512_shuffle_f32x4(a, a, _MM_SHUFFLE(3, 2, 3, 2));
diff --git a/kernel/simd/intrin_neon.h b/kernel/simd/intrin_neon.h
index 22cef10ca..cd44599fe 100644
--- a/kernel/simd/intrin_neon.h
+++ b/kernel/simd/intrin_neon.h
@@ -18,6 +18,8 @@ typedef float32x4_t v_f32;
 ***************************/
 #define v_add_f32 vaddq_f32
 #define v_add_f64 vaddq_f64
+#define v_sub_f32 vsubq_f32
+#define v_sub_f64 vsubq_f64
 #define v_mul_f32 vmulq_f32
 #define v_mul_f64 vmulq_f64
 
@@ -26,15 +28,23 @@ typedef float32x4_t v_f32;
     // multiply and add, a*b + c
     BLAS_FINLINE v_f32 v_muladd_f32(v_f32 a, v_f32 b, v_f32 c)
     { return vfmaq_f32(c, a, b); }
+    // multiply and subtract, a*b - c
+    BLAS_FINLINE v_f32 v_mulsub_f32(v_f32 a, v_f32 b, v_f32 c)
+    { return vfmaq_f32(vnegq_f32(c), a, b); }
 #else
     // multiply and add, a*b + c
     BLAS_FINLINE v_f32 v_muladd_f32(v_f32 a, v_f32 b, v_f32 c)
     { return vmlaq_f32(c, a, b); }
+    // multiply and subtract, a*b - c
+    BLAS_FINLINE v_f32 v_mulsub_f32(v_f32 a, v_f32 b, v_f32 c)
+    { return vmlaq_f32(vnegq_f32(c), a, b); }
 #endif
 // FUSED F64
 #if V_SIMD_F64
     BLAS_FINLINE v_f64 v_muladd_f64(v_f64 a, v_f64 b, v_f64 c)
     { return vfmaq_f64(c, a, b); }
+    BLAS_FINLINE v_f64 v_mulsub_f64(v_f64 a, v_f64 b, v_f64 c)
+    { return vfmaq_f64(vnegq_f64(c), a, b); }
 #endif
 
 // Horizontal add: Calculates the sum of all vector elements.
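NEON has no single fused multiply-subtract of the a*b - c form, so the header above synthesizes it by negating the addend of a fused multiply-add. The identity it relies on, written out in scalar form (illustrative only, not part of the patch):

    /* fma(a, b, -c) == a*b + (-c) == a*b - c, applied lane by lane.
     * Negating c only flips its sign bit, so the result stays exact
     * and the operation remains a single fused instruction. */
    static float mulsub_model(float a, float b, float c)
    {
        return a * b + (-c);
    }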
diff --git a/kernel/simd/intrin_sse.h b/kernel/simd/intrin_sse.h
index 06a3fe78b..6a542072e 100644
--- a/kernel/simd/intrin_sse.h
+++ b/kernel/simd/intrin_sse.h
@@ -12,22 +12,35 @@ typedef __m128d v_f64;
 ***************************/
 #define v_add_f32 _mm_add_ps
 #define v_add_f64 _mm_add_pd
+#define v_sub_f32 _mm_sub_ps
+#define v_sub_f64 _mm_sub_pd
 #define v_mul_f32 _mm_mul_ps
 #define v_mul_f64 _mm_mul_pd
 #ifdef HAVE_FMA3
     // multiply and add, a*b + c
     #define v_muladd_f32 _mm_fmadd_ps
     #define v_muladd_f64 _mm_fmadd_pd
+    // multiply and subtract, a*b - c
+    #define v_mulsub_f32 _mm_fmsub_ps
+    #define v_mulsub_f64 _mm_fmsub_pd
 #elif defined(HAVE_FMA4)
     // multiply and add, a*b + c
     #define v_muladd_f32 _mm_macc_ps
     #define v_muladd_f64 _mm_macc_pd
+    // multiply and subtract, a*b - c
+    #define v_mulsub_f32 _mm_msub_ps
+    #define v_mulsub_f64 _mm_msub_pd
 #else
     // multiply and add, a*b + c
     BLAS_FINLINE v_f32 v_muladd_f32(v_f32 a, v_f32 b, v_f32 c)
     { return v_add_f32(v_mul_f32(a, b), c); }
     BLAS_FINLINE v_f64 v_muladd_f64(v_f64 a, v_f64 b, v_f64 c)
     { return v_add_f64(v_mul_f64(a, b), c); }
+    // multiply and subtract, a*b - c
+    BLAS_FINLINE v_f32 v_mulsub_f32(v_f32 a, v_f32 b, v_f32 c)
+    { return v_sub_f32(v_mul_f32(a, b), c); }
+    BLAS_FINLINE v_f64 v_mulsub_f64(v_f64 a, v_f64 b, v_f64 c)
+    { return v_sub_f64(v_mul_f64(a, b), c); }
 #endif // HAVE_FMA3
 
 // Horizontal add: Calculates the sum of all vector elements.
diff --git a/kernel/x86_64/drot.c b/kernel/x86_64/drot.c
index a312b7ff9..66e9ff907 100644
--- a/kernel/x86_64/drot.c
+++ b/kernel/x86_64/drot.c
@@ -7,10 +7,76 @@
 #endif
 
 #ifndef HAVE_DROT_KERNEL
+#include "../simd/intrin.h"
 
 static void drot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s)
 {
     BLASLONG i = 0;
+#if V_SIMD_F64 && V_SIMD > 256
+    const int vstep = v_nlanes_f64;
+    const int unrollx4 = n & (-vstep * 4);
+    const int unrollx = n & -vstep;
+
+    v_f64 __c = v_setall_f64(c);
+    v_f64 __s = v_setall_f64(s);
+    v_f64 vx0, vx1, vx2, vx3;
+    v_f64 vy0, vy1, vy2, vy3;
+    v_f64 vt0, vt1, vt2, vt3;
+
+    for (; i < unrollx4; i += vstep * 4) {
+        vx0 = v_loadu_f64(x + i);
+        vx1 = v_loadu_f64(x + i + vstep);
+        vx2 = v_loadu_f64(x + i + vstep * 2);
+        vx3 = v_loadu_f64(x + i + vstep * 3);
+        vy0 = v_loadu_f64(y + i);
+        vy1 = v_loadu_f64(y + i + vstep);
+        vy2 = v_loadu_f64(y + i + vstep * 2);
+        vy3 = v_loadu_f64(y + i + vstep * 3);
+
+        vt0 = v_mul_f64(__s, vy0);
+        vt1 = v_mul_f64(__s, vy1);
+        vt2 = v_mul_f64(__s, vy2);
+        vt3 = v_mul_f64(__s, vy3);
+
+        vt0 = v_muladd_f64(__c, vx0, vt0);
+        vt1 = v_muladd_f64(__c, vx1, vt1);
+        vt2 = v_muladd_f64(__c, vx2, vt2);
+        vt3 = v_muladd_f64(__c, vx3, vt3);
+
+        v_storeu_f64(x + i, vt0);
+        v_storeu_f64(x + i + vstep, vt1);
+        v_storeu_f64(x + i + vstep * 2, vt2);
+        v_storeu_f64(x + i + vstep * 3, vt3);
+
+        vt0 = v_mul_f64(__s, vx0);
+        vt1 = v_mul_f64(__s, vx1);
+        vt2 = v_mul_f64(__s, vx2);
+        vt3 = v_mul_f64(__s, vx3);
+
+        vt0 = v_mulsub_f64(__c, vy0, vt0);
+        vt1 = v_mulsub_f64(__c, vy1, vt1);
+        vt2 = v_mulsub_f64(__c, vy2, vt2);
+        vt3 = v_mulsub_f64(__c, vy3, vt3);
+
+        v_storeu_f64(y + i, vt0);
+        v_storeu_f64(y + i + vstep, vt1);
+        v_storeu_f64(y + i + vstep * 2, vt2);
+        v_storeu_f64(y + i + vstep * 3, vt3);
+    }
+
+    for (; i < unrollx; i += vstep) {
+        vx0 = v_loadu_f64(x + i);
+        vy0 = v_loadu_f64(y + i);
+
+        vt0 = v_mul_f64(__s, vy0);
+        vt0 = v_muladd_f64(__c, vx0, vt0);
+        v_storeu_f64(x + i, vt0);
+
+        vt0 = v_mul_f64(__s, vx0);
+        vt0 = v_mulsub_f64(__c, vy0, vt0);
+        v_storeu_f64(y + i, vt0);
+    }
+#else
     FLOAT f0, f1, f2, f3;
     FLOAT x0, x1, x2, x3;
     FLOAT g0, g1, g2, g3;
@@ -53,7 +119,7 @@ static void drot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s)
         yp += 4;
         i += 4;
     }
-
+#endif
     while (i < n)
     {
         FLOAT temp = c*x[i] + s*y[i];
         y[i] = c*y[i] - s*x[i];
diff --git a/kernel/x86_64/srot.c b/kernel/x86_64/srot.c
index 021c20d82..d9583cdfa 100644
--- a/kernel/x86_64/srot.c
+++ b/kernel/x86_64/srot.c
@@ -7,10 +7,78 @@
 #endif
 
 #ifndef HAVE_SROT_KERNEL
+#include "../simd/intrin.h"
 
 static void srot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s)
 {
     BLASLONG i = 0;
+
+#if V_SIMD
+    const int vstep = v_nlanes_f32;
+    const int unrollx4 = n & (-vstep * 4);
+    const int unrollx = n & -vstep;
+
+    v_f32 __c = v_setall_f32(c);
+    v_f32 __s = v_setall_f32(s);
+    v_f32 vx0, vx1, vx2, vx3;
+    v_f32 vy0, vy1, vy2, vy3;
+    v_f32 vt0, vt1, vt2, vt3;
+
+    for (; i < unrollx4; i += vstep * 4) {
+        vx0 = v_loadu_f32(x + i);
+        vx1 = v_loadu_f32(x + i + vstep);
+        vx2 = v_loadu_f32(x + i + vstep * 2);
+        vx3 = v_loadu_f32(x + i + vstep * 3);
+        vy0 = v_loadu_f32(y + i);
+        vy1 = v_loadu_f32(y + i + vstep);
+        vy2 = v_loadu_f32(y + i + vstep * 2);
+        vy3 = v_loadu_f32(y + i + vstep * 3);
+
+        vt0 = v_mul_f32(__s, vy0);
+        vt1 = v_mul_f32(__s, vy1);
+        vt2 = v_mul_f32(__s, vy2);
+        vt3 = v_mul_f32(__s, vy3);
+
+        vt0 = v_muladd_f32(__c, vx0, vt0);
+        vt1 = v_muladd_f32(__c, vx1, vt1);
+        vt2 = v_muladd_f32(__c, vx2, vt2);
+        vt3 = v_muladd_f32(__c, vx3, vt3);
+
+        v_storeu_f32(x + i, vt0);
+        v_storeu_f32(x + i + vstep, vt1);
+        v_storeu_f32(x + i + vstep * 2, vt2);
+        v_storeu_f32(x + i + vstep * 3, vt3);
+
+        vt0 = v_mul_f32(__s, vx0);
+        vt1 = v_mul_f32(__s, vx1);
+        vt2 = v_mul_f32(__s, vx2);
+        vt3 = v_mul_f32(__s, vx3);
+
+        vt0 = v_mulsub_f32(__c, vy0, vt0);
+        vt1 = v_mulsub_f32(__c, vy1, vt1);
+        vt2 = v_mulsub_f32(__c, vy2, vt2);
+        vt3 = v_mulsub_f32(__c, vy3, vt3);
+
+        v_storeu_f32(y + i, vt0);
+        v_storeu_f32(y + i + vstep, vt1);
+        v_storeu_f32(y + i + vstep * 2, vt2);
+        v_storeu_f32(y + i + vstep * 3, vt3);
+
+    }
+
+    for (; i < unrollx; i += vstep) {
+        vx0 = v_loadu_f32(x + i);
+        vy0 = v_loadu_f32(y + i);
+
+        vt0 = v_mul_f32(__s, vy0);
+        vt0 = v_muladd_f32(__c, vx0, vt0);
+        v_storeu_f32(x + i, vt0);
+
+        vt0 = v_mul_f32(__s, vx0);
+        vt0 = v_mulsub_f32(__c, vy0, vt0);
+        v_storeu_f32(y + i, vt0);
+    }
+#else
     FLOAT f0, f1, f2, f3;
     FLOAT x0, x1, x2, x3;
     FLOAT g0, g1, g2, g3;
@@ -20,7 +88,6 @@ static void srot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s)
     FLOAT* yp = y;
     BLASLONG n1 = n & (~7);
-
     while (i < n1)
     {
         x0 = xp[0];
         y0 = yp[0];
@@ -53,6 +120,7 @@ static void srot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s)
         yp += 4;
         i += 4;
     }
+#endif
     while (i < n)
     {
         FLOAT temp = c*x[i] + s*y[i];
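Both kernels vectorize the BLAS plane (Givens) rotation, which is why the new v_mulsub primitive is needed. A scalar reference for what each vector lane computes (a sketch matching the scalar tail loops above):

    /* x'[i] = c*x[i] + s*y[i]   -> v_mul (s*y), then v_muladd (c*x + t)
     * y'[i] = c*y[i] - s*x[i]   -> v_mul (s*x), then v_mulsub (c*y - t)
     * The original x[i] must be read before it is overwritten, which is
     * why the vector code loads vx/vy up front and stores x before y. */
    static void rot_ref(long n, double *x, double *y, double c, double s)
    {
        for (long i = 0; i < n; i++) {
            double t = c * x[i] + s * y[i];
            y[i] = c * y[i] - s * x[i];
            x[i] = t;
        }
    }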
From 5bc0a7583fed3328f176b69419ae12a063f2f4e0 Mon Sep 17 00:00:00 2001
From: Qiyu8
Date: Wed, 11 Nov 2020 15:18:01 +0800
Subject: [PATCH 03/10] only FMA3 and vectors wider than 128 bits have a positive effect

---
 kernel/x86_64/srot.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/x86_64/srot.c b/kernel/x86_64/srot.c
index d9583cdfa..4273f7fe7 100644
--- a/kernel/x86_64/srot.c
+++ b/kernel/x86_64/srot.c
@@ -13,7 +13,7 @@ static void srot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s)
 {
     BLASLONG i = 0;
 
-#if V_SIMD
+#if V_SIMD && (HAVE_FMA3 || V_SIMD > 128)
     const int vstep = v_nlanes_f32;
     const int unrollx4 = n & (-vstep * 4);
     const int unrollx = n & -vstep;

From a87e537b8cd5844159dd5806204470a945be695d Mon Sep 17 00:00:00 2001
From: Qiyu8
Date: Wed, 11 Nov 2020 15:53:48 +0800
Subject: [PATCH 04/10] modify macro

---
 kernel/x86_64/srot.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/x86_64/srot.c b/kernel/x86_64/srot.c
index 4273f7fe7..3de586cb8 100644
--- a/kernel/x86_64/srot.c
+++ b/kernel/x86_64/srot.c
@@ -13,7 +13,7 @@ static void srot_kernel(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT c, FLOAT s)
 {
     BLASLONG i = 0;
 
-#if V_SIMD && (HAVE_FMA3 || V_SIMD > 128)
+#if V_SIMD && (defined(HAVE_FMA3) || V_SIMD > 128)
     const int vstep = v_nlanes_f32;
     const int unrollx4 = n & (-vstep * 4);
     const int unrollx = n & -vstep;
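The two-step fix in PATCH 03 and PATCH 04 is easy to misread: the first version tested the macro's value, which breaks when a build defines the flag with no value. An illustrative preprocessor expansion (assuming a config header does #define HAVE_FMA3 with no value, and V_SIMD is 256):

    /* #if V_SIMD && (HAVE_FMA3 || V_SIMD > 128)
     *     -> #if 256 && ( || 256 > 128)             // empty token: syntax error
     *
     * #if V_SIMD && (defined(HAVE_FMA3) || V_SIMD > 128)
     *     -> #if 256 && (1 || 256 > 128)            // well-formed
     *
     * defined() is also safe when the macro is entirely undefined,
     * in which case it simply evaluates to 0. */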
From e5c2ceb6750c4e649aef87e06bd87ed4fcbdc6a5 Mon Sep 17 00:00:00 2001
From: Qiyu8
Date: Thu, 12 Nov 2020 17:35:17 +0800
Subject: [PATCH 05/10] fix the CI failure caused by the missing header

---
 kernel/simd/intrin.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/simd/intrin.h b/kernel/simd/intrin.h
index ef8fcb865..3802a91e1 100644
--- a/kernel/simd/intrin.h
+++ b/kernel/simd/intrin.h
@@ -47,7 +47,7 @@ extern "C" {
 #endif
 
 /** AVX **/
-#ifdef HAVE_AVX
+#if defined(HAVE_AVX) || defined(HAVE_FMA3)
 #include <immintrin.h>
 #endif
 

From e0dac6b53b27b2d79404577d17fdee8b2303e123 Mon Sep 17 00:00:00 2001
From: Qiyu8
Date: Thu, 12 Nov 2020 20:31:03 +0800
Subject: [PATCH 06/10] fix the CI failure caused by a target-specific option mismatch

---
 kernel/Makefile | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/kernel/Makefile b/kernel/Makefile
index fb1d5d39a..fd9105fee 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -5,6 +5,10 @@ endif
 TOPDIR = ..
 include $(TOPDIR)/Makefile.system
 
+ifdef HAVE_FMA3
+CFLAGS += -mfma
+endif
+
 ifeq ($(ARCH), power)
 ifeq ($(C_COMPILER), CLANG)
 override CFLAGS += -fno-integrated-as

From ae0b1dea19bf836fb0c8af3630ccfcbbf4b8e37f Mon Sep 17 00:00:00 2001
From: Qiyu8
Date: Fri, 13 Nov 2020 10:20:24 +0800
Subject: [PATCH 07/10] modify system.cmake to enable the fma flag

---
 cmake/system.cmake | 2 +-
 kernel/Makefile    | 4 ----
 2 files changed, 1 insertion(+), 5 deletions(-)

diff --git a/cmake/system.cmake b/cmake/system.cmake
index 66e95c6d3..68df2d900 100644
--- a/cmake/system.cmake
+++ b/cmake/system.cmake
@@ -174,7 +174,7 @@ if (DEFINED TARGET)
   endif()
   if (DEFINED HAVE_AVX)
     if (NOT NO_AVX)
-      set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mavx")
+      set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mavx -mfma")
     endif()
   endif()
   if (DEFINED HAVE_AVX2)
diff --git a/kernel/Makefile b/kernel/Makefile
index fd9105fee..fb1d5d39a 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -5,10 +5,6 @@ endif
 TOPDIR = ..
 include $(TOPDIR)/Makefile.system
 
-ifdef HAVE_FMA3
-CFLAGS += -mfma
-endif
-
 ifeq ($(ARCH), power)
 ifeq ($(C_COMPILER), CLANG)
 override CFLAGS += -fno-integrated-as

From ec4d77c47c46358521c3b38e42eb8bfebcb94ec3 Mon Sep 17 00:00:00 2001
From: Martin Kroeker
Date: Fri, 13 Nov 2020 09:16:34 +0100
Subject: [PATCH 08/10] Add -mfma for HAVE_FMA3 in the non-DYNAMIC_ARCH case
 as well

---
 cmake/cc.cmake | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/cmake/cc.cmake b/cmake/cc.cmake
index b963940d6..76952152b 100644
--- a/cmake/cc.cmake
+++ b/cmake/cc.cmake
@@ -124,6 +124,9 @@ if (NOT DYNAMIC_ARCH)
   if (HAVE_AVX)
     set (CCOMMON_OPT "${CCOMMON_OPT} -mavx")
   endif ()
+  if (HAVE_FMA3)
+    set (CCOMMON_OPT "${CCOMMON_OPT} -mfma")
+  endif ()
   if (HAVE_SSE)
     set (CCOMMON_OPT "${CCOMMON_OPT} -msse")
   endif ()

From b00a0de1323732a1b82c15bc4f0b0bac3e01c262 Mon Sep 17 00:00:00 2001
From: Qiyu8
Date: Mon, 16 Nov 2020 09:14:56 +0800
Subject: [PATCH 09/10] remove the -mfma flag when the host has AVX

---
 cmake/system.cmake | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmake/system.cmake b/cmake/system.cmake
index 68df2d900..66e95c6d3 100644
--- a/cmake/system.cmake
+++ b/cmake/system.cmake
@@ -174,7 +174,7 @@ if (DEFINED TARGET)
   endif()
   if (DEFINED HAVE_AVX)
     if (NOT NO_AVX)
-      set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mavx -mfma")
+      set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mavx")
     endif()
   endif()
   if (DEFINED HAVE_AVX2)

From 60005eb47b5d30dcf35edff8c824a9f9fd9f6e6c Mon Sep 17 00:00:00 2001
From: Alexander Grund
Date: Thu, 19 Nov 2020 14:39:00 +0100
Subject: [PATCH 10/10] Don't overwrite blas_thread_buffer if already set

After a fork it is possible that blas_thread_buffer has already
allocated memory buffers: goto_set_num_threads does allocate those
already, and it may be called by num_cpu_avail in case the OpenBLAS
NUM_THREADS differs from the OMP num threads. This leads to a memory
leak which can cause subsequent execution of BLAS kernels to fail.

Fixes #2993
---
 driver/others/blas_server_omp.c | 48 +++++++++++++++------------------
 1 file changed, 22 insertions(+), 26 deletions(-)

diff --git a/driver/others/blas_server_omp.c b/driver/others/blas_server_omp.c
index a8b3e9a4b..a576127aa 100644
--- a/driver/others/blas_server_omp.c
+++ b/driver/others/blas_server_omp.c
@@ -76,10 +76,28 @@ static atomic_bool blas_buffer_inuse[MAX_PARALLEL_NUMBER];
 static _Bool blas_buffer_inuse[MAX_PARALLEL_NUMBER];
 #endif
 
-void goto_set_num_threads(int num_threads) {
+static void adjust_thread_buffers() {
 
   int i=0, j=0;
 
+  //adjust buffer for each thread
+  for(i=0; i < MAX_PARALLEL_NUMBER; i++) {
+    for(j=0; j < blas_cpu_number; j++){
+      if(blas_thread_buffer[i][j] == NULL){
+        blas_thread_buffer[i][j] = blas_memory_alloc(2);
+      }
+    }
+    for(; j < MAX_CPU_NUMBER; j++){
+      if(blas_thread_buffer[i][j] != NULL){
+        blas_memory_free(blas_thread_buffer[i][j]);
+        blas_thread_buffer[i][j] = NULL;
+      }
+    }
+  }
+}
+
+void goto_set_num_threads(int num_threads) {
+
   if (num_threads < 1) num_threads = blas_num_threads;
 
   if (num_threads > MAX_CPU_NUMBER) num_threads = MAX_CPU_NUMBER;
@@ -92,20 +110,7 @@ void goto_set_num_threads(int num_threads) {
 
   omp_set_num_threads(blas_cpu_number);
 
-  //adjust buffer for each thread
-  for(i=0; i
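PATCH 10 boils down to one rule: a slot that may already hold a buffer must not be overwritten by a fresh allocation. A self-contained model of the leak and of the fixed pattern (hypothetical names, not the OpenBLAS API):

    #include <stdlib.h>

    static void *slot = NULL;

    /* Leaky: if an earlier call (e.g. goto_set_num_threads() after a
     * fork) already filled the slot, the old buffer becomes unreachable. */
    static void fill_slot_leaky(void)
    {
        slot = malloc(4096);
    }

    /* Fixed pattern, as in adjust_thread_buffers() above: allocate only
     * when the slot is still empty, and free slots past the thread count. */
    static void fill_slot_safe(void)
    {
        if (slot == NULL)
            slot = malloc(4096);
    }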