From d3bf5a5401e623e107a23fb70151c7102cbd14c7 Mon Sep 17 00:00:00 2001
From: sh-zheng <2294474733@qq.com>
Date: Mon, 22 May 2023 22:39:45 +0800
Subject: [PATCH] Combine two reduction operations of zhe/symv into one, with
 the tail policy set to undisturbed.

The remainder block now accumulates into the same vector registers as the
main loop through tail-undisturbed (_tu) intrinsics, so each partial sum is
reduced with vfredusum once per column instead of twice.

---
 kernel/riscv64/zhemv_LM_rvv.c | 78 +++++++++++++++++------------------
 kernel/riscv64/zhemv_UV_rvv.c | 78 +++++++++++++++++------------------
 kernel/riscv64/zsymv_L_rvv.c  | 56 ++++++++++++-------------
 kernel/riscv64/zsymv_U_rvv.c  | 58 +++++++++++++------------
 4 files changed, 135 insertions(+), 135 deletions(-)

diff --git a/kernel/riscv64/zhemv_LM_rvv.c b/kernel/riscv64/zhemv_LM_rvv.c
index e025120e5..95c6a377c 100644
--- a/kernel/riscv64/zhemv_LM_rvv.c
+++ b/kernel/riscv64/zhemv_LM_rvv.c
@@ -36,12 +36,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define VSSEV_FLOAT __riscv_vsse32_v_f32m4
 #define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m4_f32m1
 #define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m4
+#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f32m4_tu
 #define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4
 #define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
 #define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
 #define VFMULVV_FLOAT __riscv_vfmul_vv_f32m4
 #define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4
 #define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f32m4
+#define VFNMSACVV_FLOAT_TU __riscv_vfnmsac_vv_f32m4_tu
 #else
 #define VSETVL(n) __riscv_vsetvl_e64m4(n)
 #define VSETVL_MAX __riscv_vsetvlmax_e64m1()
@@ -52,12 +54,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define VSSEV_FLOAT __riscv_vsse64_v_f64m4
 #define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m4_f64m1
 #define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m4
+#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f64m4_tu
 #define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4
 #define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
 #define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
 #define VFMULVV_FLOAT __riscv_vfmul_vv_f64m4
 #define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4
 #define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f64m4
+#define VFNMSACVV_FLOAT_TU __riscv_vfnmsac_vv_f64m4_tu
 #endif
 
 int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG incx, FLOAT *y, BLASLONG incy, FLOAT *buffer){
@@ -143,49 +147,45 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, B
             iy += inc_yv;
             ia += inc_av;
         }
+
+        if(i < m){
+            unsigned int gvl_rem = VSETVL(m-i);
+            va0 = VLSEV_FLOAT(&a_ptr[ia], stride_a, gvl_rem);
+            va1 = VLSEV_FLOAT(&a_ptr[ia+1], stride_a, gvl_rem);
+            vy0 = VLSEV_FLOAT(&y[iy], stride_y, gvl_rem);
+            vy1 = VLSEV_FLOAT(&y[iy+1], stride_y, gvl_rem);
+#ifndef HEMVREV
+            vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl_rem);
+            vy0 = VFNMSACVF_FLOAT(vy0, temp_i1, va1, gvl_rem);
+            vy1 = VFMACCVF_FLOAT(vy1, temp_r1, va1, gvl_rem);
+            vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl_rem);
+#else
+            vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl_rem);
+            vy0 = VFMACCVF_FLOAT(vy0, temp_i1, va1, gvl_rem);
+            vy1 = VFNMSACVF_FLOAT(vy1, temp_r1, va1, gvl_rem);
+            vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl_rem);
+#endif
+            VSSEV_FLOAT(&y[iy], stride_y, vy0, gvl_rem);
+            VSSEV_FLOAT(&y[iy+1], stride_y, vy1, gvl_rem);
+
+            vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl_rem);
+            vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl_rem);
+#ifndef HEMVREV
+            vr0 = VFMACCVV_FLOAT_TU(vr0, vx0, va0, gvl_rem);
+            vr0 = VFMACCVV_FLOAT_TU(vr0, vx1, va1, gvl_rem);
+            vr1 = VFMACCVV_FLOAT_TU(vr1, vx1, va0, gvl_rem);
+            vr1 = VFNMSACVV_FLOAT_TU(vr1, vx0, va1, gvl_rem);
+#else
+            vr0 = VFMACCVV_FLOAT_TU(vr0, vx0, va0, gvl_rem);
+            vr0 = VFNMSACVV_FLOAT_TU(vr0, vx1, va1, gvl_rem);
+            vr1 = VFMACCVV_FLOAT_TU(vr1, vx1, va0, gvl_rem);
+            vr1 = VFMACCVV_FLOAT_TU(vr1, vx0, va1, gvl_rem);
+#endif
+        }
 
         v_res = VFREDSUM_FLOAT(vr0, v_z0, gvl);
         temp_r2 = VFMVFS_FLOAT(v_res);
         v_res = VFREDSUM_FLOAT(vr1, v_z0, gvl);
         temp_i2 = VFMVFS_FLOAT(v_res);
-        if(i < m){
-            gvl = VSETVL(m-i);
-            va0 = VLSEV_FLOAT(&a_ptr[ia], stride_a, gvl);
-            va1 = VLSEV_FLOAT(&a_ptr[ia+1], stride_a, gvl);
-            vy0 = VLSEV_FLOAT(&y[iy], stride_y, gvl);
-            vy1 = VLSEV_FLOAT(&y[iy+1], stride_y, gvl);
-#ifndef HEMVREV
-            vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl);
-            vy0 = VFNMSACVF_FLOAT(vy0, temp_i1, va1, gvl);
-            vy1 = VFMACCVF_FLOAT(vy1, temp_r1, va1, gvl);
-            vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl);
-#else
-            vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl);
-            vy0 = VFMACCVF_FLOAT(vy0, temp_i1, va1, gvl);
-            vy1 = VFNMSACVF_FLOAT(vy1, temp_r1, va1, gvl);
-            vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl);
-#endif
-            VSSEV_FLOAT(&y[iy], stride_y, vy0, gvl);
-            VSSEV_FLOAT(&y[iy+1], stride_y, vy1, gvl);
-
-            vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl);
-            vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl);
-#ifndef HEMVREV
-            vr0 = VFMULVV_FLOAT(vx0, va0, gvl);
-            vr0 = VFMACCVV_FLOAT(vr0, vx1, va1, gvl);
-            vr1 = VFMULVV_FLOAT(vx1, va0, gvl);
-            vr1 = VFNMSACVV_FLOAT(vr1, vx0, va1, gvl);
-#else
-            vr0 = VFMULVV_FLOAT(vx0, va0, gvl);
-            vr0 = VFNMSACVV_FLOAT(vr0, vx1, va1, gvl);
-            vr1 = VFMULVV_FLOAT(vx1, va0, gvl);
-            vr1 = VFMACCVV_FLOAT(vr1, vx0, va1, gvl);
-#endif
-
-            v_res = VFREDSUM_FLOAT(vr0, v_z0, gvl);
-            temp_r2 += VFMVFS_FLOAT(v_res);
-            v_res = VFREDSUM_FLOAT(vr1, v_z0, gvl);
-            temp_i2 += VFMVFS_FLOAT(v_res);
-        }
     }
     y[jy] += alpha_r * temp_r2 - alpha_i * temp_i2;
     y[jy+1] += alpha_r * temp_i2 + alpha_i * temp_r2;
diff --git a/kernel/riscv64/zhemv_UV_rvv.c b/kernel/riscv64/zhemv_UV_rvv.c
index 0e1ea5436..ec06622fc 100644
--- a/kernel/riscv64/zhemv_UV_rvv.c
+++ b/kernel/riscv64/zhemv_UV_rvv.c
@@ -36,12 +36,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define VSSEV_FLOAT __riscv_vsse32_v_f32m4
 #define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m4_f32m1
 #define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m4
+#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f32m4_tu
 #define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4
 #define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
 #define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
 #define VFMULVV_FLOAT __riscv_vfmul_vv_f32m4
 #define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4
 #define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f32m4
+#define VFNMSACVV_FLOAT_TU __riscv_vfnmsac_vv_f32m4_tu
 #else
 #define VSETVL(n) __riscv_vsetvl_e64m4(n)
 #define VSETVL_MAX __riscv_vsetvlmax_e64m1()
@@ -52,12 +54,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define VSSEV_FLOAT __riscv_vsse64_v_f64m4
 #define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m4_f64m1
 #define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m4
+#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f64m4_tu
 #define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4
 #define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
 #define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
 #define VFMULVV_FLOAT __riscv_vfmul_vv_f64m4
 #define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4
 #define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f64m4
+#define VFNMSACVV_FLOAT_TU __riscv_vfnmsac_vv_f64m4_tu
 #endif
 
 int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG incx, FLOAT *y, BLASLONG incy, FLOAT *buffer){
@@ -142,49 +146,45 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, B
             iy += inc_yv;
             ia += inc_av;
         }
+
+        if(i < j){
+            unsigned int gvl_rem = VSETVL(j-i);
+            va0 = VLSEV_FLOAT(&a_ptr[ia], stride_a, gvl_rem);
+            va1 = VLSEV_FLOAT(&a_ptr[ia+1], stride_a, gvl_rem);
+            vy0 = VLSEV_FLOAT(&y[iy], stride_y, gvl_rem);
+            vy1 = VLSEV_FLOAT(&y[iy+1], stride_y, gvl_rem);
+#ifndef HEMVREV
+            vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl_rem);
+            vy0 = VFNMSACVF_FLOAT(vy0, temp_i1, va1, gvl_rem);
+            vy1 = VFMACCVF_FLOAT(vy1, temp_r1, va1, gvl_rem);
+            vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl_rem);
+#else
+            vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl_rem);
+            vy0 = VFMACCVF_FLOAT(vy0, temp_i1, va1, gvl_rem);
+            vy1 = VFNMSACVF_FLOAT(vy1, temp_r1, va1, gvl_rem);
+            vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl_rem);
+#endif
+            VSSEV_FLOAT(&y[iy], stride_y, vy0, gvl_rem);
+            VSSEV_FLOAT(&y[iy+1], stride_y, vy1, gvl_rem);
+
+            vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl_rem);
+            vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl_rem);
+#ifndef HEMVREV
+            vr0 = VFMACCVV_FLOAT_TU(vr0, vx0, va0, gvl_rem);
+            vr0 = VFMACCVV_FLOAT_TU(vr0, vx1, va1, gvl_rem);
+            vr1 = VFMACCVV_FLOAT_TU(vr1, vx1, va0, gvl_rem);
+            vr1 = VFNMSACVV_FLOAT_TU(vr1, vx0, va1, gvl_rem);
+#else
+            vr0 = VFMACCVV_FLOAT_TU(vr0, vx0, va0, gvl_rem);
+            vr0 = VFNMSACVV_FLOAT_TU(vr0, vx1, va1, gvl_rem);
+            vr1 = VFMACCVV_FLOAT_TU(vr1, vx1, va0, gvl_rem);
+            vr1 = VFMACCVV_FLOAT_TU(vr1, vx0, va1, gvl_rem);
+#endif
+        }
 
         v_res = VFREDSUM_FLOAT(vr0, v_z0, gvl);
         temp_r2 = VFMVFS_FLOAT(v_res);
         v_res = VFREDSUM_FLOAT(vr1, v_z0, gvl);
         temp_i2 = VFMVFS_FLOAT(v_res);
-        if(i < j){
-            gvl = VSETVL(j-i);
-            va0 = VLSEV_FLOAT(&a_ptr[ia], stride_a, gvl);
-            va1 = VLSEV_FLOAT(&a_ptr[ia+1], stride_a, gvl);
-            vy0 = VLSEV_FLOAT(&y[iy], stride_y, gvl);
-            vy1 = VLSEV_FLOAT(&y[iy+1], stride_y, gvl);
-#ifndef HEMVREV
-            vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl);
-            vy0 = VFNMSACVF_FLOAT(vy0, temp_i1, va1, gvl);
-            vy1 = VFMACCVF_FLOAT(vy1, temp_r1, va1, gvl);
-            vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl);
-#else
-            vy0 = VFMACCVF_FLOAT(vy0, temp_r1, va0, gvl);
-            vy0 = VFMACCVF_FLOAT(vy0, temp_i1, va1, gvl);
-            vy1 = VFNMSACVF_FLOAT(vy1, temp_r1, va1, gvl);
-            vy1 = VFMACCVF_FLOAT(vy1, temp_i1, va0, gvl);
-#endif
-            VSSEV_FLOAT(&y[iy], stride_y, vy0, gvl);
-            VSSEV_FLOAT(&y[iy+1], stride_y, vy1, gvl);
-
-            vx0 = VLSEV_FLOAT(&x[ix], stride_x, gvl);
-            vx1 = VLSEV_FLOAT(&x[ix+1], stride_x, gvl);
-#ifndef HEMVREV
-            vr0 = VFMULVV_FLOAT(vx0, va0, gvl);
-            vr0 = VFMACCVV_FLOAT(vr0, vx1, va1, gvl);
-            vr1 = VFMULVV_FLOAT(vx1, va0, gvl);
-            vr1 = VFNMSACVV_FLOAT(vr1, vx0, va1, gvl);
-#else
-            vr0 = VFMULVV_FLOAT(vx0, va0, gvl);
-            vr0 = VFNMSACVV_FLOAT(vr0, vx1, va1, gvl);
-            vr1 = VFMULVV_FLOAT(vx1, va0, gvl);
-            vr1 = VFMACCVV_FLOAT(vr1, vx0, va1, gvl);
-#endif
-
-            v_res = VFREDSUM_FLOAT(vr0, v_z0, gvl);
-            temp_r2 += VFMVFS_FLOAT(v_res);
-            v_res = VFREDSUM_FLOAT(vr1, v_z0, gvl);
-            temp_i2 += VFMVFS_FLOAT(v_res);
-        }
     }
     y[jy] += temp_r1 * a_ptr[ja];
     y[jy+1] += temp_i1 * a_ptr[ja];
diff --git a/kernel/riscv64/zsymv_L_rvv.c b/kernel/riscv64/zsymv_L_rvv.c
index 3bf621094..cefdea7f6 100644
--- a/kernel/riscv64/zsymv_L_rvv.c
+++ b/kernel/riscv64/zsymv_L_rvv.c
@@ -38,6 +38,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m4_f32m1
 #define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m4
 #define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f32m4
+#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f32m4_tu
+#define VFNMSACVV_FLOAT_TU __riscv_vfnmsac_vv_f32m4_tu
 #define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4
 #define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4
 #define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
@@ -57,6 +59,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m4_f64m1
 #define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m4
 #define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f64m4
+#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f64m4_tu
+#define VFNMSACVV_FLOAT_TU __riscv_vfnmsac_vv_f64m4_tu
 #define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4
 #define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4
 #define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
@@ -133,38 +137,34 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha_r, FLOAT alpha_i,
             ix += inc_xv;
             iy += inc_yv;
         }
+
+        if(i < m){
+            unsigned int gvl_rem = VSETVL(m-i);
+            vy_r = VLSEV_FLOAT(&y[2 * iy], stride_y, gvl_rem);
+            vy_i = VLSEV_FLOAT(&y[2 * iy + 1], stride_y, gvl_rem);
+            va_r = VLSEV_FLOAT(&a_ptr[2 * i], 2 * sizeof(FLOAT), gvl_rem);
+            va_i = VLSEV_FLOAT(&a_ptr[2 * i + 1], 2 * sizeof(FLOAT), gvl_rem);
+
+            vy_r = VFMACCVF_FLOAT(vy_r, temp1[0], va_r, gvl_rem);
+            vy_r = VFNMSACVF_FLOAT(vy_r, temp1[1], va_i, gvl_rem);
+            vy_i = VFMACCVF_FLOAT(vy_i, temp1[0], va_i, gvl_rem);
+            vy_i = VFMACCVF_FLOAT(vy_i, temp1[1], va_r, gvl_rem);
+
+            VSSEV_FLOAT(&y[2 * iy], stride_y, vy_r, gvl_rem);
+            VSSEV_FLOAT(&y[2 * iy + 1], stride_y, vy_i, gvl_rem);
+
+            vx_r = VLSEV_FLOAT(&x[2 * ix], stride_x, gvl_rem);
+            vx_i = VLSEV_FLOAT(&x[2 * ix + 1], stride_x, gvl_rem);
+            vr_r = VFMACCVV_FLOAT_TU(vr_r, vx_r, va_r, gvl_rem);
+            vr_r = VFNMSACVV_FLOAT_TU(vr_r, vx_i, va_i, gvl_rem);
+            vr_i = VFMACCVV_FLOAT_TU(vr_i, vx_r, va_i, gvl_rem);
+            vr_i = VFMACCVV_FLOAT_TU(vr_i, vx_i, va_r, gvl_rem);
+
+        }
 
         v_res = VFREDSUM_FLOAT(vr_r, v_z0, gvl);
         temp2[0] = VFMVFS_FLOAT_M1(v_res);
         v_res = VFREDSUM_FLOAT(vr_i, v_z0, gvl);
         temp2[1] = VFMVFS_FLOAT_M1(v_res);
-
-        if(i < m){
-            gvl = VSETVL(m-i);
-            vy_r = VLSEV_FLOAT(&y[2 * iy], stride_y, gvl);
-            vy_i = VLSEV_FLOAT(&y[2 * iy + 1], stride_y, gvl);
-            va_r = VLSEV_FLOAT(&a_ptr[2 * i], 2 * sizeof(FLOAT), gvl);
-            va_i = VLSEV_FLOAT(&a_ptr[2 * i + 1], 2 * sizeof(FLOAT), gvl);
-
-            vy_r = VFMACCVF_FLOAT(vy_r, temp1[0], va_r, gvl);
-            vy_r = VFNMSACVF_FLOAT(vy_r, temp1[1], va_i, gvl);
-            vy_i = VFMACCVF_FLOAT(vy_i, temp1[0], va_i, gvl);
-            vy_i = VFMACCVF_FLOAT(vy_i, temp1[1], va_r, gvl);
-
-            VSSEV_FLOAT(&y[2 * iy], stride_y, vy_r, gvl);
-            VSSEV_FLOAT(&y[2 * iy + 1], stride_y, vy_i, gvl);
-
-            vx_r = VLSEV_FLOAT(&x[2 * ix], stride_x, gvl);
-            vx_i = VLSEV_FLOAT(&x[2 * ix + 1], stride_x, gvl);
-            vr_r = VFMULVV_FLOAT(vx_r, va_r, gvl);
-            vr_r = VFNMSACVV_FLOAT(vr_r, vx_i, va_i, gvl);
-            vr_i = VFMULVV_FLOAT(vx_r, va_i, gvl);
-            vr_i = VFMACCVV_FLOAT(vr_i, vx_i, va_r, gvl);
-
-            v_res = VFREDSUM_FLOAT(vr_r, v_z0, gvl);
-            temp2[0] += VFMVFS_FLOAT_M1(v_res);
-            v_res = VFREDSUM_FLOAT(vr_i, v_z0, gvl);
-            temp2[1] += VFMVFS_FLOAT_M1(v_res);
-        }
     }
     y[2 * jy] += alpha_r * temp2[0] - alpha_i * temp2[1];
     y[2 * jy + 1] += alpha_r * temp2[1] + alpha_i * temp2[0];
diff --git a/kernel/riscv64/zsymv_U_rvv.c b/kernel/riscv64/zsymv_U_rvv.c
index de1564f75..67b5a649c 100644
--- a/kernel/riscv64/zsymv_U_rvv.c
+++ b/kernel/riscv64/zsymv_U_rvv.c
@@ -38,6 +38,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m4_f32m1
 #define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m4
 #define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f32m4
+#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f32m4_tu
+#define VFNMSACVV_FLOAT_TU __riscv_vfnmsac_vv_f32m4_tu
 #define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4
 #define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4
 #define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
@@ -56,6 +58,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m4_f64m1
 #define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m4
 #define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f64m4
+#define VFMACCVV_FLOAT_TU __riscv_vfmacc_vv_f64m4_tu
+#define VFNMSACVV_FLOAT_TU __riscv_vfnmsac_vv_f64m4_tu
 #define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4
 #define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4
 #define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
@@ -129,39 +133,35 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha_r, FLOAT alpha_i,
             ix += inc_xv;
             iy += inc_yv;
         }
+
+        if(i < j){
+            unsigned int gvl_rem = VSETVL(j-i);
+            vy_r = VLSEV_FLOAT(&y[2 * iy], stride_y, gvl_rem);
+            vy_i = VLSEV_FLOAT(&y[2 * iy + 1], stride_y, gvl_rem);
+
+            va_r = VLSEV_FLOAT(&a_ptr[2 * i], 2 * sizeof(FLOAT), gvl_rem);
+            va_i = VLSEV_FLOAT(&a_ptr[2 * i + 1], 2 * sizeof(FLOAT), gvl_rem);
+
+            vy_r = VFMACCVF_FLOAT(vy_r, temp1[0], va_r, gvl_rem);
+            vy_r = VFNMSACVF_FLOAT(vy_r, temp1[1], va_i, gvl_rem);
+            vy_i = VFMACCVF_FLOAT(vy_i, temp1[0], va_i, gvl_rem);
+            vy_i = VFMACCVF_FLOAT(vy_i, temp1[1], va_r, gvl_rem);
+
+            VSSEV_FLOAT(&y[2 * iy], stride_y, vy_r, gvl_rem);
+            VSSEV_FLOAT(&y[2 * iy + 1], stride_y, vy_i, gvl_rem);
+
+            vx_r = VLSEV_FLOAT(&x[2 * ix], stride_x, gvl_rem);
+            vx_i = VLSEV_FLOAT(&x[2 * ix + 1], stride_x, gvl_rem);
+            vr_r = VFMACCVV_FLOAT_TU(vr_r, vx_r, va_r, gvl_rem);
+            vr_r = VFNMSACVV_FLOAT_TU(vr_r, vx_i, va_i, gvl_rem);
+            vr_i = VFMACCVV_FLOAT_TU(vr_i, vx_r, va_i, gvl_rem);
+            vr_i = VFMACCVV_FLOAT_TU(vr_i, vx_i, va_r, gvl_rem);
+
+        }
 
         v_res = VFREDSUM_FLOAT(vr_r, v_z0, gvl);
         temp2[0] = VFMVFS_FLOAT_M1(v_res);
         v_res = VFREDSUM_FLOAT(vr_i, v_z0, gvl);
         temp2[1] = VFMVFS_FLOAT_M1(v_res);
-
-        if(i < j){
-            gvl = VSETVL(j-i);
-            vy_r = VLSEV_FLOAT(&y[2 * iy], stride_y, gvl);
-            vy_i = VLSEV_FLOAT(&y[2 * iy + 1], stride_y, gvl);
-
-            va_r = VLSEV_FLOAT(&a_ptr[2 * i], 2 * sizeof(FLOAT), gvl);
-            va_i = VLSEV_FLOAT(&a_ptr[2 * i + 1], 2 * sizeof(FLOAT), gvl);
-
-            vy_r = VFMACCVF_FLOAT(vy_r, temp1[0], va_r, gvl);
-            vy_r = VFNMSACVF_FLOAT(vy_r, temp1[1], va_i, gvl);
-            vy_i = VFMACCVF_FLOAT(vy_i, temp1[0], va_i, gvl);
-            vy_i = VFMACCVF_FLOAT(vy_i, temp1[1], va_r, gvl);
-
-            VSSEV_FLOAT(&y[2 * iy], stride_y, vy_r, gvl);
-            VSSEV_FLOAT(&y[2 * iy + 1], stride_y, vy_i, gvl);
-
-            vx_r = VLSEV_FLOAT(&x[2 * ix], stride_x, gvl);
-            vx_i = VLSEV_FLOAT(&x[2 * ix + 1], stride_x, gvl);
-            vr_r = VFMULVV_FLOAT(vx_r, va_r, gvl);
-            vr_r = VFNMSACVV_FLOAT(vr_r, vx_i, va_i, gvl);
-            vr_i = VFMULVV_FLOAT(vx_r, va_i, gvl);
-            vr_i = VFMACCVV_FLOAT(vr_i, vx_i, va_r, gvl);
-
-            v_res = VFREDSUM_FLOAT(vr_r, v_z0, gvl);
-            temp2[0] += VFMVFS_FLOAT_M1(v_res);
-            v_res = VFREDSUM_FLOAT(vr_i, v_z0, gvl);
-            temp2[1] += VFMVFS_FLOAT_M1(v_res);
-        }
     }
     y[2 * jy] += temp1[0] * a_ptr[j * 2] - temp1[1] * a_ptr[j * 2 + 1]
                + alpha_r * temp2[0] - alpha_i * temp2[1];
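
For reference, here is a minimal standalone sketch of the tail-undisturbed
idiom the patch applies. It is illustrative rather than part of the patch:
the function dot_f32 and its arguments are made up for this note, while the
__riscv_* calls are the RVV 1.0 C intrinsics from <riscv_vector.h> that the
kernels above reach through their VFMACCVV_FLOAT_TU/VFREDSUM_FLOAT macros.

#include <riscv_vector.h>
#include <stddef.h>

/* Dot product with a single final reduction: the remainder iteration
 * accumulates into vacc with a _tu (tail-undisturbed) multiply-add, so
 * lanes at or beyond the remainder length keep their main-loop partial
 * sums and one vfredusum over the full main-loop VL covers everything. */
static float dot_f32(const float *x, const float *y, size_t n)
{
    size_t gvl = __riscv_vsetvlmax_e32m4();
    vfloat32m4_t vacc = __riscv_vfmv_v_f_f32m4(0.0f, gvl); /* zero all lanes */

    size_t i = 0;
    for (; i + gvl <= n; i += gvl) {
        vfloat32m4_t vx = __riscv_vle32_v_f32m4(&x[i], gvl);
        vfloat32m4_t vy = __riscv_vle32_v_f32m4(&y[i], gvl);
        vacc = __riscv_vfmacc_vv_f32m4(vacc, vx, vy, gvl);
    }
    if (i < n) {
        size_t gvl_rem = __riscv_vsetvl_e32m4(n - i);
        vfloat32m4_t vx = __riscv_vle32_v_f32m4(&x[i], gvl_rem);
        vfloat32m4_t vy = __riscv_vle32_v_f32m4(&y[i], gvl_rem);
        /* _tu: lanes >= gvl_rem are left undisturbed, not made agnostic */
        vacc = __riscv_vfmacc_vv_f32m4_tu(vacc, vx, vy, gvl_rem);
    }
    /* one reduction instead of a second one just for the tail */
    vfloat32m1_t vz = __riscv_vfmv_v_f_f32m1(0.0f, __riscv_vsetvlmax_e32m1());
    vfloat32m1_t vs = __riscv_vfredusum_vs_f32m4_f32m1(vacc, vz, gvl);
    return __riscv_vfmv_f_s_f32m1_f32(vs);
}

Without the _tu form, the tail lanes of vacc would be unspecified after the
remainder step, which is why the old code had to build a fresh product with
VFMULVV_FLOAT and pay for a second vfredusum per accumulator.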