diff --git a/kernel/riscv64/amax_rvv.c b/kernel/riscv64/amax_rvv.c
index c9c6e7f73..be0bdbea0 100644
--- a/kernel/riscv64/amax_rvv.c
+++ b/kernel/riscv64/amax_rvv.c
@@ -29,33 +29,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <math.h>
 
 #if !defined(DOUBLE)
-#define VSETVL(n) vsetvl_e32m8(n)
-#define VSETVL_MAX vsetvlmax_e32m8()
-#define VSETVL_MAX_M1 vsetvlmax_e32m1()
-#define FLOAT_V_T vfloat32m8_t
-#define FLOAT_V_T_M1 vfloat32m1_t
-#define VLEV_FLOAT vle32_v_f32m8
-#define VLSEV_FLOAT vlse32_v_f32m8
-#define VFREDMAXVS_FLOAT vfredmax_vs_f32m8_f32m1
-#define VFMVVF_FLOAT vfmv_v_f_f32m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
-#define VFMAXVV_FLOAT vfmax_vv_f32m8
-#define VFABSV_FLOAT vfabs_v_f32m8
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
+#define VSETVL(n) __riscv_vsetvl_e32m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
+#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
+#define FLOAT_V_T vfloat32m8_t
+#define FLOAT_V_T_M1 vfloat32m1_t
+#define VLEV_FLOAT __riscv_vle32_v_f32m8
+#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
+#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f32m8_f32m1
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
+#define VFMAXVV_FLOAT __riscv_vfmax_vv_f32m8
+#define VFABSV_FLOAT __riscv_vfabs_v_f32m8
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
 #else
-#define VSETVL(n) vsetvl_e64m8(n)
-#define VSETVL_MAX vsetvlmax_e64m8()
-#define VSETVL_MAX_M1 vsetvlmax_e64m1()
-#define FLOAT_V_T vfloat64m8_t
-#define FLOAT_V_T_M1 vfloat64m1_t
-#define VLEV_FLOAT vle64_v_f64m8
-#define VLSEV_FLOAT vlse64_v_f64m8
-#define VFREDMAXVS_FLOAT vfredmax_vs_f64m8_f64m1
-#define VFMVVF_FLOAT vfmv_v_f_f64m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
-#define VFMAXVV_FLOAT vfmax_vv_f64m8
-#define VFABSV_FLOAT vfabs_v_f64m8
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
+#define VSETVL(n) __riscv_vsetvl_e64m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
+#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
+#define FLOAT_V_T vfloat64m8_t
+#define FLOAT_V_T_M1 vfloat64m1_t
+#define VLEV_FLOAT __riscv_vle64_v_f64m8
+#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
+#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f64m8_f64m1
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
+#define VFMAXVV_FLOAT __riscv_vfmax_vv_f64m8
+#define VFABSV_FLOAT __riscv_vfabs_v_f64m8
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
 #endif
 
 FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -95,7 +95,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
     }
 
-    v_res = VFREDMAXVS_FLOAT(v_res, vmax, v_res, vlmax);
+    v_res = VFREDMAXVS_FLOAT(vmax, v_res, vlmax);
 
     maxf = VFMVFS_FLOAT_M1(v_res);
 
     return(maxf);
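
Note: the two mechanical changes above repeat through every kernel in this patch. All RVV intrinsics gain the `__riscv_` prefix mandated by v0.12 of the RISC-V vector intrinsics API (recent GCC/clang toolchains only ship the prefixed names), and the unmasked floating-point reductions lose their separate destination operand: `vfredmax_vs_f32m8_f32m1(dest, vector, scalar, vl)` becomes `__riscv_vfredmax_vs_f32m8_f32m1(vector, scalar, vl)`. The stand-alone sketch below (not part of the patch; the helper name `max_abs_f32` is invented here) shows the new-style absolute-maximum pattern these kernels use:

    #include <riscv_vector.h>
    #include <stddef.h>

    /* Strip-mined max(|x[i]|) with v0.12-style intrinsics. The _tu
     * (tail-undisturbed) form keeps lanes past vl intact on the final,
     * shorter iteration, so reducing over vlmax afterwards is safe. */
    float max_abs_f32(const float *x, size_t n)
    {
        size_t vlmax = __riscv_vsetvlmax_e32m8();
        vfloat32m8_t vmax = __riscv_vfmv_v_f_f32m8(0.0f, vlmax);
        for (size_t vl; n > 0; n -= vl, x += vl) {
            vl = __riscv_vsetvl_e32m8(n);
            vfloat32m8_t vx = __riscv_vle32_v_f32m8(x, vl);
            vmax = __riscv_vfmax_vv_f32m8_tu(vmax, vmax, __riscv_vfabs_v_f32m8(vx, vl), vl);
        }
        /* New reduction form: (vector, scalar seed, vl) -- no dest operand. */
        vfloat32m1_t v_res = __riscv_vfmv_v_f_f32m1(0.0f, __riscv_vsetvlmax_e32m1());
        v_res = __riscv_vfredmax_vs_f32m8_f32m1(vmax, v_res, vlmax);
        return __riscv_vfmv_f_s_f32m1_f32(v_res);
    }
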
diff --git a/kernel/riscv64/amin_rvv.c b/kernel/riscv64/amin_rvv.c
index 370b6c338..d4926084b 100644
--- a/kernel/riscv64/amin_rvv.c
+++ b/kernel/riscv64/amin_rvv.c
@@ -29,33 +29,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <math.h>
 
 #if !defined(DOUBLE)
-#define VSETVL(n) vsetvl_e32m8(n)
-#define VSETVL_MAX vsetvlmax_e32m8()
-#define VSETVL_MAX_M1 vsetvlmax_e32m1()
-#define FLOAT_V_T vfloat32m8_t
-#define FLOAT_V_T_M1 vfloat32m1_t
-#define VLEV_FLOAT vle32_v_f32m8
-#define VLSEV_FLOAT vlse32_v_f32m8
-#define VFREDMINVS_FLOAT vfredmin_vs_f32m8_f32m1
-#define VFMVVF_FLOAT vfmv_v_f_f32m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
-#define VFMINVV_FLOAT vfmin_vv_f32m8
-#define VFABSV_FLOAT vfabs_v_f32m8
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
+#define VSETVL(n) __riscv_vsetvl_e32m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
+#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
+#define FLOAT_V_T vfloat32m8_t
+#define FLOAT_V_T_M1 vfloat32m1_t
+#define VLEV_FLOAT __riscv_vle32_v_f32m8
+#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
+#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f32m8_f32m1
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
+#define VFMINVV_FLOAT __riscv_vfmin_vv_f32m8
+#define VFABSV_FLOAT __riscv_vfabs_v_f32m8
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
 #else
-#define VSETVL(n) vsetvl_e64m8(n)
-#define VSETVL_MAX vsetvlmax_e64m8()
-#define VSETVL_MAX_M1 vsetvlmax_e64m1()
-#define FLOAT_V_T vfloat64m8_t
-#define FLOAT_V_T_M1 vfloat64m1_t
-#define VLEV_FLOAT vle64_v_f64m8
-#define VLSEV_FLOAT vlse64_v_f64m8
-#define VFREDMINVS_FLOAT vfredmin_vs_f64m8_f64m1
-#define VFMVVF_FLOAT vfmv_v_f_f64m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
-#define VFMINVV_FLOAT vfmin_vv_f64m8
-#define VFABSV_FLOAT vfabs_v_f64m8
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
+#define VSETVL(n) __riscv_vsetvl_e64m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
+#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
+#define FLOAT_V_T vfloat64m8_t
+#define FLOAT_V_T_M1 vfloat64m1_t
+#define VLEV_FLOAT __riscv_vle64_v_f64m8
+#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
+#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f64m8_f64m1
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
+#define VFMINVV_FLOAT __riscv_vfmin_vv_f64m8
+#define VFABSV_FLOAT __riscv_vfabs_v_f64m8
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
 #endif
 
 FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -95,7 +95,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
     }
 
-    v_res = VFREDMINVS_FLOAT(v_res, vmin, v_res, vlmax);
+    v_res = VFREDMINVS_FLOAT(vmin, v_res, vlmax);
 
     minf = VFMVFS_FLOAT_M1(v_res);
 
     return(minf);
diff --git a/kernel/riscv64/asum_rvv.c b/kernel/riscv64/asum_rvv.c
index 4f711c9be..691591e22 100644
--- a/kernel/riscv64/asum_rvv.c
+++ b/kernel/riscv64/asum_rvv.c
@@ -28,33 +28,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m8(n) -#define VSETVL_MAX vsetvlmax_e32m8() -#define VSETVL_MAX_M1 vsetvlmax_e32m1() -#define FLOAT_V_T vfloat32m8_t -#define FLOAT_V_T_M1 vfloat32m1_t -#define VLEV_FLOAT vle32_v_f32m8 -#define VLSEV_FLOAT vlse32_v_f32m8 -#define VFMVVF_FLOAT vfmv_v_f_f32m8 -#define VFADDVV_FLOAT vfadd_vv_f32m8 -#define VFABSV_FLOAT vfabs_v_f32m8 -#define VFREDSUMVS_FLOAT vfredusum_vs_f32m8_f32m1 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32 +#define VSETVL(n) __riscv_vsetvl_e32m8(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m8() +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1() +#define FLOAT_V_T vfloat32m8_t +#define FLOAT_V_T_M1 vfloat32m1_t +#define VLEV_FLOAT __riscv_vle32_v_f32m8 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m8 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8 +#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m8 +#define VFABSV_FLOAT __riscv_vfabs_v_f32m8 +#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f32m8_f32m1 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32 #else -#define VSETVL(n) vsetvl_e64m8(n) -#define VSETVL_MAX vsetvlmax_e64m8() -#define VSETVL_MAX_M1 vsetvlmax_e64m1() -#define FLOAT_V_T vfloat64m8_t -#define FLOAT_V_T_M1 vfloat64m1_t -#define VLEV_FLOAT vle64_v_f64m8 -#define VLSEV_FLOAT vlse64_v_f64m8 -#define VFMVVF_FLOAT vfmv_v_f_f64m8 -#define VFADDVV_FLOAT vfadd_vv_f64m8 -#define VFABSV_FLOAT vfabs_v_f64m8 -#define VFREDSUMVS_FLOAT vfredusum_vs_f64m8_f64m1 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64 +#define VSETVL(n) __riscv_vsetvl_e64m8(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m8() +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1() +#define FLOAT_V_T vfloat64m8_t +#define FLOAT_V_T_M1 vfloat64m1_t +#define VLEV_FLOAT __riscv_vle64_v_f64m8 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m8 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8 +#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m8 +#define VFABSV_FLOAT __riscv_vfabs_v_f64m8 +#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f64m8_f64m1 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64 #endif FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) @@ -93,7 +93,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) } - v_res = VFREDSUMVS_FLOAT(v_res, vsum, v_res, vlmax); + v_res = VFREDSUMVS_FLOAT(vsum, v_res, vlmax); asumf = VFMVFS_FLOAT_M1(v_res); return(asumf); } diff --git a/kernel/riscv64/axpby_rvv.c b/kernel/riscv64/axpby_rvv.c index 7c35c563d..a1dbdb0e4 100644 --- a/kernel/riscv64/axpby_rvv.c +++ b/kernel/riscv64/axpby_rvv.c @@ -28,25 +28,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m8(n) -#define FLOAT_V_T vfloat32m8_t -#define VLEV_FLOAT vle32_v_f32m8 -#define VLSEV_FLOAT vlse32_v_f32m8 -#define VSEV_FLOAT vse32_v_f32m8 -#define VSSEV_FLOAT vsse32_v_f32m8 -#define VFMACCVF_FLOAT vfmacc_vf_f32m8 -#define VFMULVF_FLOAT vfmul_vf_f32m8 -#define VFMVVF_FLOAT vfmv_v_f_f32m8 +#define VSETVL(n) __riscv_vsetvl_e32m8(n) +#define FLOAT_V_T vfloat32m8_t +#define VLEV_FLOAT __riscv_vle32_v_f32m8 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m8 +#define VSEV_FLOAT __riscv_vse32_v_f32m8 +#define VSSEV_FLOAT __riscv_vsse32_v_f32m8 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m8 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m8 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8 #else -#define VSETVL(n) vsetvl_e64m8(n) -#define FLOAT_V_T vfloat64m8_t -#define VLEV_FLOAT vle64_v_f64m8 -#define VLSEV_FLOAT vlse64_v_f64m8 -#define VSEV_FLOAT vse64_v_f64m8 -#define VSSEV_FLOAT vsse64_v_f64m8 -#define VFMACCVF_FLOAT vfmacc_vf_f64m8 -#define VFMULVF_FLOAT vfmul_vf_f64m8 -#define VFMVVF_FLOAT vfmv_v_f_f64m8 +#define VSETVL(n) __riscv_vsetvl_e64m8(n) +#define FLOAT_V_T vfloat64m8_t +#define VLEV_FLOAT __riscv_vle64_v_f64m8 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m8 +#define VSEV_FLOAT __riscv_vse64_v_f64m8 +#define VSSEV_FLOAT __riscv_vsse64_v_f64m8 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m8 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m8 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8 #endif int CNAME(BLASLONG n, FLOAT alpha, FLOAT *x, BLASLONG inc_x, FLOAT beta, FLOAT *y, BLASLONG inc_y) diff --git a/kernel/riscv64/axpy_rvv.c b/kernel/riscv64/axpy_rvv.c index 3986f4e21..8bc2f30de 100644 --- a/kernel/riscv64/axpy_rvv.c +++ b/kernel/riscv64/axpy_rvv.c @@ -28,21 +28,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m8(n) -#define FLOAT_V_T vfloat32m8_t -#define VLEV_FLOAT vle32_v_f32m8 -#define VLSEV_FLOAT vlse32_v_f32m8 -#define VSEV_FLOAT vse32_v_f32m8 -#define VSSEV_FLOAT vsse32_v_f32m8 -#define VFMACCVF_FLOAT vfmacc_vf_f32m8 +#define VSETVL(n) __riscv_vsetvl_e32m8(n) +#define FLOAT_V_T vfloat32m8_t +#define VLEV_FLOAT __riscv_vle32_v_f32m8 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m8 +#define VSEV_FLOAT __riscv_vse32_v_f32m8 +#define VSSEV_FLOAT __riscv_vsse32_v_f32m8 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m8 #else -#define VSETVL(n) vsetvl_e64m8(n) -#define FLOAT_V_T vfloat64m8_t -#define VLEV_FLOAT vle64_v_f64m8 -#define VLSEV_FLOAT vlse64_v_f64m8 -#define VSEV_FLOAT vse64_v_f64m8 -#define VSSEV_FLOAT vsse64_v_f64m8 -#define VFMACCVF_FLOAT vfmacc_vf_f64m8 +#define VSETVL(n) __riscv_vsetvl_e64m8(n) +#define FLOAT_V_T vfloat64m8_t +#define VLEV_FLOAT __riscv_vle64_v_f64m8 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m8 +#define VSEV_FLOAT __riscv_vse64_v_f64m8 +#define VSSEV_FLOAT __riscv_vsse64_v_f64m8 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m8 #endif int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2) diff --git a/kernel/riscv64/copy_rvv.c b/kernel/riscv64/copy_rvv.c index 5d5a8bd04..041fd2dae 100644 --- a/kernel/riscv64/copy_rvv.c +++ b/kernel/riscv64/copy_rvv.c @@ -28,19 +28,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m8(n) -#define FLOAT_V_T vfloat32m8_t -#define VLEV_FLOAT vle32_v_f32m8 -#define VLSEV_FLOAT vlse32_v_f32m8 -#define VSEV_FLOAT vse32_v_f32m8 -#define VSSEV_FLOAT vsse32_v_f32m8 +#define VSETVL(n) __riscv_vsetvl_e32m8(n) +#define FLOAT_V_T vfloat32m8_t +#define VLEV_FLOAT __riscv_vle32_v_f32m8 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m8 +#define VSEV_FLOAT __riscv_vse32_v_f32m8 +#define VSSEV_FLOAT __riscv_vsse32_v_f32m8 #else -#define VSETVL(n) vsetvl_e64m8(n) -#define FLOAT_V_T vfloat64m8_t -#define VLEV_FLOAT vle64_v_f64m8 -#define VLSEV_FLOAT vlse64_v_f64m8 -#define VSEV_FLOAT vse64_v_f64m8 -#define VSSEV_FLOAT vsse64_v_f64m8 +#define VSETVL(n) __riscv_vsetvl_e64m8(n) +#define FLOAT_V_T vfloat64m8_t +#define VLEV_FLOAT __riscv_vle64_v_f64m8 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m8 +#define VSEV_FLOAT __riscv_vse64_v_f64m8 +#define VSSEV_FLOAT __riscv_vsse64_v_f64m8 #endif int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) diff --git a/kernel/riscv64/dot_rvv.c b/kernel/riscv64/dot_rvv.c index 60dcac2f5..3276695b6 100644 --- a/kernel/riscv64/dot_rvv.c +++ b/kernel/riscv64/dot_rvv.c @@ -37,24 +37,24 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) if ( n <= 0 ) return(dot); - size_t vlmax = vsetvlmax_e64m8(); - vfloat64m8_t vr = vfmv_v_f_f64m8(0, vlmax); + size_t vlmax = __riscv_vsetvlmax_e64m8(); + vfloat64m8_t vr = __riscv_vfmv_v_f_f64m8(0, vlmax); if(inc_x == 1 && inc_y == 1) { for (size_t vl; n > 0; n -= vl, x += vl, y += vl) { - vl = vsetvl_e64m8(n); + vl = __riscv_vsetvl_e64m8(n); #if !defined(DOUBLE) - vfloat32m4_t vx = vle32_v_f32m4(x, vl); - vfloat32m4_t vy = vle32_v_f32m4(y, vl); + vfloat32m4_t vx = __riscv_vle32_v_f32m4(x, vl); + vfloat32m4_t vy = __riscv_vle32_v_f32m4(y, vl); - vr = vfwmacc_vv_f64m8(vr, vx, vy, vl); + vr = __riscv_vfwmacc_vv_f64m8(vr, vx, vy, vl); #else - vfloat64m8_t vx = vle64_v_f64m8(x, vl); - vfloat64m8_t vy = vle64_v_f64m8(y, vl); + vfloat64m8_t vx = __riscv_vle64_v_f64m8(x, vl); + vfloat64m8_t vy = __riscv_vle64_v_f64m8(y, vl); - vr = vfmacc_vv_f64m8(vr, vx, vy, vl); + vr = __riscv_vfmacc_vv_f64m8(vr, vx, vy, vl); #endif } @@ -63,18 +63,18 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) BLASLONG stride_y = inc_y * sizeof(FLOAT); for (size_t vl; n > 0; n -= vl, x += vl, y += vl*inc_y) { - vl = vsetvl_e64m8(n); + vl = __riscv_vsetvl_e64m8(n); #if !defined(DOUBLE) - vfloat32m4_t vx = vle32_v_f32m4(x, vl); - vfloat32m4_t vy = vlse32_v_f32m4(y, stride_y, vl); + vfloat32m4_t vx = __riscv_vle32_v_f32m4(x, vl); + vfloat32m4_t vy = __riscv_vlse32_v_f32m4(y, stride_y, vl); - vr = vfwmacc_vv_f64m8(vr, vx, vy, vl); + vr = __riscv_vfwmacc_vv_f64m8(vr, vx, vy, vl); #else - vfloat64m8_t vx = vle64_v_f64m8(x, vl); - vfloat64m8_t vy = vlse64_v_f64m8(y, stride_y, vl); + vfloat64m8_t vx = __riscv_vle64_v_f64m8(x, vl); + vfloat64m8_t vy = __riscv_vlse64_v_f64m8(y, stride_y, vl); - vr = vfmacc_vv_f64m8(vr, vx, vy, vl); + vr = __riscv_vfmacc_vv_f64m8(vr, vx, vy, vl); #endif } } else if (1 == inc_y) { @@ -82,18 +82,18 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) BLASLONG stride_x = inc_x * sizeof(FLOAT); for (size_t vl; n > 0; n -= vl, x += vl*inc_x, y += vl) { - vl = vsetvl_e64m8(n); + vl = __riscv_vsetvl_e64m8(n); #if !defined(DOUBLE) - vfloat32m4_t vx = vlse32_v_f32m4(x, stride_x, vl); - vfloat32m4_t vy = vle32_v_f32m4(y, vl); + vfloat32m4_t vx = __riscv_vlse32_v_f32m4(x, stride_x, vl); 
+            vfloat32m4_t vy = __riscv_vle32_v_f32m4(y, vl);
 
-            vr = vfwmacc_vv_f64m8(vr, vx, vy, vl);
+            vr = __riscv_vfwmacc_vv_f64m8(vr, vx, vy, vl);
 #else
-            vfloat64m8_t vx = vlse64_v_f64m8(x, stride_x, vl);
-            vfloat64m8_t vy = vle64_v_f64m8(y, vl);
+            vfloat64m8_t vx = __riscv_vlse64_v_f64m8(x, stride_x, vl);
+            vfloat64m8_t vy = __riscv_vle64_v_f64m8(y, vl);
 
-            vr = vfmacc_vv_f64m8(vr, vx, vy, vl);
+            vr = __riscv_vfmacc_vv_f64m8(vr, vx, vy, vl);
 #endif
         }
     } else {
 
@@ -102,25 +102,25 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y)
         BLASLONG stride_x = inc_x * sizeof(FLOAT);
         BLASLONG stride_y = inc_y * sizeof(FLOAT);
 
         for (size_t vl; n > 0; n -= vl, x += vl*inc_x, y += vl*inc_y) {
-            vl = vsetvl_e64m8(n);
+            vl = __riscv_vsetvl_e64m8(n);
 
 #if !defined(DOUBLE)
-            vfloat32m4_t vx = vlse32_v_f32m4(x, stride_x, vl);
-            vfloat32m4_t vy = vlse32_v_f32m4(y, stride_y, vl);
+            vfloat32m4_t vx = __riscv_vlse32_v_f32m4(x, stride_x, vl);
+            vfloat32m4_t vy = __riscv_vlse32_v_f32m4(y, stride_y, vl);
 
-            vr = vfwmacc_vv_f64m8(vr, vx, vy, vl);
+            vr = __riscv_vfwmacc_vv_f64m8(vr, vx, vy, vl);
 #else
-            vfloat64m8_t vx = vlse64_v_f64m8(x, stride_x, vl);
-            vfloat64m8_t vy = vlse64_v_f64m8(y, stride_y, vl);
+            vfloat64m8_t vx = __riscv_vlse64_v_f64m8(x, stride_x, vl);
+            vfloat64m8_t vy = __riscv_vlse64_v_f64m8(y, stride_y, vl);
 
-            vr = vfmacc_vv_f64m8(vr, vx, vy, vl);
+            vr = __riscv_vfmacc_vv_f64m8(vr, vx, vy, vl);
 #endif
         }
     }
 
-    vfloat64m1_t vec_zero = vfmv_v_f_f64m1(0, vlmax);
-    vfloat64m1_t vec_sum = vfredusum_vs_f64m8_f64m1(vec_zero, vr, vec_zero, vlmax);
-    dot = vfmv_f_s_f64m1_f64(vec_sum);
+    vfloat64m1_t vec_zero = __riscv_vfmv_v_f_f64m1(0, vlmax);
+    vfloat64m1_t vec_sum = __riscv_vfredusum_vs_f64m8_f64m1(vr, vec_zero, vlmax);
+    dot = __riscv_vfmv_f_s_f64m1_f64(vec_sum);
 
     return(dot);
 }
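
Note: for single precision this dot kernel accumulates in double: `__riscv_vfwmacc_vv_f64m8` multiplies two f32m4 vectors and adds the widened products into an f64m8 accumulator (same vector length, doubled element width, so LMUL doubles from 4 to 8). A stand-alone sketch of the unit-stride path with an invented helper name; the kernel itself uses the plain vfwmacc form, while the `_tu` variant here makes lanes past vl explicitly undisturbed on the final short iteration:

    #include <riscv_vector.h>
    #include <stddef.h>

    /* Unit-stride sdot with f64 accumulation, v0.12-style intrinsics. */
    static double sdot_wide(size_t n, const float *x, const float *y)
    {
        size_t vlmax = __riscv_vsetvlmax_e64m8();
        vfloat64m8_t vr = __riscv_vfmv_v_f_f64m8(0.0, vlmax);
        for (size_t vl; n > 0; n -= vl, x += vl, y += vl) {
            vl = __riscv_vsetvl_e64m8(n);
            vfloat32m4_t vx = __riscv_vle32_v_f32m4(x, vl);
            vfloat32m4_t vy = __riscv_vle32_v_f32m4(y, vl);
            vr = __riscv_vfwmacc_vv_f64m8_tu(vr, vx, vy, vl);
        }
        vfloat64m1_t vz = __riscv_vfmv_v_f_f64m1(0.0, __riscv_vsetvlmax_e64m1());
        vz = __riscv_vfredusum_vs_f64m8_f64m1(vr, vz, vlmax);
        return __riscv_vfmv_f_s_f64m1_f64(vz);
    }
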
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m1(n) -#define FLOAT_V_T vfloat32m1_t -#define VLEV_FLOAT vle32_v_f32m1 -#define VSEV_FLOAT vse32_v_f32m1 -#define VSSEG2_FLOAT vsseg2e32_v_f32m1 -#define VSSEG4_FLOAT vsseg4e32_v_f32m1 -#define VSSEG8_FLOAT vsseg8e32_v_f32m1 +#define VSETVL(n) __riscv_vsetvl_e32m1(n) +#define FLOAT_V_T vfloat32m1_t +#define VLEV_FLOAT __riscv_vle32_v_f32m1 +#define VSEV_FLOAT __riscv_vse32_v_f32m1 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m1 +#define VSSEG4_FLOAT __riscv_vsseg4e32_v_f32m1 +#define VSSEG8_FLOAT __riscv_vsseg8e32_v_f32m1 #else -#define VSETVL(n) vsetvl_e64m1(n) -#define FLOAT_V_T vfloat64m1_t -#define VLEV_FLOAT vle64_v_f64m1 -#define VSEV_FLOAT vse64_v_f64m1 -#define VSSEG2_FLOAT vsseg2e64_v_f64m1 -#define VSSEG4_FLOAT vsseg4e64_v_f64m1 -#define VSSEG8_FLOAT vsseg8e64_v_f64m1 +#define VSETVL(n) __riscv_vsetvl_e64m1(n) +#define FLOAT_V_T vfloat64m1_t +#define VLEV_FLOAT __riscv_vle64_v_f64m1 +#define VSEV_FLOAT __riscv_vse64_v_f64m1 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m1 +#define VSSEG4_FLOAT __riscv_vsseg4e64_v_f64m1 +#define VSSEG8_FLOAT __riscv_vsseg8e64_v_f64m1 #endif // Optimizes the implementation in ../generic/gemm_ncopy_8.c diff --git a/kernel/riscv64/gemm_ncopy_rvv_v1.c b/kernel/riscv64/gemm_ncopy_rvv_v1.c index 2c5230752..2d6db15e5 100644 --- a/kernel/riscv64/gemm_ncopy_rvv_v1.c +++ b/kernel/riscv64/gemm_ncopy_rvv_v1.c @@ -28,17 +28,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VLSEV_FLOAT vlse32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VLSEV_FLOAT vlse64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 #endif int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b) diff --git a/kernel/riscv64/gemm_tcopy_8_rvv.c b/kernel/riscv64/gemm_tcopy_8_rvv.c index 81c1f962b..080a87312 100644 --- a/kernel/riscv64/gemm_tcopy_8_rvv.c +++ b/kernel/riscv64/gemm_tcopy_8_rvv.c @@ -28,29 +28,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m1(n) -#define FLOAT_V_T vfloat32m1_t -#define VLEV_FLOAT vle32_v_f32m1 -#define VLSEV_FLOAT vlse32_v_f32m1 -#define VSEV_FLOAT vse32_v_f32m1 -#define VLSSEG2_FLOAT vlsseg2e32_v_f32m1 -#define VSSEG2_FLOAT vsseg2e32_v_f32m1 -#define VLSSEG4_FLOAT vlsseg4e32_v_f32m1 -#define VSSEG4_FLOAT vsseg4e32_v_f32m1 -#define VLSSEG8_FLOAT vlsseg8e32_v_f32m1 -#define VSSEG8_FLOAT vsseg8e32_v_f32m1 +#define VSETVL(n) __riscv_vsetvl_e32m1(n) +#define FLOAT_V_T vfloat32m1_t +#define VLEV_FLOAT __riscv_vle32_v_f32m1 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m1 +#define VSEV_FLOAT __riscv_vse32_v_f32m1 +#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m1 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m1 +#define VLSSEG4_FLOAT __riscv_vlsseg4e32_v_f32m1 +#define VSSEG4_FLOAT __riscv_vsseg4e32_v_f32m1 +#define VLSSEG8_FLOAT __riscv_vlsseg8e32_v_f32m1 +#define VSSEG8_FLOAT __riscv_vsseg8e32_v_f32m1 #else -#define VSETVL(n) vsetvl_e64m1(n) -#define FLOAT_V_T vfloat64m1_t -#define VLEV_FLOAT vle64_v_f64m1 -#define VLSEV_FLOAT vlse64_v_f64m1 -#define VSEV_FLOAT vse64_v_f64m1 -#define VLSSEG2_FLOAT vlsseg2e64_v_f64m1 -#define VSSEG2_FLOAT vsseg2e64_v_f64m1 -#define VLSSEG4_FLOAT vlsseg4e64_v_f64m1 -#define VSSEG4_FLOAT vsseg4e64_v_f64m1 -#define VLSSEG8_FLOAT vlsseg8e64_v_f64m1 -#define VSSEG8_FLOAT vsseg8e64_v_f64m1 +#define VSETVL(n) __riscv_vsetvl_e64m1(n) +#define FLOAT_V_T vfloat64m1_t +#define VLEV_FLOAT __riscv_vle64_v_f64m1 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m1 +#define VSEV_FLOAT __riscv_vse64_v_f64m1 +#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m1 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m1 +#define VLSSEG4_FLOAT __riscv_vlsseg4e64_v_f64m1 +#define VSSEG4_FLOAT __riscv_vsseg4e64_v_f64m1 +#define VLSSEG8_FLOAT __riscv_vlsseg8e64_v_f64m1 +#define VSSEG8_FLOAT __riscv_vsseg8e64_v_f64m1 #endif int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) diff --git a/kernel/riscv64/gemm_tcopy_rvv_v1.c b/kernel/riscv64/gemm_tcopy_rvv_v1.c index a291b70b8..c5fb6479f 100644 --- a/kernel/riscv64/gemm_tcopy_rvv_v1.c +++ b/kernel/riscv64/gemm_tcopy_rvv_v1.c @@ -28,15 +28,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 #endif int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) diff --git a/kernel/riscv64/gemmkernel_rvv_v1x8.c b/kernel/riscv64/gemmkernel_rvv_v1x8.c index 5cd509f93..471b3158f 100644 --- a/kernel/riscv64/gemmkernel_rvv_v1x8.c +++ b/kernel/riscv64/gemmkernel_rvv_v1x8.c @@ -28,19 +28,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VFMVVF_FLOAT vfmv_v_f_f32m2 -#define VFMACCVF_FLOAT vfmacc_vf_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m2 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VFMVVF_FLOAT vfmv_v_f_f64m2 -#define VFMACCVF_FLOAT vfmacc_vf_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2 #endif int CNAME(BLASLONG bm, BLASLONG bn, BLASLONG bk, FLOAT alpha, IFLOAT* ba, IFLOAT* bb, FLOAT* C, BLASLONG ldc diff --git a/kernel/riscv64/gemv_n_rvv.c b/kernel/riscv64/gemv_n_rvv.c index 9d2dee615..1366eb5ad 100644 --- a/kernel/riscv64/gemv_n_rvv.c +++ b/kernel/riscv64/gemv_n_rvv.c @@ -28,21 +28,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m8(n) -#define FLOAT_V_T vfloat32m8_t -#define VLEV_FLOAT vle32_v_f32m8 -#define VLSEV_FLOAT vlse32_v_f32m8 -#define VSEV_FLOAT vse32_v_f32m8 -#define VSSEV_FLOAT vsse32_v_f32m8 -#define VFMACCVF_FLOAT vfmacc_vf_f32m8 +#define VSETVL(n) __riscv_vsetvl_e32m8(n) +#define FLOAT_V_T vfloat32m8_t +#define VLEV_FLOAT __riscv_vle32_v_f32m8 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m8 +#define VSEV_FLOAT __riscv_vse32_v_f32m8 +#define VSSEV_FLOAT __riscv_vsse32_v_f32m8 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m8 #else -#define VSETVL(n) vsetvl_e64m8(n) -#define FLOAT_V_T vfloat64m8_t -#define VLEV_FLOAT vle64_v_f64m8 -#define VLSEV_FLOAT vlse64_v_f64m8 -#define VSEV_FLOAT vse64_v_f64m8 -#define VSSEV_FLOAT vsse64_v_f64m8 -#define VFMACCVF_FLOAT vfmacc_vf_f64m8 +#define VSETVL(n) __riscv_vsetvl_e64m8(n) +#define FLOAT_V_T vfloat64m8_t +#define VLEV_FLOAT __riscv_vle64_v_f64m8 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m8 +#define VSEV_FLOAT __riscv_vse64_v_f64m8 +#define VSSEV_FLOAT __riscv_vsse64_v_f64m8 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m8 #endif int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer) diff --git a/kernel/riscv64/gemv_t_rvv.c b/kernel/riscv64/gemv_t_rvv.c index a80af81b6..f0c834866 100644 --- a/kernel/riscv64/gemv_t_rvv.c +++ b/kernel/riscv64/gemv_t_rvv.c @@ -28,31 +28,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m8(n) -#define VSETVL_MAX vsetvlmax_e32m8() -#define VSETVL_MAX_M1 vsetvlmax_e32m1() -#define FLOAT_V_T vfloat32m8_t -#define FLOAT_V_T_M1 vfloat32m1_t -#define VLEV_FLOAT vle32_v_f32m8 -#define VLSEV_FLOAT vlse32_v_f32m8 -#define VFREDSUM_FLOAT vfredusum_vs_f32m8_f32m1 -#define VFMACCVV_FLOAT vfmacc_vv_f32m8 -#define VFMVVF_FLOAT vfmv_v_f_f32m8 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32 +#define VSETVL(n) __riscv_vsetvl_e32m8(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m8() +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1() +#define FLOAT_V_T vfloat32m8_t +#define FLOAT_V_T_M1 vfloat32m1_t +#define VLEV_FLOAT __riscv_vle32_v_f32m8 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m8 +#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m8_f32m1 +#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m8 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32 #else -#define VSETVL(n) vsetvl_e64m8(n) -#define VSETVL_MAX vsetvlmax_e64m8() -#define VSETVL_MAX_M1 vsetvlmax_e64m1() -#define FLOAT_V_T vfloat64m8_t -#define FLOAT_V_T_M1 vfloat64m1_t -#define VLEV_FLOAT vle64_v_f64m8 -#define VLSEV_FLOAT vlse64_v_f64m8 -#define VFREDSUM_FLOAT vfredusum_vs_f64m8_f64m1 -#define VFMACCVV_FLOAT vfmacc_vv_f64m8 -#define VFMVVF_FLOAT vfmv_v_f_f64m8 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64 +#define VSETVL(n) __riscv_vsetvl_e64m8(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m8() +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1() +#define FLOAT_V_T vfloat64m8_t +#define FLOAT_V_T_M1 vfloat64m1_t +#define VLEV_FLOAT __riscv_vle64_v_f64m8 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m8 +#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m8_f64m1 +#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m8 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64 #endif int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer) @@ -63,7 +63,6 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO FLOAT_V_T va, vx, vr; FLOAT_V_T_M1 v_res, v_z0; size_t vlmax = VSETVL_MAX_M1; - v_res = VFMVVF_FLOAT_M1(0, vlmax); v_z0 = VFMVVF_FLOAT_M1(0, vlmax); vlmax = VSETVL_MAX; @@ -83,7 +82,7 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO vr = VFMACCVV_FLOAT(vr, va, vx, vl); } - v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vlmax); + v_res = VFREDSUM_FLOAT(vr, v_z0, vlmax); *y += alpha * VFMVFS_FLOAT_M1(v_res); y += inc_y; a += lda; @@ -107,7 +106,7 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO vr = VFMACCVV_FLOAT(vr, va, vx, vl); } - v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vlmax); + v_res = VFREDSUM_FLOAT(vr, v_z0, vlmax); *y += alpha * VFMVFS_FLOAT_M1(v_res); y += inc_y; a += lda; diff --git a/kernel/riscv64/iamax_rvv.c b/kernel/riscv64/iamax_rvv.c index 8b33b3bcb..ef7850a55 100644 --- a/kernel/riscv64/iamax_rvv.c +++ b/kernel/riscv64/iamax_rvv.c @@ -28,57 +28,57 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if defined(DOUBLE) -#define VSETVL(n) vsetvl_e64m8(n) -#define VSETVL_MAX vsetvlmax_e64m8() -#define FLOAT_V_T vfloat64m8_t -#define FLOAT_V_T_M1 vfloat64m1_t -#define VLEV_FLOAT vle64_v_f64m8 -#define VLSEV_FLOAT vlse64_v_f64m8 -#define VFREDMAXVS_FLOAT vfredmax_vs_f64m8_f64m1 -#define MASK_T vbool8_t -#define VMFLTVF_FLOAT vmflt_vf_f64m8_b8 -#define VMFLTVV_FLOAT vmflt_vv_f64m8_b8 -#define VMFGEVF_FLOAT vmfge_vf_f64m8_b8 -#define VFMVVF_FLOAT vfmv_v_f_f64m8 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1 -#define VFABSV_FLOAT vfabs_v_f64m8 -#define VFMAXVV_FLOAT vfmax_vv_f64m8 -#define VFIRSTM vfirst_m_b8 -#define UINT_V_T vuint64m8_t -#define VIDV_MASK_UINT vid_v_u64m8_m -#define VIDV_UINT vid_v_u64m8 -#define VADDVX_MASK_UINT vadd_vx_u64m8_m -#define VADDVX_UINT vadd_vx_u64m8 -#define VMVVX_UINT vmv_v_x_u64m8 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64 -#define VSLIDEDOWN_UINT vslidedown_vx_u64m8 -#define VMVVXS_UINT vmv_x_s_u64m8_u64 +#define VSETVL(n) __riscv_vsetvl_e64m8(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m8() +#define FLOAT_V_T vfloat64m8_t +#define FLOAT_V_T_M1 vfloat64m1_t +#define VLEV_FLOAT __riscv_vle64_v_f64m8 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m8 +#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f64m8_f64m1 +#define MASK_T vbool8_t +#define VMFLTVF_FLOAT __riscv_vmflt_vf_f64m8_b8 +#define VMFLTVV_FLOAT __riscv_vmflt_vv_f64m8_b8 +#define VMFGEVF_FLOAT __riscv_vmfge_vf_f64m8_b8 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1 +#define VFABSV_FLOAT __riscv_vfabs_v_f64m8 +#define VFMAXVV_FLOAT __riscv_vfmax_vv_f64m8 +#define VFIRSTM __riscv_vfirst_m_b8 +#define UINT_V_T vuint64m8_t +#define VIDV_MASK_UINT __riscv_vid_v_u64m8_m +#define VIDV_UINT __riscv_vid_v_u64m8 +#define VADDVX_MASK_UINT __riscv_vadd_vx_u64m8_m +#define VADDVX_UINT __riscv_vadd_vx_u64m8 +#define VMVVX_UINT __riscv_vmv_v_x_u64m8 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64 +#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u64m8 +#define VMVVXS_UINT __riscv_vmv_x_s_u64m8_u64 #else -#define VSETVL(n) vsetvl_e32m8(n) -#define VSETVL_MAX vsetvlmax_e32m8() -#define FLOAT_V_T vfloat32m8_t -#define FLOAT_V_T_M1 vfloat32m1_t -#define VLEV_FLOAT vle32_v_f32m8 -#define VLSEV_FLOAT vlse32_v_f32m8 -#define VFREDMAXVS_FLOAT vfredmax_vs_f32m8_f32m1 -#define MASK_T vbool4_t -#define VMFLTVF_FLOAT vmflt_vf_f32m8_b4 -#define VMFLTVV_FLOAT vmflt_vv_f32m8_b4 -#define VMFGEVF_FLOAT vmfge_vf_f32m8_b4 -#define VFMVVF_FLOAT vfmv_v_f_f32m8 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1 -#define VFABSV_FLOAT vfabs_v_f32m8 -#define VFMAXVV_FLOAT vfmax_vv_f32m8 -#define VFIRSTM vfirst_m_b4 -#define UINT_V_T vuint32m8_t -#define VIDV_MASK_UINT vid_v_u32m8_m -#define VIDV_UINT vid_v_u32m8 -#define VADDVX_MASK_UINT vadd_vx_u32m8_m -#define VADDVX_UINT vadd_vx_u32m8 -#define VMVVX_UINT vmv_v_x_u32m8 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32 -#define VSLIDEDOWN_UINT vslidedown_vx_u32m8 -#define VMVVXS_UINT vmv_x_s_u32m8_u32 +#define VSETVL(n) __riscv_vsetvl_e32m8(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m8() +#define FLOAT_V_T vfloat32m8_t +#define FLOAT_V_T_M1 vfloat32m1_t +#define VLEV_FLOAT __riscv_vle32_v_f32m8 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m8 +#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f32m8_f32m1 +#define MASK_T vbool4_t +#define VMFLTVF_FLOAT __riscv_vmflt_vf_f32m8_b4 +#define VMFLTVV_FLOAT __riscv_vmflt_vv_f32m8_b4 +#define VMFGEVF_FLOAT __riscv_vmfge_vf_f32m8_b4 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8 +#define VFMVVF_FLOAT_M1 
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
+#define VFABSV_FLOAT __riscv_vfabs_v_f32m8
+#define VFMAXVV_FLOAT __riscv_vfmax_vv_f32m8
+#define VFIRSTM __riscv_vfirst_m_b4
+#define UINT_V_T vuint32m8_t
+#define VIDV_MASK_UINT __riscv_vid_v_u32m8_m
+#define VIDV_UINT __riscv_vid_v_u32m8
+#define VADDVX_MASK_UINT __riscv_vadd_vx_u32m8_m
+#define VADDVX_UINT __riscv_vadd_vx_u32m8
+#define VMVVX_UINT __riscv_vmv_v_x_u32m8
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
+#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u32m8
+#define VMVVXS_UINT __riscv_vmv_x_s_u32m8_u32
 #endif
 
 BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -106,8 +106,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
 
             //index where element greater than v_max
             mask = VMFLTVV_FLOAT(v_max, vx, vl);
-            v_max_index = VIDV_MASK_UINT(mask, v_max_index, vl);
-            v_max_index = VADDVX_MASK_UINT(mask, v_max_index, v_max_index, j, vl);
+            v_max_index = VIDV_MASK_UINT(mask, vl);
+            v_max_index = VADDVX_MASK_UINT(mask, v_max_index, j, vl);
 
             //update v_max
             v_max = VFMAXVV_FLOAT(v_max, vx, vl);
@@ -125,8 +125,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
 
             //index where element greater than v_max
            mask = VMFLTVV_FLOAT(v_max, vx, vl);
-            v_max_index = VIDV_MASK_UINT(mask, v_max_index, vl);
-            v_max_index = VADDVX_MASK_UINT(mask, v_max_index, v_max_index, j, vl);
+            v_max_index = VIDV_MASK_UINT(mask, vl);
+            v_max_index = VADDVX_MASK_UINT(mask, v_max_index, j, vl);
 
             //update v_max
             v_max = VFMAXVV_FLOAT(v_max, vx, vl);
@@ -134,16 +134,15 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
 
     }
 
-    FLOAT_V_T_M1 v_res, v_z0;
+    FLOAT_V_T_M1 v_res;
     v_res = VFMVVF_FLOAT_M1(0, vlmax);
-    v_z0 = VFMVVF_FLOAT_M1(0, vlmax);
 
-    v_res = VFREDMAXVS_FLOAT(v_res, v_max, v_z0, vlmax);
+    v_res = VFREDMAXVS_FLOAT(v_max, v_res, vlmax);
    maxf = VFMVFS_FLOAT_M1(v_res);
    mask = VMFGEVF_FLOAT(v_max, maxf, vlmax);
    max_index = VFIRSTM(mask, vlmax);
 
-    v_max_index = VSLIDEDOWN_UINT(v_max_index, v_max_index, max_index, vlmax);
+    v_max_index = VSLIDEDOWN_UINT(v_max_index, max_index, vlmax);
    max_index = VMVVXS_UINT(v_max_index);
 
    return(max_index+1);
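
Note: in the index kernels (iamax/iamin/imax/imin and the complex variants below), the masked index bookkeeping changes shape as well: the old masked constructors took an explicit masked-off operand (`vid_v_u32m8_m(mask, maskedoff, vl)`), while the plain `_m` forms in v0.12 take only the mask and vl; separate `_mu` (mask-undisturbed) variants still accept a masked-off vector where inactive lanes must be preserved. A stand-alone sketch of one strip of the argmax bookkeeping, with an invented helper name:

    #include <riscv_vector.h>
    #include <stddef.h>
    #include <stdint.h>

    /* One strip of iamax-style index tracking (hypothetical helper, f32/m8).
     * Lanes where this strip's |x| beats the running max get their global
     * index (strip offset j plus lane id); the running max updates last. */
    static inline void argmax_strip(vfloat32m8_t *v_max, vuint32m8_t *v_idx,
                                    const float *x, uint32_t j, size_t vl)
    {
        vfloat32m8_t vx = __riscv_vfabs_v_f32m8(__riscv_vle32_v_f32m8(x, vl), vl);
        vbool4_t mask = __riscv_vmflt_vv_f32m8_b4(*v_max, vx, vl);
        /* Old API passed *v_idx as maskedoff here; the _m form drops it. */
        vuint32m8_t vid = __riscv_vid_v_u32m8_m(mask, vl);
        *v_idx = __riscv_vadd_vx_u32m8_m(mask, vid, j, vl);
        *v_max = __riscv_vfmax_vv_f32m8(*v_max, vx, vl);
    }
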
diff --git a/kernel/riscv64/iamin_rvv.c b/kernel/riscv64/iamin_rvv.c
index 585b37186..56a086fed 100644
--- a/kernel/riscv64/iamin_rvv.c
+++ b/kernel/riscv64/iamin_rvv.c
@@ -29,57 +29,57 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <float.h>
 
 #if defined(DOUBLE)
-#define VSETVL(n) vsetvl_e64m8(n)
-#define VSETVL_MAX vsetvlmax_e64m8()
-#define FLOAT_V_T vfloat64m8_t
-#define FLOAT_V_T_M1 vfloat64m1_t
-#define VLEV_FLOAT vle64_v_f64m8
-#define VLSEV_FLOAT vlse64_v_f64m8
-#define VFREDMINVS_FLOAT vfredmin_vs_f64m8_f64m1
-#define MASK_T vbool8_t
-#define VMFLTVF_FLOAT vmflt_vf_f64m8_b8
-#define VMFLTVV_FLOAT vmflt_vv_f64m8_b8
-#define VMFLEVF_FLOAT vmfle_vf_f64m8_b8
-#define VFMVVF_FLOAT vfmv_v_f_f64m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
-#define VFABSV_FLOAT vfabs_v_f64m8
-#define VFMINVV_FLOAT vfmin_vv_f64m8
-#define VFIRSTM vfirst_m_b8
-#define UINT_V_T vuint64m8_t
-#define VIDV_MASK_UINT vid_v_u64m8_m
-#define VIDV_UINT vid_v_u64m8
-#define VADDVX_MASK_UINT vadd_vx_u64m8_m
-#define VADDVX_UINT vadd_vx_u64m8
-#define VMVVX_UINT vmv_v_x_u64m8
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
-#define VSLIDEDOWN_UINT vslidedown_vx_u64m8
-#define VMVVXS_UINT vmv_x_s_u64m8_u64
+#define VSETVL(n) __riscv_vsetvl_e64m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
+#define FLOAT_V_T vfloat64m8_t
+#define FLOAT_V_T_M1 vfloat64m1_t
+#define VLEV_FLOAT __riscv_vle64_v_f64m8
+#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
+#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f64m8_f64m1
+#define MASK_T vbool8_t
+#define VMFLTVF_FLOAT __riscv_vmflt_vf_f64m8_b8
+#define VMFLTVV_FLOAT __riscv_vmflt_vv_f64m8_b8
+#define VMFLEVF_FLOAT __riscv_vmfle_vf_f64m8_b8
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
+#define VFABSV_FLOAT __riscv_vfabs_v_f64m8
+#define VFMINVV_FLOAT __riscv_vfmin_vv_f64m8
+#define VFIRSTM __riscv_vfirst_m_b8
+#define UINT_V_T vuint64m8_t
+#define VIDV_MASK_UINT __riscv_vid_v_u64m8_m
+#define VIDV_UINT __riscv_vid_v_u64m8
+#define VADDVX_MASK_UINT __riscv_vadd_vx_u64m8_m
+#define VADDVX_UINT __riscv_vadd_vx_u64m8
+#define VMVVX_UINT __riscv_vmv_v_x_u64m8
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
+#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u64m8
+#define VMVVXS_UINT __riscv_vmv_x_s_u64m8_u64
 #else
-#define VSETVL(n) vsetvl_e32m8(n)
-#define VSETVL_MAX vsetvlmax_e32m8()
-#define FLOAT_V_T vfloat32m8_t
-#define FLOAT_V_T_M1 vfloat32m1_t
-#define VLEV_FLOAT vle32_v_f32m8
-#define VLSEV_FLOAT vlse32_v_f32m8
-#define VFREDMINVS_FLOAT vfredmin_vs_f32m8_f32m1
-#define MASK_T vbool4_t
-#define VMFLTVF_FLOAT vmflt_vf_f32m8_b4
-#define VMFLTVV_FLOAT vmflt_vv_f32m8_b4
-#define VMFLEVF_FLOAT vmfle_vf_f32m8_b4
-#define VFMVVF_FLOAT vfmv_v_f_f32m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
-#define VFABSV_FLOAT vfabs_v_f32m8
-#define VFMINVV_FLOAT vfmin_vv_f32m8
-#define VFIRSTM vfirst_m_b4
-#define UINT_V_T vuint32m8_t
-#define VIDV_MASK_UINT vid_v_u32m8_m
-#define VIDV_UINT vid_v_u32m8
-#define VADDVX_MASK_UINT vadd_vx_u32m8_m
-#define VADDVX_UINT vadd_vx_u32m8
-#define VMVVX_UINT vmv_v_x_u32m8
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
-#define VSLIDEDOWN_UINT vslidedown_vx_u32m8
-#define VMVVXS_UINT vmv_x_s_u32m8_u32
+#define VSETVL(n) __riscv_vsetvl_e32m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
+#define FLOAT_V_T vfloat32m8_t
+#define FLOAT_V_T_M1 vfloat32m1_t
+#define VLEV_FLOAT __riscv_vle32_v_f32m8
+#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
+#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f32m8_f32m1
+#define MASK_T vbool4_t
+#define VMFLTVF_FLOAT __riscv_vmflt_vf_f32m8_b4
+#define VMFLTVV_FLOAT __riscv_vmflt_vv_f32m8_b4
+#define VMFLEVF_FLOAT __riscv_vmfle_vf_f32m8_b4
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
+#define VFABSV_FLOAT __riscv_vfabs_v_f32m8
+#define VFMINVV_FLOAT __riscv_vfmin_vv_f32m8
+#define VFIRSTM __riscv_vfirst_m_b4
+#define UINT_V_T vuint32m8_t
+#define VIDV_MASK_UINT __riscv_vid_v_u32m8_m
+#define VIDV_UINT __riscv_vid_v_u32m8
+#define VADDVX_MASK_UINT __riscv_vadd_vx_u32m8_m
+#define VADDVX_UINT __riscv_vadd_vx_u32m8
+#define VMVVX_UINT __riscv_vmv_v_x_u32m8
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
+#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u32m8
+#define VMVVXS_UINT __riscv_vmv_x_s_u32m8_u32
 #endif
 
 BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -107,8 +107,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
 
             // index where element less than v_min
            mask = VMFLTVV_FLOAT(vx, v_min, vl);
-            v_min_index = VIDV_MASK_UINT(mask, v_min_index, vl);
-            v_min_index = VADDVX_MASK_UINT(mask, v_min_index, v_min_index, j, vl);
+            v_min_index = VIDV_MASK_UINT(mask, vl);
+            v_min_index = VADDVX_MASK_UINT(mask, v_min_index, j, vl);
 
             //update v_min and start_index j
            v_min = VFMINVV_FLOAT(v_min, vx, vl);
@@ -126,8 +126,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
 
             // index where element less than v_min
            mask = VMFLTVV_FLOAT(vx, v_min, vl);
-            v_min_index = VIDV_MASK_UINT(mask, v_min_index, vl);
-            v_min_index = VADDVX_MASK_UINT(mask, v_min_index, v_min_index, j, vl);
+            v_min_index = VIDV_MASK_UINT(mask, vl);
+            v_min_index = VADDVX_MASK_UINT(mask, v_min_index, j, vl);
 
             //update v_min and start_index j
            v_min = VFMINVV_FLOAT(v_min, vx, vl);
@@ -135,16 +135,15 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
 
    }
 
-    FLOAT_V_T_M1 v_res, v_max;
-    v_res = VFMVVF_FLOAT_M1(0, vlmax);
-    v_max = VFMVVF_FLOAT_M1(FLT_MAX, vlmax);
+    FLOAT_V_T_M1 v_res;
+    v_res = VFMVVF_FLOAT_M1(FLT_MAX, vlmax);
 
-    v_res = VFREDMINVS_FLOAT(v_res, v_min, v_max, vlmax);
+    v_res = VFREDMINVS_FLOAT(v_min, v_res, vlmax);
    minf = VFMVFS_FLOAT_M1(v_res);
    mask = VMFLEVF_FLOAT(v_min, minf, vlmax);
    min_index = VFIRSTM(mask, vlmax);
 
-    v_min_index = VSLIDEDOWN_UINT(v_min_index, v_min_index, min_index, vlmax);
+    v_min_index = VSLIDEDOWN_UINT(v_min_index, min_index, vlmax);
    min_index = VMVVXS_UINT(v_min_index);
 
    return(min_index+1);
diff --git a/kernel/riscv64/imax_rvv.c b/kernel/riscv64/imax_rvv.c
index d84ad968e..5b60a56f7 100644
--- a/kernel/riscv64/imax_rvv.c
+++ b/kernel/riscv64/imax_rvv.c
@@ -29,55 +29,55 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <float.h>
 
 #if defined(DOUBLE)
-#define VSETVL(n) vsetvl_e64m8(n)
-#define VSETVL_MAX vsetvlmax_e64m8()
-#define FLOAT_V_T vfloat64m8_t
-#define FLOAT_V_T_M1 vfloat64m1_t
-#define VLEV_FLOAT vle64_v_f64m8
-#define VLSEV_FLOAT vlse64_v_f64m8
-#define VFREDMAXVS_FLOAT vfredmax_vs_f64m8_f64m1
-#define MASK_T vbool8_t
-#define VMFLTVF_FLOAT vmflt_vf_f64m8_b8
-#define VMFLTVV_FLOAT vmflt_vv_f64m8_b8
-#define VMFGEVF_FLOAT vmfge_vf_f64m8_b8
-#define VFMVVF_FLOAT vfmv_v_f_f64m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
-#define VFMAXVV_FLOAT vfmax_vv_f64m8
-#define VFIRSTM vfirst_m_b8
-#define UINT_V_T vuint64m8_t
-#define VIDV_MASK_UINT vid_v_u64m8_m
-#define VIDV_UINT vid_v_u64m8
-#define VADDVX_MASK_UINT vadd_vx_u64m8_m
-#define VADDVX_UINT vadd_vx_u64m8
-#define VMVVX_UINT vmv_v_x_u64m8
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
-#define VSLIDEDOWN_UINT vslidedown_vx_u64m8
-#define VMVVXS_UINT vmv_x_s_u64m8_u64
+#define VSETVL(n) __riscv_vsetvl_e64m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
+#define FLOAT_V_T vfloat64m8_t
+#define FLOAT_V_T_M1 vfloat64m1_t
+#define VLEV_FLOAT __riscv_vle64_v_f64m8
+#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
+#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f64m8_f64m1
+#define MASK_T vbool8_t
+#define VMFLTVF_FLOAT __riscv_vmflt_vf_f64m8_b8
+#define VMFLTVV_FLOAT __riscv_vmflt_vv_f64m8_b8
+#define VMFGEVF_FLOAT __riscv_vmfge_vf_f64m8_b8
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
+#define VFMAXVV_FLOAT __riscv_vfmax_vv_f64m8
+#define VFIRSTM __riscv_vfirst_m_b8
+#define UINT_V_T vuint64m8_t
+#define VIDV_MASK_UINT __riscv_vid_v_u64m8_m
+#define VIDV_UINT __riscv_vid_v_u64m8
+#define VADDVX_MASK_UINT __riscv_vadd_vx_u64m8_m
+#define VADDVX_UINT __riscv_vadd_vx_u64m8
+#define VMVVX_UINT __riscv_vmv_v_x_u64m8
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
+#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u64m8
+#define VMVVXS_UINT __riscv_vmv_x_s_u64m8_u64
 #else
-#define VSETVL(n) vsetvl_e32m8(n)
-#define VSETVL_MAX vsetvlmax_e32m8()
-#define FLOAT_V_T vfloat32m8_t
-#define FLOAT_V_T_M1 vfloat32m1_t
-#define VLEV_FLOAT vle32_v_f32m8
-#define VLSEV_FLOAT vlse32_v_f32m8
-#define VFREDMAXVS_FLOAT vfredmax_vs_f32m8_f32m1
-#define MASK_T vbool4_t
-#define VMFLTVF_FLOAT vmflt_vf_f32m8_b4
-#define VMFLTVV_FLOAT vmflt_vv_f32m8_b4
-#define VMFGEVF_FLOAT vmfge_vf_f32m8_b4
-#define VFMVVF_FLOAT vfmv_v_f_f32m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
-#define VFMAXVV_FLOAT vfmax_vv_f32m8
-#define VFIRSTM vfirst_m_b4
-#define UINT_V_T vuint32m8_t
-#define VIDV_MASK_UINT vid_v_u32m8_m
-#define VIDV_UINT vid_v_u32m8
-#define VADDVX_MASK_UINT vadd_vx_u32m8_m
-#define VADDVX_UINT vadd_vx_u32m8
-#define VMVVX_UINT vmv_v_x_u32m8
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
-#define VSLIDEDOWN_UINT vslidedown_vx_u32m8
-#define VMVVXS_UINT vmv_x_s_u32m8_u32
+#define VSETVL(n) __riscv_vsetvl_e32m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
+#define FLOAT_V_T vfloat32m8_t
+#define FLOAT_V_T_M1 vfloat32m1_t
+#define VLEV_FLOAT __riscv_vle32_v_f32m8
+#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
+#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f32m8_f32m1
+#define MASK_T vbool4_t
+#define VMFLTVF_FLOAT __riscv_vmflt_vf_f32m8_b4
+#define VMFLTVV_FLOAT __riscv_vmflt_vv_f32m8_b4
+#define VMFGEVF_FLOAT __riscv_vmfge_vf_f32m8_b4
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
+#define VFMAXVV_FLOAT __riscv_vfmax_vv_f32m8
+#define VFIRSTM __riscv_vfirst_m_b4
+#define UINT_V_T vuint32m8_t
+#define VIDV_MASK_UINT __riscv_vid_v_u32m8_m
+#define VIDV_UINT __riscv_vid_v_u32m8
+#define VADDVX_MASK_UINT __riscv_vadd_vx_u32m8_m
+#define VADDVX_UINT __riscv_vadd_vx_u32m8
+#define VMVVX_UINT __riscv_vmv_v_x_u32m8
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
+#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u32m8
+#define VMVVXS_UINT __riscv_vmv_x_s_u32m8_u32
 #endif
 
 BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -104,8 +104,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
 
             //index where element greater than v_max
            mask = VMFLTVV_FLOAT(v_max, vx, vl);
-            v_max_index = VIDV_MASK_UINT(mask, v_max_index, vl);
-            v_max_index = VADDVX_MASK_UINT(mask, v_max_index, v_max_index, j, vl);
+            v_max_index = VIDV_MASK_UINT(mask, vl);
+            v_max_index = VADDVX_MASK_UINT(mask, v_max_index, j, vl);
 
             //update v_max and start_index j
            v_max = VFMAXVV_FLOAT(v_max, vx, vl);
@@ -122,8 +122,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
 
             //index where element greater than v_max
            mask = VMFLTVV_FLOAT(v_max, vx, vl);
-            v_max_index = VIDV_MASK_UINT(mask, v_max_index, vl);
-            v_max_index = VADDVX_MASK_UINT(mask, v_max_index, v_max_index, j, vl);
+            v_max_index = VIDV_MASK_UINT(mask, vl);
+            v_max_index = VADDVX_MASK_UINT(mask, v_max_index, j, vl);
 
             //update v_max and start_index j
            v_max = VFMAXVV_FLOAT(v_max, vx, vl);
@@ -131,16 +131,15 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
 
    }
 
-    FLOAT_V_T_M1 v_res, v_min;
-    v_res = VFMVVF_FLOAT_M1(0, vlmax);
-    v_min = VFMVVF_FLOAT_M1(-FLT_MAX, vlmax);
+    FLOAT_V_T_M1 v_res;
+    v_res = VFMVVF_FLOAT_M1(-FLT_MAX, vlmax);
 
-    v_res = VFREDMAXVS_FLOAT(v_res, v_max, v_min, vlmax);
+    v_res = VFREDMAXVS_FLOAT(v_max, v_res, vlmax);
    maxf = VFMVFS_FLOAT_M1(v_res);
    mask = VMFGEVF_FLOAT(v_max, maxf, vlmax);
    max_index = VFIRSTM(mask, vlmax);
 
-    v_max_index = VSLIDEDOWN_UINT(v_max_index, v_max_index, max_index, vlmax);
+    v_max_index = VSLIDEDOWN_UINT(v_max_index, max_index, vlmax);
    max_index = VMVVXS_UINT(v_max_index);
 
    return(max_index+1);
diff --git a/kernel/riscv64/imin_rvv.c b/kernel/riscv64/imin_rvv.c
index fb734f6f8..b49544a1b 100644
--- a/kernel/riscv64/imin_rvv.c
+++ b/kernel/riscv64/imin_rvv.c
@@ -29,55 +29,55 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <float.h>
 
 #if defined(DOUBLE)
-#define VSETVL(n) vsetvl_e64m8(n)
-#define VSETVL_MAX vsetvlmax_e64m8()
-#define FLOAT_V_T vfloat64m8_t
-#define FLOAT_V_T_M1 vfloat64m1_t
-#define VLEV_FLOAT vle64_v_f64m8
-#define VLSEV_FLOAT vlse64_v_f64m8
-#define VFREDMINVS_FLOAT vfredmin_vs_f64m8_f64m1
-#define MASK_T vbool8_t
-#define VMFLTVF_FLOAT vmflt_vf_f64m8_b8
-#define VMFLTVV_FLOAT vmflt_vv_f64m8_b8
-#define VMFLEVF_FLOAT vmfle_vf_f64m8_b8
-#define VFMVVF_FLOAT vfmv_v_f_f64m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
-#define VFMINVV_FLOAT vfmin_vv_f64m8
-#define VFIRSTM vfirst_m_b8
-#define UINT_V_T vuint64m8_t
-#define VIDV_MASK_UINT vid_v_u64m8_m
-#define VIDV_UINT vid_v_u64m8
-#define VADDVX_MASK_UINT vadd_vx_u64m8_m
-#define VADDVX_UINT vadd_vx_u64m8
-#define VMVVX_UINT vmv_v_x_u64m8
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
-#define VSLIDEDOWN_UINT vslidedown_vx_u64m8
-#define VMVVXS_UINT vmv_x_s_u64m8_u64
+#define VSETVL(n) __riscv_vsetvl_e64m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
+#define FLOAT_V_T vfloat64m8_t
+#define FLOAT_V_T_M1 vfloat64m1_t
+#define VLEV_FLOAT __riscv_vle64_v_f64m8
+#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
+#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f64m8_f64m1
+#define MASK_T vbool8_t
+#define VMFLTVF_FLOAT __riscv_vmflt_vf_f64m8_b8
+#define VMFLTVV_FLOAT __riscv_vmflt_vv_f64m8_b8
+#define VMFLEVF_FLOAT __riscv_vmfle_vf_f64m8_b8
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
+#define VFMINVV_FLOAT __riscv_vfmin_vv_f64m8
+#define VFIRSTM __riscv_vfirst_m_b8
+#define UINT_V_T vuint64m8_t
+#define VIDV_MASK_UINT __riscv_vid_v_u64m8_m
+#define VIDV_UINT __riscv_vid_v_u64m8
+#define VADDVX_MASK_UINT __riscv_vadd_vx_u64m8_m
+#define VADDVX_UINT __riscv_vadd_vx_u64m8
+#define VMVVX_UINT __riscv_vmv_v_x_u64m8
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
+#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u64m8
+#define VMVVXS_UINT __riscv_vmv_x_s_u64m8_u64
 #else
-#define VSETVL(n) vsetvl_e32m8(n)
-#define VSETVL_MAX vsetvlmax_e32m8()
-#define FLOAT_V_T vfloat32m8_t
-#define FLOAT_V_T_M1 vfloat32m1_t
-#define VLEV_FLOAT vle32_v_f32m8
-#define VLSEV_FLOAT vlse32_v_f32m8
-#define VFREDMINVS_FLOAT vfredmin_vs_f32m8_f32m1
-#define MASK_T vbool4_t
-#define VMFLTVF_FLOAT vmflt_vf_f32m8_b4
-#define VMFLTVV_FLOAT vmflt_vv_f32m8_b4
-#define VMFLEVF_FLOAT vmfle_vf_f32m8_b4
-#define VFMVVF_FLOAT vfmv_v_f_f32m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
-#define VFMINVV_FLOAT vfmin_vv_f32m8
-#define VFIRSTM vfirst_m_b4
-#define UINT_V_T vuint32m8_t
-#define VIDV_MASK_UINT vid_v_u32m8_m
-#define VIDV_UINT vid_v_u32m8
-#define VADDVX_MASK_UINT vadd_vx_u32m8_m
-#define VADDVX_UINT vadd_vx_u32m8
-#define VMVVX_UINT vmv_v_x_u32m8
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
-#define VSLIDEDOWN_UINT vslidedown_vx_u32m8
-#define VMVVXS_UINT vmv_x_s_u32m8_u32
+#define VSETVL(n) __riscv_vsetvl_e32m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
+#define FLOAT_V_T vfloat32m8_t
+#define FLOAT_V_T_M1 vfloat32m1_t
+#define VLEV_FLOAT __riscv_vle32_v_f32m8
+#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
+#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f32m8_f32m1
+#define MASK_T vbool4_t
+#define VMFLTVF_FLOAT __riscv_vmflt_vf_f32m8_b4
+#define VMFLTVV_FLOAT __riscv_vmflt_vv_f32m8_b4
+#define VMFLEVF_FLOAT __riscv_vmfle_vf_f32m8_b4
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
+#define VFMINVV_FLOAT __riscv_vfmin_vv_f32m8
+#define VFIRSTM __riscv_vfirst_m_b4
+#define UINT_V_T vuint32m8_t
+#define VIDV_MASK_UINT __riscv_vid_v_u32m8_m
+#define VIDV_UINT __riscv_vid_v_u32m8
+#define VADDVX_MASK_UINT __riscv_vadd_vx_u32m8_m
+#define VADDVX_UINT __riscv_vadd_vx_u32m8
+#define VMVVX_UINT __riscv_vmv_v_x_u32m8
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
+#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u32m8
+#define VMVVXS_UINT __riscv_vmv_x_s_u32m8_u32
 #endif
 
 BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -104,8 +104,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
 
             // index where element less than v_min
            mask = VMFLTVV_FLOAT(vx, v_min, vl);
-            v_min_index = VIDV_MASK_UINT(mask, v_min_index, vl);
-            v_min_index = VADDVX_MASK_UINT(mask, v_min_index, v_min_index, j, vl);
+            v_min_index = VIDV_MASK_UINT(mask, vl);
+            v_min_index = VADDVX_MASK_UINT(mask, v_min_index, j, vl);
 
             //update v_min and start_index j
            v_min = VFMINVV_FLOAT(v_min, vx, vl);
@@ -122,8 +122,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
 
             // index where element less than v_min
            mask = VMFLTVV_FLOAT(vx, v_min, vl);
-            v_min_index = VIDV_MASK_UINT(mask, v_min_index, vl);
-            v_min_index = VADDVX_MASK_UINT(mask, v_min_index, v_min_index, j, vl);
+            v_min_index = VIDV_MASK_UINT(mask, vl);
+            v_min_index = VADDVX_MASK_UINT(mask, v_min_index, j, vl);
 
             //update v_min and start_index j
            v_min = VFMINVV_FLOAT(v_min, vx, vl);
@@ -131,16 +131,15 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
 
    }
 
-    FLOAT_V_T_M1 v_res, v_max;
-    v_res = VFMVVF_FLOAT_M1(0, vlmax);
-    v_max = VFMVVF_FLOAT_M1(FLT_MAX, vlmax);
+    FLOAT_V_T_M1 v_res;
+    v_res = VFMVVF_FLOAT_M1(FLT_MAX, vlmax);
 
-    v_res = VFREDMINVS_FLOAT(v_res, v_min, v_max, vlmax);
+    v_res = VFREDMINVS_FLOAT(v_min, v_res, vlmax);
    minf = VFMVFS_FLOAT_M1(v_res);
    mask = VMFLEVF_FLOAT(v_min, minf, vlmax);
    min_index = VFIRSTM(mask, vlmax);
 
-    v_min_index = VSLIDEDOWN_UINT(v_min_index, v_min_index, min_index, vlmax);
+    v_min_index = VSLIDEDOWN_UINT(v_min_index, min_index, vlmax);
    min_index = VMVVXS_UINT(v_min_index);
 
    return(min_index+1);
diff --git a/kernel/riscv64/izamax_rvv.c b/kernel/riscv64/izamax_rvv.c
index 9cb332cbb..e61d0cbec 100644
--- a/kernel/riscv64/izamax_rvv.c
+++ b/kernel/riscv64/izamax_rvv.c
@@ -28,63 +28,63 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h" #if defined(DOUBLE) -#define VSETVL(n) vsetvl_e64m4(n) -#define VSETVL_MAX vsetvlmax_e64m4() -#define FLOAT_V_T vfloat64m4_t -#define FLOAT_V_T_M1 vfloat64m1_t -#define VLEV_FLOAT vle64_v_f64m4 -#define VLSEV_FLOAT vlse64_v_f64m4 -#define VLSEG_FLOAT vlseg2e64_v_f64m4 -#define VLSSEG_FLOAT vlsseg2e64_v_f64m4 -#define VFREDMAXVS_FLOAT vfredmax_vs_f64m4_f64m1 -#define MASK_T vbool16_t -#define VMFLTVF_FLOAT vmflt_vf_f64m4_b16 -#define VMFLTVV_FLOAT vmflt_vv_f64m4_b16 -#define VMFGEVF_FLOAT vmfge_vf_f64m4_b16 -#define VFMVVF_FLOAT vfmv_v_f_f64m4 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1 -#define VFABSV_FLOAT vfabs_v_f64m4 -#define VFMAXVV_FLOAT vfmax_vv_f64m4 -#define VFADDVV_FLOAT vfadd_vv_f64m4 -#define VFIRSTM vfirst_m_b16 -#define UINT_V_T vuint64m4_t -#define VIDV_MASK_UINT vid_v_u64m4_m -#define VIDV_UINT vid_v_u64m4 -#define VADDVX_MASK_UINT vadd_vx_u64m4_m -#define VADDVX_UINT vadd_vx_u64m4 -#define VMVVX_UINT vmv_v_x_u64m4 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64 -#define VSLIDEDOWN_UINT vslidedown_vx_u64m4 -#define VMVVXS_UINT vmv_x_s_u64m4_u64 +#define VSETVL(n) __riscv_vsetvl_e64m4(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m4() +#define FLOAT_V_T vfloat64m4_t +#define FLOAT_V_T_M1 vfloat64m1_t +#define VLEV_FLOAT __riscv_vle64_v_f64m4 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m4 +#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4 +#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f64m4_f64m1 +#define MASK_T vbool16_t +#define VMFLTVF_FLOAT __riscv_vmflt_vf_f64m4_b16 +#define VMFLTVV_FLOAT __riscv_vmflt_vv_f64m4_b16 +#define VMFGEVF_FLOAT __riscv_vmfge_vf_f64m4_b16 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1 +#define VFABSV_FLOAT __riscv_vfabs_v_f64m4 +#define VFMAXVV_FLOAT __riscv_vfmax_vv_f64m4 +#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m4 +#define VFIRSTM __riscv_vfirst_m_b16 +#define UINT_V_T vuint64m4_t +#define VIDV_MASK_UINT __riscv_vid_v_u64m4_m +#define VIDV_UINT __riscv_vid_v_u64m4 +#define VADDVX_MASK_UINT __riscv_vadd_vx_u64m4_m +#define VADDVX_UINT __riscv_vadd_vx_u64m4 +#define VMVVX_UINT __riscv_vmv_v_x_u64m4 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64 +#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u64m4 +#define VMVVXS_UINT __riscv_vmv_x_s_u64m4_u64 #else -#define VSETVL(n) vsetvl_e32m4(n) -#define VSETVL_MAX vsetvlmax_e32m4() -#define FLOAT_V_T vfloat32m4_t -#define FLOAT_V_T_M1 vfloat32m1_t -#define VLEV_FLOAT vle32_v_f32m4 -#define VLSEV_FLOAT vlse32_v_f32m4 -#define VLSEG_FLOAT vlseg2e32_v_f32m4 -#define VLSSEG_FLOAT vlsseg2e32_v_f32m4 -#define VFREDMAXVS_FLOAT vfredmax_vs_f32m4_f32m1 -#define MASK_T vbool8_t -#define VMFLTVF_FLOAT vmflt_vf_f32m4_b8 -#define VMFLTVV_FLOAT vmflt_vv_f32m4_b8 -#define VMFGEVF_FLOAT vmfge_vf_f32m4_b8 -#define VFMVVF_FLOAT vfmv_v_f_f32m4 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1 -#define VFABSV_FLOAT vfabs_v_f32m4 -#define VFMAXVV_FLOAT vfmax_vv_f32m4 -#define VFADDVV_FLOAT vfadd_vv_f32m4 -#define VFIRSTM vfirst_m_b8 -#define UINT_V_T vuint32m4_t -#define VIDV_MASK_UINT vid_v_u32m4_m -#define VIDV_UINT vid_v_u32m4 -#define VADDVX_MASK_UINT vadd_vx_u32m4_m -#define VADDVX_UINT vadd_vx_u32m4 -#define VMVVX_UINT vmv_v_x_u32m4 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32 -#define VSLIDEDOWN_UINT vslidedown_vx_u32m4 -#define VMVVXS_UINT vmv_x_s_u32m4_u32 +#define VSETVL(n) __riscv_vsetvl_e32m4(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m4() +#define FLOAT_V_T vfloat32m4_t +#define FLOAT_V_T_M1 vfloat32m1_t +#define 
+#define VLEV_FLOAT __riscv_vle32_v_f32m4
+#define VLSEV_FLOAT __riscv_vlse32_v_f32m4
+#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
+#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4
+#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f32m4_f32m1
+#define MASK_T vbool8_t
+#define VMFLTVF_FLOAT __riscv_vmflt_vf_f32m4_b8
+#define VMFLTVV_FLOAT __riscv_vmflt_vv_f32m4_b8
+#define VMFGEVF_FLOAT __riscv_vmfge_vf_f32m4_b8
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
+#define VFABSV_FLOAT __riscv_vfabs_v_f32m4
+#define VFMAXVV_FLOAT __riscv_vfmax_vv_f32m4
+#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m4
+#define VFIRSTM __riscv_vfirst_m_b8
+#define UINT_V_T vuint32m4_t
+#define VIDV_MASK_UINT __riscv_vid_v_u32m4_m
+#define VIDV_UINT __riscv_vid_v_u32m4
+#define VADDVX_MASK_UINT __riscv_vadd_vx_u32m4_m
+#define VADDVX_UINT __riscv_vadd_vx_u32m4
+#define VMVVX_UINT __riscv_vmv_v_x_u32m4
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
+#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u32m4
+#define VMVVXS_UINT __riscv_vmv_x_s_u32m4_u32
 #endif

 BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -116,8 +116,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

             //index where element greater than v_max
             mask = VMFLTVV_FLOAT(v_max, vx0, vl);
-            v_max_index = VIDV_MASK_UINT(mask, v_max_index, vl);
-            v_max_index = VADDVX_MASK_UINT(mask, v_max_index, v_max_index, j, vl);
+            v_max_index = VIDV_MASK_UINT(mask, vl);
+            v_max_index = VADDVX_MASK_UINT(mask, v_max_index, j, vl);

             //update v_max and start_index j
             v_max = VFMAXVV_FLOAT(v_max, vx0, vl);
@@ -138,24 +138,23 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

             //index where element greater than v_max
             mask = VMFLTVV_FLOAT(v_max, vx0, vl);
-            v_max_index = VIDV_MASK_UINT(mask, v_max_index, vl);
-            v_max_index = VADDVX_MASK_UINT(mask, v_max_index, v_max_index, j, vl);
+            v_max_index = VIDV_MASK_UINT(mask, vl);
+            v_max_index = VADDVX_MASK_UINT(mask, v_max_index, j, vl);

             //update v_max and start_index j
             v_max = VFMAXVV_FLOAT(v_max, vx0, vl);
         }
     }

-    FLOAT_V_T_M1 v_res, v_z0;
+    FLOAT_V_T_M1 v_res;
     v_res = VFMVVF_FLOAT_M1(0, vlmax);
-    v_z0 = VFMVVF_FLOAT_M1(0, vlmax);

-    v_res = VFREDMAXVS_FLOAT(v_res, v_max, v_z0, vlmax);
+    v_res = VFREDMAXVS_FLOAT(v_max, v_res, vlmax);
     maxf = VFMVFS_FLOAT_M1(v_res);
     mask = VMFGEVF_FLOAT(v_max, maxf, vlmax);
     max_index = VFIRSTM(mask, vlmax);
-    v_max_index = VSLIDEDOWN_UINT(v_max_index, v_max_index, max_index, vlmax);
+    v_max_index = VSLIDEDOWN_UINT(v_max_index, max_index, vlmax);
     max_index = VMVVXS_UINT(v_max_index);

     return(max_index+1);
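Where the old masked forms carried a maskedoff vector (so inactive lanes kept their previous index), the ratified API makes that policy explicit. A standalone sketch of the index-tracking loop above, using the _tumu/_tu policy variants so inactive and tail lanes keep their old values across strips (illustrative only; 0-based result, float/m8 types assumed, n > 0):

    #include <riscv_vector.h>
    #include <float.h>

    size_t imin_sketch(const float *x, size_t n)
    {
        size_t vlmax = __riscv_vsetvlmax_e32m8();
        vfloat32m8_t v_min = __riscv_vfmv_v_f_f32m8(FLT_MAX, vlmax);
        vuint32m8_t v_min_index = __riscv_vmv_v_x_u32m8(0, vlmax);

        for (size_t j = 0; j < n; ) {
            size_t vl = __riscv_vsetvl_e32m8(n - j);
            vfloat32m8_t vx = __riscv_vle32_v_f32m8(&x[j], vl);

            // lanes where the new strip beats the running minimum
            vbool4_t mask = __riscv_vmflt_vv_f32m8_b4(vx, v_min, vl);
            // _tumu: inactive and tail lanes keep their old index
            v_min_index = __riscv_vid_v_u32m8_tumu(mask, v_min_index, vl);
            v_min_index = __riscv_vadd_vx_u32m8_tumu(mask, v_min_index,
                                                     v_min_index, (uint32_t)j, vl);
            // _tu: tail lanes keep their old minimum
            v_min = __riscv_vfmin_vv_f32m8_tu(v_min, v_min, vx, vl);
            j += vl;
        }

        vfloat32m1_t v_res = __riscv_vfmv_v_f_f32m1(FLT_MAX, __riscv_vsetvlmax_e32m1());
        v_res = __riscv_vfredmin_vs_f32m8_f32m1(v_min, v_res, vlmax);
        float minf = __riscv_vfmv_f_s_f32m1_f32(v_res);

        // first lane holding the minimum, slid down and extracted
        vbool4_t hit = __riscv_vmfle_vf_f32m8_b4(v_min, minf, vlmax);
        long first = __riscv_vfirst_m_b4(hit, vlmax);
        v_min_index = __riscv_vslidedown_vx_u32m8(v_min_index, (size_t)first, vlmax);
        return __riscv_vmv_x_s_u32m8_u32(v_min_index);
    }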
diff --git a/kernel/riscv64/izamin_rvv.c b/kernel/riscv64/izamin_rvv.c
index 69771e5aa..297b3c99a 100644
--- a/kernel/riscv64/izamin_rvv.c
+++ b/kernel/riscv64/izamin_rvv.c
@@ -29,59 +29,59 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <float.h>

 #if defined(DOUBLE)
-#define VSETVL(n) vsetvl_e64m4(n)
-#define VSETVL_MAX vsetvlmax_e64m4()
-#define FLOAT_V_T vfloat64m4_t
-#define FLOAT_V_T_M1 vfloat64m1_t
-#define VLSEG_FLOAT vlseg2e64_v_f64m4
-#define VLSSEG_FLOAT vlsseg2e64_v_f64m4
-#define VFREDMINVS_FLOAT vfredmin_vs_f64m4_f64m1
-#define MASK_T vbool16_t
-#define VMFLTVF_FLOAT vmflt_vf_f64m4_b16
-#define VMFLTVV_FLOAT vmflt_vv_f64m4_b16
-#define VMFLEVF_FLOAT vmfle_vf_f64m4_b16
-#define VFMVVF_FLOAT vfmv_v_f_f64m4
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
-#define VFABSV_FLOAT vfabs_v_f64m4
-#define VFMINVV_FLOAT vfmin_vv_f64m4
-#define VFADDVV_FLOAT vfadd_vv_f64m4
-#define VFIRSTM vfirst_m_b16
-#define UINT_V_T vuint64m4_t
-#define VIDV_MASK_UINT vid_v_u64m4_m
-#define VIDV_UINT vid_v_u64m4
-#define VADDVX_MASK_UINT vadd_vx_u64m4_m
-#define VADDVX_UINT vadd_vx_u64m4
-#define VMVVX_UINT vmv_v_x_u64m4
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
-#define VSLIDEDOWN_UINT vslidedown_vx_u64m4
-#define VMVVXS_UINT vmv_x_s_u64m4_u64
+#define VSETVL(n) __riscv_vsetvl_e64m4(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e64m4()
+#define FLOAT_V_T vfloat64m4_t
+#define FLOAT_V_T_M1 vfloat64m1_t
+#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4
+#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4
+#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f64m4_f64m1
+#define MASK_T vbool16_t
+#define VMFLTVF_FLOAT __riscv_vmflt_vf_f64m4_b16
+#define VMFLTVV_FLOAT __riscv_vmflt_vv_f64m4_b16
+#define VMFLEVF_FLOAT __riscv_vmfle_vf_f64m4_b16
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
+#define VFABSV_FLOAT __riscv_vfabs_v_f64m4
+#define VFMINVV_FLOAT __riscv_vfmin_vv_f64m4
+#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m4
+#define VFIRSTM __riscv_vfirst_m_b16
+#define UINT_V_T vuint64m4_t
+#define VIDV_MASK_UINT __riscv_vid_v_u64m4_m
+#define VIDV_UINT __riscv_vid_v_u64m4
+#define VADDVX_MASK_UINT __riscv_vadd_vx_u64m4_m
+#define VADDVX_UINT __riscv_vadd_vx_u64m4
+#define VMVVX_UINT __riscv_vmv_v_x_u64m4
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
+#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u64m4
+#define VMVVXS_UINT __riscv_vmv_x_s_u64m4_u64
 #else
-#define VSETVL(n) vsetvl_e32m4(n)
-#define VSETVL_MAX vsetvlmax_e32m4()
-#define FLOAT_V_T vfloat32m4_t
-#define FLOAT_V_T_M1 vfloat32m1_t
-#define VLSEG_FLOAT vlseg2e32_v_f32m4
-#define VLSSEG_FLOAT vlsseg2e32_v_f32m4
-#define VFREDMINVS_FLOAT vfredmin_vs_f32m4_f32m1
-#define MASK_T vbool8_t
-#define VMFLTVF_FLOAT vmflt_vf_f32m4_b8
-#define VMFLTVV_FLOAT vmflt_vv_f32m4_b8
-#define VMFLEVF_FLOAT vmfle_vf_f32m4_b8
-#define VFMVVF_FLOAT vfmv_v_f_f32m4
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
-#define VFABSV_FLOAT vfabs_v_f32m4
-#define VFMINVV_FLOAT vfmin_vv_f32m4
-#define VFADDVV_FLOAT vfadd_vv_f32m4
-#define VFIRSTM vfirst_m_b8
-#define UINT_V_T vuint32m4_t
-#define VIDV_MASK_UINT vid_v_u32m4_m
-#define VIDV_UINT vid_v_u32m4
-#define VADDVX_MASK_UINT vadd_vx_u32m4_m
-#define VADDVX_UINT vadd_vx_u32m4
-#define VMVVX_UINT vmv_v_x_u32m4
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
-#define VSLIDEDOWN_UINT vslidedown_vx_u32m4
-#define VMVVXS_UINT vmv_x_s_u32m4_u32
+#define VSETVL(n) __riscv_vsetvl_e32m4(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e32m4()
+#define FLOAT_V_T vfloat32m4_t
+#define FLOAT_V_T_M1 vfloat32m1_t
+#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
+#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4
+#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f32m4_f32m1
+#define MASK_T vbool8_t
+#define VMFLTVF_FLOAT __riscv_vmflt_vf_f32m4_b8
+#define VMFLTVV_FLOAT __riscv_vmflt_vv_f32m4_b8
+#define VMFLEVF_FLOAT __riscv_vmfle_vf_f32m4_b8
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
+#define VFABSV_FLOAT __riscv_vfabs_v_f32m4
+#define VFMINVV_FLOAT __riscv_vfmin_vv_f32m4
+#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m4
+#define VFIRSTM __riscv_vfirst_m_b8
+#define UINT_V_T vuint32m4_t
+#define VIDV_MASK_UINT __riscv_vid_v_u32m4_m
+#define VIDV_UINT __riscv_vid_v_u32m4
+#define VADDVX_MASK_UINT __riscv_vadd_vx_u32m4_m
+#define VADDVX_UINT __riscv_vadd_vx_u32m4
+#define VMVVX_UINT __riscv_vmv_v_x_u32m4
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
+#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u32m4
+#define VMVVXS_UINT __riscv_vmv_x_s_u32m4_u32
 #endif

 BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -113,8 +113,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

             // index where element less than v_min
             mask = VMFLTVV_FLOAT(vx0, v_min, vl);
-            v_min_index = VIDV_MASK_UINT(mask, v_min_index, vl);
-            v_min_index = VADDVX_MASK_UINT(mask, v_min_index, v_min_index, j, vl);
+            v_min_index = VIDV_MASK_UINT(mask, vl);
+            v_min_index = VADDVX_MASK_UINT(mask, v_min_index, j, vl);

             //update v_min and start_index j
             v_min = VFMINVV_FLOAT(v_min, vx0, vl);
@@ -136,8 +136,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

             // index where element less than v_min
             mask = VMFLTVV_FLOAT(vx0, v_min, vl);
-            v_min_index = VIDV_MASK_UINT(mask, v_min_index, vl);
-            v_min_index = VADDVX_MASK_UINT(mask, v_min_index, v_min_index, j, vl);
+            v_min_index = VIDV_MASK_UINT(mask, vl);
+            v_min_index = VADDVX_MASK_UINT(mask, v_min_index, j, vl);

             //update v_min and start_index j
             v_min = VFMINVV_FLOAT(v_min, vx0, vl);
@@ -145,16 +145,15 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
     }

-    FLOAT_V_T_M1 v_res, v_max;
-    v_res = VFMVVF_FLOAT_M1(0, vlmax);
-    v_max = VFMVVF_FLOAT_M1(FLT_MAX, vlmax);
+    FLOAT_V_T_M1 v_res;
+    v_res = VFMVVF_FLOAT_M1(FLT_MAX, vlmax);

-    v_res = VFREDMINVS_FLOAT(v_res, v_min, v_max, vlmax);
+    v_res = VFREDMINVS_FLOAT(v_min, v_res, vlmax);
     minf = VFMVFS_FLOAT_M1(v_res);
     mask = VMFLEVF_FLOAT(v_min, minf, vlmax);
     min_index = VFIRSTM(mask, vlmax);
-    v_min_index = VSLIDEDOWN_UINT(v_min_index, v_min_index, min_index, vlmax);
+    v_min_index = VSLIDEDOWN_UINT(v_min_index, min_index, vlmax);
     min_index = VMVVXS_UINT(v_min_index);

     return(min_index+1);
diff --git a/kernel/riscv64/max_rvv.c b/kernel/riscv64/max_rvv.c
index 5b1380d2b..9315321f4 100644
--- a/kernel/riscv64/max_rvv.c
+++ b/kernel/riscv64/max_rvv.c
@@ -29,31 +29,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <float.h>

 #if !defined(DOUBLE)
-#define VSETVL(n) vsetvl_e32m8(n)
-#define VSETVL_MAX vsetvlmax_e32m8()
-#define VSETVL_MAX_M1 vsetvlmax_e32m1()
-#define FLOAT_V_T vfloat32m8_t
-#define FLOAT_V_T_M1 vfloat32m1_t
-#define VLEV_FLOAT vle32_v_f32m8
-#define VLSEV_FLOAT vlse32_v_f32m8
-#define VFREDMAXVS_FLOAT vfredmax_vs_f32m8_f32m1
-#define VFMVVF_FLOAT vfmv_v_f_f32m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
-#define VFMAXVV_FLOAT vfmax_vv_f32m8
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
+#define VSETVL(n) __riscv_vsetvl_e32m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
+#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
+#define FLOAT_V_T vfloat32m8_t
+#define FLOAT_V_T_M1 vfloat32m1_t
+#define VLEV_FLOAT __riscv_vle32_v_f32m8
+#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
+#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f32m8_f32m1
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
+#define VFMAXVV_FLOAT __riscv_vfmax_vv_f32m8
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
 #else
-#define VSETVL(n) vsetvl_e64m8(n)
-#define VSETVL_MAX vsetvlmax_e64m8()
-#define VSETVL_MAX_M1 vsetvlmax_e64m1()
-#define FLOAT_V_T vfloat64m8_t
-#define FLOAT_V_T_M1 vfloat64m1_t
-#define VLEV_FLOAT vle64_v_f64m8
-#define VLSEV_FLOAT vlse64_v_f64m8
-#define VFREDMAXVS_FLOAT vfredmax_vs_f64m8_f64m1
-#define VFMVVF_FLOAT vfmv_v_f_f64m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
-#define VFMAXVV_FLOAT vfmax_vv_f64m8
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
+#define VSETVL(n) __riscv_vsetvl_e64m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
+#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
+#define FLOAT_V_T vfloat64m8_t
+#define FLOAT_V_T_M1 vfloat64m1_t
+#define VLEV_FLOAT __riscv_vle64_v_f64m8
+#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
+#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f64m8_f64m1
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
+#define VFMAXVV_FLOAT __riscv_vfmax_vv_f64m8
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
 #endif

 FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -91,7 +91,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
     }

-    v_res = VFREDMAXVS_FLOAT(v_res, vmax, v_res, vlmax);
+    v_res = VFREDMAXVS_FLOAT(vmax, v_res, vlmax);
     maxf = VFMVFS_FLOAT_M1(v_res);

     return(maxf);
diff --git a/kernel/riscv64/min_rvv.c b/kernel/riscv64/min_rvv.c
index bddcc0ba7..158b682fd 100644
--- a/kernel/riscv64/min_rvv.c
+++ b/kernel/riscv64/min_rvv.c
@@ -29,31 +29,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <float.h>

 #if !defined(DOUBLE)
-#define VSETVL(n) vsetvl_e32m8(n)
-#define VSETVL_MAX vsetvlmax_e32m8()
-#define VSETVL_MAX_M1 vsetvlmax_e32m1()
-#define FLOAT_V_T vfloat32m8_t
-#define FLOAT_V_T_M1 vfloat32m1_t
-#define VLEV_FLOAT vle32_v_f32m8
-#define VLSEV_FLOAT vlse32_v_f32m8
-#define VFREDMINVS_FLOAT vfredmin_vs_f32m8_f32m1
-#define VFMVVF_FLOAT vfmv_v_f_f32m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
-#define VFMINVV_FLOAT vfmin_vv_f32m8
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
+#define VSETVL(n) __riscv_vsetvl_e32m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
+#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
+#define FLOAT_V_T vfloat32m8_t
+#define FLOAT_V_T_M1 vfloat32m1_t
+#define VLEV_FLOAT __riscv_vle32_v_f32m8
+#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
+#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f32m8_f32m1
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
+#define VFMINVV_FLOAT __riscv_vfmin_vv_f32m8
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
 #else
-#define VSETVL(n) vsetvl_e64m8(n)
-#define VSETVL_MAX vsetvlmax_e64m8()
-#define VSETVL_MAX_M1 vsetvlmax_e64m1()
-#define FLOAT_V_T vfloat64m8_t
-#define FLOAT_V_T_M1 vfloat64m1_t
-#define VLEV_FLOAT vle64_v_f64m8
-#define VLSEV_FLOAT vlse64_v_f64m8
-#define VFREDMINVS_FLOAT vfredmin_vs_f64m8_f64m1
-#define VFMVVF_FLOAT vfmv_v_f_f64m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
-#define VFMINVV_FLOAT vfmin_vv_f64m8
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
+#define VSETVL(n) __riscv_vsetvl_e64m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
+#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
+#define FLOAT_V_T vfloat64m8_t
+#define FLOAT_V_T_M1 vfloat64m1_t
+#define VLEV_FLOAT __riscv_vle64_v_f64m8
+#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
+#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f64m8_f64m1
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
+#define VFMINVV_FLOAT __riscv_vfmin_vv_f64m8
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
 #endif

 FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -91,7 +91,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
     }

-    v_res = VFREDMINVS_FLOAT(v_res, vmin, v_res, vlmax);
+    v_res = VFREDMINVS_FLOAT(vmin, v_res, vlmax);
     minf = VFMVFS_FLOAT_M1(v_res);

     return(minf);
diff --git a/kernel/riscv64/nrm2_rvv.c b/kernel/riscv64/nrm2_rvv.c
index 979c31648..42abfa119 100644
--- a/kernel/riscv64/nrm2_rvv.c
+++ b/kernel/riscv64/nrm2_rvv.c
@@ -29,30 +29,30 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <math.h>

 #if !defined(DOUBLE)
-#define VSETVL(n) vsetvl_e32m8(n)
-#define VSETVL_MAX vsetvlmax_e32m8()
-#define FLOAT_V_T vfloat32m8_t
-#define FLOAT_V_T_M1 vfloat32m1_t
-#define VLEV_FLOAT vle32_v_f32m8
-#define VLSEV_FLOAT vlse32_v_f32m8
-#define VFREDSUM_FLOAT vfredusum_vs_f32m8_f32m1
-#define VFMACCVV_FLOAT vfmacc_vv_f32m8
-#define VFMVVF_FLOAT vfmv_v_f_f32m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
+#define VSETVL(n) __riscv_vsetvl_e32m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
+#define FLOAT_V_T vfloat32m8_t
+#define FLOAT_V_T_M1 vfloat32m1_t
+#define VLEV_FLOAT __riscv_vle32_v_f32m8
+#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
+#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m8_f32m1
+#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m8
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
 #define ABS fabsf
 #else
-#define VSETVL(n) vsetvl_e64m8(n)
-#define VSETVL_MAX vsetvlmax_e64m8()
-#define FLOAT_V_T vfloat64m8_t
-#define FLOAT_V_T_M1 vfloat64m1_t
-#define VLEV_FLOAT vle64_v_f64m8
-#define VLSEV_FLOAT vlse64_v_f64m8
-#define VFREDSUM_FLOAT vfredusum_vs_f64m8_f64m1
-#define VFMACCVV_FLOAT vfmacc_vv_f64m8
-#define VFMVVF_FLOAT vfmv_v_f_f64m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
+#define VSETVL(n) __riscv_vsetvl_e64m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
+#define FLOAT_V_T vfloat64m8_t
+#define FLOAT_V_T_M1 vfloat64m1_t
+#define VLEV_FLOAT __riscv_vle64_v_f64m8
+#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
+#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m8_f64m1
+#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m8
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
 #define ABS fabs
 #endif
@@ -95,7 +95,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
         }
     }

-    v_res = VFREDSUM_FLOAT(v_res, vr, v_res, vlmax);
+    v_res = VFREDSUM_FLOAT(vr, v_res, vlmax);

     ssq = VFMVFS_FLOAT_M1(v_res);
diff --git a/kernel/riscv64/rot_rvv.c b/kernel/riscv64/rot_rvv.c
index 7bf5e4270..90f81d5e2 100644
--- a/kernel/riscv64/rot_rvv.c
+++ b/kernel/riscv64/rot_rvv.c
@@ -28,25 +28,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m8(n) -#define FLOAT_V_T vfloat32m8_t -#define VLEV_FLOAT vle32_v_f32m8 -#define VLSEV_FLOAT vlse32_v_f32m8 -#define VSEV_FLOAT vse32_v_f32m8 -#define VSSEV_FLOAT vsse32_v_f32m8 -#define VFMACCVF_FLOAT vfmacc_vf_f32m8 -#define VFMULVF_FLOAT vfmul_vf_f32m8 -#define VFMSACVF_FLOAT vfmsac_vf_f32m8 +#define VSETVL(n) __riscv_vsetvl_e32m8(n) +#define FLOAT_V_T vfloat32m8_t +#define VLEV_FLOAT __riscv_vle32_v_f32m8 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m8 +#define VSEV_FLOAT __riscv_vse32_v_f32m8 +#define VSSEV_FLOAT __riscv_vsse32_v_f32m8 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m8 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m8 +#define VFMSACVF_FLOAT __riscv_vfmsac_vf_f32m8 #else -#define VSETVL(n) vsetvl_e64m8(n) -#define FLOAT_V_T vfloat64m8_t -#define VLEV_FLOAT vle64_v_f64m8 -#define VLSEV_FLOAT vlse64_v_f64m8 -#define VSEV_FLOAT vse64_v_f64m8 -#define VSSEV_FLOAT vsse64_v_f64m8 -#define VFMACCVF_FLOAT vfmacc_vf_f64m8 -#define VFMULVF_FLOAT vfmul_vf_f64m8 -#define VFMSACVF_FLOAT vfmsac_vf_f64m8 +#define VSETVL(n) __riscv_vsetvl_e64m8(n) +#define FLOAT_V_T vfloat64m8_t +#define VLEV_FLOAT __riscv_vle64_v_f64m8 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m8 +#define VSEV_FLOAT __riscv_vse64_v_f64m8 +#define VSSEV_FLOAT __riscv_vsse64_v_f64m8 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m8 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m8 +#define VFMSACVF_FLOAT __riscv_vfmsac_vf_f64m8 #endif int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT c, FLOAT s) diff --git a/kernel/riscv64/scal_rvv.c b/kernel/riscv64/scal_rvv.c index d2c0378bf..2e2cfd31e 100644 --- a/kernel/riscv64/scal_rvv.c +++ b/kernel/riscv64/scal_rvv.c @@ -28,23 +28,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m8(n) -#define FLOAT_V_T vfloat32m8_t -#define VLEV_FLOAT vle32_v_f32m8 -#define VLSEV_FLOAT vlse32_v_f32m8 -#define VSEV_FLOAT vse32_v_f32m8 -#define VSSEV_FLOAT vsse32_v_f32m8 -#define VFMULVF_FLOAT vfmul_vf_f32m8 -#define VFMVVF_FLOAT vfmv_v_f_f32m8 +#define VSETVL(n) __riscv_vsetvl_e32m8(n) +#define FLOAT_V_T vfloat32m8_t +#define VLEV_FLOAT __riscv_vle32_v_f32m8 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m8 +#define VSEV_FLOAT __riscv_vse32_v_f32m8 +#define VSSEV_FLOAT __riscv_vsse32_v_f32m8 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m8 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8 #else -#define VSETVL(n) vsetvl_e64m8(n) -#define FLOAT_V_T vfloat64m8_t -#define VLEV_FLOAT vle64_v_f64m8 -#define VLSEV_FLOAT vlse64_v_f64m8 -#define VSEV_FLOAT vse64_v_f64m8 -#define VSSEV_FLOAT vsse64_v_f64m8 -#define VFMULVF_FLOAT vfmul_vf_f64m8 -#define VFMVVF_FLOAT vfmv_v_f_f64m8 +#define VSETVL(n) __riscv_vsetvl_e64m8(n) +#define FLOAT_V_T vfloat64m8_t +#define VLEV_FLOAT __riscv_vle64_v_f64m8 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m8 +#define VSEV_FLOAT __riscv_vse64_v_f64m8 +#define VSSEV_FLOAT __riscv_vsse64_v_f64m8 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m8 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8 #endif int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2) diff --git a/kernel/riscv64/sum_rvv.c b/kernel/riscv64/sum_rvv.c index 1db0d09dd..9715faf22 100644 --- a/kernel/riscv64/sum_rvv.c +++ b/kernel/riscv64/sum_rvv.c @@ -28,31 +28,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m8(n) -#define VSETVL_MAX vsetvlmax_e32m8() -#define VSETVL_MAX_M1 vsetvlmax_e32m1() -#define FLOAT_V_T vfloat32m8_t -#define FLOAT_V_T_M1 vfloat32m1_t -#define VLEV_FLOAT vle32_v_f32m8 -#define VLSEV_FLOAT vlse32_v_f32m8 -#define VFMVVF_FLOAT vfmv_v_f_f32m8 -#define VFADDVV_FLOAT vfadd_vv_f32m8 -#define VFREDSUMVS_FLOAT vfredusum_vs_f32m8_f32m1 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32 +#define VSETVL(n) __riscv_vsetvl_e32m8(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m8() +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1() +#define FLOAT_V_T vfloat32m8_t +#define FLOAT_V_T_M1 vfloat32m1_t +#define VLEV_FLOAT __riscv_vle32_v_f32m8 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m8 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8 +#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m8 +#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f32m8_f32m1 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32 #else -#define VSETVL(n) vsetvl_e64m8(n) -#define VSETVL_MAX vsetvlmax_e64m8() -#define VSETVL_MAX_M1 vsetvlmax_e64m1() -#define FLOAT_V_T vfloat64m8_t -#define FLOAT_V_T_M1 vfloat64m1_t -#define VLEV_FLOAT vle64_v_f64m8 -#define VLSEV_FLOAT vlse64_v_f64m8 -#define VFMVVF_FLOAT vfmv_v_f_f64m8 -#define VFADDVV_FLOAT vfadd_vv_f64m8 -#define VFREDSUMVS_FLOAT vfredusum_vs_f64m8_f64m1 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64 +#define VSETVL(n) __riscv_vsetvl_e64m8(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m8() +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1() +#define FLOAT_V_T vfloat64m8_t +#define FLOAT_V_T_M1 vfloat64m1_t +#define VLEV_FLOAT __riscv_vle64_v_f64m8 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m8 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8 +#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m8 +#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f64m8_f64m1 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64 #endif FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) @@ -89,7 +89,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) } - v_res = VFREDSUMVS_FLOAT(v_res, vsum, v_res, vlmax); + v_res = VFREDSUMVS_FLOAT(vsum, v_res, vlmax); sumf = VFMVFS_FLOAT_M1(v_res); return(sumf); } diff --git a/kernel/riscv64/swap_rvv.c b/kernel/riscv64/swap_rvv.c index 2cf92f6ad..893d70554 100644 --- a/kernel/riscv64/swap_rvv.c +++ b/kernel/riscv64/swap_rvv.c @@ -28,23 +28,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m8(n) -#define VSETVL_MAX vsetvlmax_e32m8() -#define FLOAT_V_T vfloat32m8_t -#define VLEV_FLOAT vle32_v_f32m8 -#define VLSEV_FLOAT vlse32_v_f32m8 -#define VSEV_FLOAT vse32_v_f32m8 -#define VSSEV_FLOAT vsse32_v_f32m8 -#define VFMVVF_FLOAT vfmv_v_f_f32m8 +#define VSETVL(n) __riscv_vsetvl_e32m8(n) +#define FLOAT_V_T vfloat32m8_t +#define VLEV_FLOAT __riscv_vle32_v_f32m8 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m8 +#define VSEV_FLOAT __riscv_vse32_v_f32m8 +#define VSSEV_FLOAT __riscv_vsse32_v_f32m8 #else -#define VSETVL(n) vsetvl_e64m8(n) -#define VSETVL_MAX vsetvlmax_e64m8() -#define FLOAT_V_T vfloat64m8_t -#define VLEV_FLOAT vle64_v_f64m8 -#define VLSEV_FLOAT vlse64_v_f64m8 -#define VSEV_FLOAT vse64_v_f64m8 -#define VSSEV_FLOAT vsse64_v_f64m8 -#define VFMVVF_FLOAT vfmv_v_f_f64m8 +#define VSETVL(n) __riscv_vsetvl_e64m8(n) +#define FLOAT_V_T vfloat64m8_t +#define VLEV_FLOAT __riscv_vle64_v_f64m8 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m8 +#define VSEV_FLOAT __riscv_vse64_v_f64m8 +#define VSSEV_FLOAT __riscv_vsse64_v_f64m8 #endif int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy3, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2) diff --git a/kernel/riscv64/symm_lcopy_rvv_v1.c b/kernel/riscv64/symm_lcopy_rvv_v1.c index f0def9617..a615db44d 100644 --- a/kernel/riscv64/symm_lcopy_rvv_v1.c +++ b/kernel/riscv64/symm_lcopy_rvv_v1.c @@ -28,31 +28,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define VSETVL_MAX vsetvlmax_e32m2() -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VLSEV_FLOAT vlse32_v_f32m2 -#define INT_V_T vint32m2_t -#define VID_V_INT vid_v_i32m2 -#define VADD_VX_INT vadd_vx_i32m2 -#define VMSGT_VX_INT vmsgt_vx_i32m2_b16 -#define VBOOL_T vbool16_t -#define VMERGE_VVM_FLOAT vmerge_vvm_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m2() +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m2 +#define INT_V_T vint32m2_t +#define VID_V_INT __riscv_vid_v_i32m2 +#define VADD_VX_INT __riscv_vadd_vx_i32m2 +#define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16 +#define VBOOL_T vbool16_t +#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define VSETVL_MAX vsetvlmax_e64m2() -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VLSEV_FLOAT vlse64_v_f64m2 -#define INT_V_T vint64m2_t -#define VID_V_INT vid_v_i64m2 -#define VADD_VX_INT vadd_vx_i64m2 -#define VMSGT_VX_INT vmsgt_vx_i64m2_b32 -#define VBOOL_T vbool32_t -#define VMERGE_VVM_FLOAT vmerge_vvm_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m2() +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m2 +#define INT_V_T vint64m2_t +#define VID_V_INT __riscv_vid_v_i64m2 +#define VADD_VX_INT __riscv_vadd_vx_i64m2 +#define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32 +#define VBOOL_T vbool32_t +#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2 #endif // Optimizes the implementation in ../generic/symm_lcopy_4.c @@ -87,7 +87,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, 
@@ -87,7 +87,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
             vindex = VADD_VX_INT(vindex_max, offset, vl);
             vbool = VMSGT_VX_INT(vindex, 0, vl);
-            vb = VMERGE_VVM_FLOAT(vbool, va2, va1, vl);
+            vb = VMERGE_VVM_FLOAT(va2, va1, vbool, vl);

             VSEV_FLOAT(b, vb, vl);
             b += vl;
diff --git a/kernel/riscv64/symm_ucopy_rvv_v1.c b/kernel/riscv64/symm_ucopy_rvv_v1.c
index 958506df3..464f97b3a 100644
--- a/kernel/riscv64/symm_ucopy_rvv_v1.c
+++ b/kernel/riscv64/symm_ucopy_rvv_v1.c
@@ -28,31 +28,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "common.h"

 #if !defined(DOUBLE)
-#define VSETVL(n) vsetvl_e32m2(n)
-#define VSETVL_MAX vsetvlmax_e32m2()
-#define FLOAT_V_T vfloat32m2_t
-#define VLEV_FLOAT vle32_v_f32m2
-#define VSEV_FLOAT vse32_v_f32m2
-#define VLSEV_FLOAT vlse32_v_f32m2
-#define INT_V_T vint32m2_t
-#define VID_V_INT vid_v_i32m2
-#define VADD_VX_INT vadd_vx_i32m2
-#define VMSGT_VX_INT vmsgt_vx_i32m2_b16
-#define VBOOL_T vbool16_t
-#define VMERGE_VVM_FLOAT vmerge_vvm_f32m2
+#define VSETVL(n) __riscv_vsetvl_e32m2(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e32m2()
+#define FLOAT_V_T vfloat32m2_t
+#define VLEV_FLOAT __riscv_vle32_v_f32m2
+#define VSEV_FLOAT __riscv_vse32_v_f32m2
+#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
+#define INT_V_T vint32m2_t
+#define VID_V_INT __riscv_vid_v_i32m2
+#define VADD_VX_INT __riscv_vadd_vx_i32m2
+#define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16
+#define VBOOL_T vbool16_t
+#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2
 #else
-#define VSETVL(n) vsetvl_e64m2(n)
-#define VSETVL_MAX vsetvlmax_e64m2()
-#define FLOAT_V_T vfloat64m2_t
-#define VLEV_FLOAT vle64_v_f64m2
-#define VSEV_FLOAT vse64_v_f64m2
-#define VLSEV_FLOAT vlse64_v_f64m2
-#define INT_V_T vint64m2_t
-#define VID_V_INT vid_v_i64m2
-#define VADD_VX_INT vadd_vx_i64m2
-#define VMSGT_VX_INT vmsgt_vx_i64m2_b32
-#define VBOOL_T vbool32_t
-#define VMERGE_VVM_FLOAT vmerge_vvm_f64m2
+#define VSETVL(n) __riscv_vsetvl_e64m2(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e64m2()
+#define FLOAT_V_T vfloat64m2_t
+#define VLEV_FLOAT __riscv_vle64_v_f64m2
+#define VSEV_FLOAT __riscv_vse64_v_f64m2
+#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
+#define INT_V_T vint64m2_t
+#define VID_V_INT __riscv_vid_v_i64m2
+#define VADD_VX_INT __riscv_vadd_vx_i64m2
+#define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32
+#define VBOOL_T vbool32_t
+#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2
 #endif

 // Optimizes the implementation in ../generic/symm_ucopy_4.c
@@ -87,7 +87,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
             vindex = VADD_VX_INT(vindex_max, offset, vl);
             vbool = VMSGT_VX_INT(vindex, 0, vl);
-            vb = VMERGE_VVM_FLOAT(vbool, va2, va1, vl);
+            vb = VMERGE_VVM_FLOAT(va2, va1, vbool, vl);

             VSEV_FLOAT(b, vb, vl);
             b += vl;
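The merge rewrites above reflect another operand reordering in the ratified API: the mask moved from the first to the last value operand. A small illustrative sketch (helper names are hypothetical, not from OpenBLAS):

    #include <riscv_vector.h>

    // old: vd = vmerge_vvm_f32m2(mask, vs2, vs1, vl);
    // new: vd = __riscv_vmerge_vvm_f32m2(vs2, vs1, mask, vl);
    // Both compute vd[i] = mask[i] ? vs1[i] : vs2[i].
    vfloat32m2_t pick_upper(vfloat32m2_t lower, vfloat32m2_t upper,
                            vbool16_t is_upper, size_t vl)
    {
        return __riscv_vmerge_vvm_f32m2(lower, upper, is_upper, vl);
    }

    // Same reordering for the vector-scalar form used by the trmm
    // copy kernels further down: zero out the lanes the mask selects.
    vfloat32m2_t zero_masked(vfloat32m2_t v, vbool16_t m, size_t vl)
    {
        return __riscv_vfmerge_vfm_f32m2(v, 0.0f, m, vl);
    }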
#include "common.h" #if !defined(DOUBLE) -#define VSETVL_MAX_M1 vsetvlmax_e32m1() -#define VSETVL(n) vsetvl_e32m8(n) -#define VSETVL_MAX vsetvlmax_e32m8() -#define FLOAT_V_T_M1 vfloat32m1_t -#define FLOAT_V_T vfloat32m8_t -#define VLEV_FLOAT vle32_v_f32m8 -#define VSEV_FLOAT vse32_v_f32m8 -#define VLSEV_FLOAT vlse32_v_f32m8 -#define VSSEV_FLOAT vsse32_v_f32m8 -#define VFMACCVV_FLOAT vfmacc_vv_f32m8 -#define VFMACCVF_FLOAT vfmacc_vf_f32m8 -#define VFNMSACVF_FLOAT vfnmsac_vf_f32m8 -#define VFMULVF_FLOAT vfmul_vf_f32m8 -#define VFMVVF_FLOAT vfmv_v_f_f32m8 -#define VFMSACVF_FLOAT vfmsac_vf_f32m8 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1 -#define VFREDSUM_FLOAT vfredusum_vs_f32m8_f32m1 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32 +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1() +#define VSETVL(n) __riscv_vsetvl_e32m8(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m8() +#define FLOAT_V_T_M1 vfloat32m1_t +#define FLOAT_V_T vfloat32m8_t +#define VLEV_FLOAT __riscv_vle32_v_f32m8 +#define VSEV_FLOAT __riscv_vse32_v_f32m8 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m8 +#define VSSEV_FLOAT __riscv_vsse32_v_f32m8 +#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m8 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m8 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m8 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m8 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8 +#define VFMSACVF_FLOAT __riscv_vfmsac_vf_f32m8 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1 +#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m8_f32m1 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32 #else -#define VSETVL_MAX_M1 vsetvlmax_e64m1() -#define VSETVL(n) vsetvl_e64m8(n) -#define VSETVL_MAX vsetvlmax_e64m8() -#define FLOAT_V_T_M1 vfloat64m1_t -#define FLOAT_V_T vfloat64m8_t -#define VLEV_FLOAT vle64_v_f64m8 -#define VSEV_FLOAT vse64_v_f64m8 -#define VLSEV_FLOAT vlse64_v_f64m8 -#define VSSEV_FLOAT vsse64_v_f64m8 -#define VFMACCVV_FLOAT vfmacc_vv_f64m8 -#define VFMACCVF_FLOAT vfmacc_vf_f64m8 -#define VFNMSACVF_FLOAT vfnmsac_vf_f64m8 -#define VFMULVF_FLOAT vfmul_vf_f64m8 -#define VFMVVF_FLOAT vfmv_v_f_f64m8 -#define VFMSACVF_FLOAT vfmsac_vf_f64m8 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1 -#define VFREDSUM_FLOAT vfredusum_vs_f64m8_f64m1 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64 +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1() +#define VSETVL(n) __riscv_vsetvl_e64m8(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m8() +#define FLOAT_V_T_M1 vfloat64m1_t +#define FLOAT_V_T vfloat64m8_t +#define VLEV_FLOAT __riscv_vle64_v_f64m8 +#define VSEV_FLOAT __riscv_vse64_v_f64m8 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m8 +#define VSSEV_FLOAT __riscv_vsse64_v_f64m8 +#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m8 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m8 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m8 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m8 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8 +#define VFMSACVF_FLOAT __riscv_vfmsac_vf_f64m8 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1 +#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m8_f64m1 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64 #endif int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer) @@ -77,7 +77,6 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA FLOAT_V_T_M1 v_res, v_z0; size_t vlmax = VSETVL_MAX_M1, vl; - v_res = VFMVVF_FLOAT_M1(0, vlmax); v_z0 = VFMVVF_FLOAT_M1(0, vlmax); vlmax = VSETVL_MAX; @@ -105,7 +104,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, 
@@ -105,7 +104,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA
             vr = VFMACCVV_FLOAT(vr, vx, va, vl);
         }

-        v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vlmax);
+        v_res = VFREDSUM_FLOAT(vr, v_z0, vlmax);
         y[j] += alpha * VFMVFS_FLOAT_M1(v_res);

         a_ptr += lda;
@@ -137,7 +136,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA
             iy += inc_yv;
         }

-        v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vlmax);
+        v_res = VFREDSUM_FLOAT(vr, v_z0, vlmax);
         y[jy] += alpha * VFMVFS_FLOAT_M1(v_res);

         jy += inc_y;
@@ -172,7 +171,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA
             ix += inc_xv;
         }

-        v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vlmax);
+        v_res = VFREDSUM_FLOAT(vr, v_z0, vlmax);
         y[j] += alpha * VFMVFS_FLOAT_M1(v_res);

         jx += inc_x;
@@ -211,7 +210,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA
             ix += inc_xv;
             iy += inc_yv;
         }
-        v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vlmax);
+        v_res = VFREDSUM_FLOAT(vr, v_z0, vlmax);
         y[jy] += alpha * VFMVFS_FLOAT_M1(v_res);

         jx += inc_x;
diff --git a/kernel/riscv64/symv_U_rvv.c b/kernel/riscv64/symv_U_rvv.c
index cb923be5d..3fbc33c89 100644
--- a/kernel/riscv64/symv_U_rvv.c
+++ b/kernel/riscv64/symv_U_rvv.c
@@ -29,43 +29,43 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "common.h"

 #if !defined(DOUBLE)
-#define VSETVL_MAX_M1 vsetvlmax_e32m1()
-#define VSETVL(n) vsetvl_e32m8(n)
-#define VSETVL_MAX vsetvlmax_e32m8()
-#define FLOAT_V_T_M1 vfloat32m1_t
-#define FLOAT_V_T vfloat32m8_t
-#define VLEV_FLOAT vle32_v_f32m8
-#define VSEV_FLOAT vse32_v_f32m8
-#define VLSEV_FLOAT vlse32_v_f32m8
-#define VSSEV_FLOAT vsse32_v_f32m8
-#define VFMACCVV_FLOAT vfmacc_vv_f32m8
-#define VFMACCVF_FLOAT vfmacc_vf_f32m8
-#define VFNMSACVF_FLOAT vfnmsac_vf_f32m8
-#define VFMULVF_FLOAT vfmul_vf_f32m8
-#define VFMVVF_FLOAT vfmv_v_f_f32m8
-#define VFMSACVF_FLOAT vfmsac_vf_f32m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
-#define VFREDSUM_FLOAT vfredusum_vs_f32m8_f32m1
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
+#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
+#define VSETVL(n) __riscv_vsetvl_e32m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
+#define FLOAT_V_T_M1 vfloat32m1_t
+#define FLOAT_V_T vfloat32m8_t
+#define VLEV_FLOAT __riscv_vle32_v_f32m8
+#define VSEV_FLOAT __riscv_vse32_v_f32m8
+#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
+#define VSSEV_FLOAT __riscv_vsse32_v_f32m8
+#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m8
+#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m8
+#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m8
+#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m8
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
+#define VFMSACVF_FLOAT __riscv_vfmsac_vf_f32m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
+#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m8_f32m1
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
 #else
-#define VSETVL_MAX_M1 vsetvlmax_e64m1()
-#define VSETVL(n) vsetvl_e64m8(n)
-#define VSETVL_MAX vsetvlmax_e64m8()
-#define FLOAT_V_T_M1 vfloat64m1_t
-#define FLOAT_V_T vfloat64m8_t
-#define VLEV_FLOAT vle64_v_f64m8
-#define VSEV_FLOAT vse64_v_f64m8
-#define VLSEV_FLOAT vlse64_v_f64m8
-#define VSSEV_FLOAT vsse64_v_f64m8
-#define VFMACCVV_FLOAT vfmacc_vv_f64m8
-#define VFMACCVF_FLOAT vfmacc_vf_f64m8
-#define VFNMSACVF_FLOAT vfnmsac_vf_f64m8
-#define VFMULVF_FLOAT vfmul_vf_f64m8
-#define VFMVVF_FLOAT vfmv_v_f_f64m8
-#define VFMSACVF_FLOAT vfmsac_vf_f64m8
-#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
-#define VFREDSUM_FLOAT vfredusum_vs_f64m8_f64m1
-#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
+#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
+#define VSETVL(n) __riscv_vsetvl_e64m8(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
+#define FLOAT_V_T_M1 vfloat64m1_t
+#define FLOAT_V_T vfloat64m8_t
+#define VLEV_FLOAT __riscv_vle64_v_f64m8
+#define VSEV_FLOAT __riscv_vse64_v_f64m8
+#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
+#define VSSEV_FLOAT __riscv_vsse64_v_f64m8
+#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m8
+#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m8
+#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m8
+#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m8
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
+#define VFMSACVF_FLOAT __riscv_vfmsac_vf_f64m8
+#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
+#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m8_f64m1
+#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
 #endif

 int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer)
@@ -77,7 +77,6 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA
     FLOAT *a_ptr = a;
     FLOAT_V_T_M1 v_res, v_z0;
     size_t vl_max = VSETVL_MAX_M1, vl;
-    v_res = VFMVVF_FLOAT_M1(0, vl_max);
     v_z0 = VFMVVF_FLOAT_M1(0, vl_max);

     vl_max = VSETVL_MAX;
@@ -105,7 +104,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA
             vx = VLEV_FLOAT(&x[i], vl);
             vr = VFMACCVV_FLOAT(vr, vx, va, vl);
         }
-        v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vl_max);
+        v_res = VFREDSUM_FLOAT(vr, v_z0, vl_max);
         y[j] += temp1 * a_ptr[j] + alpha * VFMVFS_FLOAT_M1(v_res);

         a_ptr += lda;
@@ -137,7 +136,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA
             iy += inc_yv;
         }

-        v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vl_max);
+        v_res = VFREDSUM_FLOAT(vr, v_z0, vl_max);
         y[jy] += temp1 * a_ptr[j] + alpha * VFMVFS_FLOAT_M1(v_res);

         a_ptr += lda;
@@ -171,7 +170,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA
             ix += inc_xv;
         }

-        v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vl_max);
+        v_res = VFREDSUM_FLOAT(vr, v_z0, vl_max);
         y[j] += temp1 * a_ptr[j] + alpha * VFMVFS_FLOAT_M1(v_res);

         a_ptr += lda;
@@ -209,7 +208,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA
             ix += inc_xv;
             iy += inc_yv;
         }
-        v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vl_max);
+        v_res = VFREDSUM_FLOAT(vr, v_z0, vl_max);
         y[jy] += temp1 * a_ptr[j] + alpha * VFMVFS_FLOAT_M1(v_res);

         a_ptr += lda;
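With the dest operand gone, the symv kernels keep one zero scalar (v_z0) and drop the redundant v_res initialization. The new seed operand can also carry a running sum through successive strip reductions; a standalone sketch of that pattern (hypothetical helper, float types assumed):

    #include <riscv_vector.h>

    float dot_sketch(const float *a, const float *x, size_t m)
    {
        size_t vl;
        vfloat32m1_t acc = __riscv_vfmv_v_f_f32m1(0.0f, __riscv_vsetvlmax_e32m1());
        for (size_t i = 0; i < m; i += vl) {
            vl = __riscv_vsetvl_e32m8(m - i);
            vfloat32m8_t va = __riscv_vle32_v_f32m8(&a[i], vl);
            vfloat32m8_t vx = __riscv_vle32_v_f32m8(&x[i], vl);
            vfloat32m8_t vp = __riscv_vfmul_vv_f32m8(va, vx, vl);
            // (vector, scalar seed, vl): acc threads through each strip
            acc = __riscv_vfredusum_vs_f32m8_f32m1(vp, acc, vl);
        }
        return __riscv_vfmv_f_s_f32m1_f32(acc);
    }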
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VLSEV_FLOAT vlse32_v_f32m2 -#define VBOOL_T vbool16_t -#define UINT_V_T vuint32m2_t -#define VID_V_UINT vid_v_u32m2 -#define VMSGTU_VX_UINT vmsgtu_vx_u32m2_b16 -#define VMSEQ_VX_UINT vmseq_vx_u32m2_b16 -#define VFMERGE_VFM_FLOAT vfmerge_vfm_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m2 +#define VBOOL_T vbool16_t +#define UINT_V_T vuint32m2_t +#define VID_V_UINT __riscv_vid_v_u32m2 +#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u32m2_b16 +#define VMSEQ_VX_UINT __riscv_vmseq_vx_u32m2_b16 +#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VLSEV_FLOAT vlse64_v_f64m2 -#define VBOOL_T vbool32_t -#define UINT_V_T vuint64m2_t -#define VID_V_UINT vid_v_u64m2 -#define VMSGTU_VX_UINT vmsgtu_vx_u64m2_b32 -#define VMSEQ_VX_UINT vmseq_vx_u64m2_b32 -#define VFMERGE_VFM_FLOAT vfmerge_vfm_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m2 +#define VBOOL_T vbool32_t +#define UINT_V_T vuint64m2_t +#define VID_V_UINT __riscv_vid_v_u64m2 +#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32 +#define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32 +#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2 #endif // Optimizes the implementation in ../arm64/tmmm_lncopy_sve_v1.c @@ -116,10 +116,10 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON { va1 = VLSEV_FLOAT(ao, stride_lda, vl); vbool_cmp = VMSGTU_VX_UINT(vindex, j, vl); - vb = VFMERGE_VFM_FLOAT(vbool_cmp, va1, ZERO, vl); + vb = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_cmp, vl); #ifdef UNIT vbool_eq = VMSEQ_VX_UINT(vindex, j, vl); - vb = VFMERGE_VFM_FLOAT(vbool_eq, vb, ONE, vl); + vb = VFMERGE_VFM_FLOAT(vb, ONE, vbool_eq, vl); #endif VSEV_FLOAT(b, vb, vl); ao++; diff --git a/kernel/riscv64/trmm_ltcopy_rvv_v1.c b/kernel/riscv64/trmm_ltcopy_rvv_v1.c index 2fe8cf79e..580714fde 100644 --- a/kernel/riscv64/trmm_ltcopy_rvv_v1.c +++ b/kernel/riscv64/trmm_ltcopy_rvv_v1.c @@ -30,27 +30,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VBOOL_T vbool16_t -#define UINT_V_T vuint32m2_t -#define VID_V_UINT vid_v_u32m2 -#define VMSLTU_VX_UINT vmsltu_vx_u32m2_b16 -#define VMSEQ_VX_UINT vmseq_vx_u32m2_b16 -#define VFMERGE_VFM_FLOAT vfmerge_vfm_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VBOOL_T vbool16_t +#define UINT_V_T vuint32m2_t +#define VID_V_UINT __riscv_vid_v_u32m2 +#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u32m2_b16 +#define VMSEQ_VX_UINT __riscv_vmseq_vx_u32m2_b16 +#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VBOOL_T vbool32_t -#define UINT_V_T vuint64m2_t -#define VID_V_UINT vid_v_u64m2 -#define VMSLTU_VX_UINT vmsltu_vx_u64m2_b32 -#define VMSEQ_VX_UINT vmseq_vx_u64m2_b32 -#define VFMERGE_VFM_FLOAT vfmerge_vfm_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VBOOL_T vbool32_t +#define UINT_V_T vuint64m2_t +#define VID_V_UINT __riscv_vid_v_u64m2 +#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u64m2_b32 +#define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32 +#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2 #endif // Optimizes the implementation in ../arm64/tmmm_ltcopy_sve_v1.c @@ -111,10 +111,10 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON { va1 = VLEV_FLOAT(ao, vl); vbool_cmp = VMSLTU_VX_UINT(vindex, j, vl); - vb = VFMERGE_VFM_FLOAT(vbool_cmp, va1, ZERO, vl); + vb = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_cmp, vl); #ifdef UNIT vbool_eq = VMSEQ_VX_UINT(vindex, j, vl); - vb = VFMERGE_VFM_FLOAT(vbool_eq, vb, ONE, vl); + vb = VFMERGE_VFM_FLOAT(vb, ONE, vbool_eq, vl); #endif VSEV_FLOAT(b, vb, vl); ao += lda; diff --git a/kernel/riscv64/trmm_uncopy_rvv_v1.c b/kernel/riscv64/trmm_uncopy_rvv_v1.c index b64cd840d..852ab7f11 100644 --- a/kernel/riscv64/trmm_uncopy_rvv_v1.c +++ b/kernel/riscv64/trmm_uncopy_rvv_v1.c @@ -30,29 +30,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VLSEV_FLOAT vlse32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VBOOL_T vbool16_t -#define UINT_V_T vuint32m2_t -#define VID_V_UINT vid_v_u32m2 -#define VMSLTU_VX_UINT vmsltu_vx_u32m2_b16 -#define VMSEQ_VX_UINT vmseq_vx_u32m2_b16 -#define VFMERGE_VFM_FLOAT vfmerge_vfm_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VBOOL_T vbool16_t +#define UINT_V_T vuint32m2_t +#define VID_V_UINT __riscv_vid_v_u32m2 +#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u32m2_b16 +#define VMSEQ_VX_UINT __riscv_vmseq_vx_u32m2_b16 +#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VLSEV_FLOAT vlse64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VBOOL_T vbool32_t -#define UINT_V_T vuint64m2_t -#define VID_V_UINT vid_v_u64m2 -#define VMSLTU_VX_UINT vmsltu_vx_u64m2_b32 -#define VMSEQ_VX_UINT vmseq_vx_u64m2_b32 -#define VFMERGE_VFM_FLOAT vfmerge_vfm_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VBOOL_T vbool32_t +#define UINT_V_T vuint64m2_t +#define VID_V_UINT __riscv_vid_v_u64m2 +#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u64m2_b32 +#define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32 +#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2 #endif // Optimizes the implementation in ../arm64/tmmm_uncopy_sve_v1.c @@ -114,10 +114,10 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON { va1 = VLSEV_FLOAT(ao, stride_lda, vl); vbool_cmp = VMSLTU_VX_UINT(vindex, j, vl); - vb = VFMERGE_VFM_FLOAT(vbool_cmp, va1, ZERO, vl); + vb = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_cmp, vl); #ifdef UNIT vbool_eq = VMSEQ_VX_UINT(vindex, j, vl); - vb = VFMERGE_VFM_FLOAT(vbool_eq, vb, ONE, vl); + vb = VFMERGE_VFM_FLOAT(vb, ONE, vbool_eq, vl); #endif VSEV_FLOAT(b, vb, vl); ao++; diff --git a/kernel/riscv64/trmm_utcopy_rvv_v1.c b/kernel/riscv64/trmm_utcopy_rvv_v1.c index b96daae5b..e0b6d362d 100644 --- a/kernel/riscv64/trmm_utcopy_rvv_v1.c +++ b/kernel/riscv64/trmm_utcopy_rvv_v1.c @@ -32,27 +32,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VBOOL_T vbool16_t -#define UINT_V_T vuint32m2_t -#define VID_V_UINT vid_v_u32m2 -#define VMSGTU_VX_UINT vmsgtu_vx_u32m2_b16 -#define VMSEQ_VX_UINT vmseq_vx_u32m2_b16 -#define VFMERGE_VFM_FLOAT vfmerge_vfm_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VBOOL_T vbool16_t +#define UINT_V_T vuint32m2_t +#define VID_V_UINT __riscv_vid_v_u32m2 +#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u32m2_b16 +#define VMSEQ_VX_UINT __riscv_vmseq_vx_u32m2_b16 +#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VBOOL_T vbool32_t -#define UINT_V_T vuint64m2_t -#define VID_V_UINT vid_v_u64m2 -#define VMSGTU_VX_UINT vmsgtu_vx_u64m2_b32 -#define VMSEQ_VX_UINT vmseq_vx_u64m2_b32 -#define VFMERGE_VFM_FLOAT vfmerge_vfm_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VBOOL_T vbool32_t +#define UINT_V_T vuint64m2_t +#define VID_V_UINT __riscv_vid_v_u64m2 +#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32 +#define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32 +#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2 #endif // Optimizes the implementation in ../arm64/tmmm_utcopy_sve_v1.c @@ -113,10 +113,10 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON { va1 = VLEV_FLOAT(ao, vl); vbool_cmp = VMSGTU_VX_UINT(vindex, j, vl); - vb = VFMERGE_VFM_FLOAT(vbool_cmp, va1, ZERO, vl); + vb = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_cmp, vl); #ifdef UNIT vbool_eq = VMSEQ_VX_UINT(vindex, j, vl); - vb = VFMERGE_VFM_FLOAT(vbool_eq, vb, ONE, vl); + vb = VFMERGE_VFM_FLOAT(vb, ONE, vbool_eq, vl); #endif VSEV_FLOAT(b, vb, vl); ao += lda; diff --git a/kernel/riscv64/trmmkernel_rvv_v1x8.c b/kernel/riscv64/trmmkernel_rvv_v1x8.c index 97b14650c..393b24bce 100644 --- a/kernel/riscv64/trmmkernel_rvv_v1x8.c +++ b/kernel/riscv64/trmmkernel_rvv_v1x8.c @@ -28,21 +28,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VFMVVF_FLOAT vfmv_v_f_f32m2 -#define VFMACCVF_FLOAT vfmacc_vf_f32m2 -#define VFMULVF_FLOAT vfmul_vf_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m2 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VFMVVF_FLOAT vfmv_v_f_f64m2 -#define VFMACCVF_FLOAT vfmacc_vf_f64m2 -#define VFMULVF_FLOAT vfmul_vf_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m2 #endif diff --git a/kernel/riscv64/trsm_kernel_LN_rvv_v1.c b/kernel/riscv64/trsm_kernel_LN_rvv_v1.c index 2cba06b38..886af0c3b 100644 --- a/kernel/riscv64/trsm_kernel_LN_rvv_v1.c +++ b/kernel/riscv64/trsm_kernel_LN_rvv_v1.c @@ -28,34 +28,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define VSETVL_MAX vsetvlmax_e32m2() -#define FLOAT_V_T vfloat32m2_t -#define VLSEV_FLOAT vlse32_v_f32m2 -#define VSSEV_FLOAT vsse32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2 -#define VSSSEG2_FLOAT vssseg2e32_v_f32m2 -#define VFMACCVF_FLOAT vfmacc_vf_f32m2 -#define VFNMSACVF_FLOAT vfnmsac_vf_f32m2 -#define VFMULVF_FLOAT vfmul_vf_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m2() +#define FLOAT_V_T vfloat32m2_t +#define VLSEV_FLOAT __riscv_vlse32_v_f32m2 +#define VSSEV_FLOAT __riscv_vsse32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2 +#define VSSSEG2_FLOAT __riscv_vssseg2e32_v_f32m2 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m2 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define VSETVL_MAX vsetvlmax_e64m2() -#define FLOAT_V_T vfloat64m2_t -#define VLSEV_FLOAT vlse64_v_f64m2 -#define VSSEV_FLOAT vsse64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2 -#define VSSSEG2_FLOAT vssseg2e64_v_f64m2 -#define VFMVVF_FLOAT vfmv_v_f_f64m2 -#define VFMACCVF_FLOAT vfmacc_vf_f64m2 -#define VFNMSACVF_FLOAT vfnmsac_vf_f64m2 -#define VFMULVF_FLOAT vfmul_vf_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m2() +#define FLOAT_V_T vfloat64m2_t +#define VLSEV_FLOAT __riscv_vlse64_v_f64m2 +#define VSSEV_FLOAT __riscv_vsse64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2 +#define VSSSEG2_FLOAT 
+#define VSSSEG2_FLOAT __riscv_vssseg2e64_v_f64m2
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2
+#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2
+#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m2
+#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m2
 #endif
diff --git a/kernel/riscv64/trsm_kernel_LT_rvv_v1.c b/kernel/riscv64/trsm_kernel_LT_rvv_v1.c
index 492a5631f..ddeef966c 100644
--- a/kernel/riscv64/trsm_kernel_LT_rvv_v1.c
+++ b/kernel/riscv64/trsm_kernel_LT_rvv_v1.c
@@ -28,34 +28,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include "common.h"

 #if !defined(DOUBLE)
-#define VSETVL(n) vsetvl_e32m2(n)
-#define VSETVL_MAX vsetvlmax_e32m2()
-#define FLOAT_V_T vfloat32m2_t
-#define VLSEV_FLOAT vlse32_v_f32m2
-#define VSSEV_FLOAT vsse32_v_f32m2
-#define VSEV_FLOAT vse32_v_f32m2
-#define VLSEG2_FLOAT vlseg2e32_v_f32m2
-#define VSSEG2_FLOAT vsseg2e32_v_f32m2
-#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2
-#define VSSSEG2_FLOAT vssseg2e32_v_f32m2
-#define VFMACCVF_FLOAT vfmacc_vf_f32m2
-#define VFNMSACVF_FLOAT vfnmsac_vf_f32m2
-#define VFMULVF_FLOAT vfmul_vf_f32m2
+#define VSETVL(n) __riscv_vsetvl_e32m2(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e32m2()
+#define FLOAT_V_T vfloat32m2_t
+#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
+#define VSSEV_FLOAT __riscv_vsse32_v_f32m2
+#define VSEV_FLOAT __riscv_vse32_v_f32m2
+#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
+#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
+#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2
+#define VSSSEG2_FLOAT __riscv_vssseg2e32_v_f32m2
+#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2
+#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m2
+#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m2
 #else
-#define VSETVL(n) vsetvl_e64m2(n)
-#define VSETVL_MAX vsetvlmax_e64m2()
-#define FLOAT_V_T vfloat64m2_t
-#define VLSEV_FLOAT vlse64_v_f64m2
-#define VSSEV_FLOAT vsse64_v_f64m2
-#define VSEV_FLOAT vse64_v_f64m2
-#define VLSEG2_FLOAT vlseg2e64_v_f64m2
-#define VSSEG2_FLOAT vsseg2e64_v_f64m2
-#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2
-#define VSSSEG2_FLOAT vssseg2e64_v_f64m2
-#define VFMVVF_FLOAT vfmv_v_f_f64m2
-#define VFMACCVF_FLOAT vfmacc_vf_f64m2
-#define VFNMSACVF_FLOAT vfnmsac_vf_f64m2
-#define VFMULVF_FLOAT vfmul_vf_f64m2
+#define VSETVL(n) __riscv_vsetvl_e64m2(n)
+#define VSETVL_MAX __riscv_vsetvlmax_e64m2()
+#define FLOAT_V_T vfloat64m2_t
+#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
+#define VSSEV_FLOAT __riscv_vsse64_v_f64m2
+#define VSEV_FLOAT __riscv_vse64_v_f64m2
+#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
+#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
+#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2
+#define VSSSEG2_FLOAT __riscv_vssseg2e64_v_f64m2
+#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2
+#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2
+#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m2
+#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m2
 #endif
diff --git a/kernel/riscv64/trsm_kernel_RN_rvv_v1.c b/kernel/riscv64/trsm_kernel_RN_rvv_v1.c
index 4751ae012..4c83bbaa3 100644
--- a/kernel/riscv64/trsm_kernel_RN_rvv_v1.c
+++ b/kernel/riscv64/trsm_kernel_RN_rvv_v1.c
@@ -28,34 +28,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define VSETVL_MAX vsetvlmax_e32m2() -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSSEV_FLOAT vsse32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2 -#define VSSSEG2_FLOAT vssseg2e32_v_f32m2 -#define VFMACCVF_FLOAT vfmacc_vf_f32m2 -#define VFNMSACVF_FLOAT vfnmsac_vf_f32m2 -#define VFMULVF_FLOAT vfmul_vf_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m2() +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSSEV_FLOAT __riscv_vsse32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2 +#define VSSSEG2_FLOAT __riscv_vssseg2e32_v_f32m2 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m2 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define VSETVL_MAX vsetvlmax_e64m2() -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSSEV_FLOAT vsse64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2 -#define VSSSEG2_FLOAT vssseg2e64_v_f64m2 -#define VFMVVF_FLOAT vfmv_v_f_f64m2 -#define VFMACCVF_FLOAT vfmacc_vf_f64m2 -#define VFNMSACVF_FLOAT vfnmsac_vf_f64m2 -#define VFMULVF_FLOAT vfmul_vf_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m2() +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSSEV_FLOAT __riscv_vsse64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2 +#define VSSSEG2_FLOAT __riscv_vssseg2e64_v_f64m2 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m2 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m2 #endif static FLOAT dm1 = -1.; diff --git a/kernel/riscv64/trsm_kernel_RT_rvv_v1.c b/kernel/riscv64/trsm_kernel_RT_rvv_v1.c index 93a9e6916..b368eefb9 100644 --- a/kernel/riscv64/trsm_kernel_RT_rvv_v1.c +++ b/kernel/riscv64/trsm_kernel_RT_rvv_v1.c @@ -28,28 +28,28 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define VSETVL_MAX vsetvlmax_e32m2() -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define VFMACCVF_FLOAT vfmacc_vf_f32m2 -#define VFNMSACVF_FLOAT vfnmsac_vf_f32m2 -#define VFMULVF_FLOAT vfmul_vf_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m2() +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m2 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define VSETVL_MAX vsetvlmax_e64m2() -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define VFMVVF_FLOAT vfmv_v_f_f64m2 -#define VFMACCVF_FLOAT vfmacc_vf_f64m2 -#define VFNMSACVF_FLOAT vfnmsac_vf_f64m2 -#define VFMULVF_FLOAT vfmul_vf_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m2() +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m2 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m2 #endif diff --git a/kernel/riscv64/trsm_lncopy_rvv_v1.c b/kernel/riscv64/trsm_lncopy_rvv_v1.c index bacfb2b08..41c84be25 100644 --- a/kernel/riscv64/trsm_lncopy_rvv_v1.c +++ b/kernel/riscv64/trsm_lncopy_rvv_v1.c @@ -29,27 +29,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VSEV_FLOAT_M vse32_v_f32m2_m -#define VLSEV_FLOAT vlse32_v_f32m2 -#define VBOOL_T vbool16_t -#define UINT_V_T vuint32m2_t -#define VID_V_UINT vid_v_u32m2 -#define VMSLTU_VX_UINT vmsltu_vx_u32m2_b16 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VSEV_FLOAT_M __riscv_vse32_v_f32m2_m +#define VLSEV_FLOAT __riscv_vlse32_v_f32m2 +#define VBOOL_T vbool16_t +#define UINT_V_T vuint32m2_t +#define VID_V_UINT __riscv_vid_v_u32m2 +#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u32m2_b16 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VSEV_FLOAT_M vse64_v_f64m2_m -#define VLSEV_FLOAT vlse64_v_f64m2 -#define VBOOL_T vbool32_t -#define UINT_V_T vuint64m2_t -#define VID_V_UINT vid_v_u64m2 -#define VMSLTU_VX_UINT vmsltu_vx_u64m2_b32 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VSEV_FLOAT_M __riscv_vse64_v_f64m2_m +#define VLSEV_FLOAT __riscv_vlse64_v_f64m2 +#define VBOOL_T vbool32_t +#define UINT_V_T vuint64m2_t +#define VID_V_UINT __riscv_vid_v_u64m2 +#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u64m2_b32 #endif diff --git a/kernel/riscv64/trsm_ltcopy_rvv_v1.c b/kernel/riscv64/trsm_ltcopy_rvv_v1.c index 0fc7c9f24..003bd3465 100644 --- a/kernel/riscv64/trsm_ltcopy_rvv_v1.c +++ b/kernel/riscv64/trsm_ltcopy_rvv_v1.c @@ -29,27 +29,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VSEV_FLOAT_M vse32_v_f32m2_m -#define VLSEV_FLOAT vlse32_v_f32m2 -#define VBOOL_T vbool16_t -#define UINT_V_T vuint32m2_t -#define VID_V_UINT vid_v_u32m2 -#define VMSGTU_VX_UINT vmsgtu_vx_u32m2_b16 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VSEV_FLOAT_M __riscv_vse32_v_f32m2_m +#define VLSEV_FLOAT __riscv_vlse32_v_f32m2 +#define VBOOL_T vbool16_t +#define UINT_V_T vuint32m2_t +#define VID_V_UINT __riscv_vid_v_u32m2 +#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u32m2_b16 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VSEV_FLOAT_M vse64_v_f64m2_m -#define VLSEV_FLOAT vlse64_v_f64m2 -#define VBOOL_T vbool32_t -#define UINT_V_T vuint64m2_t -#define VID_V_UINT vid_v_u64m2 -#define VMSGTU_VX_UINT vmsgtu_vx_u64m2_b32 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VSEV_FLOAT_M __riscv_vse64_v_f64m2_m +#define VLSEV_FLOAT __riscv_vlse64_v_f64m2 +#define VBOOL_T vbool32_t +#define UINT_V_T vuint64m2_t +#define VID_V_UINT __riscv_vid_v_u64m2 +#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32 #endif #ifndef UNIT diff --git a/kernel/riscv64/trsm_uncopy_rvv_v1.c b/kernel/riscv64/trsm_uncopy_rvv_v1.c index ee869a795..6cca5d49c 100644 --- a/kernel/riscv64/trsm_uncopy_rvv_v1.c +++ b/kernel/riscv64/trsm_uncopy_rvv_v1.c @@ -30,27 +30,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VSEV_FLOAT_M vse32_v_f32m2_m -#define VLSEV_FLOAT vlse32_v_f32m2 -#define VBOOL_T vbool16_t -#define UINT_V_T vuint32m2_t -#define VID_V_UINT vid_v_u32m2 -#define VMSGTU_VX_UINT vmsgtu_vx_u32m2_b16 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VSEV_FLOAT_M __riscv_vse32_v_f32m2_m +#define VLSEV_FLOAT __riscv_vlse32_v_f32m2 +#define VBOOL_T vbool16_t +#define UINT_V_T vuint32m2_t +#define VID_V_UINT __riscv_vid_v_u32m2 +#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u32m2_b16 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VSEV_FLOAT_M vse64_v_f64m2_m -#define VLSEV_FLOAT vlse64_v_f64m2 -#define VBOOL_T vbool32_t -#define UINT_V_T vuint64m2_t -#define VID_V_UINT vid_v_u64m2 -#define VMSGTU_VX_UINT vmsgtu_vx_u64m2_b32 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VSEV_FLOAT_M __riscv_vse64_v_f64m2_m +#define VLSEV_FLOAT __riscv_vlse64_v_f64m2 +#define VBOOL_T vbool32_t +#define UINT_V_T vuint64m2_t +#define VID_V_UINT __riscv_vid_v_u64m2 +#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32 #endif diff --git a/kernel/riscv64/trsm_utcopy_rvv_v1.c b/kernel/riscv64/trsm_utcopy_rvv_v1.c index a324b0fa6..bc058525f 100644 --- a/kernel/riscv64/trsm_utcopy_rvv_v1.c +++ b/kernel/riscv64/trsm_utcopy_rvv_v1.c @@ -29,27 +29,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VSEV_FLOAT_M vse32_v_f32m2_m -#define VLSEV_FLOAT vlse32_v_f32m2 -#define VBOOL_T vbool16_t -#define UINT_V_T vuint32m2_t -#define VID_V_UINT vid_v_u32m2 -#define VMSLTU_VX_UINT vmsltu_vx_u32m2_b16 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VSEV_FLOAT_M __riscv_vse32_v_f32m2_m +#define VLSEV_FLOAT __riscv_vlse32_v_f32m2 +#define VBOOL_T vbool16_t +#define UINT_V_T vuint32m2_t +#define VID_V_UINT __riscv_vid_v_u32m2 +#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u32m2_b16 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VSEV_FLOAT_M vse64_v_f64m2_m -#define VLSEV_FLOAT vlse64_v_f64m2 -#define VBOOL_T vbool32_t -#define UINT_V_T vuint64m2_t -#define VID_V_UINT vid_v_u64m2 -#define VMSLTU_VX_UINT vmsltu_vx_u64m2_b32 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VSEV_FLOAT_M __riscv_vse64_v_f64m2_m +#define VLSEV_FLOAT __riscv_vlse64_v_f64m2 +#define VBOOL_T vbool32_t +#define UINT_V_T vuint64m2_t +#define VID_V_UINT __riscv_vid_v_u64m2 +#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u64m2_b32 #endif diff --git a/kernel/riscv64/zamax_rvv.c b/kernel/riscv64/zamax_rvv.c index 1917042be..615b7519c 100644 --- a/kernel/riscv64/zamax_rvv.c +++ b/kernel/riscv64/zamax_rvv.c @@ -29,35 +29,35 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include <math.h> #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m4(n) -#define VSETVL_MAX vsetvlmax_e32m4() -#define VSETVL_MAX_M1 vsetvlmax_e32m1() -#define FLOAT_V_T vfloat32m4_t -#define FLOAT_V_T_M1 vfloat32m1_t -#define VLSEG_FLOAT vlseg2e32_v_f32m4 -#define VLSSEG_FLOAT vlsseg2e32_v_f32m4 -#define VFREDMAXVS_FLOAT vfredmax_vs_f32m4_f32m1 -#define VFMVVF_FLOAT vfmv_v_f_f32m4 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1 -#define VFMAXVV_FLOAT vfmax_vv_f32m4 -#define VFADDVV_FLOAT vfadd_vv_f32m4 -#define VFABSV_FLOAT vfabs_v_f32m4 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32 +#define VSETVL(n) __riscv_vsetvl_e32m4(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m4() +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1() +#define FLOAT_V_T vfloat32m4_t +#define FLOAT_V_T_M1 vfloat32m1_t +#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4 +#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f32m4_f32m1 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1 +#define VFMAXVV_FLOAT __riscv_vfmax_vv_f32m4 +#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m4 +#define VFABSV_FLOAT __riscv_vfabs_v_f32m4 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32 #else -#define VSETVL(n) vsetvl_e64m4(n) -#define VSETVL_MAX vsetvlmax_e64m4() -#define VSETVL_MAX_M1 vsetvlmax_e64m1() -#define FLOAT_V_T vfloat64m4_t -#define FLOAT_V_T_M1 vfloat64m1_t -#define VLSEG_FLOAT vlseg2e64_v_f64m4 -#define VLSSEG_FLOAT vlsseg2e64_v_f64m4 -#define VFREDMAXVS_FLOAT vfredmax_vs_f64m4_f64m1 -#define VFMVVF_FLOAT vfmv_v_f_f64m4 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1 -#define VFMAXVV_FLOAT vfmax_vv_f64m4 -#define VFADDVV_FLOAT vfadd_vv_f64m4 -#define VFABSV_FLOAT vfabs_v_f64m4 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64 +#define VSETVL(n) __riscv_vsetvl_e64m4(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m4() +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1() +#define FLOAT_V_T vfloat64m4_t +#define FLOAT_V_T_M1 vfloat64m1_t +#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4 +#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f64m4_f64m1 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1 +#define VFMAXVV_FLOAT __riscv_vfmax_vv_f64m4 +#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m4 +#define VFABSV_FLOAT __riscv_vfabs_v_f64m4 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64 #endif FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) @@ -106,7 +106,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) } - v_res = VFREDMAXVS_FLOAT(v_res, vmax, v_res, vlmax); + v_res = VFREDMAXVS_FLOAT(vmax, v_res, vlmax); maxf = VFMVFS_FLOAT_M1(v_res); return(maxf); diff --git a/kernel/riscv64/zamin_rvv.c b/kernel/riscv64/zamin_rvv.c index 3f027383a..a0d36d46f 100644 --- a/kernel/riscv64/zamin_rvv.c +++ b/kernel/riscv64/zamin_rvv.c @@ -29,35 +29,35 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <math.h> #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m4(n) -#define VSETVL_MAX vsetvlmax_e32m4() -#define VSETVL_MAX_M1 vsetvlmax_e32m1() -#define FLOAT_V_T vfloat32m4_t -#define FLOAT_V_T_M1 vfloat32m1_t -#define VLSEG_FLOAT vlseg2e32_v_f32m4 -#define VLSSEG_FLOAT vlsseg2e32_v_f32m4 -#define VFREDMINVS_FLOAT vfredmin_vs_f32m4_f32m1 -#define VFMVVF_FLOAT vfmv_v_f_f32m4 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1 -#define VFMINVV_FLOAT vfmin_vv_f32m4 -#define VFADDVV_FLOAT vfadd_vv_f32m4 -#define VFABSV_FLOAT vfabs_v_f32m4 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32 +#define VSETVL(n) __riscv_vsetvl_e32m4(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m4() +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1() +#define FLOAT_V_T vfloat32m4_t +#define FLOAT_V_T_M1 vfloat32m1_t +#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4 +#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f32m4_f32m1 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1 +#define VFMINVV_FLOAT __riscv_vfmin_vv_f32m4 +#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m4 +#define VFABSV_FLOAT __riscv_vfabs_v_f32m4 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32 #else -#define VSETVL(n) vsetvl_e64m4(n) -#define VSETVL_MAX vsetvlmax_e64m4() -#define VSETVL_MAX_M1 vsetvlmax_e64m1() -#define FLOAT_V_T vfloat64m4_t -#define FLOAT_V_T_M1 vfloat64m1_t -#define VLSEG_FLOAT vlseg2e64_v_f64m4 -#define VLSSEG_FLOAT vlsseg2e64_v_f64m4 -#define VFREDMINVS_FLOAT vfredmin_vs_f64m4_f64m1 -#define VFMVVF_FLOAT vfmv_v_f_f64m4 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1 -#define VFMINVV_FLOAT vfmin_vv_f64m4 -#define VFADDVV_FLOAT vfadd_vv_f64m4 -#define VFABSV_FLOAT vfabs_v_f64m4 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64 +#define VSETVL(n) __riscv_vsetvl_e64m4(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m4() +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1() +#define FLOAT_V_T vfloat64m4_t +#define FLOAT_V_T_M1 vfloat64m1_t +#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4 +#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f64m4_f64m1 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1 +#define VFMINVV_FLOAT __riscv_vfmin_vv_f64m4 +#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m4 +#define VFABSV_FLOAT __riscv_vfabs_v_f64m4 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64 #endif FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) @@ -105,7 +105,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) } - v_res = VFREDMINVS_FLOAT(v_res, vmin, v_res, vlmax); + v_res = VFREDMINVS_FLOAT(vmin, v_res, vlmax); minf = VFMVFS_FLOAT_M1(v_res); return(minf); diff --git a/kernel/riscv64/zasum_rvv.c b/kernel/riscv64/zasum_rvv.c index 7876646b3..1d2f0e1fe 100644 --- a/kernel/riscv64/zasum_rvv.c +++ b/kernel/riscv64/zasum_rvv.c @@ -28,31 +28,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
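[reviewer note, not part of the patch] The functional change in the zamax/zamin hunks above: the prefixed reduction intrinsics drop the explicit destination operand, so vfredmax_vs_f32m4_f32m1(v_res, vmax, v_res, vlmax) becomes a three-argument call taking only the source vector, the scalar seed, and vl. A hedged sketch of the resulting shape (absmax_f32 is illustrative; the real kernels also handle inc_x != 1 and, for complex data, reduce |re|+|im|):

    #include <riscv_vector.h>

    static float absmax_f32(const float *x, long n)
    {
        size_t vlmax = __riscv_vsetvlmax_e32m8();
        vfloat32m8_t vmax  = __riscv_vfmv_v_f_f32m8(0.0f, vlmax);
        vfloat32m1_t v_res = __riscv_vfmv_v_f_f32m1(0.0f, __riscv_vsetvlmax_e32m1());

        while (n > 0) {
            size_t vl = __riscv_vsetvl_e32m8((size_t)n);
            vfloat32m8_t v = __riscv_vle32_v_f32m8(x, vl);
            vmax = __riscv_vfmax_vv_f32m8(vmax, __riscv_vfabs_v_f32m8(v, vl), vl);
            n -= (long)vl; x += vl;
        }
        /* New reduction signature: (vector, scalar seed, vl); the old
           destination operand is gone. */
        v_res = __riscv_vfredmax_vs_f32m8_f32m1(vmax, v_res, vlmax);
        return __riscv_vfmv_f_s_f32m1_f32(v_res);
    }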
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m8(n) -#define VSETVL_MAX vsetvlmax_e32m8() -#define FLOAT_V_T vfloat32m8_t -#define FLOAT_V_T_M1 vfloat32m1_t -#define VLEV_FLOAT vle32_v_f32m8 -#define VLSEV_FLOAT vlse32_v_f32m8 -#define VFREDSUMVS_FLOAT vfredusum_vs_f32m8_f32m1 -#define VFMVVF_FLOAT vfmv_v_f_f32m8 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32 -#define VFADDVV_FLOAT vfadd_vv_f32m8 -#define VFABSV_FLOAT vfabs_v_f32m8 +#define VSETVL(n) __riscv_vsetvl_e32m8(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m8() +#define FLOAT_V_T vfloat32m8_t +#define FLOAT_V_T_M1 vfloat32m1_t +#define VLEV_FLOAT __riscv_vle32_v_f32m8 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m8 +#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f32m8_f32m1 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32 +#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m8 +#define VFABSV_FLOAT __riscv_vfabs_v_f32m8 #else -#define VSETVL(n) vsetvl_e64m8(n) -#define VSETVL_MAX vsetvlmax_e64m8() -#define FLOAT_V_T vfloat64m8_t -#define FLOAT_V_T_M1 vfloat64m1_t -#define VLEV_FLOAT vle64_v_f64m8 -#define VLSEV_FLOAT vlse64_v_f64m8 -#define VFREDSUMVS_FLOAT vfredusum_vs_f64m8_f64m1 -#define VFMVVF_FLOAT vfmv_v_f_f64m8 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64 -#define VFADDVV_FLOAT vfadd_vv_f64m8 -#define VFABSV_FLOAT vfabs_v_f64m8 +#define VSETVL(n) __riscv_vsetvl_e64m8(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m8() +#define FLOAT_V_T vfloat64m8_t +#define FLOAT_V_T_M1 vfloat64m1_t +#define VLEV_FLOAT __riscv_vle64_v_f64m8 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m8 +#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f64m8_f64m1 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64 +#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m8 +#define VFABSV_FLOAT __riscv_vfabs_v_f64m8 #endif FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) @@ -99,9 +99,8 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) } - FLOAT_V_T_M1 v_z0 = VFMVVF_FLOAT_M1(0, vlmax); FLOAT_V_T_M1 v_res = VFMVVF_FLOAT_M1(0, vlmax); - v_res = VFREDSUMVS_FLOAT(v_res, v_sum, v_z0, vlmax); + v_res = VFREDSUMVS_FLOAT(v_sum, v_res, vlmax); asumf += VFMVFS_FLOAT_M1(v_res); return(asumf); diff --git a/kernel/riscv64/zaxpby_rvv.c b/kernel/riscv64/zaxpby_rvv.c index 66f52d9d0..e0da55311 100644 --- a/kernel/riscv64/zaxpby_rvv.c +++ b/kernel/riscv64/zaxpby_rvv.c @@ -33,33 +33,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m4(n) -#define FLOAT_V_T vfloat32m4_t -#define VLSEV_FLOAT vlse32_v_f32m4 -#define VSSEV_FLOAT vsse32_v_f32m4 -#define VFMACCVF_FLOAT vfmacc_vf_f32m4 -#define VFNMSACVF_FLOAT vfnmsac_vf_f32m4 -#define VFMVVF_FLOAT vfmv_v_f_f32m4 -#define VFMULVF_FLOAT vfmul_vf_f32m4 -#define VFMSACVF_FLOAT vfmsac_vf_f32m4 -#define VLSEG_FLOAT vlseg2e32_v_f32m4 -#define VSSEG_FLOAT vsseg2e32_v_f32m4 -#define VLSSEG_FLOAT vlsseg2e32_v_f32m4 -#define VSSSEG_FLOAT vssseg2e32_v_f32m4 +#define VSETVL(n) __riscv_vsetvl_e32m4(n) +#define FLOAT_V_T vfloat32m4_t +#define VLSEV_FLOAT __riscv_vlse32_v_f32m4 +#define VSSEV_FLOAT __riscv_vsse32_v_f32m4 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m4 +#define VFMSACVF_FLOAT __riscv_vfmsac_vf_f32m4 +#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4 +#define VSSEG_FLOAT __riscv_vsseg2e32_v_f32m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4 +#define VSSSEG_FLOAT __riscv_vssseg2e32_v_f32m4 #else -#define VSETVL(n) vsetvl_e64m4(n) -#define FLOAT_V_T vfloat64m4_t -#define VLSEV_FLOAT vlse64_v_f64m4 -#define VSSEV_FLOAT vsse64_v_f64m4 -#define VFMACCVF_FLOAT vfmacc_vf_f64m4 -#define VFNMSACVF_FLOAT vfnmsac_vf_f64m4 -#define VFMVVF_FLOAT vfmv_v_f_f64m4 -#define VFMULVF_FLOAT vfmul_vf_f64m4 -#define VFMSACVF_FLOAT vfmsac_vf_f64m4 -#define VLSEG_FLOAT vlseg2e64_v_f64m4 -#define VSSEG_FLOAT vsseg2e64_v_f64m4 -#define VLSSEG_FLOAT vlsseg2e64_v_f64m4 -#define VSSSEG_FLOAT vssseg2e64_v_f64m4 +#define VSETVL(n) __riscv_vsetvl_e64m4(n) +#define FLOAT_V_T vfloat64m4_t +#define VLSEV_FLOAT __riscv_vlse64_v_f64m4 +#define VSSEV_FLOAT __riscv_vsse64_v_f64m4 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m4 +#define VFMSACVF_FLOAT __riscv_vfmsac_vf_f64m4 +#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4 +#define VSSEG_FLOAT __riscv_vsseg2e64_v_f64m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4 +#define VSSSEG_FLOAT __riscv_vssseg2e64_v_f64m4 #endif int CNAME(BLASLONG n, FLOAT alpha_r, FLOAT alpha_i, FLOAT *x, BLASLONG inc_x, FLOAT beta_r, FLOAT beta_i,FLOAT *y, BLASLONG inc_y) diff --git a/kernel/riscv64/zaxpy_rvv.c b/kernel/riscv64/zaxpy_rvv.c index 777bcb728..3f75898e0 100644 --- a/kernel/riscv64/zaxpy_rvv.c +++ b/kernel/riscv64/zaxpy_rvv.c @@ -28,23 +28,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m4(n) -#define FLOAT_V_T vfloat32m4_t -#define VLSEG_FLOAT vlseg2e32_v_f32m4 -#define VLSSEG_FLOAT vlsseg2e32_v_f32m4 -#define VSSEG_FLOAT vsseg2e32_v_f32m4 -#define VSSSEG_FLOAT vssseg2e32_v_f32m4 -#define VFMACCVF_FLOAT vfmacc_vf_f32m4 -#define VFNMSACVF_FLOAT vfnmsac_vf_f32m4 +#define VSETVL(n) __riscv_vsetvl_e32m4(n) +#define FLOAT_V_T vfloat32m4_t +#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4 +#define VSSEG_FLOAT __riscv_vsseg2e32_v_f32m4 +#define VSSSEG_FLOAT __riscv_vssseg2e32_v_f32m4 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4 #else -#define VSETVL(n) vsetvl_e64m4(n) -#define FLOAT_V_T vfloat64m4_t -#define VLSEG_FLOAT vlseg2e64_v_f64m4 -#define VLSSEG_FLOAT vlsseg2e64_v_f64m4 -#define VSSEG_FLOAT vsseg2e64_v_f64m4 -#define VSSSEG_FLOAT vssseg2e64_v_f64m4 -#define VFMACCVF_FLOAT vfmacc_vf_f64m4 -#define VFNMSACVF_FLOAT vfnmsac_vf_f64m4 +#define VSETVL(n) __riscv_vsetvl_e64m4(n) +#define FLOAT_V_T vfloat64m4_t +#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4 +#define VSSEG_FLOAT __riscv_vsseg2e64_v_f64m4 +#define VSSSEG_FLOAT __riscv_vssseg2e64_v_f64m4 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4 #endif int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2) diff --git a/kernel/riscv64/zcopy_rvv.c b/kernel/riscv64/zcopy_rvv.c index 5d8322bbb..bd94810ce 100644 --- a/kernel/riscv64/zcopy_rvv.c +++ b/kernel/riscv64/zcopy_rvv.c @@ -28,29 +28,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL_M8(n) vsetvl_e32m8(n) -#define FLOAT_V_T_M8 vfloat32m8_t -#define VLEV_FLOAT_M8 vle32_v_f32m8 -#define VSEV_FLOAT_M8 vse32_v_f32m8 +#define VSETVL_M8(n) __riscv_vsetvl_e32m8(n) +#define FLOAT_V_T_M8 vfloat32m8_t +#define VLEV_FLOAT_M8 __riscv_vle32_v_f32m8 +#define VSEV_FLOAT_M8 __riscv_vse32_v_f32m8 -#define VSETVL_M4(n) vsetvl_e32m4(n) -#define FLOAT_V_T_M4 vfloat32m4_t -#define VLSEG_FLOAT_M4 vlseg2e32_v_f32m4 -#define VSSEG_FLOAT_M4 vsseg2e32_v_f32m4 -#define VLSSEG_FLOAT_M4 vlsseg2e32_v_f32m4 -#define VSSSEG_FLOAT_M4 vssseg2e32_v_f32m4 +#define VSETVL_M4(n) __riscv_vsetvl_e32m4(n) +#define FLOAT_V_T_M4 vfloat32m4_t +#define VLSEG_FLOAT_M4 __riscv_vlseg2e32_v_f32m4 +#define VSSEG_FLOAT_M4 __riscv_vsseg2e32_v_f32m4 +#define VLSSEG_FLOAT_M4 __riscv_vlsseg2e32_v_f32m4 +#define VSSSEG_FLOAT_M4 __riscv_vssseg2e32_v_f32m4 #else -#define VSETVL_M8(n) vsetvl_e64m8(n) -#define FLOAT_V_T_M8 vfloat64m8_t -#define VLEV_FLOAT_M8 vle64_v_f64m8 -#define VSEV_FLOAT_M8 vse64_v_f64m8 +#define VSETVL_M8(n) __riscv_vsetvl_e64m8(n) +#define FLOAT_V_T_M8 vfloat64m8_t +#define VLEV_FLOAT_M8 __riscv_vle64_v_f64m8 +#define VSEV_FLOAT_M8 __riscv_vse64_v_f64m8 -#define VSETVL_M4(n) vsetvl_e64m4(n) -#define FLOAT_V_T_M4 vfloat64m4_t -#define VLSEG_FLOAT_M4 vlseg2e64_v_f64m4 -#define VSSEG_FLOAT_M4 vsseg2e64_v_f64m4 -#define VLSSEG_FLOAT_M4 vlsseg2e64_v_f64m4 -#define VSSSEG_FLOAT_M4 vssseg2e64_v_f64m4 +#define VSETVL_M4(n) __riscv_vsetvl_e64m4(n) +#define FLOAT_V_T_M4 vfloat64m4_t +#define VLSEG_FLOAT_M4 __riscv_vlseg2e64_v_f64m4 +#define VSSEG_FLOAT_M4 __riscv_vsseg2e64_v_f64m4 +#define VLSSEG_FLOAT_M4 __riscv_vlsseg2e64_v_f64m4 +#define VSSSEG_FLOAT_M4 __riscv_vssseg2e64_v_f64m4 #endif int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) diff --git a/kernel/riscv64/zdot_rvv.c b/kernel/riscv64/zdot_rvv.c index 7eae6f608..1543c513d 100644 --- a/kernel/riscv64/zdot_rvv.c +++ b/kernel/riscv64/zdot_rvv.c @@ -28,37 +28,37 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m4(n) -#define VSETVL_MAX vsetvlmax_e32m4() -#define VSETVL_MAX_M1 vsetvlmax_e32m1() -#define FLOAT_V_T vfloat32m4_t -#define FLOAT_V_T_M1 vfloat32m1_t -#define VLSEG_FLOAT vlseg2e32_v_f32m4 -#define VLSSEG_FLOAT vlsseg2e32_v_f32m4 -#define VFREDSUM_FLOAT vfredusum_vs_f32m4_f32m1 -#define VFMACCVV_FLOAT vfmacc_vv_f32m4 -#define VFMVVF_FLOAT vfmv_v_f_f32m4 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1 -#define VFMULVV_FLOAT vfmul_vv_f32m4 -#define VFMSACVV_FLOAT vfmsac_vv_f32m4 -#define VFNMSACVV_FLOAT vfnmsac_vv_f32m4 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32 +#define VSETVL(n) __riscv_vsetvl_e32m4(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m4() +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1() +#define FLOAT_V_T vfloat32m4_t +#define FLOAT_V_T_M1 vfloat32m1_t +#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4 +#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m4_f32m1 +#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m4 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1 +#define VFMULVV_FLOAT __riscv_vfmul_vv_f32m4 +#define VFMSACVV_FLOAT __riscv_vfmsac_vv_f32m4 +#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f32m4 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32 #else -#define VSETVL(n) vsetvl_e64m4(n) -#define VSETVL_MAX vsetvlmax_e64m4() -#define VSETVL_MAX_M1 vsetvlmax_e64m1() -#define FLOAT_V_T vfloat64m4_t -#define FLOAT_V_T_M1 vfloat64m1_t -#define VLSEG_FLOAT vlseg2e64_v_f64m4 -#define VLSSEG_FLOAT vlsseg2e64_v_f64m4 -#define VFREDSUM_FLOAT vfredusum_vs_f64m4_f64m1 -#define VFMACCVV_FLOAT vfmacc_vv_f64m4 -#define VFMVVF_FLOAT vfmv_v_f_f64m4 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1 -#define VFMULVV_FLOAT vfmul_vv_f64m4 -#define VFMSACVV_FLOAT vfmsac_vv_f64m4 -#define VFNMSACVV_FLOAT vfnmsac_vv_f64m4 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64 +#define VSETVL(n) __riscv_vsetvl_e64m4(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m4() +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1() +#define FLOAT_V_T vfloat64m4_t +#define FLOAT_V_T_M1 vfloat64m1_t +#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4 +#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m4_f64m1 +#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m4 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1 +#define VFMULVV_FLOAT __riscv_vfmul_vv_f64m4 +#define VFMSACVV_FLOAT __riscv_vfmsac_vv_f64m4 +#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f64m4 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64 #endif OPENBLAS_COMPLEX_FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) @@ -72,7 +72,6 @@ OPENBLAS_COMPLEX_FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLA FLOAT_V_T vr0, vr1, vx0, vx1, vy0, vy1; FLOAT_V_T_M1 v_res, v_z0; size_t vlmax_m1 = VSETVL_MAX_M1; - v_res = VFMVVF_FLOAT_M1(0, vlmax_m1); v_z0 = VFMVVF_FLOAT_M1(0, vlmax_m1); size_t vlmax = VSETVL_MAX; @@ -161,9 +160,9 @@ OPENBLAS_COMPLEX_FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLA } } - v_res = VFREDSUM_FLOAT(v_res, vr0, v_z0, vlmax); + v_res = VFREDSUM_FLOAT(vr0, v_z0, vlmax); CREAL(result) = VFMVFS_FLOAT_M1(v_res); - v_res = VFREDSUM_FLOAT(v_res, vr1, v_z0, vlmax); + v_res = VFREDSUM_FLOAT(vr1, v_z0, vlmax); CIMAG(result) = VFMVFS_FLOAT_M1(v_res); return(result); diff --git a/kernel/riscv64/zgemm_beta_rvv.c b/kernel/riscv64/zgemm_beta_rvv.c index a89752d18..b94b5f4bf 100644 --- 
a/kernel/riscv64/zgemm_beta_rvv.c +++ b/kernel/riscv64/zgemm_beta_rvv.c @@ -39,23 +39,23 @@ #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m4(n) -#define FLOAT_V_T vfloat32m4_t -#define VLSEG_FLOAT vlseg2e32_v_f32m4 -#define VSSEG_FLOAT vsseg2e32_v_f32m4 -#define VFMVVF_FLOAT vfmv_v_f_f32m4 -#define VFMULVF_FLOAT vfmul_vf_f32m4 -#define VFADDVV_FLOAT vfadd_vv_f32m4 -#define VFSUBVV_FLOAT vfsub_vv_f32m4 +#define VSETVL(n) __riscv_vsetvl_e32m4(n) +#define FLOAT_V_T vfloat32m4_t +#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4 +#define VSSEG_FLOAT __riscv_vsseg2e32_v_f32m4 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m4 +#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m4 +#define VFSUBVV_FLOAT __riscv_vfsub_vv_f32m4 #else -#define VSETVL(n) vsetvl_e64m4(n) -#define FLOAT_V_T vfloat64m4_t -#define VLSEG_FLOAT vlseg2e64_v_f64m4 -#define VSSEG_FLOAT vsseg2e64_v_f64m4 -#define VFMVVF_FLOAT vfmv_v_f_f64m4 -#define VFMULVF_FLOAT vfmul_vf_f64m4 -#define VFADDVV_FLOAT vfadd_vv_f64m4 -#define VFSUBVV_FLOAT vfsub_vv_f64m4 +#define VSETVL(n) __riscv_vsetvl_e64m4(n) +#define FLOAT_V_T vfloat64m4_t +#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4 +#define VSSEG_FLOAT __riscv_vsseg2e64_v_f64m4 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m4 +#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m4 +#define VFSUBVV_FLOAT __riscv_vfsub_vv_f64m4 #endif int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, diff --git a/kernel/riscv64/zgemm_ncopy_4_rvv.c b/kernel/riscv64/zgemm_ncopy_4_rvv.c index 389ee5d57..d50a4b8d5 100644 --- a/kernel/riscv64/zgemm_ncopy_4_rvv.c +++ b/kernel/riscv64/zgemm_ncopy_4_rvv.c @@ -28,19 +28,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m1(n) -#define FLOAT_V_T vfloat32m1_t -#define VLSEG2_FLOAT vlseg2e32_v_f32m1 -#define VSSEG2_FLOAT vsseg2e32_v_f32m1 -#define VSSEG4_FLOAT vsseg4e32_v_f32m1 -#define VSSEG8_FLOAT vsseg8e32_v_f32m1 +#define VSETVL(n) __riscv_vsetvl_e32m1(n) +#define FLOAT_V_T vfloat32m1_t +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m1 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m1 +#define VSSEG4_FLOAT __riscv_vsseg4e32_v_f32m1 +#define VSSEG8_FLOAT __riscv_vsseg8e32_v_f32m1 #else -#define VSETVL(n) vsetvl_e64m1(n) -#define FLOAT_V_T vfloat64m1_t -#define VLSEG2_FLOAT vlseg2e64_v_f64m1 -#define VSSEG2_FLOAT vsseg2e64_v_f64m1 -#define VSSEG4_FLOAT vsseg4e64_v_f64m1 -#define VSSEG8_FLOAT vsseg8e64_v_f64m1 +#define VSETVL(n) __riscv_vsetvl_e64m1(n) +#define FLOAT_V_T vfloat64m1_t +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m1 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m1 +#define VSSEG4_FLOAT __riscv_vsseg4e64_v_f64m1 +#define VSSEG8_FLOAT __riscv_vsseg8e64_v_f64m1 #endif // Optimizes the implementation in ../generic/zgemm_ncopy_4.c diff --git a/kernel/riscv64/zgemm_ncopy_rvv_v1.c b/kernel/riscv64/zgemm_ncopy_rvv_v1.c index df039bab6..1d3b8d3b7 100644 --- a/kernel/riscv64/zgemm_ncopy_rvv_v1.c +++ b/kernel/riscv64/zgemm_ncopy_rvv_v1.c @@ -29,15 +29,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 #endif int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){ diff --git a/kernel/riscv64/zgemm_tcopy_4_rvv.c b/kernel/riscv64/zgemm_tcopy_4_rvv.c index 1b34039c8..8c35b5616 100644 --- a/kernel/riscv64/zgemm_tcopy_4_rvv.c +++ b/kernel/riscv64/zgemm_tcopy_4_rvv.c @@ -28,27 +28,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m1(n) -#define FLOAT_V_T vfloat32m1_t -#define VLEV_FLOAT vle32_v_f32m1 -#define VSEV_FLOAT vse32_v_f32m1 -#define VLSSEG2_FLOAT vlsseg2e32_v_f32m1 -#define VLSSEG4_FLOAT vlsseg4e32_v_f32m1 -#define VLSSEG8_FLOAT vlsseg8e32_v_f32m1 -#define VSSEG2_FLOAT vsseg2e32_v_f32m1 -#define VSSEG4_FLOAT vsseg4e32_v_f32m1 -#define VSSEG8_FLOAT vsseg8e32_v_f32m1 +#define VSETVL(n) __riscv_vsetvl_e32m1(n) +#define FLOAT_V_T vfloat32m1_t +#define VLEV_FLOAT __riscv_vle32_v_f32m1 +#define VSEV_FLOAT __riscv_vse32_v_f32m1 +#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m1 +#define VLSSEG4_FLOAT __riscv_vlsseg4e32_v_f32m1 +#define VLSSEG8_FLOAT __riscv_vlsseg8e32_v_f32m1 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m1 +#define VSSEG4_FLOAT __riscv_vsseg4e32_v_f32m1 +#define VSSEG8_FLOAT __riscv_vsseg8e32_v_f32m1 #else -#define VSETVL(n) vsetvl_e64m1(n) -#define FLOAT_V_T vfloat64m1_t -#define VLEV_FLOAT vle64_v_f64m1 -#define VSEV_FLOAT vse64_v_f64m1 -#define VLSSEG2_FLOAT vlsseg2e64_v_f64m1 -#define VLSSEG4_FLOAT vlsseg4e64_v_f64m1 -#define VLSSEG8_FLOAT vlsseg8e64_v_f64m1 -#define VSSEG2_FLOAT vsseg2e64_v_f64m1 -#define VSSEG4_FLOAT vsseg4e64_v_f64m1 -#define VSSEG8_FLOAT vsseg8e64_v_f64m1 +#define VSETVL(n) __riscv_vsetvl_e64m1(n) +#define FLOAT_V_T vfloat64m1_t +#define VLEV_FLOAT __riscv_vle64_v_f64m1 +#define VSEV_FLOAT __riscv_vse64_v_f64m1 +#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m1 +#define VLSSEG4_FLOAT __riscv_vlsseg4e64_v_f64m1 +#define VLSSEG8_FLOAT __riscv_vlsseg8e64_v_f64m1 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m1 +#define VSSEG4_FLOAT __riscv_vsseg4e64_v_f64m1 +#define VSSEG8_FLOAT __riscv_vsseg8e64_v_f64m1 #endif int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b){ diff --git a/kernel/riscv64/zgemm_tcopy_rvv_v1.c b/kernel/riscv64/zgemm_tcopy_rvv_v1.c index 7622fb810..7a085269c 100644 --- a/kernel/riscv64/zgemm_tcopy_rvv_v1.c +++ b/kernel/riscv64/zgemm_tcopy_rvv_v1.c @@ -28,15 +28,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 #endif int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) diff --git a/kernel/riscv64/zgemmkernel_rvv_v1x4.c b/kernel/riscv64/zgemmkernel_rvv_v1x4.c index 50e29222f..41399cf79 100644 --- a/kernel/riscv64/zgemmkernel_rvv_v1x4.c +++ b/kernel/riscv64/zgemmkernel_rvv_v1x4.c @@ -28,25 +28,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define VFMVVF_FLOAT vfmv_v_f_f32m2 -#define VFMACCVF_FLOAT vfmacc_vf_f32m2 -#define VFNMSACVF_FLOAT vfnmsac_vf_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m2 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define VFMVVF_FLOAT vfmv_v_f_f64m2 -#define VFMACCVF_FLOAT vfmacc_vf_f64m2 -#define VFNMSACVF_FLOAT vfnmsac_vf_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m2 #endif #if defined(NN) || defined(NT) || defined(TN) || defined(TT) diff --git a/kernel/riscv64/zgemv_n_rvv.c b/kernel/riscv64/zgemv_n_rvv.c index 2eeb61b45..4a40c30a7 100644 --- a/kernel/riscv64/zgemv_n_rvv.c +++ b/kernel/riscv64/zgemv_n_rvv.c @@ -28,31 +28,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m4(n) -#define FLOAT_V_T vfloat32m4_t -#define VLEV_FLOAT vle32_v_f32m4 -#define VLSEV_FLOAT vlse32_v_f32m4 -#define VSEV_FLOAT vse32_v_f32m4 -#define VSSEV_FLOAT vsse32_v_f32m4 -#define VLSEG_FLOAT vlseg2e32_v_f32m4 -#define VSSEG_FLOAT vsseg2e32_v_f32m4 -#define VLSSEG_FLOAT vlsseg2e32_v_f32m4 -#define VSSSEG_FLOAT vssseg2e32_v_f32m4 -#define VFMACCVF_FLOAT vfmacc_vf_f32m4 -#define VFNMSACVF_FLOAT vfnmsac_vf_f32m4 +#define VSETVL(n) __riscv_vsetvl_e32m4(n) +#define FLOAT_V_T vfloat32m4_t +#define VLEV_FLOAT __riscv_vle32_v_f32m4 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m4 +#define VSEV_FLOAT __riscv_vse32_v_f32m4 +#define VSSEV_FLOAT __riscv_vsse32_v_f32m4 +#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4 +#define VSSEG_FLOAT __riscv_vsseg2e32_v_f32m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4 +#define VSSSEG_FLOAT __riscv_vssseg2e32_v_f32m4 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4 #else -#define VSETVL(n) vsetvl_e64m4(n) -#define FLOAT_V_T vfloat64m4_t -#define VLEV_FLOAT vle64_v_f64m4 -#define VLSEV_FLOAT vlse64_v_f64m4 -#define VSEV_FLOAT vse64_v_f64m4 -#define VSSEV_FLOAT vsse64_v_f64m4 -#define VLSEG_FLOAT vlseg2e64_v_f64m4 -#define VSSEG_FLOAT vsseg2e64_v_f64m4 -#define VLSSEG_FLOAT vlsseg2e64_v_f64m4 -#define VSSSEG_FLOAT vssseg2e64_v_f64m4 -#define VFMACCVF_FLOAT vfmacc_vf_f64m4 -#define VFNMSACVF_FLOAT vfnmsac_vf_f64m4 +#define VSETVL(n) __riscv_vsetvl_e64m4(n) +#define FLOAT_V_T vfloat64m4_t +#define VLEV_FLOAT __riscv_vle64_v_f64m4 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m4 +#define VSEV_FLOAT __riscv_vse64_v_f64m4 +#define VSSEV_FLOAT __riscv_vsse64_v_f64m4 +#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4 +#define VSSEG_FLOAT __riscv_vsseg2e64_v_f64m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4 +#define VSSSEG_FLOAT __riscv_vssseg2e64_v_f64m4 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4 #endif int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer) diff --git a/kernel/riscv64/zgemv_t_rvv.c b/kernel/riscv64/zgemv_t_rvv.c index b682d5cd8..15795cc3a 100644 --- a/kernel/riscv64/zgemv_t_rvv.c +++ b/kernel/riscv64/zgemv_t_rvv.c @@ -28,33 +28,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m4(n) -#define VSETVL_MAX_M1 vsetvlmax_e32m1() -#define FLOAT_V_T vfloat32m4_t -#define FLOAT_V_T_M1 vfloat32m1_t -#define VLSEG_FLOAT vlseg2e32_v_f32m4 -#define VLSSEG_FLOAT vlsseg2e32_v_f32m4 -#define VFREDSUM_FLOAT vfredusum_vs_f32m4_f32m1 -#define VFMACCVV_FLOAT vfmacc_vv_f32m4 -#define VFNMSACVV_FLOAT vfnmsac_vv_f32m4 -#define VFMVVF_FLOAT vfmv_v_f_f32m4 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1 -#define VFMULVV_FLOAT vfmul_vv_f32m4 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32 +#define VSETVL(n) __riscv_vsetvl_e32m4(n) +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1() +#define FLOAT_V_T vfloat32m4_t +#define FLOAT_V_T_M1 vfloat32m1_t +#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4 +#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m4_f32m1 +#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m4 +#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f32m4 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1 +#define VFMULVV_FLOAT __riscv_vfmul_vv_f32m4 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32 #else -#define VSETVL(n) vsetvl_e64m4(n) -#define VSETVL_MAX_M1 vsetvlmax_e64m1() -#define FLOAT_V_T vfloat64m4_t -#define FLOAT_V_T_M1 vfloat64m1_t -#define VLSEG_FLOAT vlseg2e64_v_f64m4 -#define VLSSEG_FLOAT vlsseg2e64_v_f64m4 -#define VFREDSUM_FLOAT vfredusum_vs_f64m4_f64m1 -#define VFMACCVV_FLOAT vfmacc_vv_f64m4 -#define VFNMSACVV_FLOAT vfnmsac_vv_f64m4 -#define VFMVVF_FLOAT vfmv_v_f_f64m4 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1 -#define VFMULVV_FLOAT vfmul_vv_f64m4 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64 +#define VSETVL(n) __riscv_vsetvl_e64m4(n) +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1() +#define FLOAT_V_T vfloat64m4_t +#define FLOAT_V_T_M1 vfloat64m1_t +#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4 +#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m4_f64m1 +#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m4 +#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f64m4 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1 +#define VFMULVV_FLOAT __riscv_vfmul_vv_f64m4 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64 #endif int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer) @@ -73,7 +73,6 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, BLASLONG lda2 = lda * 2; size_t vlmax = VSETVL_MAX_M1; - v_res = VFMVVF_FLOAT_M1(0, vlmax); v_z0 = VFMVVF_FLOAT_M1(0, vlmax); vlmax = VSETVL(m); @@ -105,9 +104,9 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, ix += vl * inc_x * 2; } - v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vlmax); + v_res = VFREDSUM_FLOAT(vr, v_z0, vlmax); temp_r = VFMVFS_FLOAT_M1(v_res); - v_res = VFREDSUM_FLOAT(v_res, vi, v_z0, vlmax); + v_res = VFREDSUM_FLOAT(vi, v_z0, vlmax); temp_i = VFMVFS_FLOAT_M1(v_res); #if !defined(XCONJ) @@ -149,9 +148,9 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, ix += vl * inc_x * 2; } - v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vlmax); + v_res = VFREDSUM_FLOAT(vr, v_z0, vlmax); temp_r = VFMVFS_FLOAT_M1(v_res); - v_res = VFREDSUM_FLOAT(v_res, vi, v_z0, vlmax); + v_res = VFREDSUM_FLOAT(vi, v_z0, vlmax); temp_i = VFMVFS_FLOAT_M1(v_res); #if !defined(XCONJ) diff --git a/kernel/riscv64/zhemm_ltcopy_rvv_v1.c 
b/kernel/riscv64/zhemm_ltcopy_rvv_v1.c index cf466d3fa..79b20a646 100644 --- a/kernel/riscv64/zhemm_ltcopy_rvv_v1.c +++ b/kernel/riscv64/zhemm_ltcopy_rvv_v1.c @@ -28,45 +28,45 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define VSETVL_MAX vsetvlmax_e32m2() -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VLSEV_FLOAT vlse32_v_f32m2 -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define INT_V_T vint32m2_t -#define VID_V_INT vid_v_i32m2 -#define VADD_VX_INT vadd_vx_i32m2 -#define VFRSUB_VF_FLOAT vfrsub_vf_f32m2 -#define VMSGT_VX_INT vmsgt_vx_i32m2_b16 -#define VMSLT_VX_INT vmslt_vx_i32m2_b16 -#define VMSEQ_VX_INT vmseq_vx_i32m2_b16 -#define VBOOL_T vbool16_t -#define VMERGE_VVM_FLOAT vmerge_vvm_f32m2 -#define VFMVVF_FLOAT vfmv_v_f_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m2() +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m2 +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define INT_V_T vint32m2_t +#define VID_V_INT __riscv_vid_v_i32m2 +#define VADD_VX_INT __riscv_vadd_vx_i32m2 +#define VFRSUB_VF_FLOAT __riscv_vfrsub_vf_f32m2 +#define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16 +#define VMSLT_VX_INT __riscv_vmslt_vx_i32m2_b16 +#define VMSEQ_VX_INT __riscv_vmseq_vx_i32m2_b16 +#define VBOOL_T vbool16_t +#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define VSETVL_MAX vsetvlmax_e64m2() -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VLSEV_FLOAT vlse64_v_f64m2 -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define INT_V_T vint64m2_t -#define VID_V_INT vid_v_i64m2 -#define VADD_VX_INT vadd_vx_i64m2 -#define VFRSUB_VF_FLOAT vfrsub_vf_f64m2 -#define VMSGT_VX_INT vmsgt_vx_i64m2_b32 -#define VMSLT_VX_INT vmslt_vx_i64m2_b32 -#define VMSEQ_VX_INT vmseq_vx_i64m2_b32 -#define VBOOL_T vbool32_t -#define VMERGE_VVM_FLOAT vmerge_vvm_f64m2 -#define VFMVVF_FLOAT vfmv_v_f_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m2() +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m2 +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define INT_V_T vint64m2_t +#define VID_V_INT __riscv_vid_v_i64m2 +#define VADD_VX_INT __riscv_vadd_vx_i64m2 +#define VFRSUB_VF_FLOAT __riscv_vfrsub_vf_f64m2 +#define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32 +#define VMSLT_VX_INT __riscv_vmslt_vx_i64m2_b32 +#define VMSEQ_VX_INT __riscv_vmseq_vx_i64m2_b32 +#define VBOOL_T vbool32_t +#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2 #endif @@ -104,13 +104,13 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON vbool_lt0 = VMSLT_VX_INT(vindex, 0, vl); vbool_eq0 = VMSEQ_VX_INT(vindex, 0, vl); - vb0 = 
VMERGE_VVM_FLOAT(vbool_gt0, va20, va10, vl); - vb1 = VMERGE_VVM_FLOAT(vbool_gt0, va21, va11, vl); + vb0 = VMERGE_VVM_FLOAT(va20, va10, vbool_gt0, vl); + vb1 = VMERGE_VVM_FLOAT(va21, va11, vbool_gt0, vl); vb2 = VFRSUB_VF_FLOAT(vb1, ZERO, vl); - vb1 = VMERGE_VVM_FLOAT(vbool_lt0, vb1, vb2, vl); - vb1 = VMERGE_VVM_FLOAT(vbool_eq0, vb1, vzero, vl); + vb1 = VMERGE_VVM_FLOAT(vb1, vb2, vbool_lt0, vl); + vb1 = VMERGE_VVM_FLOAT(vb1, vzero, vbool_eq0, vl); VSSEG2_FLOAT(b, vb0, vb1, vl); b += vl * 2; diff --git a/kernel/riscv64/zhemm_utcopy_rvv_v1.c b/kernel/riscv64/zhemm_utcopy_rvv_v1.c index 6209f5417..a86815275 100644 --- a/kernel/riscv64/zhemm_utcopy_rvv_v1.c +++ b/kernel/riscv64/zhemm_utcopy_rvv_v1.c @@ -28,45 +28,45 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define VSETVL_MAX vsetvlmax_e32m2() -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VLSEV_FLOAT vlse32_v_f32m2 -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define INT_V_T vint32m2_t -#define VID_V_INT vid_v_i32m2 -#define VADD_VX_INT vadd_vx_i32m2 -#define VFRSUB_VF_FLOAT vfrsub_vf_f32m2 -#define VMSGT_VX_INT vmsgt_vx_i32m2_b16 -#define VMSLT_VX_INT vmslt_vx_i32m2_b16 -#define VMSEQ_VX_INT vmseq_vx_i32m2_b16 -#define VBOOL_T vbool16_t -#define VMERGE_VVM_FLOAT vmerge_vvm_f32m2 -#define VFMVVF_FLOAT vfmv_v_f_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m2() +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m2 +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define INT_V_T vint32m2_t +#define VID_V_INT __riscv_vid_v_i32m2 +#define VADD_VX_INT __riscv_vadd_vx_i32m2 +#define VFRSUB_VF_FLOAT __riscv_vfrsub_vf_f32m2 +#define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16 +#define VMSLT_VX_INT __riscv_vmslt_vx_i32m2_b16 +#define VMSEQ_VX_INT __riscv_vmseq_vx_i32m2_b16 +#define VBOOL_T vbool16_t +#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define VSETVL_MAX vsetvlmax_e64m2() -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VLSEV_FLOAT vlse64_v_f64m2 -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define INT_V_T vint64m2_t -#define VID_V_INT vid_v_i64m2 -#define VADD_VX_INT vadd_vx_i64m2 -#define VFRSUB_VF_FLOAT vfrsub_vf_f64m2 -#define VMSGT_VX_INT vmsgt_vx_i64m2_b32 -#define VMSLT_VX_INT vmslt_vx_i64m2_b32 -#define VMSEQ_VX_INT vmseq_vx_i64m2_b32 -#define VBOOL_T vbool32_t -#define VMERGE_VVM_FLOAT vmerge_vvm_f64m2 -#define VFMVVF_FLOAT vfmv_v_f_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m2() +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m2 +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define INT_V_T vint64m2_t +#define VID_V_INT __riscv_vid_v_i64m2 +#define VADD_VX_INT 
__riscv_vadd_vx_i64m2 +#define VFRSUB_VF_FLOAT __riscv_vfrsub_vf_f64m2 +#define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32 +#define VMSLT_VX_INT __riscv_vmslt_vx_i64m2_b32 +#define VMSEQ_VX_INT __riscv_vmseq_vx_i64m2_b32 +#define VBOOL_T vbool32_t +#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2 #endif @@ -101,13 +101,13 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON vbool_gt0 = VMSGT_VX_INT(vindex, 0, vl); vbool_eq0 = VMSEQ_VX_INT(vindex, 0, vl); - vb0 = VMERGE_VVM_FLOAT(vbool_gt0, va20, va10, vl); - vb1 = VMERGE_VVM_FLOAT(vbool_gt0, va21, va11, vl); + vb0 = VMERGE_VVM_FLOAT(va20, va10, vbool_gt0, vl); + vb1 = VMERGE_VVM_FLOAT(va21, va11, vbool_gt0, vl); vb2 = VFRSUB_VF_FLOAT(vb1, ZERO, vl); - vb1 = VMERGE_VVM_FLOAT(vbool_gt0, vb1, vb2, vl); - vb1 = VMERGE_VVM_FLOAT(vbool_eq0, vb1, vzero, vl); + vb1 = VMERGE_VVM_FLOAT(vb1, vb2, vbool_gt0, vl); + vb1 = VMERGE_VVM_FLOAT(vb1, vzero, vbool_eq0, vl); VSSEG2_FLOAT(b, vb0, vb1, vl); b += vl * 2; diff --git a/kernel/riscv64/znrm2_rvv.c b/kernel/riscv64/znrm2_rvv.c index 921ddb8cb..5f7873b5a 100644 --- a/kernel/riscv64/znrm2_rvv.c +++ b/kernel/riscv64/znrm2_rvv.c @@ -28,35 +28,35 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m4(n) -#define VSETVL_MAX vsetvlmax_e32m4() -#define VSETVL_MAX_M1 vsetvlmax_e32m1() -#define FLOAT_V_T vfloat32m4_t -#define FLOAT_V_T_M1 vfloat32m1_t -#define VLSEG_FLOAT vlseg2e32_v_f32m4 -#define VLSSEG_FLOAT vlsseg2e32_v_f32m4 -#define VFREDSUM_FLOAT vfredusum_vs_f32m4_f32m1 -#define VFMACCVV_FLOAT vfmacc_vv_f32m4 -#define VFMVVF_FLOAT vfmv_v_f_f32m4 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1 -#define VFREDMAXVS_FLOAT vfredmax_vs_f32m4_f32m1 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32 -#define VFABSV_FLOAT vfabs_v_f32m4 +#define VSETVL(n) __riscv_vsetvl_e32m4(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m4() +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1() +#define FLOAT_V_T vfloat32m4_t +#define FLOAT_V_T_M1 vfloat32m1_t +#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4 +#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m4_f32m1 +#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m4 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1 +#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f32m4_f32m1 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32 +#define VFABSV_FLOAT __riscv_vfabs_v_f32m4 #else -#define VSETVL(n) vsetvl_e64m4(n) -#define VSETVL_MAX vsetvlmax_e64m4() -#define VSETVL_MAX_M1 vsetvlmax_e64m1() -#define FLOAT_V_T vfloat64m4_t -#define FLOAT_V_T_M1 vfloat64m1_t -#define VLSEG_FLOAT vlseg2e64_v_f64m4 -#define VLSSEG_FLOAT vlsseg2e64_v_f64m4 -#define VFREDSUM_FLOAT vfredusum_vs_f64m4_f64m1 -#define VFMACCVV_FLOAT vfmacc_vv_f64m4 -#define VFMVVF_FLOAT vfmv_v_f_f64m4 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1 -#define VFREDMAXVS_FLOAT vfredmax_vs_f64m4_f64m1 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64 -#define VFABSV_FLOAT vfabs_v_f64m4 +#define VSETVL(n) __riscv_vsetvl_e64m4(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m4() +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1() +#define FLOAT_V_T vfloat64m4_t +#define FLOAT_V_T_M1 vfloat64m1_t +#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4 +#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m4_f64m1 +#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m4 +#define VFMVVF_FLOAT 
__riscv_vfmv_v_f_f64m4 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1 +#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f64m4_f64m1 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64 +#define VFABSV_FLOAT __riscv_vfabs_v_f64m4 #endif // TODO: Should single precision use the widening MAC, or perhaps all should be double? @@ -85,10 +85,10 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) v0 = VFABSV_FLOAT(v0, vl); v1 = VFABSV_FLOAT(v1, vl); - v_max = VFREDMAXVS_FLOAT(v_max, v0, v_max, vl); + v_max = VFREDMAXVS_FLOAT(v0, v_max, vl); vr = VFMACCVV_FLOAT(vr, v0, v0, vl); - v_max = VFREDMAXVS_FLOAT(v_max, v1, v_max, vl); + v_max = VFREDMAXVS_FLOAT(v1, v_max, vl); vr = VFMACCVV_FLOAT(vr, v1, v1, vl); } @@ -103,16 +103,16 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) v0 = VFABSV_FLOAT(v0, vl); v1 = VFABSV_FLOAT(v1, vl); - v_max = VFREDMAXVS_FLOAT(v_max, v0, v_max, vl); + v_max = VFREDMAXVS_FLOAT(v0, v_max, vl); vr = VFMACCVV_FLOAT(vr, v0, v0, vl); - v_max = VFREDMAXVS_FLOAT(v_max, v1, v_max, vl); + v_max = VFREDMAXVS_FLOAT(v1, v_max, vl); vr = VFMACCVV_FLOAT(vr, v1, v1, vl); } } - v_res = VFREDSUM_FLOAT(v_res, vr, v_res, vlmax); + v_res = VFREDSUM_FLOAT(vr, v_res, vlmax); ssq = VFMVFS_FLOAT_M1(v_res); scale = VFMVFS_FLOAT_M1(v_max); diff --git a/kernel/riscv64/zrot_rvv.c b/kernel/riscv64/zrot_rvv.c index 68066a00b..ee81bfe91 100644 --- a/kernel/riscv64/zrot_rvv.c +++ b/kernel/riscv64/zrot_rvv.c @@ -28,33 +28,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m4(n) -#define FLOAT_V_T vfloat32m4_t -#define VLEV_FLOAT vle32_v_f32m4 -#define VLSEV_FLOAT vlse32_v_f32m4 -#define VSEV_FLOAT vse32_v_f32m4 -#define VSSEV_FLOAT vsse32_v_f32m4 -#define VLSEG_FLOAT vlseg2e32_v_f32m4 -#define VSSEG_FLOAT vsseg2e32_v_f32m4 -#define VLSSEG_FLOAT vlsseg2e32_v_f32m4 -#define VSSSEG_FLOAT vssseg2e32_v_f32m4 -#define VFMACCVF_FLOAT vfmacc_vf_f32m4 -#define VFMULVF_FLOAT vfmul_vf_f32m4 -#define VFNMSACVF_FLOAT vfnmsac_vf_f32m4 +#define VSETVL(n) __riscv_vsetvl_e32m4(n) +#define FLOAT_V_T vfloat32m4_t +#define VLEV_FLOAT __riscv_vle32_v_f32m4 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m4 +#define VSEV_FLOAT __riscv_vse32_v_f32m4 +#define VSSEV_FLOAT __riscv_vsse32_v_f32m4 +#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4 +#define VSSEG_FLOAT __riscv_vsseg2e32_v_f32m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4 +#define VSSSEG_FLOAT __riscv_vssseg2e32_v_f32m4 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m4 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4 #else -#define VSETVL(n) vsetvl_e64m4(n) -#define FLOAT_V_T vfloat64m4_t -#define VLEV_FLOAT vle64_v_f64m4 -#define VLSEV_FLOAT vlse64_v_f64m4 -#define VSEV_FLOAT vse64_v_f64m4 -#define VSSEV_FLOAT vsse64_v_f64m4 -#define VLSEG_FLOAT vlseg2e64_v_f64m4 -#define VSSEG_FLOAT vsseg2e64_v_f64m4 -#define VLSSEG_FLOAT vlsseg2e64_v_f64m4 -#define VSSSEG_FLOAT vssseg2e64_v_f64m4 -#define VFMACCVF_FLOAT vfmacc_vf_f64m4 -#define VFMULVF_FLOAT vfmul_vf_f64m4 -#define VFNMSACVF_FLOAT vfnmsac_vf_f64m4 +#define VSETVL(n) __riscv_vsetvl_e64m4(n) +#define FLOAT_V_T vfloat64m4_t +#define VLEV_FLOAT __riscv_vle64_v_f64m4 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m4 +#define VSEV_FLOAT __riscv_vse64_v_f64m4 +#define VSSEV_FLOAT __riscv_vsse64_v_f64m4 +#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4 +#define VSSEG_FLOAT __riscv_vsseg2e64_v_f64m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4 +#define VSSSEG_FLOAT __riscv_vssseg2e64_v_f64m4 
+#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m4 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4 #endif int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT c, FLOAT s) diff --git a/kernel/riscv64/zscal_rvv.c b/kernel/riscv64/zscal_rvv.c index 079c36a2d..779fab68c 100644 --- a/kernel/riscv64/zscal_rvv.c +++ b/kernel/riscv64/zscal_rvv.c @@ -28,29 +28,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m4(n) -#define VSETVL_MAX vsetvlmax_e32m4() -#define FLOAT_V_T vfloat32m4_t -#define VLSEG_FLOAT vlseg2e32_v_f32m4 -#define VLSSEG_FLOAT vlsseg2e32_v_f32m4 -#define VSSEG_FLOAT vsseg2e32_v_f32m4 -#define VSSSEG_FLOAT vssseg2e32_v_f32m4 -#define VFMACCVF_FLOAT vfmacc_vf_f32m4 -#define VFMULVF_FLOAT vfmul_vf_f32m4 -#define VFNMSACVF_FLOAT vfnmsac_vf_f32m4 -#define VFMVVF_FLOAT vfmv_v_f_f32m4 +#define VSETVL(n) __riscv_vsetvl_e32m4(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m4() +#define FLOAT_V_T vfloat32m4_t +#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4 +#define VSSEG_FLOAT __riscv_vsseg2e32_v_f32m4 +#define VSSSEG_FLOAT __riscv_vssseg2e32_v_f32m4 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m4 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4 #else -#define VSETVL(n) vsetvl_e64m4(n) -#define VSETVL_MAX vsetvlmax_e64m4() -#define FLOAT_V_T vfloat64m4_t -#define VLSEG_FLOAT vlseg2e64_v_f64m4 -#define VLSSEG_FLOAT vlsseg2e64_v_f64m4 -#define VSSEG_FLOAT vsseg2e64_v_f64m4 -#define VSSSEG_FLOAT vssseg2e64_v_f64m4 -#define VFMACCVF_FLOAT vfmacc_vf_f64m4 -#define VFMULVF_FLOAT vfmul_vf_f64m4 -#define VFNMSACVF_FLOAT vfnmsac_vf_f64m4 -#define VFMVVF_FLOAT vfmv_v_f_f64m4 +#define VSETVL(n) __riscv_vsetvl_e64m4(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m4() +#define FLOAT_V_T vfloat64m4_t +#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4 +#define VSSEG_FLOAT __riscv_vsseg2e64_v_f64m4 +#define VSSSEG_FLOAT __riscv_vssseg2e64_v_f64m4 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m4 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4 #endif int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r,FLOAT da_i, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2) diff --git a/kernel/riscv64/zsum_rvv.c b/kernel/riscv64/zsum_rvv.c index 3928fbe27..44df112c6 100644 --- a/kernel/riscv64/zsum_rvv.c +++ b/kernel/riscv64/zsum_rvv.c @@ -28,29 +28,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
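[Note: the zrot and zscal hunks above are pure renames, but the macros they touch (VFMULVF, VFMACCVF, VFNMSACVF plus the vlseg2/vsseg2 pairs) carry the complex arithmetic. A minimal sketch of the zscal-style update under the new names, assuming unit stride, f32 with m4 grouping, and the pointer-output segment-load form these files use; the function and variable names are illustrative, not from the kernels:]

#include <riscv_vector.h>
#include <stddef.h>

/* Hypothetical helper, not from the patch: scales n interleaved complex
   floats x by (da_r + i*da_i), following the zscal macro usage above. */
static void zscal_sketch(size_t n, float da_r, float da_i, float *x)
{
    for (size_t vl; n > 0; n -= vl, x += vl * 2) {
        vl = __riscv_vsetvl_e32m4(n);
        vfloat32m4_t vr, vi;
        __riscv_vlseg2e32_v_f32m4(&vr, &vi, x, vl);        /* deinterleave re/im */
        vfloat32m4_t vt = __riscv_vfmul_vf_f32m4(vr, da_r, vl);
        vt = __riscv_vfnmsac_vf_f32m4(vt, da_i, vi, vl);   /* re' = da_r*re - da_i*im */
        vi = __riscv_vfmul_vf_f32m4(vi, da_r, vl);
        vi = __riscv_vfmacc_vf_f32m4(vi, da_i, vr, vl);    /* im' = da_r*im + da_i*re */
        __riscv_vsseg2e32_v_f32m4(x, vt, vi, vl);          /* re-interleave */
    }
}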
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m4(n) -#define VSETVL_MAX vsetvlmax_e32m4() -#define FLOAT_V_T vfloat32m4_t -#define FLOAT_V_T_M1 vfloat32m1_t -#define VLSEG_FLOAT vlseg2e32_v_f32m4 -#define VLSSEG_FLOAT vlsseg2e32_v_f32m4 -#define VFREDSUMVS_FLOAT vfredusum_vs_f32m4_f32m1 -#define VFMVVF_FLOAT vfmv_v_f_f32m4 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32 -#define VFADDVV_FLOAT vfadd_vv_f32m4 +#define VSETVL(n) __riscv_vsetvl_e32m4(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m4() +#define FLOAT_V_T vfloat32m4_t +#define FLOAT_V_T_M1 vfloat32m1_t +#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4 +#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f32m4_f32m1 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32 +#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m4 #else -#define VSETVL(n) vsetvl_e64m4(n) -#define VSETVL_MAX vsetvlmax_e64m4() -#define FLOAT_V_T vfloat64m4_t -#define FLOAT_V_T_M1 vfloat64m1_t -#define VLSEG_FLOAT vlseg2e64_v_f64m4 -#define VLSSEG_FLOAT vlsseg2e64_v_f64m4 -#define VFREDSUMVS_FLOAT vfredusum_vs_f64m4_f64m1 -#define VFMVVF_FLOAT vfmv_v_f_f64m4 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64 -#define VFADDVV_FLOAT vfadd_vv_f64m4 +#define VSETVL(n) __riscv_vsetvl_e64m4(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m4() +#define FLOAT_V_T vfloat64m4_t +#define FLOAT_V_T_M1 vfloat64m1_t +#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4 +#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f64m4_f64m1 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64 +#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m4 #endif FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) @@ -88,9 +88,8 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) } - FLOAT_V_T_M1 v_z0 = VFMVVF_FLOAT_M1(0, vlmax); FLOAT_V_T_M1 v_res = VFMVVF_FLOAT_M1(0, vlmax); - v_res = VFREDSUMVS_FLOAT(v_res, v_sum, v_z0, vlmax); + v_res = VFREDSUMVS_FLOAT(v_sum, v_res, vlmax); sumf += VFMVFS_FLOAT_M1(v_res); return(sumf); diff --git a/kernel/riscv64/zswap_rvv.c b/kernel/riscv64/zswap_rvv.c index 86f9103d3..17b7b9f43 100644 --- a/kernel/riscv64/zswap_rvv.c +++ b/kernel/riscv64/zswap_rvv.c @@ -28,19 +28,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m4(n) -#define FLOAT_V_T vfloat32m4_t -#define VLSEG_FLOAT vlseg2e32_v_f32m4 -#define VLSSEG_FLOAT vlsseg2e32_v_f32m4 -#define VSSEG_FLOAT vsseg2e32_v_f32m4 -#define VSSSEG_FLOAT vssseg2e32_v_f32m4 +#define VSETVL(n) __riscv_vsetvl_e32m4(n) +#define FLOAT_V_T vfloat32m4_t +#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4 +#define VSSEG_FLOAT __riscv_vsseg2e32_v_f32m4 +#define VSSSEG_FLOAT __riscv_vssseg2e32_v_f32m4 #else -#define VSETVL(n) vsetvl_e64m4(n) -#define FLOAT_V_T vfloat64m4_t -#define VLSEG_FLOAT vlseg2e64_v_f64m4 -#define VLSSEG_FLOAT vlsseg2e64_v_f64m4 -#define VSSEG_FLOAT vsseg2e64_v_f64m4 -#define VSSSEG_FLOAT vssseg2e64_v_f64m4 +#define VSETVL(n) __riscv_vsetvl_e64m4(n) +#define FLOAT_V_T vfloat64m4_t +#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4 +#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4 +#define VSSEG_FLOAT __riscv_vsseg2e64_v_f64m4 +#define VSSSEG_FLOAT __riscv_vssseg2e64_v_f64m4 #endif int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy3, FLOAT dummy4, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2) diff --git a/kernel/riscv64/zsymm_lcopy_rvv_v1.c b/kernel/riscv64/zsymm_lcopy_rvv_v1.c index df5c916a5..0f9e04869 100644 --- a/kernel/riscv64/zsymm_lcopy_rvv_v1.c +++ b/kernel/riscv64/zsymm_lcopy_rvv_v1.c @@ -28,37 +28,37 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define VSETVL_MAX vsetvlmax_e32m2() -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VLSEV_FLOAT vlse32_v_f32m2 -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define INT_V_T vint32m2_t -#define VID_V_INT vid_v_i32m2 -#define VADD_VX_INT vadd_vx_i32m2 -#define VMSGT_VX_INT vmsgt_vx_i32m2_b16 -#define VBOOL_T vbool16_t -#define VMERGE_VVM_FLOAT vmerge_vvm_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m2() +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m2 +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define INT_V_T vint32m2_t +#define VID_V_INT __riscv_vid_v_i32m2 +#define VADD_VX_INT __riscv_vadd_vx_i32m2 +#define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16 +#define VBOOL_T vbool16_t +#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define VSETVL_MAX vsetvlmax_e64m2() -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VLSEV_FLOAT vlse64_v_f64m2 -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define INT_V_T vint64m2_t -#define VID_V_INT vid_v_i64m2 -#define VADD_VX_INT vadd_vx_i64m2 -#define VMSGT_VX_INT vmsgt_vx_i64m2_b32 -#define VBOOL_T vbool32_t -#define VMERGE_VVM_FLOAT vmerge_vvm_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m2() +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m2 +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 
+#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define INT_V_T vint64m2_t +#define VID_V_INT __riscv_vid_v_i64m2 +#define VADD_VX_INT __riscv_vadd_vx_i64m2 +#define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32 +#define VBOOL_T vbool32_t +#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2 #endif int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLONG posY, FLOAT *b) @@ -91,8 +91,8 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON vindex = VADD_VX_INT(vindex_max, offset, vl); vbool = VMSGT_VX_INT(vindex, 0, vl); - vb0 = VMERGE_VVM_FLOAT(vbool, va20, va10, vl); - vb1 = VMERGE_VVM_FLOAT(vbool, va21, va11, vl); + vb0 = VMERGE_VVM_FLOAT(va20, va10, vbool, vl); + vb1 = VMERGE_VVM_FLOAT(va21, va11, vbool, vl); VSSEG2_FLOAT(b, vb0, vb1, vl); b += vl * 2; diff --git a/kernel/riscv64/zsymm_ucopy_rvv_v1.c b/kernel/riscv64/zsymm_ucopy_rvv_v1.c index dcf2b081a..fdc693700 100644 --- a/kernel/riscv64/zsymm_ucopy_rvv_v1.c +++ b/kernel/riscv64/zsymm_ucopy_rvv_v1.c @@ -28,37 +28,37 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define VSETVL_MAX vsetvlmax_e32m2() -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VLSEV_FLOAT vlse32_v_f32m2 -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define INT_V_T vint32m2_t -#define VID_V_INT vid_v_i32m2 -#define VADD_VX_INT vadd_vx_i32m2 -#define VMSGT_VX_INT vmsgt_vx_i32m2_b16 -#define VBOOL_T vbool16_t -#define VMERGE_VVM_FLOAT vmerge_vvm_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m2() +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m2 +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define INT_V_T vint32m2_t +#define VID_V_INT __riscv_vid_v_i32m2 +#define VADD_VX_INT __riscv_vadd_vx_i32m2 +#define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16 +#define VBOOL_T vbool16_t +#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define VSETVL_MAX vsetvlmax_e64m2() -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VLSEV_FLOAT vlse64_v_f64m2 -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define INT_V_T vint64m2_t -#define VID_V_INT vid_v_i64m2 -#define VADD_VX_INT vadd_vx_i64m2 -#define VMSGT_VX_INT vmsgt_vx_i64m2_b32 -#define VBOOL_T vbool32_t -#define VMERGE_VVM_FLOAT vmerge_vvm_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m2() +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m2 +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define INT_V_T vint64m2_t +#define VID_V_INT __riscv_vid_v_i64m2 +#define VADD_VX_INT __riscv_vadd_vx_i64m2 +#define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32 +#define VBOOL_T vbool32_t +#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2 #endif @@ 
-92,8 +92,8 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON vindex = VADD_VX_INT(vindex_max, offset, vl); vbool = VMSGT_VX_INT(vindex, 0, vl); - vb0 = VMERGE_VVM_FLOAT(vbool, va20, va10, vl); - vb1 = VMERGE_VVM_FLOAT(vbool, va21, va11, vl); + vb0 = VMERGE_VVM_FLOAT(va20, va10, vbool, vl); + vb1 = VMERGE_VVM_FLOAT(va21, va11, vbool, vl); VSSEG2_FLOAT(b, vb0, vb1, vl); b += vl * 2; diff --git a/kernel/riscv64/ztrmm_lncopy_rvv_v1.c b/kernel/riscv64/ztrmm_lncopy_rvv_v1.c index afd694408..7276618c5 100644 --- a/kernel/riscv64/ztrmm_lncopy_rvv_v1.c +++ b/kernel/riscv64/ztrmm_lncopy_rvv_v1.c @@ -30,35 +30,35 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VLSEV_FLOAT vlse32_v_f32m2 -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define VBOOL_T vbool16_t -#define UINT_V_T vint32m2_t -#define VID_V_UINT vid_v_i32m2 -#define VMSGTU_VX_UINT vmsgt_vx_i32m2_b16 -#define VMSEQ_VX_UINT vmseq_vx_i32m2_b16 -#define VFMERGE_VFM_FLOAT vfmerge_vfm_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m2 +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define VBOOL_T vbool16_t +#define UINT_V_T vint32m2_t +#define VID_V_UINT __riscv_vid_v_i32m2 +#define VMSGTU_VX_UINT __riscv_vmsgt_vx_i32m2_b16 +#define VMSEQ_VX_UINT __riscv_vmseq_vx_i32m2_b16 +#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VLSEV_FLOAT vlse64_v_f64m2 -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define VBOOL_T vbool32_t -#define UINT_V_T vuint64m2_t -#define VID_V_UINT vid_v_u64m2 -#define VMSGTU_VX_UINT vmsgtu_vx_u64m2_b32 -#define VMSEQ_VX_UINT vmseq_vx_u64m2_b32 -#define VFMERGE_VFM_FLOAT vfmerge_vfm_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m2 +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define VBOOL_T vbool32_t +#define UINT_V_T vuint64m2_t +#define VID_V_UINT __riscv_vid_v_u64m2 +#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32 +#define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32 +#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2 #endif int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLONG posY, FLOAT *b){ @@ -121,12 +121,12 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON { VLSSEG2_FLOAT(&va0, &va1, ao, stride_lda, vl); vbool_cmp = VMSGTU_VX_UINT(vindex, j, vl); - va0 = VFMERGE_VFM_FLOAT(vbool_cmp, va0, ZERO, vl); - va1 = VFMERGE_VFM_FLOAT(vbool_cmp, va1, ZERO, vl); + va0 = VFMERGE_VFM_FLOAT(va0, ZERO, vbool_cmp, vl); + va1 = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_cmp, vl); #ifdef UNIT vbool_eq = VMSEQ_VX_UINT(vindex, j, 
vl); - va0 = VFMERGE_VFM_FLOAT(vbool_eq, va0, ONE, vl); - va1 = VFMERGE_VFM_FLOAT(vbool_eq, va1, ZERO, vl); + va0 = VFMERGE_VFM_FLOAT(va0, ONE, vbool_eq, vl); + va1 = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_eq, vl); #endif VSSEG2_FLOAT(b, va0, va1, vl); ao += 2; diff --git a/kernel/riscv64/ztrmm_ltcopy_rvv_v1.c b/kernel/riscv64/ztrmm_ltcopy_rvv_v1.c index c7d593949..72e8f2ce2 100644 --- a/kernel/riscv64/ztrmm_ltcopy_rvv_v1.c +++ b/kernel/riscv64/ztrmm_ltcopy_rvv_v1.c @@ -30,33 +30,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define VBOOL_T vbool16_t -#define UINT_V_T vuint32m2_t -#define VID_V_UINT vid_v_u32m2 -#define VMSLTU_VX_UINT vmsltu_vx_u32m2_b16 -#define VMSEQ_VX_UINT vmseq_vx_u32m2_b16 -#define VFMERGE_VFM_FLOAT vfmerge_vfm_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define VBOOL_T vbool16_t +#define UINT_V_T vuint32m2_t +#define VID_V_UINT __riscv_vid_v_u32m2 +#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u32m2_b16 +#define VMSEQ_VX_UINT __riscv_vmseq_vx_u32m2_b16 +#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define VBOOL_T vbool32_t -#define UINT_V_T vuint64m2_t -#define VID_V_UINT vid_v_u64m2 -#define VMSLTU_VX_UINT vmsltu_vx_u64m2_b32 -#define VMSEQ_VX_UINT vmseq_vx_u64m2_b32 -#define VFMERGE_VFM_FLOAT vfmerge_vfm_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define VBOOL_T vbool32_t +#define UINT_V_T vuint64m2_t +#define VID_V_UINT __riscv_vid_v_u64m2 +#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u64m2_b32 +#define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32 +#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2 #endif int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLONG posY, FLOAT *b){ @@ -117,14 +117,13 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON //va1 = VLEV_FLOAT(ao, vl); VLSEG2_FLOAT(&va0, &va1, ao, vl); vbool_cmp = VMSLTU_VX_UINT(vindex, j, vl); - va0 = VFMERGE_VFM_FLOAT(vbool_cmp, va0, ZERO, vl); - va1 = VFMERGE_VFM_FLOAT(vbool_cmp, va1, ZERO, vl); + va0 = VFMERGE_VFM_FLOAT(va0, ZERO, vbool_cmp, vl); + va1 = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_cmp, vl); #ifdef UNIT vbool_eq = VMSEQ_VX_UINT(vindex, j, vl); - va0 = VFMERGE_VFM_FLOAT(vbool_eq, va0, ONE, vl); - va1 = VFMERGE_VFM_FLOAT(vbool_eq, va1, ZERO, vl); + va0 = VFMERGE_VFM_FLOAT(va0, ONE, vbool_eq, vl); + va1 = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_eq, vl); #endif - //VSEV_FLOAT(b, vb, vl); VSSEG2_FLOAT(b, va0, va1, vl); ao += lda * 2; b += vl * 2; 
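[Note: every merge rewrite in the zsymm and ztrmm copy hunks above follows one rule: in the __riscv_ intrinsics the mask moved from the first operand to the slot just before vl. The lane selection itself is unchanged: masked-on lanes take the second operand (or the scalar, for vfmerge), masked-off lanes keep the first. Side by side:]

/* Old: mask led the argument list.
       vb0 = VMERGE_VVM_FLOAT(vbool, va20, va10, vl);
       va0 = VFMERGE_VFM_FLOAT(vbool_cmp, va0, ZERO, vl);              */
/* New: same selection, mask immediately before vl:                    */
vb0 = __riscv_vmerge_vvm_f32m2(va20, va10, vbool, vl);
va0 = __riscv_vfmerge_vfm_f32m2(va0, ZERO, vbool_cmp, vl);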
diff --git a/kernel/riscv64/ztrmm_uncopy_rvv_v1.c b/kernel/riscv64/ztrmm_uncopy_rvv_v1.c index 3c70b6385..e6d36c86d 100644 --- a/kernel/riscv64/ztrmm_uncopy_rvv_v1.c +++ b/kernel/riscv64/ztrmm_uncopy_rvv_v1.c @@ -30,35 +30,35 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VLSEV_FLOAT vlse32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define VBOOL_T vbool16_t -#define UINT_V_T vuint32m2_t -#define VID_V_UINT vid_v_u32m2 -#define VMSLTU_VX_UINT vmsltu_vx_u32m2_b16 -#define VMSEQ_VX_UINT vmseq_vx_u32m2_b16 -#define VFMERGE_VFM_FLOAT vfmerge_vfm_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VLSEV_FLOAT __riscv_vlse32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define VBOOL_T vbool16_t +#define UINT_V_T vuint32m2_t +#define VID_V_UINT __riscv_vid_v_u32m2 +#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u32m2_b16 +#define VMSEQ_VX_UINT __riscv_vmseq_vx_u32m2_b16 +#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VLSEV_FLOAT vlse64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define VBOOL_T vbool32_t -#define UINT_V_T vuint64m2_t -#define VID_V_UINT vid_v_u64m2 -#define VMSLTU_VX_UINT vmsltu_vx_u64m2_b32 -#define VMSEQ_VX_UINT vmseq_vx_u64m2_b32 -#define VFMERGE_VFM_FLOAT vfmerge_vfm_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VLSEV_FLOAT __riscv_vlse64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define VBOOL_T vbool32_t +#define UINT_V_T vuint64m2_t +#define VID_V_UINT __riscv_vid_v_u64m2 +#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u64m2_b32 +#define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32 +#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2 #endif int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLONG posY, FLOAT *b){ @@ -120,12 +120,12 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON { VLSSEG2_FLOAT(&va0, &va1, ao, stride_lda, vl); vbool_cmp = VMSLTU_VX_UINT(vindex, j, vl); - va0 = VFMERGE_VFM_FLOAT(vbool_cmp, va0, ZERO, vl); - va1 = VFMERGE_VFM_FLOAT(vbool_cmp, va1, ZERO, vl); + va0 = VFMERGE_VFM_FLOAT(va0, ZERO, vbool_cmp, vl); + va1 = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_cmp, vl); #ifdef UNIT vbool_eq = VMSEQ_VX_UINT(vindex, j, vl); - va0 = VFMERGE_VFM_FLOAT(vbool_eq, va0, ONE, vl); - va1 = VFMERGE_VFM_FLOAT(vbool_eq, va1, ZERO, vl); + va0 = VFMERGE_VFM_FLOAT(va0, ONE, vbool_eq, vl); + va1 = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_eq, vl); #endif VSSEG2_FLOAT(b, va0, va1, vl); ao += 2; diff --git a/kernel/riscv64/ztrmm_utcopy_rvv_v1.c b/kernel/riscv64/ztrmm_utcopy_rvv_v1.c index 706782cf0..7085cfc37 100644 --- 
a/kernel/riscv64/ztrmm_utcopy_rvv_v1.c +++ b/kernel/riscv64/ztrmm_utcopy_rvv_v1.c @@ -32,33 +32,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define VBOOL_T vbool16_t -#define UINT_V_T vuint32m2_t -#define VID_V_UINT vid_v_u32m2 -#define VMSGTU_VX_UINT vmsgtu_vx_u32m2_b16 -#define VMSEQ_VX_UINT vmseq_vx_u32m2_b16 -#define VFMERGE_VFM_FLOAT vfmerge_vfm_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define VBOOL_T vbool16_t +#define UINT_V_T vuint32m2_t +#define VID_V_UINT __riscv_vid_v_u32m2 +#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u32m2_b16 +#define VMSEQ_VX_UINT __riscv_vmseq_vx_u32m2_b16 +#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define VBOOL_T vbool32_t -#define UINT_V_T vuint64m2_t -#define VID_V_UINT vid_v_u64m2 -#define VMSGTU_VX_UINT vmsgtu_vx_u64m2_b32 -#define VMSEQ_VX_UINT vmseq_vx_u64m2_b32 -#define VFMERGE_VFM_FLOAT vfmerge_vfm_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 +#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define VBOOL_T vbool32_t +#define UINT_V_T vuint64m2_t +#define VID_V_UINT __riscv_vid_v_u64m2 +#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32 +#define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32 +#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2 #endif int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLONG posY, FLOAT *b){ @@ -117,12 +117,12 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON { VLSEG2_FLOAT(&va0, &va1, ao, vl); vbool_cmp = VMSGTU_VX_UINT(vindex, j, vl); - va0 = VFMERGE_VFM_FLOAT(vbool_cmp, va0, ZERO, vl); - va1 = VFMERGE_VFM_FLOAT(vbool_cmp, va1, ZERO, vl); + va0 = VFMERGE_VFM_FLOAT(va0, ZERO, vbool_cmp, vl); + va1 = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_cmp, vl); #ifdef UNIT vbool_eq = VMSEQ_VX_UINT(vindex, j, vl); - va0 = VFMERGE_VFM_FLOAT(vbool_eq, va0, ONE, vl); - va1 = VFMERGE_VFM_FLOAT(vbool_eq, va1, ZERO, vl); + va0 = VFMERGE_VFM_FLOAT(va0, ONE, vbool_eq, vl); + va1 = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_eq, vl); #endif VSSEG2_FLOAT(b, va0, va1, vl); ao += lda * 2; diff --git a/kernel/riscv64/ztrmmkernel_2x2_rvv.c b/kernel/riscv64/ztrmmkernel_2x2_rvv.c index 3486a4648..399124d2e 100644 --- a/kernel/riscv64/ztrmmkernel_2x2_rvv.c +++ b/kernel/riscv64/ztrmmkernel_2x2_rvv.c @@ -28,37 +28,37 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
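[Note: taken together, the four ztrmm copy files implement the triangle selection entirely with an index vector plus merges; the direction of the comparison (vmsltu vs. vmsgtu) picks the upper or lower variant. A condensed sketch of the per-column body after the migration, for f32 with m2 grouping, where j is the current column index and va0/va1 are the deinterleaved real/imaginary lanes:]

vuint32m2_t vindex    = __riscv_vid_v_u32m2(vl);   /* lane ids 0..vl-1 */
vbool16_t   vbool_cmp = __riscv_vmsltu_vx_u32m2_b16(vindex, j, vl);
/* zero the lanes on the wrong side of the diagonal */
va0 = __riscv_vfmerge_vfm_f32m2(va0, 0.0f, vbool_cmp, vl);
va1 = __riscv_vfmerge_vfm_f32m2(va1, 0.0f, vbool_cmp, vl);
#ifdef UNIT  /* unit diagonal: real part 1, imaginary part 0 */
vbool16_t vbool_eq = __riscv_vmseq_vx_u32m2_b16(vindex, j, vl);
va0 = __riscv_vfmerge_vfm_f32m2(va0, 1.0f, vbool_eq, vl);
va1 = __riscv_vfmerge_vfm_f32m2(va1, 0.0f, vbool_eq, vl);
#endif
__riscv_vsseg2e32_v_f32m2(b, va0, va1, vl);        /* interleaved store */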
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define VSETVL_MAX vsetvlmax_e32m2() -#define VSETVL_MAX_M1 vsetvlmax_e32m1() -#define FLOAT_V_T vfloat32m2_t -#define FLOAT_V_T_M1 vfloat32m1_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VLSEG4_FLOAT vlseg4e32_v_f32m2 -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VFMVVF_FLOAT vfmv_v_f_f32m2 -#define VFMACCVF_FLOAT vfmacc_vf_f32m2 -#define VFMACCVV_FLOAT vfmacc_vv_f32m2 -#define VFNMSACVV_FLOAT vfnmsac_vv_f32m2 -#define VFREDSUMVS_FLOAT vfredusum_vs_f32m2_f32m1 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e32m2() +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1() +#define FLOAT_V_T vfloat32m2_t +#define FLOAT_V_T_M1 vfloat32m1_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VLSEG4_FLOAT __riscv_vlseg4e32_v_f32m2 +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m2 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2 +#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m2 +#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f32m2 +#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f32m2_f32m1 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define VSETVL_MAX vsetvlmax_e64m2() -#define VSETVL_MAX_M1 vsetvlmax_e64m1() -#define FLOAT_V_T vfloat64m2_t -#define FLOAT_V_T_M1 vfloat64m1_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VLSEG4_FLOAT vlseg4e64_v_f64m2 -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VFMVVF_FLOAT vfmv_v_f_f64m2 -#define VFMACCVF_FLOAT vfmacc_vf_f64m2 -#define VFMACCVV_FLOAT vfmacc_vv_f64m2 -#define VFNMSACVV_FLOAT vfnmsac_vv_f64m2 -#define VFREDSUMVS_FLOAT vfredusum_vs_f64m2_f64m1 -#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1 -#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define VSETVL_MAX __riscv_vsetvlmax_e64m2() +#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1() +#define FLOAT_V_T vfloat64m2_t +#define FLOAT_V_T_M1 vfloat64m1_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VLSEG4_FLOAT __riscv_vlseg4e64_v_f64m2 +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2 +#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m2 +#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f64m2 +#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f64m2_f64m1 +#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1 +#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64 #endif // Optimizes the implementation in ../generic/ztrmmkernel_2x2.c diff --git a/kernel/riscv64/ztrmmkernel_rvv_v1x4.c b/kernel/riscv64/ztrmmkernel_rvv_v1x4.c index 27409ec25..92b4b855b 100644 --- a/kernel/riscv64/ztrmmkernel_rvv_v1x4.c +++ b/kernel/riscv64/ztrmmkernel_rvv_v1x4.c @@ -28,27 +28,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLEV_FLOAT vle32_v_f32m2 -#define VSEV_FLOAT vse32_v_f32m2 -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define VFMVVF_FLOAT vfmv_v_f_f32m2 -#define VFMACCVF_FLOAT vfmacc_vf_f32m2 -#define VFNMSACVF_FLOAT vfnmsac_vf_f32m2 -#define VFMULVF_FLOAT vfmul_vf_f32m2 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLEV_FLOAT __riscv_vle32_v_f32m2 +#define VSEV_FLOAT __riscv_vse32_v_f32m2 +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m2 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m2 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m2 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLEV_FLOAT vle64_v_f64m2 -#define VSEV_FLOAT vse64_v_f64m2 -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define VFMVVF_FLOAT vfmv_v_f_f64m2 -#define VFMACCVF_FLOAT vfmacc_vf_f64m2 -#define VFNMSACVF_FLOAT vfnmsac_vf_f64m2 -#define VFMULVF_FLOAT vfmul_vf_f64m2 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLEV_FLOAT __riscv_vle64_v_f64m2 +#define VSEV_FLOAT __riscv_vse64_v_f64m2 +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2 +#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2 +#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m2 +#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m2 #endif #if defined(NN) || defined(NT) || defined(TN) || defined(TT) diff --git a/kernel/riscv64/ztrsm_lncopy_rvv_v1.c b/kernel/riscv64/ztrsm_lncopy_rvv_v1.c index b7ccb1eb3..383cb883f 100644 --- a/kernel/riscv64/ztrsm_lncopy_rvv_v1.c +++ b/kernel/riscv64/ztrsm_lncopy_rvv_v1.c @@ -29,25 +29,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define VSSEG2_FLOAT_M vsseg2e32_v_f32m2_m -#define VBOOL_T vbool16_t -#define UINT_V_T vuint32m2_t -#define VID_V_UINT vid_v_u32m2 -#define VMSLTU_VX_UINT vmsltu_vx_u32m2_b16 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define VSSEG2_FLOAT_M __riscv_vsseg2e32_v_f32m2_m +#define VBOOL_T vbool16_t +#define UINT_V_T vuint32m2_t +#define VID_V_UINT __riscv_vid_v_u32m2 +#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u32m2_b16 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define VSSEG2_FLOAT_M vsseg2e64_v_f64m2_m -#define VBOOL_T vbool32_t -#define UINT_V_T vuint64m2_t -#define VID_V_UINT vid_v_u64m2 -#define VMSLTU_VX_UINT vmsltu_vx_u64m2_b32 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define VSSEG2_FLOAT_M __riscv_vsseg2e64_v_f64m2_m +#define VBOOL_T vbool32_t +#define UINT_V_T vuint64m2_t +#define VID_V_UINT __riscv_vid_v_u64m2 +#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u64m2_b32 #endif diff --git a/kernel/riscv64/ztrsm_ltcopy_rvv_v1.c b/kernel/riscv64/ztrsm_ltcopy_rvv_v1.c index 911b81de5..f57e9f1de 100644 --- a/kernel/riscv64/ztrsm_ltcopy_rvv_v1.c +++ b/kernel/riscv64/ztrsm_ltcopy_rvv_v1.c @@ -29,25 +29,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define VSSEG2_FLOAT_M vsseg2e32_v_f32m2_m -#define VBOOL_T vbool16_t -#define UINT_V_T vuint32m2_t -#define VID_V_UINT vid_v_u32m2 -#define VMSGTU_VX_UINT vmsgtu_vx_u32m2_b16 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define VSSEG2_FLOAT_M __riscv_vsseg2e32_v_f32m2_m +#define VBOOL_T vbool16_t +#define UINT_V_T vuint32m2_t +#define VID_V_UINT __riscv_vid_v_u32m2 +#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u32m2_b16 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define VSSEG2_FLOAT_M vsseg2e64_v_f64m2_m -#define VBOOL_T vbool32_t -#define UINT_V_T vuint64m2_t -#define VID_V_UINT vid_v_u64m2 -#define VMSGTU_VX_UINT vmsgtu_vx_u64m2_b32 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define VSSEG2_FLOAT_M __riscv_vsseg2e64_v_f64m2_m +#define VBOOL_T vbool32_t +#define UINT_V_T vuint64m2_t +#define VID_V_UINT __riscv_vid_v_u64m2 +#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32 #endif int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT *b){ diff --git a/kernel/riscv64/ztrsm_uncopy_rvv_v1.c b/kernel/riscv64/ztrsm_uncopy_rvv_v1.c index db075c29b..be3613429 100644 --- a/kernel/riscv64/ztrsm_uncopy_rvv_v1.c +++ b/kernel/riscv64/ztrsm_uncopy_rvv_v1.c @@ -30,25 +30,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED 
OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define VSSEG2_FLOAT_M vsseg2e32_v_f32m2_m -#define VBOOL_T vbool16_t -#define UINT_V_T vuint32m2_t -#define VID_V_UINT vid_v_u32m2 -#define VMSGTU_VX_UINT vmsgtu_vx_u32m2_b16 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define VSSEG2_FLOAT_M __riscv_vsseg2e32_v_f32m2_m +#define VBOOL_T vbool16_t +#define UINT_V_T vuint32m2_t +#define VID_V_UINT __riscv_vid_v_u32m2 +#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u32m2_b16 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define VSSEG2_FLOAT_M vsseg2e64_v_f64m2_m -#define VBOOL_T vbool32_t -#define UINT_V_T vuint64m2_t -#define VID_V_UINT vid_v_u64m2 -#define VMSGTU_VX_UINT vmsgtu_vx_u64m2_b32 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define VSSEG2_FLOAT_M __riscv_vsseg2e64_v_f64m2_m +#define VBOOL_T vbool32_t +#define UINT_V_T vuint64m2_t +#define VID_V_UINT __riscv_vid_v_u64m2 +#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32 #endif diff --git a/kernel/riscv64/ztrsm_utcopy_rvv_v1.c b/kernel/riscv64/ztrsm_utcopy_rvv_v1.c index e121c6273..b1f5ef8f0 100644 --- a/kernel/riscv64/ztrsm_utcopy_rvv_v1.c +++ b/kernel/riscv64/ztrsm_utcopy_rvv_v1.c @@ -29,25 +29,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" #if !defined(DOUBLE) -#define VSETVL(n) vsetvl_e32m2(n) -#define FLOAT_V_T vfloat32m2_t -#define VLSEG2_FLOAT vlseg2e32_v_f32m2 -#define VSSEG2_FLOAT vsseg2e32_v_f32m2 -#define VSSEG2_FLOAT_M vsseg2e32_v_f32m2_m -#define VBOOL_T vbool16_t -#define UINT_V_T vuint32m2_t -#define VID_V_UINT vid_v_u32m2 -#define VMSLTU_VX_UINT vmsltu_vx_u32m2_b16 +#define VSETVL(n) __riscv_vsetvl_e32m2(n) +#define FLOAT_V_T vfloat32m2_t +#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2 +#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2 +#define VSSEG2_FLOAT_M __riscv_vsseg2e32_v_f32m2_m +#define VBOOL_T vbool16_t +#define UINT_V_T vuint32m2_t +#define VID_V_UINT __riscv_vid_v_u32m2 +#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u32m2_b16 #else -#define VSETVL(n) vsetvl_e64m2(n) -#define FLOAT_V_T vfloat64m2_t -#define VLSEG2_FLOAT vlseg2e64_v_f64m2 -#define VSSEG2_FLOAT vsseg2e64_v_f64m2 -#define VSSEG2_FLOAT_M vsseg2e64_v_f64m2_m -#define VBOOL_T vbool32_t -#define UINT_V_T vuint64m2_t -#define VID_V_UINT vid_v_u64m2 -#define VMSLTU_VX_UINT vmsltu_vx_u64m2_b32 +#define VSETVL(n) __riscv_vsetvl_e64m2(n) +#define FLOAT_V_T vfloat64m2_t +#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2 +#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2 +#define VSSEG2_FLOAT_M __riscv_vsseg2e64_v_f64m2_m +#define VBOOL_T vbool32_t +#define UINT_V_T vuint64m2_t +#define VID_V_UINT __riscv_vid_v_u64m2 +#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u64m2_b32 #endif
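[Note: the ztrsm copy kernels close out the set. Their one structurally distinct intrinsic is the masked segment store behind VSSEG2_FLOAT_M, which writes a partial triangular row without a scalar tail loop; in the pre-tuple __riscv_ segment API these files target, the mask stays the first argument. A minimal fragment, with k (the number of complex lanes to keep) as an illustrative name:]

vbool16_t mask =
    __riscv_vmsltu_vx_u32m2_b16(__riscv_vid_v_u32m2(vl), k, vl);
/* store only the first k complex lanes of (va0, va1) to b */
__riscv_vsseg2e32_v_f32m2_m(mask, b, va0, va1, vl);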