From 6144004e92e7d61a803a03d2f28fa117f9c01bc9 Mon Sep 17 00:00:00 2001
From: xctan
Date: Wed, 23 Jul 2025 16:49:24 +0800
Subject: [PATCH] Fix xtheadvector compilation

---
 common_macro.h                |   2 +-
 common_riscv64.h              |   6 --
 kernel/riscv64/dsdot_vector.c | 126 +++++++++++++++++-----------------
 3 files changed, 64 insertions(+), 70 deletions(-)

diff --git a/common_macro.h b/common_macro.h
index f9c22089b..477818ff6 100644
--- a/common_macro.h
+++ b/common_macro.h
@@ -2709,7 +2709,7 @@
 #ifndef ASSEMBLER
 #if !defined(DYNAMIC_ARCH) \
  && (defined(ARCH_X86) || defined(ARCH_X86_64) || defined(ARCH_IA64) || defined(ARCH_MIPS64) || defined(ARCH_ARM64) \
- || defined(ARCH_LOONGARCH64) || defined(ARCH_E2K) || defined(ARCH_ALPHA))
+ || defined(ARCH_LOONGARCH64) || defined(ARCH_E2K) || defined(ARCH_ALPHA) || defined(ARCH_RISCV64))
 extern BLASLONG gemm_offset_a;
 extern BLASLONG gemm_offset_b;
 extern BLASLONG bgemm_p;
diff --git a/common_riscv64.h b/common_riscv64.h
index ba638e8be..404dab7f9 100644
--- a/common_riscv64.h
+++ b/common_riscv64.h
@@ -93,13 +93,7 @@ static inline int blas_quickdivide(blasint x, blasint y){
 # include <riscv_vector.h>
 #endif
 
-#if defined( __riscv_xtheadc ) && defined( __riscv_v ) && ( __riscv_v <= 7000 )
-// t-head toolchain uses obsolete rvv intrinsics, can't build for C910V without this
-#define RISCV_0p10_INTRINSICS
-#define RISCV_RVV(x) x
-#else
 #define RISCV_RVV(x) __riscv_ ## x
-#endif
 
 #if defined(C910V) || defined(RISCV64_ZVL256B)
 # if !defined(DOUBLE)
diff --git a/kernel/riscv64/dsdot_vector.c b/kernel/riscv64/dsdot_vector.c
index e972828b5..b6f0caebe 100644
--- a/kernel/riscv64/dsdot_vector.c
+++ b/kernel/riscv64/dsdot_vector.c
@@ -37,114 +37,114 @@ double CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y)
         vfloat32m2_t vx, vy;
         unsigned int gvl = 0;
         vfloat64m1_t v_res, v_z0;
-        gvl = vsetvlmax_e64m1();
-        v_res = vfmv_v_f_f64m1(0, gvl);
-        v_z0 = vfmv_v_f_f64m1(0, gvl);
+        gvl = __riscv_vsetvlmax_e64m1();
+        v_res = __riscv_vfmv_v_f_f64m1(0, gvl);
+        v_z0 = __riscv_vfmv_v_f_f64m1(0, gvl);
 
         if(inc_x == 1 && inc_y == 1){
-                gvl = vsetvl_e64m4(n);
-                vr = vfmv_v_f_f64m4(0, gvl);
+                gvl = __riscv_vsetvl_e64m4(n);
+                vr = __riscv_vfmv_v_f_f64m4(0, gvl);
                 for(i=0,j=0; i<n/gvl; i++){
-                        vx = vle32_v_f32m2(&x[j], gvl);
-                        vy = vle32_v_f32m2(&y[j], gvl);
-                        vr = vfwmacc_vv_f64m4(vr, vx, vy, gvl);
+                        vx = __riscv_vle32_v_f32m2(&x[j], gvl);
+                        vy = __riscv_vle32_v_f32m2(&y[j], gvl);
+                        vr = __riscv_vfwmacc_vv_f64m4(vr, vx, vy, gvl);
                         j += gvl;
                 }
                 if(j > 0){
-                        v_res = vfredusum_vs_f64m4_f64m1(v_res, vr, v_z0, gvl);
-                        dot += (double)vfmv_f_s_f64m1_f64(v_res);
+                        v_res = __riscv_vfredusum_vs_f64m4_f64m1(vr, v_z0, gvl);
+                        dot += (double)__riscv_vfmv_f_s_f64m1_f64(v_res);
                 }
                 //tail
                 if(j < n){
-                        gvl = vsetvl_e64m4(n-j);
-                        vx = vle32_v_f32m2(&x[j], gvl);
-                        vy = vle32_v_f32m2(&y[j], gvl);
-                        vfloat64m4_t vz = vfmv_v_f_f64m4(0, gvl);
-                        //vr = vfdot_vv_f32m2(vx, vy, gvl);
-                        vr = vfwmacc_vv_f64m4(vz, vx, vy, gvl);
-                        v_res = vfredusum_vs_f64m4_f64m1(v_res, vr, v_z0, gvl);
-                        dot += (double)vfmv_f_s_f64m1_f64(v_res);
+                        gvl = __riscv_vsetvl_e64m4(n-j);
+                        vx = __riscv_vle32_v_f32m2(&x[j], gvl);
+                        vy = __riscv_vle32_v_f32m2(&y[j], gvl);
+                        vfloat64m4_t vz = __riscv_vfmv_v_f_f64m4(0, gvl);
+                        //vr = __riscv_vfdot_vv_f32m2(vx, vy, gvl);
+                        vr = __riscv_vfwmacc_vv_f64m4(vz, vx, vy, gvl);
+                        v_res = __riscv_vfredusum_vs_f64m4_f64m1(vr, v_z0, gvl);
+                        dot += (double)__riscv_vfmv_f_s_f64m1_f64(v_res);
                 }
         }else if(inc_y == 1){
-                gvl = vsetvl_e64m4(n);
-                vr = vfmv_v_f_f64m4(0, gvl);
+                gvl = __riscv_vsetvl_e64m4(n);
+                vr = __riscv_vfmv_v_f_f64m4(0, gvl);
                 int stride_x = inc_x * sizeof(FLOAT);
                 for(i=0,j=0; i<n/gvl; i++){
-                        vx = vlse32_v_f32m2(&x[j*inc_x], stride_x, gvl);
-                        vy = vle32_v_f32m2(&y[j], gvl);
-                        vr = vfwmacc_vv_f64m4(vr, vx, vy, gvl);
+                        vx = __riscv_vlse32_v_f32m2(&x[j*inc_x], stride_x, gvl);
+                        vy = __riscv_vle32_v_f32m2(&y[j], gvl);
+                        vr = __riscv_vfwmacc_vv_f64m4(vr, vx, vy, gvl);
                         j += gvl;
                 }
                 if(j > 0){
-                        v_res = vfredusum_vs_f64m4_f64m1(v_res, vr, v_z0, gvl);
-                        dot += (double)vfmv_f_s_f64m1_f64(v_res);
+                        v_res = __riscv_vfredusum_vs_f64m4_f64m1(vr, v_z0, gvl);
+                        dot += (double)__riscv_vfmv_f_s_f64m1_f64(v_res);
                 }
                 //tail
                 if(j < n){
-                        gvl = vsetvl_e64m4(n-j);
-                        vx = vlse32_v_f32m2(&x[j*inc_x], stride_x, gvl);
-                        vy = vle32_v_f32m2(&y[j], gvl);
-                        vfloat64m4_t vz = vfmv_v_f_f64m4(0, gvl);
-                        //vr = vfdot_vv_f32m2(vx, vy, gvl);
-                        vr = vfwmacc_vv_f64m4(vz, vx, vy, gvl);
-                        v_res = vfredusum_vs_f64m4_f64m1(v_res, vr, v_z0, gvl);
-                        dot += (double)vfmv_f_s_f64m1_f64(v_res);
+                        gvl = __riscv_vsetvl_e64m4(n-j);
+                        vx = __riscv_vlse32_v_f32m2(&x[j*inc_x], stride_x, gvl);
+                        vy = __riscv_vle32_v_f32m2(&y[j], gvl);
+                        vfloat64m4_t vz = __riscv_vfmv_v_f_f64m4(0, gvl);
+                        //vr = __riscv_vfdot_vv_f32m2(vx, vy, gvl);
+                        vr = __riscv_vfwmacc_vv_f64m4(vz, vx, vy, gvl);
+                        v_res = __riscv_vfredusum_vs_f64m4_f64m1(vr, v_z0, gvl);
+                        dot += (double)__riscv_vfmv_f_s_f64m1_f64(v_res);
                 }
         }else if(inc_x == 1){
-                gvl = vsetvl_e64m4(n);
-                vr = vfmv_v_f_f64m4(0, gvl);
+                gvl = __riscv_vsetvl_e64m4(n);
+                vr = __riscv_vfmv_v_f_f64m4(0, gvl);
                 int stride_y = inc_y * sizeof(FLOAT);
                 for(i=0,j=0; i<n/gvl; i++){
-                        vx = vle32_v_f32m2(&x[j], gvl);
-                        vy = vlse32_v_f32m2(&y[j*inc_y], stride_y, gvl);
-                        vr = vfwmacc_vv_f64m4(vr, vx, vy, gvl);
+                        vx = __riscv_vle32_v_f32m2(&x[j], gvl);
+                        vy = __riscv_vlse32_v_f32m2(&y[j*inc_y], stride_y, gvl);
+                        vr = __riscv_vfwmacc_vv_f64m4(vr, vx, vy, gvl);
                         j += gvl;
                 }
                 if(j > 0){
-                        v_res = vfredusum_vs_f64m4_f64m1(v_res, vr, v_z0, gvl);
-                        dot += (double)vfmv_f_s_f64m1_f64(v_res);
+                        v_res = __riscv_vfredusum_vs_f64m4_f64m1(vr, v_z0, gvl);
+                        dot += (double)__riscv_vfmv_f_s_f64m1_f64(v_res);
                 }
                 //tail
                 if(j < n){
-                        gvl = vsetvl_e64m4(n-j);
-                        vx = vle32_v_f32m2(&x[j], gvl);
-                        vy = vlse32_v_f32m2(&y[j*inc_y], stride_y, gvl);
-                        vfloat64m4_t vz = vfmv_v_f_f64m4(0, gvl);
-                        //vr = vfdot_vv_f32m2(vx, vy, gvl);
-                        vr = vfwmacc_vv_f64m4(vz, vx, vy, gvl);
-                        v_res = vfredusum_vs_f64m4_f64m1(v_res, vr, v_z0, gvl);
-                        dot += (double)vfmv_f_s_f64m1_f64(v_res);
+                        gvl = __riscv_vsetvl_e64m4(n-j);
+                        vx = __riscv_vle32_v_f32m2(&x[j], gvl);
+                        vy = __riscv_vlse32_v_f32m2(&y[j*inc_y], stride_y, gvl);
+                        vfloat64m4_t vz = __riscv_vfmv_v_f_f64m4(0, gvl);
+                        //vr = __riscv_vfdot_vv_f32m2(vx, vy, gvl);
+                        vr = __riscv_vfwmacc_vv_f64m4(vz, vx, vy, gvl);
+                        v_res = __riscv_vfredusum_vs_f64m4_f64m1(vr, v_z0, gvl);
+                        dot += (double)__riscv_vfmv_f_s_f64m1_f64(v_res);
                 }
         }else{
-                gvl = vsetvl_e64m4(n);
-                vr = vfmv_v_f_f64m4(0, gvl);
+                gvl = __riscv_vsetvl_e64m4(n);
+                vr = __riscv_vfmv_v_f_f64m4(0, gvl);
                 int stride_x = inc_x * sizeof(FLOAT);
                 int stride_y = inc_y * sizeof(FLOAT);
                 for(i=0,j=0; i<n/gvl; i++){
-                        vx = vlse32_v_f32m2(&x[j*inc_x], stride_x, gvl);
-                        vy = vlse32_v_f32m2(&y[j*inc_y], stride_y, gvl);
-                        vr = vfwmacc_vv_f64m4(vr, vx, vy, gvl);
+                        vx = __riscv_vlse32_v_f32m2(&x[j*inc_x], stride_x, gvl);
+                        vy = __riscv_vlse32_v_f32m2(&y[j*inc_y], stride_y, gvl);
+                        vr = __riscv_vfwmacc_vv_f64m4(vr, vx, vy, gvl);
                         j += gvl;
                 }
                 if(j > 0){
-                        v_res = vfredusum_vs_f64m4_f64m1(v_res, vr, v_z0, gvl);
-                        dot += (double)vfmv_f_s_f64m1_f64(v_res);
+                        v_res = __riscv_vfredusum_vs_f64m4_f64m1(vr, v_z0, gvl);
+                        dot += (double)__riscv_vfmv_f_s_f64m1_f64(v_res);
                 }
                 //tail
                 if(j < n){
-                        gvl = vsetvl_e64m4(n-j);
-                        vx = vlse32_v_f32m2(&x[j*inc_x], stride_x, gvl);
-                        vy = vlse32_v_f32m2(&y[j*inc_y], stride_y, gvl);
-                        vfloat64m4_t vz = vfmv_v_f_f64m4(0, gvl);
-                        //vr = vfdot_vv_f32m2(vx, vy, gvl);
-                        vr = vfwmacc_vv_f64m4(vz, vx, vy, gvl);
-                        v_res = vfredusum_vs_f64m4_f64m1(v_res, vr, v_z0, gvl);
-                        dot += (double)vfmv_f_s_f64m1_f64(v_res);
+                        gvl = __riscv_vsetvl_e64m4(n-j);
+                        vx = __riscv_vlse32_v_f32m2(&x[j*inc_x], stride_x, gvl);
+                        vy = __riscv_vlse32_v_f32m2(&y[j*inc_y], stride_y, gvl);
+                        vfloat64m4_t vz = __riscv_vfmv_v_f_f64m4(0, gvl);
+                        //vr = __riscv_vfdot_vv_f32m2(vx, vy, gvl);
+                        vr = __riscv_vfwmacc_vv_f64m4(vz, vx, vy, gvl);
+                        v_res = __riscv_vfredusum_vs_f64m4_f64m1(vr, v_z0, gvl);
+                        dot += (double)__riscv_vfmv_f_s_f64m1_f64(v_res);
                 }
         }
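
The change this patch makes throughout dsdot_vector.c is mechanical: the pre-1.0 intrinsics accepted by the T-Head toolchain are renamed to the ratified RVV 1.0 forms, which carry a __riscv_ prefix, and the unordered reduction loses its destination operand (four-argument vfredusum_vs_f64m4_f64m1(v_res, vr, v_z0, gvl) becomes three-argument __riscv_vfredusum_vs_f64m4_f64m1(vr, v_z0, gvl)). For reference, here is a minimal standalone sketch of the same widening float-to-double dot-product pattern against the v1.0 API; the function name, the _tu tail-undisturbed accumulation, and the build line are illustrative assumptions, not code from this patch.

/* sketch.c -- hypothetical standalone example, assuming a toolchain with
 * RVV 1.0 intrinsics; build e.g.: clang -O2 -march=rv64gcv -c sketch.c */
#include <riscv_vector.h>
#include <stddef.h>

double dsdot_sketch(size_t n, const float *x, const float *y)
{
        size_t vlmax = __riscv_vsetvlmax_e32m2();
        /* f64 accumulator at twice the LMUL of the f32 inputs */
        vfloat64m4_t acc = __riscv_vfmv_v_f_f64m4(0.0, vlmax);

        for (size_t i = 0; i < n; ) {
                size_t vl = __riscv_vsetvl_e32m2(n - i);   /* strip-mining */
                vfloat32m2_t vx = __riscv_vle32_v_f32m2(&x[i], vl);
                vfloat32m2_t vy = __riscv_vle32_v_f32m2(&y[i], vl);
                /* widening multiply-accumulate: acc[k] += (double)vx[k]*vy[k];
                 * the _tu (tail-undisturbed) form keeps elements past vl
                 * intact, so one accumulator also covers the tail pass */
                acc = __riscv_vfwmacc_vv_f64m4_tu(acc, vx, vy, vl);
                i += vl;
        }

        /* v1.0 reduction takes (vector, scalar seed, vl) -- no dest operand */
        vfloat64m1_t zero = __riscv_vfmv_v_f_f64m1(0.0, 1);
        vfloat64m1_t sum  = __riscv_vfredusum_vs_f64m4_f64m1(acc, zero, vlmax);
        return __riscv_vfmv_f_s_f64m1_f64(sum);
}

The kernel in this patch instead reuses the default tail-agnostic intrinsics and handles the remainder in an explicit //tail block with a fresh zero accumulator (vz), which achieves the same effect without policy intrinsics.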