Browse Source

This PR adapts to the latest spec changes

Add the prefix (__riscv_) to all RISC-V intrinsics
Update some intrinsics' parameters, such as vfredxxxx and vmerge
tags/v0.3.27
Heller Zheng 2 years ago
parent
commit
1374a2d08b
79 changed files with 2013 additions and 2031 deletions
  1. +27
    -27
      kernel/riscv64/amax_rvv.c
  2. +27
    -27
      kernel/riscv64/amin_rvv.c
  3. +27
    -27
      kernel/riscv64/asum_rvv.c
  4. +18
    -18
      kernel/riscv64/axpby_rvv.c
  5. +14
    -14
      kernel/riscv64/axpy_rvv.c
  6. +12
    -12
      kernel/riscv64/copy_rvv.c
  7. +33
    -33
      kernel/riscv64/dot_rvv.c
  8. +12
    -12
      kernel/riscv64/gemm_beta_rvv.c
  9. +14
    -14
      kernel/riscv64/gemm_ncopy_8_rvv.c
  10. +10
    -10
      kernel/riscv64/gemm_ncopy_rvv_v1.c
  11. +22
    -22
      kernel/riscv64/gemm_tcopy_8_rvv.c
  12. +8
    -8
      kernel/riscv64/gemm_tcopy_rvv_v1.c
  13. +12
    -12
      kernel/riscv64/gemmkernel_rvv_v1x8.c
  14. +14
    -14
      kernel/riscv64/gemv_n_rvv.c
  15. +26
    -27
      kernel/riscv64/gemv_t_rvv.c
  16. +57
    -58
      kernel/riscv64/iamax_rvv.c
  17. +58
    -59
      kernel/riscv64/iamin_rvv.c
  18. +56
    -57
      kernel/riscv64/imax_rvv.c
  19. +56
    -57
      kernel/riscv64/imin_rvv.c
  20. +63
    -64
      kernel/riscv64/izamax_rvv.c
  21. +60
    -61
      kernel/riscv64/izamin_rvv.c
  22. +25
    -25
      kernel/riscv64/max_rvv.c
  23. +25
    -25
      kernel/riscv64/min_rvv.c
  24. +23
    -23
      kernel/riscv64/nrm2_rvv.c
  25. +18
    -18
      kernel/riscv64/rot_rvv.c
  26. +16
    -16
      kernel/riscv64/scal_rvv.c
  27. +25
    -25
      kernel/riscv64/sum_rvv.c
  28. +12
    -16
      kernel/riscv64/swap_rvv.c
  29. +25
    -25
      kernel/riscv64/symm_lcopy_rvv_v1.c
  30. +25
    -25
      kernel/riscv64/symm_ucopy_rvv_v1.c
  31. +40
    -41
      kernel/riscv64/symv_L_rvv.c
  32. +40
    -41
      kernel/riscv64/symv_U_rvv.c
  33. +24
    -24
      kernel/riscv64/trmm_lncopy_rvv_v1.c
  34. +22
    -22
      kernel/riscv64/trmm_ltcopy_rvv_v1.c
  35. +24
    -24
      kernel/riscv64/trmm_uncopy_rvv_v1.c
  36. +22
    -22
      kernel/riscv64/trmm_utcopy_rvv_v1.c
  37. +14
    -14
      kernel/riscv64/trmmkernel_rvv_v1x8.c
  38. +27
    -27
      kernel/riscv64/trsm_kernel_LN_rvv_v1.c
  39. +27
    -27
      kernel/riscv64/trsm_kernel_LT_rvv_v1.c
  40. +27
    -27
      kernel/riscv64/trsm_kernel_RN_rvv_v1.c
  41. +21
    -21
      kernel/riscv64/trsm_kernel_RT_rvv_v1.c
  42. +20
    -20
      kernel/riscv64/trsm_lncopy_rvv_v1.c
  43. +20
    -20
      kernel/riscv64/trsm_ltcopy_rvv_v1.c
  44. +20
    -20
      kernel/riscv64/trsm_uncopy_rvv_v1.c
  45. +20
    -20
      kernel/riscv64/trsm_utcopy_rvv_v1.c
  46. +29
    -29
      kernel/riscv64/zamax_rvv.c
  47. +29
    -29
      kernel/riscv64/zamin_rvv.c
  48. +25
    -26
      kernel/riscv64/zasum_rvv.c
  49. +26
    -26
      kernel/riscv64/zaxpby_rvv.c
  50. +16
    -16
      kernel/riscv64/zaxpy_rvv.c
  51. +22
    -22
      kernel/riscv64/zcopy_rvv.c
  52. +32
    -33
      kernel/riscv64/zdot_rvv.c
  53. +16
    -16
      kernel/riscv64/zgemm_beta_rvv.c
  54. +12
    -12
      kernel/riscv64/zgemm_ncopy_4_rvv.c
  55. +8
    -8
      kernel/riscv64/zgemm_ncopy_rvv_v1.c
  56. +20
    -20
      kernel/riscv64/zgemm_tcopy_4_rvv.c
  57. +8
    -8
      kernel/riscv64/zgemm_tcopy_rvv_v1.c
  58. +18
    -18
      kernel/riscv64/zgemmkernel_rvv_v1x4.c
  59. +24
    -24
      kernel/riscv64/zgemv_n_rvv.c
  60. +30
    -31
      kernel/riscv64/zgemv_t_rvv.c
  61. +42
    -42
      kernel/riscv64/zhemm_ltcopy_rvv_v1.c
  62. +42
    -42
      kernel/riscv64/zhemm_utcopy_rvv_v1.c
  63. +33
    -33
      kernel/riscv64/znrm2_rvv.c
  64. +26
    -26
      kernel/riscv64/zrot_rvv.c
  65. +22
    -22
      kernel/riscv64/zscal_rvv.c
  66. +23
    -24
      kernel/riscv64/zsum_rvv.c
  67. +12
    -12
      kernel/riscv64/zswap_rvv.c
  68. +32
    -32
      kernel/riscv64/zsymm_lcopy_rvv_v1.c
  69. +32
    -32
      kernel/riscv64/zsymm_ucopy_rvv_v1.c
  70. +32
    -32
      kernel/riscv64/ztrmm_lncopy_rvv_v1.c
  71. +30
    -31
      kernel/riscv64/ztrmm_ltcopy_rvv_v1.c
  72. +32
    -32
      kernel/riscv64/ztrmm_uncopy_rvv_v1.c
  73. +30
    -30
      kernel/riscv64/ztrmm_utcopy_rvv_v1.c
  74. +30
    -30
      kernel/riscv64/ztrmmkernel_2x2_rvv.c
  75. +20
    -20
      kernel/riscv64/ztrmmkernel_rvv_v1x4.c
  76. +18
    -18
      kernel/riscv64/ztrsm_lncopy_rvv_v1.c
  77. +18
    -18
      kernel/riscv64/ztrsm_ltcopy_rvv_v1.c
  78. +18
    -18
      kernel/riscv64/ztrsm_uncopy_rvv_v1.c
  79. +18
    -18
      kernel/riscv64/ztrsm_utcopy_rvv_v1.c

+ 27
- 27
kernel/riscv64/amax_rvv.c View File

@@ -29,33 +29,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <float.h>

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define VSETVL_MAX vsetvlmax_e32m8()
#define VSETVL_MAX_M1 vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VFREDMAXVS_FLOAT vfredmax_vs_f32m8_f32m1
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMAXVV_FLOAT vfmax_vv_f32m8
#define VFABSV_FLOAT vfabs_v_f32m8
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f32m8_f32m1
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMAXVV_FLOAT __riscv_vfmax_vv_f32m8
#define VFABSV_FLOAT __riscv_vfabs_v_f32m8
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define VSETVL_MAX vsetvlmax_e64m8()
#define VSETVL_MAX_M1 vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VFREDMAXVS_FLOAT vfredmax_vs_f64m8_f64m1
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMAXVV_FLOAT vfmax_vv_f64m8
#define VFABSV_FLOAT vfabs_v_f64m8
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f64m8_f64m1
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMAXVV_FLOAT __riscv_vfmax_vv_f64m8
#define VFABSV_FLOAT __riscv_vfabs_v_f64m8
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#endif

FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -95,7 +95,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

}

v_res = VFREDMAXVS_FLOAT(v_res, vmax, v_res, vlmax);
v_res = VFREDMAXVS_FLOAT(vmax, v_res, vlmax);
maxf = VFMVFS_FLOAT_M1(v_res);

return(maxf);


+ 27
- 27
kernel/riscv64/amin_rvv.c View File

@@ -29,33 +29,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <float.h>

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define VSETVL_MAX vsetvlmax_e32m8()
#define VSETVL_MAX_M1 vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VFREDMINVS_FLOAT vfredmin_vs_f32m8_f32m1
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMINVV_FLOAT vfmin_vv_f32m8
#define VFABSV_FLOAT vfabs_v_f32m8
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f32m8_f32m1
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMINVV_FLOAT __riscv_vfmin_vv_f32m8
#define VFABSV_FLOAT __riscv_vfabs_v_f32m8
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define VSETVL_MAX vsetvlmax_e64m8()
#define VSETVL_MAX_M1 vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VFREDMINVS_FLOAT vfredmin_vs_f64m8_f64m1
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMINVV_FLOAT vfmin_vv_f64m8
#define VFABSV_FLOAT vfabs_v_f64m8
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f64m8_f64m1
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMINVV_FLOAT __riscv_vfmin_vv_f64m8
#define VFABSV_FLOAT __riscv_vfabs_v_f64m8
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#endif

FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -95,7 +95,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

}

v_res = VFREDMINVS_FLOAT(v_res, vmin, v_res, vlmax);
v_res = VFREDMINVS_FLOAT(vmin, v_res, vlmax);
minf = VFMVFS_FLOAT_M1(v_res);

return(minf);


+ 27
- 27
kernel/riscv64/asum_rvv.c View File

@@ -28,33 +28,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define VSETVL_MAX vsetvlmax_e32m8()
#define VSETVL_MAX_M1 vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VFADDVV_FLOAT vfadd_vv_f32m8
#define VFABSV_FLOAT vfabs_v_f32m8
#define VFREDSUMVS_FLOAT vfredusum_vs_f32m8_f32m1
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m8
#define VFABSV_FLOAT __riscv_vfabs_v_f32m8
#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f32m8_f32m1
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define VSETVL_MAX vsetvlmax_e64m8()
#define VSETVL_MAX_M1 vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VFADDVV_FLOAT vfadd_vv_f64m8
#define VFABSV_FLOAT vfabs_v_f64m8
#define VFREDSUMVS_FLOAT vfredusum_vs_f64m8_f64m1
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m8
#define VFABSV_FLOAT __riscv_vfabs_v_f64m8
#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f64m8_f64m1
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#endif

FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -93,7 +93,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

}

v_res = VFREDSUMVS_FLOAT(v_res, vsum, v_res, vlmax);
v_res = VFREDSUMVS_FLOAT(vsum, v_res, vlmax);
asumf = VFMVFS_FLOAT_M1(v_res);
return(asumf);
}

+ 18
- 18
kernel/riscv64/axpby_rvv.c View File

@@ -28,25 +28,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VSEV_FLOAT vse32_v_f32m8
#define VSSEV_FLOAT vsse32_v_f32m8
#define VFMACCVF_FLOAT vfmacc_vf_f32m8
#define VFMULVF_FLOAT vfmul_vf_f32m8
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VSEV_FLOAT __riscv_vse32_v_f32m8
#define VSSEV_FLOAT __riscv_vsse32_v_f32m8
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m8
#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VSEV_FLOAT vse64_v_f64m8
#define VSSEV_FLOAT vsse64_v_f64m8
#define VFMACCVF_FLOAT vfmacc_vf_f64m8
#define VFMULVF_FLOAT vfmul_vf_f64m8
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VSEV_FLOAT __riscv_vse64_v_f64m8
#define VSSEV_FLOAT __riscv_vsse64_v_f64m8
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m8
#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#endif

int CNAME(BLASLONG n, FLOAT alpha, FLOAT *x, BLASLONG inc_x, FLOAT beta, FLOAT *y, BLASLONG inc_y)


+ 14
- 14
kernel/riscv64/axpy_rvv.c View File

@@ -28,21 +28,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VSEV_FLOAT vse32_v_f32m8
#define VSSEV_FLOAT vsse32_v_f32m8
#define VFMACCVF_FLOAT vfmacc_vf_f32m8
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VSEV_FLOAT __riscv_vse32_v_f32m8
#define VSSEV_FLOAT __riscv_vsse32_v_f32m8
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m8
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VSEV_FLOAT vse64_v_f64m8
#define VSSEV_FLOAT vsse64_v_f64m8
#define VFMACCVF_FLOAT vfmacc_vf_f64m8
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VSEV_FLOAT __riscv_vse64_v_f64m8
#define VSSEV_FLOAT __riscv_vsse64_v_f64m8
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m8
#endif

int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2)


+ 12
- 12
kernel/riscv64/copy_rvv.c View File

@@ -28,19 +28,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VSEV_FLOAT vse32_v_f32m8
#define VSSEV_FLOAT vsse32_v_f32m8
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VSEV_FLOAT __riscv_vse32_v_f32m8
#define VSSEV_FLOAT __riscv_vsse32_v_f32m8
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VSEV_FLOAT vse64_v_f64m8
#define VSSEV_FLOAT vsse64_v_f64m8
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VSEV_FLOAT __riscv_vse64_v_f64m8
#define VSSEV_FLOAT __riscv_vsse64_v_f64m8
#endif

int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y)


+ 33
- 33
kernel/riscv64/dot_rvv.c View File

@@ -37,24 +37,24 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y)

if ( n <= 0 ) return(dot);

size_t vlmax = vsetvlmax_e64m8();
vfloat64m8_t vr = vfmv_v_f_f64m8(0, vlmax);
size_t vlmax = __riscv_vsetvlmax_e64m8();
vfloat64m8_t vr = __riscv_vfmv_v_f_f64m8(0, vlmax);

if(inc_x == 1 && inc_y == 1) {

for (size_t vl; n > 0; n -= vl, x += vl, y += vl) {
vl = vsetvl_e64m8(n);
vl = __riscv_vsetvl_e64m8(n);

#if !defined(DOUBLE)
vfloat32m4_t vx = vle32_v_f32m4(x, vl);
vfloat32m4_t vy = vle32_v_f32m4(y, vl);
vfloat32m4_t vx = __riscv_vle32_v_f32m4(x, vl);
vfloat32m4_t vy = __riscv_vle32_v_f32m4(y, vl);

vr = vfwmacc_vv_f64m8(vr, vx, vy, vl);
vr = __riscv_vfwmacc_vv_f64m8(vr, vx, vy, vl);
#else
vfloat64m8_t vx = vle64_v_f64m8(x, vl);
vfloat64m8_t vy = vle64_v_f64m8(y, vl);
vfloat64m8_t vx = __riscv_vle64_v_f64m8(x, vl);
vfloat64m8_t vy = __riscv_vle64_v_f64m8(y, vl);

vr = vfmacc_vv_f64m8(vr, vx, vy, vl);
vr = __riscv_vfmacc_vv_f64m8(vr, vx, vy, vl);
#endif
}

@@ -63,18 +63,18 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y)
BLASLONG stride_y = inc_y * sizeof(FLOAT);

for (size_t vl; n > 0; n -= vl, x += vl, y += vl*inc_y) {
vl = vsetvl_e64m8(n);
vl = __riscv_vsetvl_e64m8(n);

#if !defined(DOUBLE)
vfloat32m4_t vx = vle32_v_f32m4(x, vl);
vfloat32m4_t vy = vlse32_v_f32m4(y, stride_y, vl);
vfloat32m4_t vx = __riscv_vle32_v_f32m4(x, vl);
vfloat32m4_t vy = __riscv_vlse32_v_f32m4(y, stride_y, vl);

vr = vfwmacc_vv_f64m8(vr, vx, vy, vl);
vr = __riscv_vfwmacc_vv_f64m8(vr, vx, vy, vl);
#else
vfloat64m8_t vx = vle64_v_f64m8(x, vl);
vfloat64m8_t vy = vlse64_v_f64m8(y, stride_y, vl);
vfloat64m8_t vx = __riscv_vle64_v_f64m8(x, vl);
vfloat64m8_t vy = __riscv_vlse64_v_f64m8(y, stride_y, vl);

vr = vfmacc_vv_f64m8(vr, vx, vy, vl);
vr = __riscv_vfmacc_vv_f64m8(vr, vx, vy, vl);
#endif
}
} else if (1 == inc_y) {
@@ -82,18 +82,18 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y)
BLASLONG stride_x = inc_x * sizeof(FLOAT);

for (size_t vl; n > 0; n -= vl, x += vl*inc_x, y += vl) {
vl = vsetvl_e64m8(n);
vl = __riscv_vsetvl_e64m8(n);

#if !defined(DOUBLE)
vfloat32m4_t vx = vlse32_v_f32m4(x, stride_x, vl);
vfloat32m4_t vy = vle32_v_f32m4(y, vl);
vfloat32m4_t vx = __riscv_vlse32_v_f32m4(x, stride_x, vl);
vfloat32m4_t vy = __riscv_vle32_v_f32m4(y, vl);

vr = vfwmacc_vv_f64m8(vr, vx, vy, vl);
vr = __riscv_vfwmacc_vv_f64m8(vr, vx, vy, vl);
#else
vfloat64m8_t vx = vlse64_v_f64m8(x, stride_x, vl);
vfloat64m8_t vy = vle64_v_f64m8(y, vl);
vfloat64m8_t vx = __riscv_vlse64_v_f64m8(x, stride_x, vl);
vfloat64m8_t vy = __riscv_vle64_v_f64m8(y, vl);

vr = vfmacc_vv_f64m8(vr, vx, vy, vl);
vr = __riscv_vfmacc_vv_f64m8(vr, vx, vy, vl);
#endif
}
} else {
@@ -102,25 +102,25 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y)
BLASLONG stride_y = inc_y * sizeof(FLOAT);

for (size_t vl; n > 0; n -= vl, x += vl*inc_x, y += vl*inc_y) {
vl = vsetvl_e64m8(n);
vl = __riscv_vsetvl_e64m8(n);

#if !defined(DOUBLE)
vfloat32m4_t vx = vlse32_v_f32m4(x, stride_x, vl);
vfloat32m4_t vy = vlse32_v_f32m4(y, stride_y, vl);
vfloat32m4_t vx = __riscv_vlse32_v_f32m4(x, stride_x, vl);
vfloat32m4_t vy = __riscv_vlse32_v_f32m4(y, stride_y, vl);

vr = vfwmacc_vv_f64m8(vr, vx, vy, vl);
vr = __riscv_vfwmacc_vv_f64m8(vr, vx, vy, vl);
#else
vfloat64m8_t vx = vlse64_v_f64m8(x, stride_x, vl);
vfloat64m8_t vy = vlse64_v_f64m8(y, stride_y, vl);
vfloat64m8_t vx = __riscv_vlse64_v_f64m8(x, stride_x, vl);
vfloat64m8_t vy = __riscv_vlse64_v_f64m8(y, stride_y, vl);

vr = vfmacc_vv_f64m8(vr, vx, vy, vl);
vr = __riscv_vfmacc_vv_f64m8(vr, vx, vy, vl);
#endif
}
}

vfloat64m1_t vec_zero = vfmv_v_f_f64m1(0, vlmax);
vfloat64m1_t vec_sum = vfredusum_vs_f64m8_f64m1(vec_zero, vr, vec_zero, vlmax);
dot = vfmv_f_s_f64m1_f64(vec_sum);
vfloat64m1_t vec_zero = __riscv_vfmv_v_f_f64m1(0, vlmax);
vfloat64m1_t vec_sum = __riscv_vfredusum_vs_f64m8_f64m1(vr, vec_zero, vlmax);
dot = __riscv_vfmv_f_s_f64m1_f64(vec_sum);

return(dot);
}

+ 12
- 12
kernel/riscv64/gemm_beta_rvv.c View File

@@ -28,19 +28,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT vle32_v_f32m8
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VFMULVF_FLOAT vfmul_vf_f32m8
#define VSEV_FLOAT vse32_v_f32m8
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m8
#define VSEV_FLOAT __riscv_vse32_v_f32m8
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT vle64_v_f64m8
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VFMULVF_FLOAT vfmul_vf_f64m8
#define VSEV_FLOAT vse64_v_f64m8
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m8
#define VSEV_FLOAT __riscv_vse64_v_f64m8
#endif

// Optimizes the implementation in ../generic/gemm_beta.c


+ 14
- 14
kernel/riscv64/gemm_ncopy_8_rvv.c View File

@@ -28,21 +28,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m1(n)
#define FLOAT_V_T vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m1
#define VSEV_FLOAT vse32_v_f32m1
#define VSSEG2_FLOAT vsseg2e32_v_f32m1
#define VSSEG4_FLOAT vsseg4e32_v_f32m1
#define VSSEG8_FLOAT vsseg8e32_v_f32m1
#define VSETVL(n) __riscv_vsetvl_e32m1(n)
#define FLOAT_V_T vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m1
#define VSEV_FLOAT __riscv_vse32_v_f32m1
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m1
#define VSSEG4_FLOAT __riscv_vsseg4e32_v_f32m1
#define VSSEG8_FLOAT __riscv_vsseg8e32_v_f32m1
#else
#define VSETVL(n) vsetvl_e64m1(n)
#define FLOAT_V_T vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m1
#define VSEV_FLOAT vse64_v_f64m1
#define VSSEG2_FLOAT vsseg2e64_v_f64m1
#define VSSEG4_FLOAT vsseg4e64_v_f64m1
#define VSSEG8_FLOAT vsseg8e64_v_f64m1
#define VSETVL(n) __riscv_vsetvl_e64m1(n)
#define FLOAT_V_T vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m1
#define VSEV_FLOAT __riscv_vse64_v_f64m1
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m1
#define VSSEG4_FLOAT __riscv_vsseg4e64_v_f64m1
#define VSSEG8_FLOAT __riscv_vsseg8e64_v_f64m1
#endif

// Optimizes the implementation in ../generic/gemm_ncopy_8.c


+ 10
- 10
kernel/riscv64/gemm_ncopy_rvv_v1.c View File

@@ -28,17 +28,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VLSEV_FLOAT vlse32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VLSEV_FLOAT vlse64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#endif

int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b)


+ 22
- 22
kernel/riscv64/gemm_tcopy_8_rvv.c View File

@@ -28,29 +28,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m1(n)
#define FLOAT_V_T vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m1
#define VLSEV_FLOAT vlse32_v_f32m1
#define VSEV_FLOAT vse32_v_f32m1
#define VLSSEG2_FLOAT vlsseg2e32_v_f32m1
#define VSSEG2_FLOAT vsseg2e32_v_f32m1
#define VLSSEG4_FLOAT vlsseg4e32_v_f32m1
#define VSSEG4_FLOAT vsseg4e32_v_f32m1
#define VLSSEG8_FLOAT vlsseg8e32_v_f32m1
#define VSSEG8_FLOAT vsseg8e32_v_f32m1
#define VSETVL(n) __riscv_vsetvl_e32m1(n)
#define FLOAT_V_T vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m1
#define VLSEV_FLOAT __riscv_vlse32_v_f32m1
#define VSEV_FLOAT __riscv_vse32_v_f32m1
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m1
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m1
#define VLSSEG4_FLOAT __riscv_vlsseg4e32_v_f32m1
#define VSSEG4_FLOAT __riscv_vsseg4e32_v_f32m1
#define VLSSEG8_FLOAT __riscv_vlsseg8e32_v_f32m1
#define VSSEG8_FLOAT __riscv_vsseg8e32_v_f32m1
#else
#define VSETVL(n) vsetvl_e64m1(n)
#define FLOAT_V_T vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m1
#define VLSEV_FLOAT vlse64_v_f64m1
#define VSEV_FLOAT vse64_v_f64m1
#define VLSSEG2_FLOAT vlsseg2e64_v_f64m1
#define VSSEG2_FLOAT vsseg2e64_v_f64m1
#define VLSSEG4_FLOAT vlsseg4e64_v_f64m1
#define VSSEG4_FLOAT vsseg4e64_v_f64m1
#define VLSSEG8_FLOAT vlsseg8e64_v_f64m1
#define VSSEG8_FLOAT vsseg8e64_v_f64m1
#define VSETVL(n) __riscv_vsetvl_e64m1(n)
#define FLOAT_V_T vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m1
#define VLSEV_FLOAT __riscv_vlse64_v_f64m1
#define VSEV_FLOAT __riscv_vse64_v_f64m1
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m1
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m1
#define VLSSEG4_FLOAT __riscv_vlsseg4e64_v_f64m1
#define VSSEG4_FLOAT __riscv_vsseg4e64_v_f64m1
#define VLSSEG8_FLOAT __riscv_vlsseg8e64_v_f64m1
#define VSSEG8_FLOAT __riscv_vsseg8e64_v_f64m1
#endif

int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b)


+ 8
- 8
kernel/riscv64/gemm_tcopy_rvv_v1.c View File

@@ -28,15 +28,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#endif

int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b)


+ 12
- 12
kernel/riscv64/gemmkernel_rvv_v1x8.c View File

@@ -28,19 +28,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VFMVVF_FLOAT vfmv_v_f_f32m2
#define VFMACCVF_FLOAT vfmacc_vf_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VFMVVF_FLOAT vfmv_v_f_f64m2
#define VFMACCVF_FLOAT vfmacc_vf_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2
#endif

int CNAME(BLASLONG bm, BLASLONG bn, BLASLONG bk, FLOAT alpha, IFLOAT* ba, IFLOAT* bb, FLOAT* C, BLASLONG ldc


+ 14
- 14
kernel/riscv64/gemv_n_rvv.c View File

@@ -28,21 +28,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VSEV_FLOAT vse32_v_f32m8
#define VSSEV_FLOAT vsse32_v_f32m8
#define VFMACCVF_FLOAT vfmacc_vf_f32m8
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VSEV_FLOAT __riscv_vse32_v_f32m8
#define VSSEV_FLOAT __riscv_vsse32_v_f32m8
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m8
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VSEV_FLOAT vse64_v_f64m8
#define VSSEV_FLOAT vsse64_v_f64m8
#define VFMACCVF_FLOAT vfmacc_vf_f64m8
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VSEV_FLOAT __riscv_vse64_v_f64m8
#define VSSEV_FLOAT __riscv_vsse64_v_f64m8
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m8
#endif

int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer)


+ 26
- 27
kernel/riscv64/gemv_t_rvv.c View File

@@ -28,31 +28,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define VSETVL_MAX vsetvlmax_e32m8()
#define VSETVL_MAX_M1 vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VFREDSUM_FLOAT vfredusum_vs_f32m8_f32m1
#define VFMACCVV_FLOAT vfmacc_vv_f32m8
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m8_f32m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define VSETVL_MAX vsetvlmax_e64m8()
#define VSETVL_MAX_M1 vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VFREDSUM_FLOAT vfredusum_vs_f64m8_f64m1
#define VFMACCVV_FLOAT vfmacc_vv_f64m8
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m8_f64m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#endif

int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer)
@@ -63,7 +63,6 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
FLOAT_V_T va, vx, vr;
FLOAT_V_T_M1 v_res, v_z0;
size_t vlmax = VSETVL_MAX_M1;
v_res = VFMVVF_FLOAT_M1(0, vlmax);
v_z0 = VFMVVF_FLOAT_M1(0, vlmax);
vlmax = VSETVL_MAX;

@@ -83,7 +82,7 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
vr = VFMACCVV_FLOAT(vr, va, vx, vl);
}

v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vlmax);
v_res = VFREDSUM_FLOAT(vr, v_z0, vlmax);
*y += alpha * VFMVFS_FLOAT_M1(v_res);
y += inc_y;
a += lda;
@@ -107,7 +106,7 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
vr = VFMACCVV_FLOAT(vr, va, vx, vl);
}

v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vlmax);
v_res = VFREDSUM_FLOAT(vr, v_z0, vlmax);
*y += alpha * VFMVFS_FLOAT_M1(v_res);
y += inc_y;
a += lda;


+ 57
- 58
kernel/riscv64/iamax_rvv.c View File

@@ -28,57 +28,57 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if defined(DOUBLE)
#define VSETVL(n) vsetvl_e64m8(n)
#define VSETVL_MAX vsetvlmax_e64m8()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VFREDMAXVS_FLOAT vfredmax_vs_f64m8_f64m1
#define MASK_T vbool8_t
#define VMFLTVF_FLOAT vmflt_vf_f64m8_b8
#define VMFLTVV_FLOAT vmflt_vv_f64m8_b8
#define VMFGEVF_FLOAT vmfge_vf_f64m8_b8
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFABSV_FLOAT vfabs_v_f64m8
#define VFMAXVV_FLOAT vfmax_vv_f64m8
#define VFIRSTM vfirst_m_b8
#define UINT_V_T vuint64m8_t
#define VIDV_MASK_UINT vid_v_u64m8_m
#define VIDV_UINT vid_v_u64m8
#define VADDVX_MASK_UINT vadd_vx_u64m8_m
#define VADDVX_UINT vadd_vx_u64m8
#define VMVVX_UINT vmv_v_x_u64m8
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSLIDEDOWN_UINT vslidedown_vx_u64m8
#define VMVVXS_UINT vmv_x_s_u64m8_u64
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f64m8_f64m1
#define MASK_T vbool8_t
#define VMFLTVF_FLOAT __riscv_vmflt_vf_f64m8_b8
#define VMFLTVV_FLOAT __riscv_vmflt_vv_f64m8_b8
#define VMFGEVF_FLOAT __riscv_vmfge_vf_f64m8_b8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFABSV_FLOAT __riscv_vfabs_v_f64m8
#define VFMAXVV_FLOAT __riscv_vfmax_vv_f64m8
#define VFIRSTM __riscv_vfirst_m_b8
#define UINT_V_T vuint64m8_t
#define VIDV_MASK_UINT __riscv_vid_v_u64m8_m
#define VIDV_UINT __riscv_vid_v_u64m8
#define VADDVX_MASK_UINT __riscv_vadd_vx_u64m8_m
#define VADDVX_UINT __riscv_vadd_vx_u64m8
#define VMVVX_UINT __riscv_vmv_v_x_u64m8
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u64m8
#define VMVVXS_UINT __riscv_vmv_x_s_u64m8_u64
#else
#define VSETVL(n) vsetvl_e32m8(n)
#define VSETVL_MAX vsetvlmax_e32m8()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VFREDMAXVS_FLOAT vfredmax_vs_f32m8_f32m1
#define MASK_T vbool4_t
#define VMFLTVF_FLOAT vmflt_vf_f32m8_b4
#define VMFLTVV_FLOAT vmflt_vv_f32m8_b4
#define VMFGEVF_FLOAT vmfge_vf_f32m8_b4
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFABSV_FLOAT vfabs_v_f32m8
#define VFMAXVV_FLOAT vfmax_vv_f32m8
#define VFIRSTM vfirst_m_b4
#define UINT_V_T vuint32m8_t
#define VIDV_MASK_UINT vid_v_u32m8_m
#define VIDV_UINT vid_v_u32m8
#define VADDVX_MASK_UINT vadd_vx_u32m8_m
#define VADDVX_UINT vadd_vx_u32m8
#define VMVVX_UINT vmv_v_x_u32m8
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSLIDEDOWN_UINT vslidedown_vx_u32m8
#define VMVVXS_UINT vmv_x_s_u32m8_u32
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f32m8_f32m1
#define MASK_T vbool4_t
#define VMFLTVF_FLOAT __riscv_vmflt_vf_f32m8_b4
#define VMFLTVV_FLOAT __riscv_vmflt_vv_f32m8_b4
#define VMFGEVF_FLOAT __riscv_vmfge_vf_f32m8_b4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFABSV_FLOAT __riscv_vfabs_v_f32m8
#define VFMAXVV_FLOAT __riscv_vfmax_vv_f32m8
#define VFIRSTM __riscv_vfirst_m_b4
#define UINT_V_T vuint32m8_t
#define VIDV_MASK_UINT __riscv_vid_v_u32m8_m
#define VIDV_UINT __riscv_vid_v_u32m8
#define VADDVX_MASK_UINT __riscv_vadd_vx_u32m8_m
#define VADDVX_UINT __riscv_vadd_vx_u32m8
#define VMVVX_UINT __riscv_vmv_v_x_u32m8
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u32m8
#define VMVVXS_UINT __riscv_vmv_x_s_u32m8_u32
#endif

BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -106,8 +106,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

//index where element greater than v_max
mask = VMFLTVV_FLOAT(v_max, vx, vl);
v_max_index = VIDV_MASK_UINT(mask, v_max_index, vl);
v_max_index = VADDVX_MASK_UINT(mask, v_max_index, v_max_index, j, vl);
v_max_index = VIDV_MASK_UINT(mask, vl);
v_max_index = VADDVX_MASK_UINT(mask, v_max_index, j, vl);

//update v_max
v_max = VFMAXVV_FLOAT(v_max, vx, vl);
@@ -125,8 +125,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

//index where element greater than v_max
mask = VMFLTVV_FLOAT(v_max, vx, vl);
v_max_index = VIDV_MASK_UINT(mask, v_max_index, vl);
v_max_index = VADDVX_MASK_UINT(mask, v_max_index, v_max_index, j, vl);
v_max_index = VIDV_MASK_UINT(mask, vl);
v_max_index = VADDVX_MASK_UINT(mask, v_max_index, j, vl);

//update v_max
v_max = VFMAXVV_FLOAT(v_max, vx, vl);
@@ -134,16 +134,15 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
}

FLOAT_V_T_M1 v_res, v_z0;
FLOAT_V_T_M1 v_res;
v_res = VFMVVF_FLOAT_M1(0, vlmax);
v_z0 = VFMVVF_FLOAT_M1(0, vlmax);

v_res = VFREDMAXVS_FLOAT(v_res, v_max, v_z0, vlmax);
v_res = VFREDMAXVS_FLOAT(v_max, v_res, vlmax);
maxf = VFMVFS_FLOAT_M1(v_res);
mask = VMFGEVF_FLOAT(v_max, maxf, vlmax);
max_index = VFIRSTM(mask, vlmax);
v_max_index = VSLIDEDOWN_UINT(v_max_index, v_max_index, max_index, vlmax);
v_max_index = VSLIDEDOWN_UINT(v_max_index, max_index, vlmax);
max_index = VMVVXS_UINT(v_max_index);

return(max_index+1);


+ 58
- 59
kernel/riscv64/iamin_rvv.c View File

@@ -29,57 +29,57 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <float.h>

#if defined(DOUBLE)
#define VSETVL(n) vsetvl_e64m8(n)
#define VSETVL_MAX vsetvlmax_e64m8()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VFREDMINVS_FLOAT vfredmin_vs_f64m8_f64m1
#define MASK_T vbool8_t
#define VMFLTVF_FLOAT vmflt_vf_f64m8_b8
#define VMFLTVV_FLOAT vmflt_vv_f64m8_b8
#define VMFLEVF_FLOAT vmfle_vf_f64m8_b8
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFABSV_FLOAT vfabs_v_f64m8
#define VFMINVV_FLOAT vfmin_vv_f64m8
#define VFIRSTM vfirst_m_b8
#define UINT_V_T vuint64m8_t
#define VIDV_MASK_UINT vid_v_u64m8_m
#define VIDV_UINT vid_v_u64m8
#define VADDVX_MASK_UINT vadd_vx_u64m8_m
#define VADDVX_UINT vadd_vx_u64m8
#define VMVVX_UINT vmv_v_x_u64m8
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSLIDEDOWN_UINT vslidedown_vx_u64m8
#define VMVVXS_UINT vmv_x_s_u64m8_u64
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f64m8_f64m1
#define MASK_T vbool8_t
#define VMFLTVF_FLOAT __riscv_vmflt_vf_f64m8_b8
#define VMFLTVV_FLOAT __riscv_vmflt_vv_f64m8_b8
#define VMFLEVF_FLOAT __riscv_vmfle_vf_f64m8_b8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFABSV_FLOAT __riscv_vfabs_v_f64m8
#define VFMINVV_FLOAT __riscv_vfmin_vv_f64m8
#define VFIRSTM __riscv_vfirst_m_b8
#define UINT_V_T vuint64m8_t
#define VIDV_MASK_UINT __riscv_vid_v_u64m8_m
#define VIDV_UINT __riscv_vid_v_u64m8
#define VADDVX_MASK_UINT __riscv_vadd_vx_u64m8_m
#define VADDVX_UINT __riscv_vadd_vx_u64m8
#define VMVVX_UINT __riscv_vmv_v_x_u64m8
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u64m8
#define VMVVXS_UINT __riscv_vmv_x_s_u64m8_u64
#else
#define VSETVL(n) vsetvl_e32m8(n)
#define VSETVL_MAX vsetvlmax_e32m8()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VFREDMINVS_FLOAT vfredmin_vs_f32m8_f32m1
#define MASK_T vbool4_t
#define VMFLTVF_FLOAT vmflt_vf_f32m8_b4
#define VMFLTVV_FLOAT vmflt_vv_f32m8_b4
#define VMFLEVF_FLOAT vmfle_vf_f32m8_b4
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFABSV_FLOAT vfabs_v_f32m8
#define VFMINVV_FLOAT vfmin_vv_f32m8
#define VFIRSTM vfirst_m_b4
#define UINT_V_T vuint32m8_t
#define VIDV_MASK_UINT vid_v_u32m8_m
#define VIDV_UINT vid_v_u32m8
#define VADDVX_MASK_UINT vadd_vx_u32m8_m
#define VADDVX_UINT vadd_vx_u32m8
#define VMVVX_UINT vmv_v_x_u32m8
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSLIDEDOWN_UINT vslidedown_vx_u32m8
#define VMVVXS_UINT vmv_x_s_u32m8_u32
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f32m8_f32m1
#define MASK_T vbool4_t
#define VMFLTVF_FLOAT __riscv_vmflt_vf_f32m8_b4
#define VMFLTVV_FLOAT __riscv_vmflt_vv_f32m8_b4
#define VMFLEVF_FLOAT __riscv_vmfle_vf_f32m8_b4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFABSV_FLOAT __riscv_vfabs_v_f32m8
#define VFMINVV_FLOAT __riscv_vfmin_vv_f32m8
#define VFIRSTM __riscv_vfirst_m_b4
#define UINT_V_T vuint32m8_t
#define VIDV_MASK_UINT __riscv_vid_v_u32m8_m
#define VIDV_UINT __riscv_vid_v_u32m8
#define VADDVX_MASK_UINT __riscv_vadd_vx_u32m8_m
#define VADDVX_UINT __riscv_vadd_vx_u32m8
#define VMVVX_UINT __riscv_vmv_v_x_u32m8
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u32m8
#define VMVVXS_UINT __riscv_vmv_x_s_u32m8_u32
#endif

BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -107,8 +107,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

// index where element less than v_min
mask = VMFLTVV_FLOAT(vx, v_min, vl);
v_min_index = VIDV_MASK_UINT(mask, v_min_index, vl);
v_min_index = VADDVX_MASK_UINT(mask, v_min_index, v_min_index, j, vl);
v_min_index = VIDV_MASK_UINT(mask, vl);
v_min_index = VADDVX_MASK_UINT(mask, v_min_index, j, vl);

//update v_min and start_index j
v_min = VFMINVV_FLOAT(v_min, vx, vl);
@@ -126,8 +126,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

// index where element less than v_min
mask = VMFLTVV_FLOAT(vx, v_min, vl);
v_min_index = VIDV_MASK_UINT(mask, v_min_index, vl);
v_min_index = VADDVX_MASK_UINT(mask, v_min_index, v_min_index, j, vl);
v_min_index = VIDV_MASK_UINT(mask, vl);
v_min_index = VADDVX_MASK_UINT(mask, v_min_index, j, vl);

//update v_min and start_index j
v_min = VFMINVV_FLOAT(v_min, vx, vl);
@@ -135,16 +135,15 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
}

FLOAT_V_T_M1 v_res, v_max;
v_res = VFMVVF_FLOAT_M1(0, vlmax);
v_max = VFMVVF_FLOAT_M1(FLT_MAX, vlmax);
FLOAT_V_T_M1 v_res;
v_res = VFMVVF_FLOAT_M1(FLT_MAX, vlmax);

v_res = VFREDMINVS_FLOAT(v_res, v_min, v_max, vlmax);
v_res = VFREDMINVS_FLOAT(v_min, v_res, vlmax);
minf = VFMVFS_FLOAT_M1(v_res);
mask = VMFLEVF_FLOAT(v_min, minf, vlmax);
min_index = VFIRSTM(mask, vlmax);

v_min_index = VSLIDEDOWN_UINT(v_min_index, v_min_index, min_index, vlmax);
v_min_index = VSLIDEDOWN_UINT(v_min_index, min_index, vlmax);
min_index = VMVVXS_UINT(v_min_index);

return(min_index+1);


+ 56
- 57
kernel/riscv64/imax_rvv.c View File

@@ -29,55 +29,55 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <float.h>

#if defined(DOUBLE)
#define VSETVL(n) vsetvl_e64m8(n)
#define VSETVL_MAX vsetvlmax_e64m8()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VFREDMAXVS_FLOAT vfredmax_vs_f64m8_f64m1
#define MASK_T vbool8_t
#define VMFLTVF_FLOAT vmflt_vf_f64m8_b8
#define VMFLTVV_FLOAT vmflt_vv_f64m8_b8
#define VMFGEVF_FLOAT vmfge_vf_f64m8_b8
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMAXVV_FLOAT vfmax_vv_f64m8
#define VFIRSTM vfirst_m_b8
#define UINT_V_T vuint64m8_t
#define VIDV_MASK_UINT vid_v_u64m8_m
#define VIDV_UINT vid_v_u64m8
#define VADDVX_MASK_UINT vadd_vx_u64m8_m
#define VADDVX_UINT vadd_vx_u64m8
#define VMVVX_UINT vmv_v_x_u64m8
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSLIDEDOWN_UINT vslidedown_vx_u64m8
#define VMVVXS_UINT vmv_x_s_u64m8_u64
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f64m8_f64m1
#define MASK_T vbool8_t
#define VMFLTVF_FLOAT __riscv_vmflt_vf_f64m8_b8
#define VMFLTVV_FLOAT __riscv_vmflt_vv_f64m8_b8
#define VMFGEVF_FLOAT __riscv_vmfge_vf_f64m8_b8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMAXVV_FLOAT __riscv_vfmax_vv_f64m8
#define VFIRSTM __riscv_vfirst_m_b8
#define UINT_V_T vuint64m8_t
#define VIDV_MASK_UINT __riscv_vid_v_u64m8_m
#define VIDV_UINT __riscv_vid_v_u64m8
#define VADDVX_MASK_UINT __riscv_vadd_vx_u64m8_m
#define VADDVX_UINT __riscv_vadd_vx_u64m8
#define VMVVX_UINT __riscv_vmv_v_x_u64m8
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u64m8
#define VMVVXS_UINT __riscv_vmv_x_s_u64m8_u64
#else
#define VSETVL(n) vsetvl_e32m8(n)
#define VSETVL_MAX vsetvlmax_e32m8()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VFREDMAXVS_FLOAT vfredmax_vs_f32m8_f32m1
#define MASK_T vbool4_t
#define VMFLTVF_FLOAT vmflt_vf_f32m8_b4
#define VMFLTVV_FLOAT vmflt_vv_f32m8_b4
#define VMFGEVF_FLOAT vmfge_vf_f32m8_b4
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMAXVV_FLOAT vfmax_vv_f32m8
#define VFIRSTM vfirst_m_b4
#define UINT_V_T vuint32m8_t
#define VIDV_MASK_UINT vid_v_u32m8_m
#define VIDV_UINT vid_v_u32m8
#define VADDVX_MASK_UINT vadd_vx_u32m8_m
#define VADDVX_UINT vadd_vx_u32m8
#define VMVVX_UINT vmv_v_x_u32m8
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSLIDEDOWN_UINT vslidedown_vx_u32m8
#define VMVVXS_UINT vmv_x_s_u32m8_u32
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f32m8_f32m1
#define MASK_T vbool4_t
#define VMFLTVF_FLOAT __riscv_vmflt_vf_f32m8_b4
#define VMFLTVV_FLOAT __riscv_vmflt_vv_f32m8_b4
#define VMFGEVF_FLOAT __riscv_vmfge_vf_f32m8_b4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMAXVV_FLOAT __riscv_vfmax_vv_f32m8
#define VFIRSTM __riscv_vfirst_m_b4
#define UINT_V_T vuint32m8_t
#define VIDV_MASK_UINT __riscv_vid_v_u32m8_m
#define VIDV_UINT __riscv_vid_v_u32m8
#define VADDVX_MASK_UINT __riscv_vadd_vx_u32m8_m
#define VADDVX_UINT __riscv_vadd_vx_u32m8
#define VMVVX_UINT __riscv_vmv_v_x_u32m8
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u32m8
#define VMVVXS_UINT __riscv_vmv_x_s_u32m8_u32
#endif

BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -104,8 +104,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

//index where element greater than v_max
mask = VMFLTVV_FLOAT(v_max, vx, vl);
v_max_index = VIDV_MASK_UINT(mask, v_max_index, vl);
v_max_index = VADDVX_MASK_UINT(mask, v_max_index, v_max_index, j, vl);
v_max_index = VIDV_MASK_UINT(mask, vl);
v_max_index = VADDVX_MASK_UINT(mask, v_max_index, j, vl);

//update v_max and start_index j
v_max = VFMAXVV_FLOAT(v_max, vx, vl);
@@ -122,8 +122,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

//index where element greater than v_max
mask = VMFLTVV_FLOAT(v_max, vx, vl);
v_max_index = VIDV_MASK_UINT(mask, v_max_index, vl);
v_max_index = VADDVX_MASK_UINT(mask, v_max_index, v_max_index, j, vl);
v_max_index = VIDV_MASK_UINT(mask, vl);
v_max_index = VADDVX_MASK_UINT(mask, v_max_index, j, vl);

//update v_max and start_index j
v_max = VFMAXVV_FLOAT(v_max, vx, vl);
@@ -131,16 +131,15 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
}

FLOAT_V_T_M1 v_res, v_min;
v_res = VFMVVF_FLOAT_M1(0, vlmax);
v_min = VFMVVF_FLOAT_M1(-FLT_MAX, vlmax);
FLOAT_V_T_M1 v_res;
v_res = VFMVVF_FLOAT_M1(-FLT_MAX, vlmax);

v_res = VFREDMAXVS_FLOAT(v_res, v_max, v_min, vlmax);
v_res = VFREDMAXVS_FLOAT(v_max, v_res, vlmax);
maxf = VFMVFS_FLOAT_M1(v_res);
mask = VMFGEVF_FLOAT(v_max, maxf, vlmax);
max_index = VFIRSTM(mask, vlmax);
v_max_index = VSLIDEDOWN_UINT(v_max_index, v_max_index, max_index, vlmax);
v_max_index = VSLIDEDOWN_UINT(v_max_index, max_index, vlmax);
max_index = VMVVXS_UINT(v_max_index);

return(max_index+1);


+ 56
- 57
kernel/riscv64/imin_rvv.c View File

@@ -29,55 +29,55 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <float.h>

#if defined(DOUBLE)
#define VSETVL(n) vsetvl_e64m8(n)
#define VSETVL_MAX vsetvlmax_e64m8()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VFREDMINVS_FLOAT vfredmin_vs_f64m8_f64m1
#define MASK_T vbool8_t
#define VMFLTVF_FLOAT vmflt_vf_f64m8_b8
#define VMFLTVV_FLOAT vmflt_vv_f64m8_b8
#define VMFLEVF_FLOAT vmfle_vf_f64m8_b8
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMINVV_FLOAT vfmin_vv_f64m8
#define VFIRSTM vfirst_m_b8
#define UINT_V_T vuint64m8_t
#define VIDV_MASK_UINT vid_v_u64m8_m
#define VIDV_UINT vid_v_u64m8
#define VADDVX_MASK_UINT vadd_vx_u64m8_m
#define VADDVX_UINT vadd_vx_u64m8
#define VMVVX_UINT vmv_v_x_u64m8
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSLIDEDOWN_UINT vslidedown_vx_u64m8
#define VMVVXS_UINT vmv_x_s_u64m8_u64
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f64m8_f64m1
#define MASK_T vbool8_t
#define VMFLTVF_FLOAT __riscv_vmflt_vf_f64m8_b8
#define VMFLTVV_FLOAT __riscv_vmflt_vv_f64m8_b8
#define VMFLEVF_FLOAT __riscv_vmfle_vf_f64m8_b8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMINVV_FLOAT __riscv_vfmin_vv_f64m8
#define VFIRSTM __riscv_vfirst_m_b8
#define UINT_V_T vuint64m8_t
#define VIDV_MASK_UINT __riscv_vid_v_u64m8_m
#define VIDV_UINT __riscv_vid_v_u64m8
#define VADDVX_MASK_UINT __riscv_vadd_vx_u64m8_m
#define VADDVX_UINT __riscv_vadd_vx_u64m8
#define VMVVX_UINT __riscv_vmv_v_x_u64m8
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u64m8
#define VMVVXS_UINT __riscv_vmv_x_s_u64m8_u64
#else
#define VSETVL(n) vsetvl_e32m8(n)
#define VSETVL_MAX vsetvlmax_e32m8()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VFREDMINVS_FLOAT vfredmin_vs_f32m8_f32m1
#define MASK_T vbool4_t
#define VMFLTVF_FLOAT vmflt_vf_f32m8_b4
#define VMFLTVV_FLOAT vmflt_vv_f32m8_b4
#define VMFLEVF_FLOAT vmfle_vf_f32m8_b4
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMINVV_FLOAT vfmin_vv_f32m8
#define VFIRSTM vfirst_m_b4
#define UINT_V_T vuint32m8_t
#define VIDV_MASK_UINT vid_v_u32m8_m
#define VIDV_UINT vid_v_u32m8
#define VADDVX_MASK_UINT vadd_vx_u32m8_m
#define VADDVX_UINT vadd_vx_u32m8
#define VMVVX_UINT vmv_v_x_u32m8
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSLIDEDOWN_UINT vslidedown_vx_u32m8
#define VMVVXS_UINT vmv_x_s_u32m8_u32
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f32m8_f32m1
#define MASK_T vbool4_t
#define VMFLTVF_FLOAT __riscv_vmflt_vf_f32m8_b4
#define VMFLTVV_FLOAT __riscv_vmflt_vv_f32m8_b4
#define VMFLEVF_FLOAT __riscv_vmfle_vf_f32m8_b4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMINVV_FLOAT __riscv_vfmin_vv_f32m8
#define VFIRSTM __riscv_vfirst_m_b4
#define UINT_V_T vuint32m8_t
#define VIDV_MASK_UINT __riscv_vid_v_u32m8_m
#define VIDV_UINT __riscv_vid_v_u32m8
#define VADDVX_MASK_UINT __riscv_vadd_vx_u32m8_m
#define VADDVX_UINT __riscv_vadd_vx_u32m8
#define VMVVX_UINT __riscv_vmv_v_x_u32m8
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u32m8
#define VMVVXS_UINT __riscv_vmv_x_s_u32m8_u32
#endif

BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -104,8 +104,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

// index where element less than v_min
mask = VMFLTVV_FLOAT(vx, v_min, vl);
v_min_index = VIDV_MASK_UINT(mask, v_min_index, vl);
v_min_index = VADDVX_MASK_UINT(mask, v_min_index, v_min_index, j, vl);
v_min_index = VIDV_MASK_UINT(mask, vl);
v_min_index = VADDVX_MASK_UINT(mask, v_min_index, j, vl);

//update v_min and start_index j
v_min = VFMINVV_FLOAT(v_min, vx, vl);
@@ -122,8 +122,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

// index where element less than v_min
mask = VMFLTVV_FLOAT(vx, v_min, vl);
v_min_index = VIDV_MASK_UINT(mask, v_min_index, vl);
v_min_index = VADDVX_MASK_UINT(mask, v_min_index, v_min_index, j, vl);
v_min_index = VIDV_MASK_UINT(mask, vl);
v_min_index = VADDVX_MASK_UINT(mask, v_min_index, j, vl);

//update v_min and start_index j
v_min = VFMINVV_FLOAT(v_min, vx, vl);
@@ -131,16 +131,15 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
}

FLOAT_V_T_M1 v_res, v_max;
v_res = VFMVVF_FLOAT_M1(0, vlmax);
v_max = VFMVVF_FLOAT_M1(FLT_MAX, vlmax);
FLOAT_V_T_M1 v_res;
v_res = VFMVVF_FLOAT_M1(FLT_MAX, vlmax);

v_res = VFREDMINVS_FLOAT(v_res, v_min, v_max, vlmax);
v_res = VFREDMINVS_FLOAT(v_min, v_res, vlmax);
minf = VFMVFS_FLOAT_M1(v_res);
mask = VMFLEVF_FLOAT(v_min, minf, vlmax);
min_index = VFIRSTM(mask, vlmax);

v_min_index = VSLIDEDOWN_UINT(v_min_index, v_min_index, min_index, vlmax);
v_min_index = VSLIDEDOWN_UINT(v_min_index, min_index, vlmax);
min_index = VMVVXS_UINT(v_min_index);

return(min_index+1);


+ 63
- 64
kernel/riscv64/izamax_rvv.c View File

@@ -28,63 +28,63 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if defined(DOUBLE)
#define VSETVL(n) vsetvl_e64m4(n)
#define VSETVL_MAX vsetvlmax_e64m4()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m4
#define VLSEV_FLOAT vlse64_v_f64m4
#define VLSEG_FLOAT vlseg2e64_v_f64m4
#define VLSSEG_FLOAT vlsseg2e64_v_f64m4
#define VFREDMAXVS_FLOAT vfredmax_vs_f64m4_f64m1
#define MASK_T vbool16_t
#define VMFLTVF_FLOAT vmflt_vf_f64m4_b16
#define VMFLTVV_FLOAT vmflt_vv_f64m4_b16
#define VMFGEVF_FLOAT vmfge_vf_f64m4_b16
#define VFMVVF_FLOAT vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFABSV_FLOAT vfabs_v_f64m4
#define VFMAXVV_FLOAT vfmax_vv_f64m4
#define VFADDVV_FLOAT vfadd_vv_f64m4
#define VFIRSTM vfirst_m_b16
#define UINT_V_T vuint64m4_t
#define VIDV_MASK_UINT vid_v_u64m4_m
#define VIDV_UINT vid_v_u64m4
#define VADDVX_MASK_UINT vadd_vx_u64m4_m
#define VADDVX_UINT vadd_vx_u64m4
#define VMVVX_UINT vmv_v_x_u64m4
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSLIDEDOWN_UINT vslidedown_vx_u64m4
#define VMVVXS_UINT vmv_x_s_u64m4_u64
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m4()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m4
#define VLSEV_FLOAT __riscv_vlse64_v_f64m4
#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4
#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4
#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f64m4_f64m1
#define MASK_T vbool16_t
#define VMFLTVF_FLOAT __riscv_vmflt_vf_f64m4_b16
#define VMFLTVV_FLOAT __riscv_vmflt_vv_f64m4_b16
#define VMFGEVF_FLOAT __riscv_vmfge_vf_f64m4_b16
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFABSV_FLOAT __riscv_vfabs_v_f64m4
#define VFMAXVV_FLOAT __riscv_vfmax_vv_f64m4
#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m4
#define VFIRSTM __riscv_vfirst_m_b16
#define UINT_V_T vuint64m4_t
#define VIDV_MASK_UINT __riscv_vid_v_u64m4_m
#define VIDV_UINT __riscv_vid_v_u64m4
#define VADDVX_MASK_UINT __riscv_vadd_vx_u64m4_m
#define VADDVX_UINT __riscv_vadd_vx_u64m4
#define VMVVX_UINT __riscv_vmv_v_x_u64m4
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u64m4
#define VMVVXS_UINT __riscv_vmv_x_s_u64m4_u64
#else
#define VSETVL(n) vsetvl_e32m4(n)
#define VSETVL_MAX vsetvlmax_e32m4()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m4
#define VLSEV_FLOAT vlse32_v_f32m4
#define VLSEG_FLOAT vlseg2e32_v_f32m4
#define VLSSEG_FLOAT vlsseg2e32_v_f32m4
#define VFREDMAXVS_FLOAT vfredmax_vs_f32m4_f32m1
#define MASK_T vbool8_t
#define VMFLTVF_FLOAT vmflt_vf_f32m4_b8
#define VMFLTVV_FLOAT vmflt_vv_f32m4_b8
#define VMFGEVF_FLOAT vmfge_vf_f32m4_b8
#define VFMVVF_FLOAT vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFABSV_FLOAT vfabs_v_f32m4
#define VFMAXVV_FLOAT vfmax_vv_f32m4
#define VFADDVV_FLOAT vfadd_vv_f32m4
#define VFIRSTM vfirst_m_b8
#define UINT_V_T vuint32m4_t
#define VIDV_MASK_UINT vid_v_u32m4_m
#define VIDV_UINT vid_v_u32m4
#define VADDVX_MASK_UINT vadd_vx_u32m4_m
#define VADDVX_UINT vadd_vx_u32m4
#define VMVVX_UINT vmv_v_x_u32m4
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSLIDEDOWN_UINT vslidedown_vx_u32m4
#define VMVVXS_UINT vmv_x_s_u32m4_u32
#define VSETVL(n) __riscv_vsetvl_e32m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m4()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m4
#define VLSEV_FLOAT __riscv_vlse32_v_f32m4
#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4
#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f32m4_f32m1
#define MASK_T vbool8_t
#define VMFLTVF_FLOAT __riscv_vmflt_vf_f32m4_b8
#define VMFLTVV_FLOAT __riscv_vmflt_vv_f32m4_b8
#define VMFGEVF_FLOAT __riscv_vmfge_vf_f32m4_b8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFABSV_FLOAT __riscv_vfabs_v_f32m4
#define VFMAXVV_FLOAT __riscv_vfmax_vv_f32m4
#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m4
#define VFIRSTM __riscv_vfirst_m_b8
#define UINT_V_T vuint32m4_t
#define VIDV_MASK_UINT __riscv_vid_v_u32m4_m
#define VIDV_UINT __riscv_vid_v_u32m4
#define VADDVX_MASK_UINT __riscv_vadd_vx_u32m4_m
#define VADDVX_UINT __riscv_vadd_vx_u32m4
#define VMVVX_UINT __riscv_vmv_v_x_u32m4
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u32m4
#define VMVVXS_UINT __riscv_vmv_x_s_u32m4_u32
#endif

BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -116,8 +116,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

//index where element greater than v_max
mask = VMFLTVV_FLOAT(v_max, vx0, vl);
v_max_index = VIDV_MASK_UINT(mask, v_max_index, vl);
v_max_index = VADDVX_MASK_UINT(mask, v_max_index, v_max_index, j, vl);
v_max_index = VIDV_MASK_UINT(mask, vl);
v_max_index = VADDVX_MASK_UINT(mask, v_max_index, j, vl);

//update v_max and start_index j
v_max = VFMAXVV_FLOAT(v_max, vx0, vl);
@@ -138,24 +138,23 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
//index where element greater than v_max
mask = VMFLTVV_FLOAT(v_max, vx0, vl);
v_max_index = VIDV_MASK_UINT(mask, v_max_index, vl);
v_max_index = VADDVX_MASK_UINT(mask, v_max_index, v_max_index, j, vl);
v_max_index = VIDV_MASK_UINT(mask, vl);
v_max_index = VADDVX_MASK_UINT(mask, v_max_index, j, vl);
//update v_max and start_index j
v_max = VFMAXVV_FLOAT(v_max, vx0, vl);
}

}
FLOAT_V_T_M1 v_res, v_z0;
FLOAT_V_T_M1 v_res;
v_res = VFMVVF_FLOAT_M1(0, vlmax);
v_z0 = VFMVVF_FLOAT_M1(0, vlmax);

v_res = VFREDMAXVS_FLOAT(v_res, v_max, v_z0, vlmax);
v_res = VFREDMAXVS_FLOAT(v_max, v_res, vlmax);
maxf = VFMVFS_FLOAT_M1(v_res);
mask = VMFGEVF_FLOAT(v_max, maxf, vlmax);
max_index = VFIRSTM(mask, vlmax);
v_max_index = VSLIDEDOWN_UINT(v_max_index, v_max_index, max_index, vlmax);
v_max_index = VSLIDEDOWN_UINT(v_max_index, max_index, vlmax);
max_index = VMVVXS_UINT(v_max_index);

return(max_index+1);


+ 60
- 61
kernel/riscv64/izamin_rvv.c View File

@@ -29,59 +29,59 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <float.h>

#if defined(DOUBLE)
#define VSETVL(n) vsetvl_e64m4(n)
#define VSETVL_MAX vsetvlmax_e64m4()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLSEG_FLOAT vlseg2e64_v_f64m4
#define VLSSEG_FLOAT vlsseg2e64_v_f64m4
#define VFREDMINVS_FLOAT vfredmin_vs_f64m4_f64m1
#define MASK_T vbool16_t
#define VMFLTVF_FLOAT vmflt_vf_f64m4_b16
#define VMFLTVV_FLOAT vmflt_vv_f64m4_b16
#define VMFLEVF_FLOAT vmfle_vf_f64m4_b16
#define VFMVVF_FLOAT vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFABSV_FLOAT vfabs_v_f64m4
#define VFMINVV_FLOAT vfmin_vv_f64m4
#define VFADDVV_FLOAT vfadd_vv_f64m4
#define VFIRSTM vfirst_m_b16
#define UINT_V_T vuint64m4_t
#define VIDV_MASK_UINT vid_v_u64m4_m
#define VIDV_UINT vid_v_u64m4
#define VADDVX_MASK_UINT vadd_vx_u64m4_m
#define VADDVX_UINT vadd_vx_u64m4
#define VMVVX_UINT vmv_v_x_u64m4
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSLIDEDOWN_UINT vslidedown_vx_u64m4
#define VMVVXS_UINT vmv_x_s_u64m4_u64
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m4()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4
#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4
#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f64m4_f64m1
#define MASK_T vbool16_t
#define VMFLTVF_FLOAT __riscv_vmflt_vf_f64m4_b16
#define VMFLTVV_FLOAT __riscv_vmflt_vv_f64m4_b16
#define VMFLEVF_FLOAT __riscv_vmfle_vf_f64m4_b16
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFABSV_FLOAT __riscv_vfabs_v_f64m4
#define VFMINVV_FLOAT __riscv_vfmin_vv_f64m4
#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m4
#define VFIRSTM __riscv_vfirst_m_b16
#define UINT_V_T vuint64m4_t
#define VIDV_MASK_UINT __riscv_vid_v_u64m4_m
#define VIDV_UINT __riscv_vid_v_u64m4
#define VADDVX_MASK_UINT __riscv_vadd_vx_u64m4_m
#define VADDVX_UINT __riscv_vadd_vx_u64m4
#define VMVVX_UINT __riscv_vmv_v_x_u64m4
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u64m4
#define VMVVXS_UINT __riscv_vmv_x_s_u64m4_u64
#else
#define VSETVL(n) vsetvl_e32m4(n)
#define VSETVL_MAX vsetvlmax_e32m4()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLSEG_FLOAT vlseg2e32_v_f32m4
#define VLSSEG_FLOAT vlsseg2e32_v_f32m4
#define VFREDMINVS_FLOAT vfredmin_vs_f32m4_f32m1
#define MASK_T vbool8_t
#define VMFLTVF_FLOAT vmflt_vf_f32m4_b8
#define VMFLTVV_FLOAT vmflt_vv_f32m4_b8
#define VMFLEVF_FLOAT vmfle_vf_f32m4_b8
#define VFMVVF_FLOAT vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFABSV_FLOAT vfabs_v_f32m4
#define VFMINVV_FLOAT vfmin_vv_f32m4
#define VFADDVV_FLOAT vfadd_vv_f32m4
#define VFIRSTM vfirst_m_b8
#define UINT_V_T vuint32m4_t
#define VIDV_MASK_UINT vid_v_u32m4_m
#define VIDV_UINT vid_v_u32m4
#define VADDVX_MASK_UINT vadd_vx_u32m4_m
#define VADDVX_UINT vadd_vx_u32m4
#define VMVVX_UINT vmv_v_x_u32m4
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSLIDEDOWN_UINT vslidedown_vx_u32m4
#define VMVVXS_UINT vmv_x_s_u32m4_u32
#define VSETVL(n) __riscv_vsetvl_e32m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m4()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4
#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f32m4_f32m1
#define MASK_T vbool8_t
#define VMFLTVF_FLOAT __riscv_vmflt_vf_f32m4_b8
#define VMFLTVV_FLOAT __riscv_vmflt_vv_f32m4_b8
#define VMFLEVF_FLOAT __riscv_vmfle_vf_f32m4_b8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFABSV_FLOAT __riscv_vfabs_v_f32m4
#define VFMINVV_FLOAT __riscv_vfmin_vv_f32m4
#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m4
#define VFIRSTM __riscv_vfirst_m_b8
#define UINT_V_T vuint32m4_t
#define VIDV_MASK_UINT __riscv_vid_v_u32m4_m
#define VIDV_UINT __riscv_vid_v_u32m4
#define VADDVX_MASK_UINT __riscv_vadd_vx_u32m4_m
#define VADDVX_UINT __riscv_vadd_vx_u32m4
#define VMVVX_UINT __riscv_vmv_v_x_u32m4
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#define VSLIDEDOWN_UINT __riscv_vslidedown_vx_u32m4
#define VMVVXS_UINT __riscv_vmv_x_s_u32m4_u32
#endif

BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -113,8 +113,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

// indices where elements are less than v_min
mask = VMFLTVV_FLOAT(vx0, v_min, vl);
v_min_index = VIDV_MASK_UINT(mask, v_min_index, vl);
v_min_index = VADDVX_MASK_UINT(mask, v_min_index, v_min_index, j, vl);
v_min_index = VIDV_MASK_UINT(mask, vl);
v_min_index = VADDVX_MASK_UINT(mask, v_min_index, j, vl);

//update v_min and start_index j
v_min = VFMINVV_FLOAT(v_min, vx0, vl);
@@ -136,8 +136,8 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

// indices where elements are less than v_min
mask = VMFLTVV_FLOAT(vx0, v_min, vl);
v_min_index = VIDV_MASK_UINT(mask, v_min_index, vl);
v_min_index = VADDVX_MASK_UINT(mask, v_min_index, v_min_index, j, vl);
v_min_index = VIDV_MASK_UINT(mask, vl);
v_min_index = VADDVX_MASK_UINT(mask, v_min_index, j, vl);

//update v_min and start_index j
v_min = VFMINVV_FLOAT(v_min, vx0, vl);
@@ -145,16 +145,15 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

}

FLOAT_V_T_M1 v_res, v_max;
v_res = VFMVVF_FLOAT_M1(0, vlmax);
v_max = VFMVVF_FLOAT_M1(FLT_MAX, vlmax);
FLOAT_V_T_M1 v_res;
v_res = VFMVVF_FLOAT_M1(FLT_MAX, vlmax);

v_res = VFREDMINVS_FLOAT(v_res, v_min, v_max, vlmax);
v_res = VFREDMINVS_FLOAT(v_min, v_res, vlmax);
minf = VFMVFS_FLOAT_M1(v_res);
mask = VMFLEVF_FLOAT(v_min, minf, vlmax);
min_index = VFIRSTM(mask, vlmax);

v_min_index = VSLIDEDOWN_UINT(v_min_index, v_min_index, min_index, vlmax);
v_min_index = VSLIDEDOWN_UINT(v_min_index, min_index, vlmax);
min_index = VMVVXS_UINT(v_min_index);

return(min_index+1);


+ 25
- 25
kernel/riscv64/max_rvv.c View File

@@ -29,31 +29,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <float.h>

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define VSETVL_MAX vsetvlmax_e32m8()
#define VSETVL_MAX_M1 vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VFREDMAXVS_FLOAT vfredmax_vs_f32m8_f32m1
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMAXVV_FLOAT vfmax_vv_f32m8
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f32m8_f32m1
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMAXVV_FLOAT __riscv_vfmax_vv_f32m8
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define VSETVL_MAX vsetvlmax_e64m8()
#define VSETVL_MAX_M1 vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VFREDMAXVS_FLOAT vfredmax_vs_f64m8_f64m1
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMAXVV_FLOAT vfmax_vv_f64m8
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f64m8_f64m1
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMAXVV_FLOAT __riscv_vfmax_vv_f64m8
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#endif

FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -91,7 +91,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

}

v_res = VFREDMAXVS_FLOAT(v_res, vmax, v_res, vlmax);
v_res = VFREDMAXVS_FLOAT(vmax, v_res, vlmax);
maxf = VFMVFS_FLOAT_M1(v_res);

return(maxf);


+ 25
- 25
kernel/riscv64/min_rvv.c View File

@@ -29,31 +29,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <float.h>

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define VSETVL_MAX vsetvlmax_e32m8()
#define VSETVL_MAX_M1 vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VFREDMINVS_FLOAT vfredmin_vs_f32m8_f32m1
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMINVV_FLOAT vfmin_vv_f32m8
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f32m8_f32m1
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMINVV_FLOAT __riscv_vfmin_vv_f32m8
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define VSETVL_MAX vsetvlmax_e64m8()
#define VSETVL_MAX_M1 vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VFREDMINVS_FLOAT vfredmin_vs_f64m8_f64m1
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMINVV_FLOAT vfmin_vv_f64m8
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f64m8_f64m1
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMINVV_FLOAT __riscv_vfmin_vv_f64m8
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#endif

FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -91,7 +91,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

}

v_res = VFREDMINVS_FLOAT(v_res, vmin, v_res, vlmax);
v_res = VFREDMINVS_FLOAT(vmin, v_res, vlmax);
minf = VFMVFS_FLOAT_M1(v_res);

return(minf);


+ 23
- 23
kernel/riscv64/nrm2_rvv.c View File

@@ -29,30 +29,30 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <math.h>

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define VSETVL_MAX vsetvlmax_e32m8()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VFREDSUM_FLOAT vfredusum_vs_f32m8_f32m1
#define VFMACCVV_FLOAT vfmacc_vv_f32m8
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m8_f32m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#define ABS fabsf
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define VSETVL_MAX vsetvlmax_e64m8()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VFREDSUM_FLOAT vfredusum_vs_f64m8_f64m1
#define VFMACCVV_FLOAT vfmacc_vv_f64m8
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m8_f64m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#define ABS fabs
#endif

@@ -95,7 +95,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
}
}

v_res = VFREDSUM_FLOAT(v_res, vr, v_res, vlmax);
v_res = VFREDSUM_FLOAT(vr, v_res, vlmax);

ssq = VFMVFS_FLOAT_M1(v_res);



+ 18
- 18
kernel/riscv64/rot_rvv.c View File

@@ -28,25 +28,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VSEV_FLOAT vse32_v_f32m8
#define VSSEV_FLOAT vsse32_v_f32m8
#define VFMACCVF_FLOAT vfmacc_vf_f32m8
#define VFMULVF_FLOAT vfmul_vf_f32m8
#define VFMSACVF_FLOAT vfmsac_vf_f32m8
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VSEV_FLOAT __riscv_vse32_v_f32m8
#define VSSEV_FLOAT __riscv_vsse32_v_f32m8
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m8
#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m8
#define VFMSACVF_FLOAT __riscv_vfmsac_vf_f32m8
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VSEV_FLOAT vse64_v_f64m8
#define VSSEV_FLOAT vsse64_v_f64m8
#define VFMACCVF_FLOAT vfmacc_vf_f64m8
#define VFMULVF_FLOAT vfmul_vf_f64m8
#define VFMSACVF_FLOAT vfmsac_vf_f64m8
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VSEV_FLOAT __riscv_vse64_v_f64m8
#define VSSEV_FLOAT __riscv_vsse64_v_f64m8
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m8
#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m8
#define VFMSACVF_FLOAT __riscv_vfmsac_vf_f64m8
#endif

int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT c, FLOAT s)


+ 16
- 16
kernel/riscv64/scal_rvv.c View File

@@ -28,23 +28,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VSEV_FLOAT vse32_v_f32m8
#define VSSEV_FLOAT vsse32_v_f32m8
#define VFMULVF_FLOAT vfmul_vf_f32m8
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VSEV_FLOAT __riscv_vse32_v_f32m8
#define VSSEV_FLOAT __riscv_vsse32_v_f32m8
#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VSEV_FLOAT vse64_v_f64m8
#define VSSEV_FLOAT vsse64_v_f64m8
#define VFMULVF_FLOAT vfmul_vf_f64m8
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VSEV_FLOAT __riscv_vse64_v_f64m8
#define VSSEV_FLOAT __riscv_vsse64_v_f64m8
#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#endif

int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2)


+ 25
- 25
kernel/riscv64/sum_rvv.c View File

@@ -28,31 +28,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define VSETVL_MAX vsetvlmax_e32m8()
#define VSETVL_MAX_M1 vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VFADDVV_FLOAT vfadd_vv_f32m8
#define VFREDSUMVS_FLOAT vfredusum_vs_f32m8_f32m1
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m8
#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f32m8_f32m1
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define VSETVL_MAX vsetvlmax_e64m8()
#define VSETVL_MAX_M1 vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VFADDVV_FLOAT vfadd_vv_f64m8
#define VFREDSUMVS_FLOAT vfredusum_vs_f64m8_f64m1
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m8
#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f64m8_f64m1
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#endif

FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -89,7 +89,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

}

v_res = VFREDSUMVS_FLOAT(v_res, vsum, v_res, vlmax);
v_res = VFREDSUMVS_FLOAT(vsum, v_res, vlmax);
sumf = VFMVFS_FLOAT_M1(v_res);
return(sumf);
}

+ 12
- 16
kernel/riscv64/swap_rvv.c View File

@@ -28,23 +28,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define VSETVL_MAX vsetvlmax_e32m8()
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VSEV_FLOAT vse32_v_f32m8
#define VSSEV_FLOAT vsse32_v_f32m8
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VSEV_FLOAT __riscv_vse32_v_f32m8
#define VSSEV_FLOAT __riscv_vsse32_v_f32m8
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define VSETVL_MAX vsetvlmax_e64m8()
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VSEV_FLOAT vse64_v_f64m8
#define VSSEV_FLOAT vsse64_v_f64m8
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VSEV_FLOAT __riscv_vse64_v_f64m8
#define VSSEV_FLOAT __riscv_vsse64_v_f64m8
#endif

int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy3, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2)


+ 25
- 25
kernel/riscv64/symm_lcopy_rvv_v1.c View File

@@ -28,31 +28,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define VSETVL_MAX vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEV_FLOAT vlse32_v_f32m2
#define INT_V_T vint32m2_t
#define VID_V_INT vid_v_i32m2
#define VADD_VX_INT vadd_vx_i32m2
#define VMSGT_VX_INT vmsgt_vx_i32m2_b16
#define VBOOL_T vbool16_t
#define VMERGE_VVM_FLOAT vmerge_vvm_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define INT_V_T vint32m2_t
#define VID_V_INT __riscv_vid_v_i32m2
#define VADD_VX_INT __riscv_vadd_vx_i32m2
#define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16
#define VBOOL_T vbool16_t
#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define VSETVL_MAX vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEV_FLOAT vlse64_v_f64m2
#define INT_V_T vint64m2_t
#define VID_V_INT vid_v_i64m2
#define VADD_VX_INT vadd_vx_i64m2
#define VMSGT_VX_INT vmsgt_vx_i64m2_b32
#define VBOOL_T vbool32_t
#define VMERGE_VVM_FLOAT vmerge_vvm_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define INT_V_T vint64m2_t
#define VID_V_INT __riscv_vid_v_i64m2
#define VADD_VX_INT __riscv_vadd_vx_i64m2
#define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32
#define VBOOL_T vbool32_t
#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2
#endif

// Optimizes the implementation in ../generic/symm_lcopy_4.c
@@ -87,7 +87,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
vindex = VADD_VX_INT(vindex_max, offset, vl);
vbool = VMSGT_VX_INT(vindex, 0, vl);

vb = VMERGE_VVM_FLOAT(vbool, va2, va1, vl);
vb = VMERGE_VVM_FLOAT(va2, va1, vbool, vl);
VSEV_FLOAT(b, vb, vl);

b += vl;


+ 25
- 25
kernel/riscv64/symm_ucopy_rvv_v1.c View File

@@ -28,31 +28,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define VSETVL_MAX vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEV_FLOAT vlse32_v_f32m2
#define INT_V_T vint32m2_t
#define VID_V_INT vid_v_i32m2
#define VADD_VX_INT vadd_vx_i32m2
#define VMSGT_VX_INT vmsgt_vx_i32m2_b16
#define VBOOL_T vbool16_t
#define VMERGE_VVM_FLOAT vmerge_vvm_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define INT_V_T vint32m2_t
#define VID_V_INT __riscv_vid_v_i32m2
#define VADD_VX_INT __riscv_vadd_vx_i32m2
#define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16
#define VBOOL_T vbool16_t
#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define VSETVL_MAX vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEV_FLOAT vlse64_v_f64m2
#define INT_V_T vint64m2_t
#define VID_V_INT vid_v_i64m2
#define VADD_VX_INT vadd_vx_i64m2
#define VMSGT_VX_INT vmsgt_vx_i64m2_b32
#define VBOOL_T vbool32_t
#define VMERGE_VVM_FLOAT vmerge_vvm_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define INT_V_T vint64m2_t
#define VID_V_INT __riscv_vid_v_i64m2
#define VADD_VX_INT __riscv_vadd_vx_i64m2
#define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32
#define VBOOL_T vbool32_t
#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2
#endif

// Optimizes the implementation in ../generic/symm_ucopy_4.c
@@ -87,7 +87,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
vindex = VADD_VX_INT(vindex_max, offset, vl);
vbool = VMSGT_VX_INT(vindex, 0, vl);

vb = VMERGE_VVM_FLOAT(vbool, va2, va1, vl);
vb = VMERGE_VVM_FLOAT(va2, va1, vbool, vl);
VSEV_FLOAT(b, vb, vl);

b += vl;


+ 40
- 41
kernel/riscv64/symv_L_rvv.c View File

@@ -28,43 +28,43 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL_MAX_M1 vsetvlmax_e32m1()
#define VSETVL(n) vsetvl_e32m8(n)
#define VSETVL_MAX vsetvlmax_e32m8()
#define FLOAT_V_T_M1 vfloat32m1_t
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT vle32_v_f32m8
#define VSEV_FLOAT vse32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VSSEV_FLOAT vsse32_v_f32m8
#define VFMACCVV_FLOAT vfmacc_vv_f32m8
#define VFMACCVF_FLOAT vfmacc_vf_f32m8
#define VFNMSACVF_FLOAT vfnmsac_vf_f32m8
#define VFMULVF_FLOAT vfmul_vf_f32m8
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VFMSACVF_FLOAT vfmsac_vf_f32m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFREDSUM_FLOAT vfredusum_vs_f32m8_f32m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
#define FLOAT_V_T_M1 vfloat32m1_t
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VSEV_FLOAT __riscv_vse32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VSSEV_FLOAT __riscv_vsse32_v_f32m8
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m8
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m8
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m8
#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#define VFMSACVF_FLOAT __riscv_vfmsac_vf_f32m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m8_f32m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#else
#define VSETVL_MAX_M1 vsetvlmax_e64m1()
#define VSETVL(n) vsetvl_e64m8(n)
#define VSETVL_MAX vsetvlmax_e64m8()
#define FLOAT_V_T_M1 vfloat64m1_t
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT vle64_v_f64m8
#define VSEV_FLOAT vse64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VSSEV_FLOAT vsse64_v_f64m8
#define VFMACCVV_FLOAT vfmacc_vv_f64m8
#define VFMACCVF_FLOAT vfmacc_vf_f64m8
#define VFNMSACVF_FLOAT vfnmsac_vf_f64m8
#define VFMULVF_FLOAT vfmul_vf_f64m8
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VFMSACVF_FLOAT vfmsac_vf_f64m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFREDSUM_FLOAT vfredusum_vs_f64m8_f64m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
#define FLOAT_V_T_M1 vfloat64m1_t
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VSEV_FLOAT __riscv_vse64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VSSEV_FLOAT __riscv_vsse64_v_f64m8
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m8
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m8
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m8
#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#define VFMSACVF_FLOAT __riscv_vfmsac_vf_f64m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m8_f64m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#endif

int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer)
@@ -77,7 +77,6 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA

FLOAT_V_T_M1 v_res, v_z0;
size_t vlmax = VSETVL_MAX_M1, vl;
v_res = VFMVVF_FLOAT_M1(0, vlmax);
v_z0 = VFMVVF_FLOAT_M1(0, vlmax);
vlmax = VSETVL_MAX;

@@ -105,7 +104,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA
vr = VFMACCVV_FLOAT(vr, vx, va, vl);

}
v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vlmax);
v_res = VFREDSUM_FLOAT(vr, v_z0, vlmax);

y[j] += alpha * VFMVFS_FLOAT_M1(v_res);
a_ptr += lda;
@@ -137,7 +136,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA

iy += inc_yv;
}
v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vlmax);
v_res = VFREDSUM_FLOAT(vr, v_z0, vlmax);

y[jy] += alpha * VFMVFS_FLOAT_M1(v_res);
jy += inc_y;
@@ -172,7 +171,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA
ix += inc_xv;
}

v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vlmax);
v_res = VFREDSUM_FLOAT(vr, v_z0, vlmax);

y[j] += alpha * VFMVFS_FLOAT_M1(v_res);
jx += inc_x;
@@ -211,7 +210,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA
ix += inc_xv;
iy += inc_yv;
}
v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vlmax);
v_res = VFREDSUM_FLOAT(vr, v_z0, vlmax);

y[jy] += alpha * VFMVFS_FLOAT_M1(v_res);
jx += inc_x;


+ 40
- 41
kernel/riscv64/symv_U_rvv.c View File

@@ -29,43 +29,43 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL_MAX_M1 vsetvlmax_e32m1()
#define VSETVL(n) vsetvl_e32m8(n)
#define VSETVL_MAX vsetvlmax_e32m8()
#define FLOAT_V_T_M1 vfloat32m1_t
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT vle32_v_f32m8
#define VSEV_FLOAT vse32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VSSEV_FLOAT vsse32_v_f32m8
#define VFMACCVV_FLOAT vfmacc_vv_f32m8
#define VFMACCVF_FLOAT vfmacc_vf_f32m8
#define VFNMSACVF_FLOAT vfnmsac_vf_f32m8
#define VFMULVF_FLOAT vfmul_vf_f32m8
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VFMSACVF_FLOAT vfmsac_vf_f32m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFREDSUM_FLOAT vfredusum_vs_f32m8_f32m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
#define FLOAT_V_T_M1 vfloat32m1_t
#define FLOAT_V_T vfloat32m8_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VSEV_FLOAT __riscv_vse32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VSSEV_FLOAT __riscv_vsse32_v_f32m8
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m8
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m8
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m8
#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#define VFMSACVF_FLOAT __riscv_vfmsac_vf_f32m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m8_f32m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#else
#define VSETVL_MAX_M1 vsetvlmax_e64m1()
#define VSETVL(n) vsetvl_e64m8(n)
#define VSETVL_MAX vsetvlmax_e64m8()
#define FLOAT_V_T_M1 vfloat64m1_t
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT vle64_v_f64m8
#define VSEV_FLOAT vse64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VSSEV_FLOAT vsse64_v_f64m8
#define VFMACCVV_FLOAT vfmacc_vv_f64m8
#define VFMACCVF_FLOAT vfmacc_vf_f64m8
#define VFNMSACVF_FLOAT vfnmsac_vf_f64m8
#define VFMULVF_FLOAT vfmul_vf_f64m8
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VFMSACVF_FLOAT vfmsac_vf_f64m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFREDSUM_FLOAT vfredusum_vs_f64m8_f64m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
#define FLOAT_V_T_M1 vfloat64m1_t
#define FLOAT_V_T vfloat64m8_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VSEV_FLOAT __riscv_vse64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VSSEV_FLOAT __riscv_vsse64_v_f64m8
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m8
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m8
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m8
#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m8
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#define VFMSACVF_FLOAT __riscv_vfmsac_vf_f64m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m8_f64m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#endif

int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer)
@@ -77,7 +77,6 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA
FLOAT *a_ptr = a;
FLOAT_V_T_M1 v_res, v_z0;
size_t vl_max = VSETVL_MAX_M1, vl;
v_res = VFMVVF_FLOAT_M1(0, vl_max);
v_z0 = VFMVVF_FLOAT_M1(0, vl_max);
vl_max = VSETVL_MAX;

@@ -105,7 +104,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA
vx = VLEV_FLOAT(&x[i], vl);
vr = VFMACCVV_FLOAT(vr, vx, va, vl);
}
v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vl_max);
v_res = VFREDSUM_FLOAT(vr, v_z0, vl_max);

y[j] += temp1 * a_ptr[j] + alpha * VFMVFS_FLOAT_M1(v_res);
a_ptr += lda;
@@ -137,7 +136,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA

iy += inc_yv;
}
v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vl_max);
v_res = VFREDSUM_FLOAT(vr, v_z0, vl_max);

y[jy] += temp1 * a_ptr[j] + alpha * VFMVFS_FLOAT_M1(v_res);
a_ptr += lda;
@@ -171,7 +170,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA

ix += inc_xv;
}
v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vl_max);
v_res = VFREDSUM_FLOAT(vr, v_z0, vl_max);

y[j] += temp1 * a_ptr[j] + alpha * VFMVFS_FLOAT_M1(v_res);
a_ptr += lda;
@@ -209,7 +208,7 @@ int CNAME(BLASLONG m, BLASLONG offset, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOA
ix += inc_xv;
iy += inc_yv;
}
v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vl_max);
v_res = VFREDSUM_FLOAT(vr, v_z0, vl_max);

y[jy] += temp1 * a_ptr[j] + alpha * VFMVFS_FLOAT_M1(v_res);
a_ptr += lda;


+ 24
- 24
kernel/riscv64/trmm_lncopy_rvv_v1.c View File

@@ -30,29 +30,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEV_FLOAT vlse32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT vid_v_u32m2
#define VMSGTU_VX_UINT vmsgtu_vx_u32m2_b16
#define VMSEQ_VX_UINT vmseq_vx_u32m2_b16
#define VFMERGE_VFM_FLOAT vfmerge_vfm_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT __riscv_vid_v_u32m2
#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u32m2_b16
#define VMSEQ_VX_UINT __riscv_vmseq_vx_u32m2_b16
#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEV_FLOAT vlse64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT vid_v_u64m2
#define VMSGTU_VX_UINT vmsgtu_vx_u64m2_b32
#define VMSEQ_VX_UINT vmseq_vx_u64m2_b32
#define VFMERGE_VFM_FLOAT vfmerge_vfm_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT __riscv_vid_v_u64m2
#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32
#define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32
#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2
#endif

// Optimizes the implementation in ../arm64/tmmm_lncopy_sve_v1.c
@@ -116,10 +116,10 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
{
va1 = VLSEV_FLOAT(ao, stride_lda, vl);
vbool_cmp = VMSGTU_VX_UINT(vindex, j, vl);
vb = VFMERGE_VFM_FLOAT(vbool_cmp, va1, ZERO, vl);
vb = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_cmp, vl);
#ifdef UNIT
vbool_eq = VMSEQ_VX_UINT(vindex, j, vl);
vb = VFMERGE_VFM_FLOAT(vbool_eq, vb, ONE, vl);
vb = VFMERGE_VFM_FLOAT(vb, ONE, vbool_eq, vl);
#endif
VSEV_FLOAT(b, vb, vl);
ao++;


+ 22
- 22
kernel/riscv64/trmm_ltcopy_rvv_v1.c View File

@@ -30,27 +30,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT vid_v_u32m2
#define VMSLTU_VX_UINT vmsltu_vx_u32m2_b16
#define VMSEQ_VX_UINT vmseq_vx_u32m2_b16
#define VFMERGE_VFM_FLOAT vfmerge_vfm_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT __riscv_vid_v_u32m2
#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u32m2_b16
#define VMSEQ_VX_UINT __riscv_vmseq_vx_u32m2_b16
#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT vid_v_u64m2
#define VMSLTU_VX_UINT vmsltu_vx_u64m2_b32
#define VMSEQ_VX_UINT vmseq_vx_u64m2_b32
#define VFMERGE_VFM_FLOAT vfmerge_vfm_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT __riscv_vid_v_u64m2
#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u64m2_b32
#define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32
#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2
#endif

// Optimizes the implementation in ../arm64/tmmm_ltcopy_sve_v1.c
@@ -111,10 +111,10 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
{
va1 = VLEV_FLOAT(ao, vl);
vbool_cmp = VMSLTU_VX_UINT(vindex, j, vl);
vb = VFMERGE_VFM_FLOAT(vbool_cmp, va1, ZERO, vl);
vb = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_cmp, vl);
#ifdef UNIT
vbool_eq = VMSEQ_VX_UINT(vindex, j, vl);
vb = VFMERGE_VFM_FLOAT(vbool_eq, vb, ONE, vl);
vb = VFMERGE_VFM_FLOAT(vb, ONE, vbool_eq, vl);
#endif
VSEV_FLOAT(b, vb, vl);
ao += lda;


+ 24
- 24
kernel/riscv64/trmm_uncopy_rvv_v1.c View File

@@ -30,29 +30,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VLSEV_FLOAT vlse32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT vid_v_u32m2
#define VMSLTU_VX_UINT vmsltu_vx_u32m2_b16
#define VMSEQ_VX_UINT vmseq_vx_u32m2_b16
#define VFMERGE_VFM_FLOAT vfmerge_vfm_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT __riscv_vid_v_u32m2
#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u32m2_b16
#define VMSEQ_VX_UINT __riscv_vmseq_vx_u32m2_b16
#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VLSEV_FLOAT vlse64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT vid_v_u64m2
#define VMSLTU_VX_UINT vmsltu_vx_u64m2_b32
#define VMSEQ_VX_UINT vmseq_vx_u64m2_b32
#define VFMERGE_VFM_FLOAT vfmerge_vfm_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT __riscv_vid_v_u64m2
#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u64m2_b32
#define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32
#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2
#endif

// Optimizes the implementation in ../arm64/tmmm_uncopy_sve_v1.c
@@ -114,10 +114,10 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
{
va1 = VLSEV_FLOAT(ao, stride_lda, vl);
vbool_cmp = VMSLTU_VX_UINT(vindex, j, vl);
vb = VFMERGE_VFM_FLOAT(vbool_cmp, va1, ZERO, vl);
vb = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_cmp, vl);
#ifdef UNIT
vbool_eq = VMSEQ_VX_UINT(vindex, j, vl);
vb = VFMERGE_VFM_FLOAT(vbool_eq, vb, ONE, vl);
vb = VFMERGE_VFM_FLOAT(vb, ONE, vbool_eq, vl);
#endif
VSEV_FLOAT(b, vb, vl);
ao++;


+ 22
- 22
kernel/riscv64/trmm_utcopy_rvv_v1.c View File

@@ -32,27 +32,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT vid_v_u32m2
#define VMSGTU_VX_UINT vmsgtu_vx_u32m2_b16
#define VMSEQ_VX_UINT vmseq_vx_u32m2_b16
#define VFMERGE_VFM_FLOAT vfmerge_vfm_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT __riscv_vid_v_u32m2
#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u32m2_b16
#define VMSEQ_VX_UINT __riscv_vmseq_vx_u32m2_b16
#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT vid_v_u64m2
#define VMSGTU_VX_UINT vmsgtu_vx_u64m2_b32
#define VMSEQ_VX_UINT vmseq_vx_u64m2_b32
#define VFMERGE_VFM_FLOAT vfmerge_vfm_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT __riscv_vid_v_u64m2
#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32
#define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32
#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2
#endif

// Optimizes the implementation in ../arm64/tmmm_utcopy_sve_v1.c
@@ -113,10 +113,10 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
{
va1 = VLEV_FLOAT(ao, vl);
vbool_cmp = VMSGTU_VX_UINT(vindex, j, vl);
vb = VFMERGE_VFM_FLOAT(vbool_cmp, va1, ZERO, vl);
vb = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_cmp, vl);
#ifdef UNIT
vbool_eq = VMSEQ_VX_UINT(vindex, j, vl);
vb = VFMERGE_VFM_FLOAT(vbool_eq, vb, ONE, vl);
vb = VFMERGE_VFM_FLOAT(vb, ONE, vbool_eq, vl);
#endif
VSEV_FLOAT(b, vb, vl);
ao += lda;


+ 14
- 14
kernel/riscv64/trmmkernel_rvv_v1x8.c View File

@@ -28,21 +28,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VFMVVF_FLOAT vfmv_v_f_f32m2
#define VFMACCVF_FLOAT vfmacc_vf_f32m2
#define VFMULVF_FLOAT vfmul_vf_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2
#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VFMVVF_FLOAT vfmv_v_f_f64m2
#define VFMACCVF_FLOAT vfmacc_vf_f64m2
#define VFMULVF_FLOAT vfmul_vf_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2
#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m2
#endif




+ 27
- 27
kernel/riscv64/trsm_kernel_LN_rvv_v1.c View File

@@ -28,34 +28,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define VSETVL_MAX vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLSEV_FLOAT vlse32_v_f32m2
#define VSSEV_FLOAT vsse32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2
#define VSSSEG2_FLOAT vssseg2e32_v_f32m2
#define VFMACCVF_FLOAT vfmacc_vf_f32m2
#define VFNMSACVF_FLOAT vfnmsac_vf_f32m2
#define VFMULVF_FLOAT vfmul_vf_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define VSSEV_FLOAT __riscv_vsse32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2
#define VSSSEG2_FLOAT __riscv_vssseg2e32_v_f32m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m2
#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define VSETVL_MAX vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLSEV_FLOAT vlse64_v_f64m2
#define VSSEV_FLOAT vsse64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2
#define VSSSEG2_FLOAT vssseg2e64_v_f64m2
#define VFMVVF_FLOAT vfmv_v_f_f64m2
#define VFMACCVF_FLOAT vfmacc_vf_f64m2
#define VFNMSACVF_FLOAT vfnmsac_vf_f64m2
#define VFMULVF_FLOAT vfmul_vf_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define VSSEV_FLOAT __riscv_vsse64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2
#define VSSSEG2_FLOAT __riscv_vssseg2e64_v_f64m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m2
#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m2
#endif




+ 27
- 27
kernel/riscv64/trsm_kernel_LT_rvv_v1.c View File

@@ -28,34 +28,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define VSETVL_MAX vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLSEV_FLOAT vlse32_v_f32m2
#define VSSEV_FLOAT vsse32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2
#define VSSSEG2_FLOAT vssseg2e32_v_f32m2
#define VFMACCVF_FLOAT vfmacc_vf_f32m2
#define VFNMSACVF_FLOAT vfnmsac_vf_f32m2
#define VFMULVF_FLOAT vfmul_vf_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define VSSEV_FLOAT __riscv_vsse32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2
#define VSSSEG2_FLOAT __riscv_vssseg2e32_v_f32m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m2
#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define VSETVL_MAX vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLSEV_FLOAT vlse64_v_f64m2
#define VSSEV_FLOAT vsse64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2
#define VSSSEG2_FLOAT vssseg2e64_v_f64m2
#define VFMVVF_FLOAT vfmv_v_f_f64m2
#define VFMACCVF_FLOAT vfmacc_vf_f64m2
#define VFNMSACVF_FLOAT vfnmsac_vf_f64m2
#define VFMULVF_FLOAT vfmul_vf_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define VSSEV_FLOAT __riscv_vsse64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2
#define VSSSEG2_FLOAT __riscv_vssseg2e64_v_f64m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m2
#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m2
#endif




+ 27
- 27
kernel/riscv64/trsm_kernel_RN_rvv_v1.c View File

@@ -28,34 +28,34 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define VSETVL_MAX vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSSEV_FLOAT vsse32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2
#define VSSSEG2_FLOAT vssseg2e32_v_f32m2
#define VFMACCVF_FLOAT vfmacc_vf_f32m2
#define VFNMSACVF_FLOAT vfnmsac_vf_f32m2
#define VFMULVF_FLOAT vfmul_vf_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSSEV_FLOAT __riscv_vsse32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2
#define VSSSEG2_FLOAT __riscv_vssseg2e32_v_f32m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m2
#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define VSETVL_MAX vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSSEV_FLOAT vsse64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2
#define VSSSEG2_FLOAT vssseg2e64_v_f64m2
#define VFMVVF_FLOAT vfmv_v_f_f64m2
#define VFMACCVF_FLOAT vfmacc_vf_f64m2
#define VFNMSACVF_FLOAT vfnmsac_vf_f64m2
#define VFMULVF_FLOAT vfmul_vf_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSSEV_FLOAT __riscv_vsse64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2
#define VSSSEG2_FLOAT __riscv_vssseg2e64_v_f64m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m2
#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m2
#endif

static FLOAT dm1 = -1.;


+ 21
- 21
kernel/riscv64/trsm_kernel_RT_rvv_v1.c View File

@@ -28,28 +28,28 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define VSETVL_MAX vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define VFMACCVF_FLOAT vfmacc_vf_f32m2
#define VFNMSACVF_FLOAT vfnmsac_vf_f32m2
#define VFMULVF_FLOAT vfmul_vf_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m2
#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define VSETVL_MAX vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define VFMVVF_FLOAT vfmv_v_f_f64m2
#define VFMACCVF_FLOAT vfmacc_vf_f64m2
#define VFNMSACVF_FLOAT vfnmsac_vf_f64m2
#define VFMULVF_FLOAT vfmul_vf_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m2
#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m2
#endif




+ 20
- 20
kernel/riscv64/trsm_lncopy_rvv_v1.c View File

@@ -29,27 +29,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VSEV_FLOAT_M vse32_v_f32m2_m
#define VLSEV_FLOAT vlse32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT vid_v_u32m2
#define VMSLTU_VX_UINT vmsltu_vx_u32m2_b16
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VSEV_FLOAT_M __riscv_vse32_v_f32m2_m
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT __riscv_vid_v_u32m2
#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u32m2_b16
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VSEV_FLOAT_M vse64_v_f64m2_m
#define VLSEV_FLOAT vlse64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT vid_v_u64m2
#define VMSLTU_VX_UINT vmsltu_vx_u64m2_b32
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VSEV_FLOAT_M __riscv_vse64_v_f64m2_m
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT __riscv_vid_v_u64m2
#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u64m2_b32

#endif



+ 20
- 20
kernel/riscv64/trsm_ltcopy_rvv_v1.c View File

@@ -29,27 +29,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VSEV_FLOAT_M vse32_v_f32m2_m
#define VLSEV_FLOAT vlse32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT vid_v_u32m2
#define VMSGTU_VX_UINT vmsgtu_vx_u32m2_b16
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VSEV_FLOAT_M __riscv_vse32_v_f32m2_m
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT __riscv_vid_v_u32m2
#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u32m2_b16
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VSEV_FLOAT_M vse64_v_f64m2_m
#define VLSEV_FLOAT vlse64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT vid_v_u64m2
#define VMSGTU_VX_UINT vmsgtu_vx_u64m2_b32
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VSEV_FLOAT_M __riscv_vse64_v_f64m2_m
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT __riscv_vid_v_u64m2
#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32
#endif

#ifndef UNIT


+ 20
- 20
kernel/riscv64/trsm_uncopy_rvv_v1.c View File

@@ -30,27 +30,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VSEV_FLOAT_M vse32_v_f32m2_m
#define VLSEV_FLOAT vlse32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT vid_v_u32m2
#define VMSGTU_VX_UINT vmsgtu_vx_u32m2_b16
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VSEV_FLOAT_M __riscv_vse32_v_f32m2_m
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT __riscv_vid_v_u32m2
#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u32m2_b16
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VSEV_FLOAT_M vse64_v_f64m2_m
#define VLSEV_FLOAT vlse64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT vid_v_u64m2
#define VMSGTU_VX_UINT vmsgtu_vx_u64m2_b32
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VSEV_FLOAT_M __riscv_vse64_v_f64m2_m
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT __riscv_vid_v_u64m2
#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32
#endif




+ 20
- 20
kernel/riscv64/trsm_utcopy_rvv_v1.c View File

@@ -29,27 +29,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VSEV_FLOAT_M vse32_v_f32m2_m
#define VLSEV_FLOAT vlse32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT vid_v_u32m2
#define VMSLTU_VX_UINT vmsltu_vx_u32m2_b16
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VSEV_FLOAT_M __riscv_vse32_v_f32m2_m
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT __riscv_vid_v_u32m2
#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u32m2_b16
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VSEV_FLOAT_M vse64_v_f64m2_m
#define VLSEV_FLOAT vlse64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT vid_v_u64m2
#define VMSLTU_VX_UINT vmsltu_vx_u64m2_b32
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VSEV_FLOAT_M __riscv_vse64_v_f64m2_m
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT __riscv_vid_v_u64m2
#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u64m2_b32
#endif




+ 29
- 29
kernel/riscv64/zamax_rvv.c View File

@@ -29,35 +29,35 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <float.h>

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m4(n)
#define VSETVL_MAX vsetvlmax_e32m4()
#define VSETVL_MAX_M1 vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLSEG_FLOAT vlseg2e32_v_f32m4
#define VLSSEG_FLOAT vlsseg2e32_v_f32m4
#define VFREDMAXVS_FLOAT vfredmax_vs_f32m4_f32m1
#define VFMVVF_FLOAT vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMAXVV_FLOAT vfmax_vv_f32m4
#define VFADDVV_FLOAT vfadd_vv_f32m4
#define VFABSV_FLOAT vfabs_v_f32m4
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSETVL(n) __riscv_vsetvl_e32m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m4()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4
#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f32m4_f32m1
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMAXVV_FLOAT __riscv_vfmax_vv_f32m4
#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m4
#define VFABSV_FLOAT __riscv_vfabs_v_f32m4
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#else
#define VSETVL(n) vsetvl_e64m4(n)
#define VSETVL_MAX vsetvlmax_e64m4()
#define VSETVL_MAX_M1 vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLSEG_FLOAT vlseg2e64_v_f64m4
#define VLSSEG_FLOAT vlsseg2e64_v_f64m4
#define VFREDMAXVS_FLOAT vfredmax_vs_f64m4_f64m1
#define VFMVVF_FLOAT vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMAXVV_FLOAT vfmax_vv_f64m4
#define VFADDVV_FLOAT vfadd_vv_f64m4
#define VFABSV_FLOAT vfabs_v_f64m4
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m4()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4
#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4
#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f64m4_f64m1
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMAXVV_FLOAT __riscv_vfmax_vv_f64m4
#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m4
#define VFABSV_FLOAT __riscv_vfabs_v_f64m4
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#endif

FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -106,7 +106,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

}

v_res = VFREDMAXVS_FLOAT(v_res, vmax, v_res, vlmax);
v_res = VFREDMAXVS_FLOAT(vmax, v_res, vlmax);
maxf = VFMVFS_FLOAT_M1(v_res);

return(maxf);


+ 29
- 29
kernel/riscv64/zamin_rvv.c View File

@@ -29,35 +29,35 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <float.h>

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m4(n)
#define VSETVL_MAX vsetvlmax_e32m4()
#define VSETVL_MAX_M1 vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLSEG_FLOAT vlseg2e32_v_f32m4
#define VLSSEG_FLOAT vlsseg2e32_v_f32m4
#define VFREDMINVS_FLOAT vfredmin_vs_f32m4_f32m1
#define VFMVVF_FLOAT vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMINVV_FLOAT vfmin_vv_f32m4
#define VFADDVV_FLOAT vfadd_vv_f32m4
#define VFABSV_FLOAT vfabs_v_f32m4
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSETVL(n) __riscv_vsetvl_e32m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m4()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4
#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f32m4_f32m1
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMINVV_FLOAT __riscv_vfmin_vv_f32m4
#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m4
#define VFABSV_FLOAT __riscv_vfabs_v_f32m4
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#else
#define VSETVL(n) vsetvl_e64m4(n)
#define VSETVL_MAX vsetvlmax_e64m4()
#define VSETVL_MAX_M1 vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLSEG_FLOAT vlseg2e64_v_f64m4
#define VLSSEG_FLOAT vlsseg2e64_v_f64m4
#define VFREDMINVS_FLOAT vfredmin_vs_f64m4_f64m1
#define VFMVVF_FLOAT vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMINVV_FLOAT vfmin_vv_f64m4
#define VFADDVV_FLOAT vfadd_vv_f64m4
#define VFABSV_FLOAT vfabs_v_f64m4
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m4()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4
#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4
#define VFREDMINVS_FLOAT __riscv_vfredmin_vs_f64m4_f64m1
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMINVV_FLOAT __riscv_vfmin_vv_f64m4
#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m4
#define VFABSV_FLOAT __riscv_vfabs_v_f64m4
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#endif

FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -105,7 +105,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

}

v_res = VFREDMINVS_FLOAT(v_res, vmin, v_res, vlmax);
v_res = VFREDMINVS_FLOAT(vmin, v_res, vlmax);
minf = VFMVFS_FLOAT_M1(v_res);

return(minf);


+ 25
- 26
kernel/riscv64/zasum_rvv.c View File

@@ -28,31 +28,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m8(n)
#define VSETVL_MAX vsetvlmax_e32m8()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m8
#define VLSEV_FLOAT vlse32_v_f32m8
#define VFREDSUMVS_FLOAT vfredusum_vs_f32m8_f32m1
#define VFMVVF_FLOAT vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VFADDVV_FLOAT vfadd_vv_f32m8
#define VFABSV_FLOAT vfabs_v_f32m8
#define VSETVL(n) __riscv_vsetvl_e32m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m8()
#define FLOAT_V_T vfloat32m8_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m8
#define VLSEV_FLOAT __riscv_vlse32_v_f32m8
#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f32m8_f32m1
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m8
#define VFABSV_FLOAT __riscv_vfabs_v_f32m8
#else
#define VSETVL(n) vsetvl_e64m8(n)
#define VSETVL_MAX vsetvlmax_e64m8()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m8
#define VLSEV_FLOAT vlse64_v_f64m8
#define VFREDSUMVS_FLOAT vfredusum_vs_f64m8_f64m1
#define VFMVVF_FLOAT vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VFADDVV_FLOAT vfadd_vv_f64m8
#define VFABSV_FLOAT vfabs_v_f64m8
#define VSETVL(n) __riscv_vsetvl_e64m8(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m8()
#define FLOAT_V_T vfloat64m8_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m8
#define VLSEV_FLOAT __riscv_vlse64_v_f64m8
#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f64m8_f64m1
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m8
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m8
#define VFABSV_FLOAT __riscv_vfabs_v_f64m8
#endif

FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -99,9 +99,8 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

}

FLOAT_V_T_M1 v_z0 = VFMVVF_FLOAT_M1(0, vlmax);
FLOAT_V_T_M1 v_res = VFMVVF_FLOAT_M1(0, vlmax);
v_res = VFREDSUMVS_FLOAT(v_res, v_sum, v_z0, vlmax);
v_res = VFREDSUMVS_FLOAT(v_sum, v_res, vlmax);
asumf += VFMVFS_FLOAT_M1(v_res);

return(asumf);


+ 26
- 26
kernel/riscv64/zaxpby_rvv.c View File

@@ -33,33 +33,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m4(n)
#define FLOAT_V_T vfloat32m4_t
#define VLSEV_FLOAT vlse32_v_f32m4
#define VSSEV_FLOAT vsse32_v_f32m4
#define VFMACCVF_FLOAT vfmacc_vf_f32m4
#define VFNMSACVF_FLOAT vfnmsac_vf_f32m4
#define VFMVVF_FLOAT vfmv_v_f_f32m4
#define VFMULVF_FLOAT vfmul_vf_f32m4
#define VFMSACVF_FLOAT vfmsac_vf_f32m4
#define VLSEG_FLOAT vlseg2e32_v_f32m4
#define VSSEG_FLOAT vsseg2e32_v_f32m4
#define VLSSEG_FLOAT vlsseg2e32_v_f32m4
#define VSSSEG_FLOAT vssseg2e32_v_f32m4
#define VSETVL(n) __riscv_vsetvl_e32m4(n)
#define FLOAT_V_T vfloat32m4_t
#define VLSEV_FLOAT __riscv_vlse32_v_f32m4
#define VSSEV_FLOAT __riscv_vsse32_v_f32m4
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m4
#define VFMSACVF_FLOAT __riscv_vfmsac_vf_f32m4
#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
#define VSSEG_FLOAT __riscv_vsseg2e32_v_f32m4
#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4
#define VSSSEG_FLOAT __riscv_vssseg2e32_v_f32m4
#else
#define VSETVL(n) vsetvl_e64m4(n)
#define FLOAT_V_T vfloat64m4_t
#define VLSEV_FLOAT vlse64_v_f64m4
#define VSSEV_FLOAT vsse64_v_f64m4
#define VFMACCVF_FLOAT vfmacc_vf_f64m4
#define VFNMSACVF_FLOAT vfnmsac_vf_f64m4
#define VFMVVF_FLOAT vfmv_v_f_f64m4
#define VFMULVF_FLOAT vfmul_vf_f64m4
#define VFMSACVF_FLOAT vfmsac_vf_f64m4
#define VLSEG_FLOAT vlseg2e64_v_f64m4
#define VSSEG_FLOAT vsseg2e64_v_f64m4
#define VLSSEG_FLOAT vlsseg2e64_v_f64m4
#define VSSSEG_FLOAT vssseg2e64_v_f64m4
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define FLOAT_V_T vfloat64m4_t
#define VLSEV_FLOAT __riscv_vlse64_v_f64m4
#define VSSEV_FLOAT __riscv_vsse64_v_f64m4
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m4
#define VFMSACVF_FLOAT __riscv_vfmsac_vf_f64m4
#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4
#define VSSEG_FLOAT __riscv_vsseg2e64_v_f64m4
#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4
#define VSSSEG_FLOAT __riscv_vssseg2e64_v_f64m4
#endif

int CNAME(BLASLONG n, FLOAT alpha_r, FLOAT alpha_i, FLOAT *x, BLASLONG inc_x, FLOAT beta_r, FLOAT beta_i,FLOAT *y, BLASLONG inc_y)


+ 16
- 16
kernel/riscv64/zaxpy_rvv.c View File

@@ -28,23 +28,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m4(n)
#define FLOAT_V_T vfloat32m4_t
#define VLSEG_FLOAT vlseg2e32_v_f32m4
#define VLSSEG_FLOAT vlsseg2e32_v_f32m4
#define VSSEG_FLOAT vsseg2e32_v_f32m4
#define VSSSEG_FLOAT vssseg2e32_v_f32m4
#define VFMACCVF_FLOAT vfmacc_vf_f32m4
#define VFNMSACVF_FLOAT vfnmsac_vf_f32m4
#define VSETVL(n) __riscv_vsetvl_e32m4(n)
#define FLOAT_V_T vfloat32m4_t
#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4
#define VSSEG_FLOAT __riscv_vsseg2e32_v_f32m4
#define VSSSEG_FLOAT __riscv_vssseg2e32_v_f32m4
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4
#else
#define VSETVL(n) vsetvl_e64m4(n)
#define FLOAT_V_T vfloat64m4_t
#define VLSEG_FLOAT vlseg2e64_v_f64m4
#define VLSSEG_FLOAT vlsseg2e64_v_f64m4
#define VSSEG_FLOAT vsseg2e64_v_f64m4
#define VSSSEG_FLOAT vssseg2e64_v_f64m4
#define VFMACCVF_FLOAT vfmacc_vf_f64m4
#define VFNMSACVF_FLOAT vfnmsac_vf_f64m4
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define FLOAT_V_T vfloat64m4_t
#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4
#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4
#define VSSEG_FLOAT __riscv_vsseg2e64_v_f64m4
#define VSSSEG_FLOAT __riscv_vssseg2e64_v_f64m4
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4
#endif

int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2)


+ 22
- 22
kernel/riscv64/zcopy_rvv.c View File

@@ -28,29 +28,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL_M8(n) vsetvl_e32m8(n)
#define FLOAT_V_T_M8 vfloat32m8_t
#define VLEV_FLOAT_M8 vle32_v_f32m8
#define VSEV_FLOAT_M8 vse32_v_f32m8
#define VSETVL_M4(n) vsetvl_e32m4(n)
#define FLOAT_V_T_M4 vfloat32m4_t
#define VLSEG_FLOAT_M4 vlseg2e32_v_f32m4
#define VSSEG_FLOAT_M4 vsseg2e32_v_f32m4
#define VLSSEG_FLOAT_M4 vlsseg2e32_v_f32m4
#define VSSSEG_FLOAT_M4 vssseg2e32_v_f32m4
#define VSETVL_M8(n) __riscv_vsetvl_e32m8(n)
#define FLOAT_V_T_M8 vfloat32m8_t
#define VLEV_FLOAT_M8 __riscv_vle32_v_f32m8
#define VSEV_FLOAT_M8 __riscv_vse32_v_f32m8
#define VSETVL_M4(n) __riscv_vsetvl_e32m4(n)
#define FLOAT_V_T_M4 vfloat32m4_t
#define VLSEG_FLOAT_M4 __riscv_vlseg2e32_v_f32m4
#define VSSEG_FLOAT_M4 __riscv_vsseg2e32_v_f32m4
#define VLSSEG_FLOAT_M4 __riscv_vlsseg2e32_v_f32m4
#define VSSSEG_FLOAT_M4 __riscv_vssseg2e32_v_f32m4
#else
#define VSETVL_M8(n) vsetvl_e64m8(n)
#define FLOAT_V_T_M8 vfloat64m8_t
#define VLEV_FLOAT_M8 vle64_v_f64m8
#define VSEV_FLOAT_M8 vse64_v_f64m8
#define VSETVL_M4(n) vsetvl_e64m4(n)
#define FLOAT_V_T_M4 vfloat64m4_t
#define VLSEG_FLOAT_M4 vlseg2e64_v_f64m4
#define VSSEG_FLOAT_M4 vsseg2e64_v_f64m4
#define VLSSEG_FLOAT_M4 vlsseg2e64_v_f64m4
#define VSSSEG_FLOAT_M4 vssseg2e64_v_f64m4
#define VSETVL_M8(n) __riscv_vsetvl_e64m8(n)
#define FLOAT_V_T_M8 vfloat64m8_t
#define VLEV_FLOAT_M8 __riscv_vle64_v_f64m8
#define VSEV_FLOAT_M8 __riscv_vse64_v_f64m8
#define VSETVL_M4(n) __riscv_vsetvl_e64m4(n)
#define FLOAT_V_T_M4 vfloat64m4_t
#define VLSEG_FLOAT_M4 __riscv_vlseg2e64_v_f64m4
#define VSSEG_FLOAT_M4 __riscv_vsseg2e64_v_f64m4
#define VLSSEG_FLOAT_M4 __riscv_vlsseg2e64_v_f64m4
#define VSSSEG_FLOAT_M4 __riscv_vssseg2e64_v_f64m4
#endif

int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y)


+ 32
- 33
kernel/riscv64/zdot_rvv.c View File

@@ -28,37 +28,37 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m4(n)
#define VSETVL_MAX vsetvlmax_e32m4()
#define VSETVL_MAX_M1 vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLSEG_FLOAT vlseg2e32_v_f32m4
#define VLSSEG_FLOAT vlsseg2e32_v_f32m4
#define VFREDSUM_FLOAT vfredusum_vs_f32m4_f32m1
#define VFMACCVV_FLOAT vfmacc_vv_f32m4
#define VFMVVF_FLOAT vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMULVV_FLOAT vfmul_vv_f32m4
#define VFMSACVV_FLOAT vfmsac_vv_f32m4
#define VFNMSACVV_FLOAT vfnmsac_vv_f32m4
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSETVL(n) __riscv_vsetvl_e32m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m4()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m4_f32m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMULVV_FLOAT __riscv_vfmul_vv_f32m4
#define VFMSACVV_FLOAT __riscv_vfmsac_vv_f32m4
#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f32m4
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#else
#define VSETVL(n) vsetvl_e64m4(n)
#define VSETVL_MAX vsetvlmax_e64m4()
#define VSETVL_MAX_M1 vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLSEG_FLOAT vlseg2e64_v_f64m4
#define VLSSEG_FLOAT vlsseg2e64_v_f64m4
#define VFREDSUM_FLOAT vfredusum_vs_f64m4_f64m1
#define VFMACCVV_FLOAT vfmacc_vv_f64m4
#define VFMVVF_FLOAT vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMULVV_FLOAT vfmul_vv_f64m4
#define VFMSACVV_FLOAT vfmsac_vv_f64m4
#define VFNMSACVV_FLOAT vfnmsac_vv_f64m4
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m4()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4
#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m4_f64m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMULVV_FLOAT __riscv_vfmul_vv_f64m4
#define VFMSACVV_FLOAT __riscv_vfmsac_vv_f64m4
#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f64m4
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#endif

OPENBLAS_COMPLEX_FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y)
@@ -72,7 +72,6 @@ OPENBLAS_COMPLEX_FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLA
FLOAT_V_T vr0, vr1, vx0, vx1, vy0, vy1;
FLOAT_V_T_M1 v_res, v_z0;
size_t vlmax_m1 = VSETVL_MAX_M1;
v_res = VFMVVF_FLOAT_M1(0, vlmax_m1);
v_z0 = VFMVVF_FLOAT_M1(0, vlmax_m1);

size_t vlmax = VSETVL_MAX;
@@ -161,9 +160,9 @@ OPENBLAS_COMPLEX_FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLA
}
}

v_res = VFREDSUM_FLOAT(v_res, vr0, v_z0, vlmax);
v_res = VFREDSUM_FLOAT(vr0, v_z0, vlmax);
CREAL(result) = VFMVFS_FLOAT_M1(v_res);
v_res = VFREDSUM_FLOAT(v_res, vr1, v_z0, vlmax);
v_res = VFREDSUM_FLOAT(vr1, v_z0, vlmax);
CIMAG(result) = VFMVFS_FLOAT_M1(v_res);
return(result);


+ 16
- 16
kernel/riscv64/zgemm_beta_rvv.c View File

@@ -39,23 +39,23 @@
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m4(n)
#define FLOAT_V_T vfloat32m4_t
#define VLSEG_FLOAT vlseg2e32_v_f32m4
#define VSSEG_FLOAT vsseg2e32_v_f32m4
#define VFMVVF_FLOAT vfmv_v_f_f32m4
#define VFMULVF_FLOAT vfmul_vf_f32m4
#define VFADDVV_FLOAT vfadd_vv_f32m4
#define VFSUBVV_FLOAT vfsub_vv_f32m4
#define VSETVL(n) __riscv_vsetvl_e32m4(n)
#define FLOAT_V_T vfloat32m4_t
#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
#define VSSEG_FLOAT __riscv_vsseg2e32_v_f32m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m4
#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m4
#define VFSUBVV_FLOAT __riscv_vfsub_vv_f32m4
#else
#define VSETVL(n) vsetvl_e64m4(n)
#define FLOAT_V_T vfloat64m4_t
#define VLSEG_FLOAT vlseg2e64_v_f64m4
#define VSSEG_FLOAT vsseg2e64_v_f64m4
#define VFMVVF_FLOAT vfmv_v_f_f64m4
#define VFMULVF_FLOAT vfmul_vf_f64m4
#define VFADDVV_FLOAT vfadd_vv_f64m4
#define VFSUBVV_FLOAT vfsub_vv_f64m4
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define FLOAT_V_T vfloat64m4_t
#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4
#define VSSEG_FLOAT __riscv_vsseg2e64_v_f64m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m4
#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m4
#define VFSUBVV_FLOAT __riscv_vfsub_vv_f64m4
#endif

int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1,


+ 12
- 12
kernel/riscv64/zgemm_ncopy_4_rvv.c View File

@@ -28,19 +28,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m1(n)
#define FLOAT_V_T vfloat32m1_t
#define VLSEG2_FLOAT vlseg2e32_v_f32m1
#define VSSEG2_FLOAT vsseg2e32_v_f32m1
#define VSSEG4_FLOAT vsseg4e32_v_f32m1
#define VSSEG8_FLOAT vsseg8e32_v_f32m1
#define VSETVL(n) __riscv_vsetvl_e32m1(n)
#define FLOAT_V_T vfloat32m1_t
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m1
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m1
#define VSSEG4_FLOAT __riscv_vsseg4e32_v_f32m1
#define VSSEG8_FLOAT __riscv_vsseg8e32_v_f32m1
#else
#define VSETVL(n) vsetvl_e64m1(n)
#define FLOAT_V_T vfloat64m1_t
#define VLSEG2_FLOAT vlseg2e64_v_f64m1
#define VSSEG2_FLOAT vsseg2e64_v_f64m1
#define VSSEG4_FLOAT vsseg4e64_v_f64m1
#define VSSEG8_FLOAT vsseg8e64_v_f64m1
#define VSETVL(n) __riscv_vsetvl_e64m1(n)
#define FLOAT_V_T vfloat64m1_t
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m1
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m1
#define VSSEG4_FLOAT __riscv_vsseg4e64_v_f64m1
#define VSSEG8_FLOAT __riscv_vsseg8e64_v_f64m1
#endif

// Optimizes the implementation in ../generic/zgemm_ncopy_4.c


+ 8
- 8
kernel/riscv64/zgemm_ncopy_rvv_v1.c View File

@@ -29,15 +29,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#endif

int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b){


+ 20
- 20
kernel/riscv64/zgemm_tcopy_4_rvv.c View File

@@ -28,27 +28,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m1(n)
#define FLOAT_V_T vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m1
#define VSEV_FLOAT vse32_v_f32m1
#define VLSSEG2_FLOAT vlsseg2e32_v_f32m1
#define VLSSEG4_FLOAT vlsseg4e32_v_f32m1
#define VLSSEG8_FLOAT vlsseg8e32_v_f32m1
#define VSSEG2_FLOAT vsseg2e32_v_f32m1
#define VSSEG4_FLOAT vsseg4e32_v_f32m1
#define VSSEG8_FLOAT vsseg8e32_v_f32m1
#define VSETVL(n) __riscv_vsetvl_e32m1(n)
#define FLOAT_V_T vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m1
#define VSEV_FLOAT __riscv_vse32_v_f32m1
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m1
#define VLSSEG4_FLOAT __riscv_vlsseg4e32_v_f32m1
#define VLSSEG8_FLOAT __riscv_vlsseg8e32_v_f32m1
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m1
#define VSSEG4_FLOAT __riscv_vsseg4e32_v_f32m1
#define VSSEG8_FLOAT __riscv_vsseg8e32_v_f32m1
#else
#define VSETVL(n) vsetvl_e64m1(n)
#define FLOAT_V_T vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m1
#define VSEV_FLOAT vse64_v_f64m1
#define VLSSEG2_FLOAT vlsseg2e64_v_f64m1
#define VLSSEG4_FLOAT vlsseg4e64_v_f64m1
#define VLSSEG8_FLOAT vlsseg8e64_v_f64m1
#define VSSEG2_FLOAT vsseg2e64_v_f64m1
#define VSSEG4_FLOAT vsseg4e64_v_f64m1
#define VSSEG8_FLOAT vsseg8e64_v_f64m1
#define VSETVL(n) __riscv_vsetvl_e64m1(n)
#define FLOAT_V_T vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m1
#define VSEV_FLOAT __riscv_vse64_v_f64m1
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m1
#define VLSSEG4_FLOAT __riscv_vlsseg4e64_v_f64m1
#define VLSSEG8_FLOAT __riscv_vlsseg8e64_v_f64m1
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m1
#define VSSEG4_FLOAT __riscv_vsseg4e64_v_f64m1
#define VSSEG8_FLOAT __riscv_vsseg8e64_v_f64m1
#endif

int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, FLOAT *b){


+ 8
- 8
kernel/riscv64/zgemm_tcopy_rvv_v1.c View File

@@ -28,15 +28,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#endif

int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b)


+ 18
- 18
kernel/riscv64/zgemmkernel_rvv_v1x4.c View File

@@ -28,25 +28,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define VFMVVF_FLOAT vfmv_v_f_f32m2
#define VFMACCVF_FLOAT vfmacc_vf_f32m2
#define VFNMSACVF_FLOAT vfnmsac_vf_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define VFMVVF_FLOAT vfmv_v_f_f64m2
#define VFMACCVF_FLOAT vfmacc_vf_f64m2
#define VFNMSACVF_FLOAT vfnmsac_vf_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m2
#endif

#if defined(NN) || defined(NT) || defined(TN) || defined(TT)


+ 24
- 24
kernel/riscv64/zgemv_n_rvv.c View File

@@ -28,31 +28,31 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m4(n)
#define FLOAT_V_T vfloat32m4_t
#define VLEV_FLOAT vle32_v_f32m4
#define VLSEV_FLOAT vlse32_v_f32m4
#define VSEV_FLOAT vse32_v_f32m4
#define VSSEV_FLOAT vsse32_v_f32m4
#define VLSEG_FLOAT vlseg2e32_v_f32m4
#define VSSEG_FLOAT vsseg2e32_v_f32m4
#define VLSSEG_FLOAT vlsseg2e32_v_f32m4
#define VSSSEG_FLOAT vssseg2e32_v_f32m4
#define VFMACCVF_FLOAT vfmacc_vf_f32m4
#define VFNMSACVF_FLOAT vfnmsac_vf_f32m4
#define VSETVL(n) __riscv_vsetvl_e32m4(n)
#define FLOAT_V_T vfloat32m4_t
#define VLEV_FLOAT __riscv_vle32_v_f32m4
#define VLSEV_FLOAT __riscv_vlse32_v_f32m4
#define VSEV_FLOAT __riscv_vse32_v_f32m4
#define VSSEV_FLOAT __riscv_vsse32_v_f32m4
#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
#define VSSEG_FLOAT __riscv_vsseg2e32_v_f32m4
#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4
#define VSSSEG_FLOAT __riscv_vssseg2e32_v_f32m4
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4
#else
#define VSETVL(n) vsetvl_e64m4(n)
#define FLOAT_V_T vfloat64m4_t
#define VLEV_FLOAT vle64_v_f64m4
#define VLSEV_FLOAT vlse64_v_f64m4
#define VSEV_FLOAT vse64_v_f64m4
#define VSSEV_FLOAT vsse64_v_f64m4
#define VLSEG_FLOAT vlseg2e64_v_f64m4
#define VSSEG_FLOAT vsseg2e64_v_f64m4
#define VLSSEG_FLOAT vlsseg2e64_v_f64m4
#define VSSSEG_FLOAT vssseg2e64_v_f64m4
#define VFMACCVF_FLOAT vfmacc_vf_f64m4
#define VFNMSACVF_FLOAT vfnmsac_vf_f64m4
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define FLOAT_V_T vfloat64m4_t
#define VLEV_FLOAT __riscv_vle64_v_f64m4
#define VLSEV_FLOAT __riscv_vlse64_v_f64m4
#define VSEV_FLOAT __riscv_vse64_v_f64m4
#define VSSEV_FLOAT __riscv_vsse64_v_f64m4
#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4
#define VSSEG_FLOAT __riscv_vsseg2e64_v_f64m4
#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4
#define VSSSEG_FLOAT __riscv_vssseg2e64_v_f64m4
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4
#endif

int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer)


+ 30
- 31
kernel/riscv64/zgemv_t_rvv.c View File

@@ -28,33 +28,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m4(n)
#define VSETVL_MAX_M1 vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLSEG_FLOAT vlseg2e32_v_f32m4
#define VLSSEG_FLOAT vlsseg2e32_v_f32m4
#define VFREDSUM_FLOAT vfredusum_vs_f32m4_f32m1
#define VFMACCVV_FLOAT vfmacc_vv_f32m4
#define VFNMSACVV_FLOAT vfnmsac_vv_f32m4
#define VFMVVF_FLOAT vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMULVV_FLOAT vfmul_vv_f32m4
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSETVL(n) __riscv_vsetvl_e32m4(n)
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m4_f32m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m4
#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f32m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMULVV_FLOAT __riscv_vfmul_vv_f32m4
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#else
#define VSETVL(n) vsetvl_e64m4(n)
#define VSETVL_MAX_M1 vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLSEG_FLOAT vlseg2e64_v_f64m4
#define VLSSEG_FLOAT vlsseg2e64_v_f64m4
#define VFREDSUM_FLOAT vfredusum_vs_f64m4_f64m1
#define VFMACCVV_FLOAT vfmacc_vv_f64m4
#define VFNMSACVV_FLOAT vfnmsac_vv_f64m4
#define VFMVVF_FLOAT vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMULVV_FLOAT vfmul_vv_f64m4
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4
#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m4_f64m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m4
#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f64m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMULVV_FLOAT __riscv_vfmul_vv_f64m4
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#endif

int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer)
@@ -73,7 +73,6 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i,
BLASLONG lda2 = lda * 2;

size_t vlmax = VSETVL_MAX_M1;
v_res = VFMVVF_FLOAT_M1(0, vlmax);
v_z0 = VFMVVF_FLOAT_M1(0, vlmax);
vlmax = VSETVL(m);

@@ -105,9 +104,9 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i,
ix += vl * inc_x * 2;
}
v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vlmax);
v_res = VFREDSUM_FLOAT(vr, v_z0, vlmax);
temp_r = VFMVFS_FLOAT_M1(v_res);
v_res = VFREDSUM_FLOAT(v_res, vi, v_z0, vlmax);
v_res = VFREDSUM_FLOAT(vi, v_z0, vlmax);
temp_i = VFMVFS_FLOAT_M1(v_res);

#if !defined(XCONJ)
@@ -149,9 +148,9 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha_r, FLOAT alpha_i,
ix += vl * inc_x * 2;
}
v_res = VFREDSUM_FLOAT(v_res, vr, v_z0, vlmax);
v_res = VFREDSUM_FLOAT(vr, v_z0, vlmax);
temp_r = VFMVFS_FLOAT_M1(v_res);
v_res = VFREDSUM_FLOAT(v_res, vi, v_z0, vlmax);
v_res = VFREDSUM_FLOAT(vi, v_z0, vlmax);
temp_i = VFMVFS_FLOAT_M1(v_res);
#if !defined(XCONJ)


+ 42
- 42
kernel/riscv64/zhemm_ltcopy_rvv_v1.c View File

@@ -28,45 +28,45 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define VSETVL_MAX vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEV_FLOAT vlse32_v_f32m2
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define INT_V_T vint32m2_t
#define VID_V_INT vid_v_i32m2
#define VADD_VX_INT vadd_vx_i32m2
#define VFRSUB_VF_FLOAT vfrsub_vf_f32m2
#define VMSGT_VX_INT vmsgt_vx_i32m2_b16
#define VMSLT_VX_INT vmslt_vx_i32m2_b16
#define VMSEQ_VX_INT vmseq_vx_i32m2_b16
#define VBOOL_T vbool16_t
#define VMERGE_VVM_FLOAT vmerge_vvm_f32m2
#define VFMVVF_FLOAT vfmv_v_f_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define INT_V_T vint32m2_t
#define VID_V_INT __riscv_vid_v_i32m2
#define VADD_VX_INT __riscv_vadd_vx_i32m2
#define VFRSUB_VF_FLOAT __riscv_vfrsub_vf_f32m2
#define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16
#define VMSLT_VX_INT __riscv_vmslt_vx_i32m2_b16
#define VMSEQ_VX_INT __riscv_vmseq_vx_i32m2_b16
#define VBOOL_T vbool16_t
#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define VSETVL_MAX vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEV_FLOAT vlse64_v_f64m2
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define INT_V_T vint64m2_t
#define VID_V_INT vid_v_i64m2
#define VADD_VX_INT vadd_vx_i64m2
#define VFRSUB_VF_FLOAT vfrsub_vf_f64m2
#define VMSGT_VX_INT vmsgt_vx_i64m2_b32
#define VMSLT_VX_INT vmslt_vx_i64m2_b32
#define VMSEQ_VX_INT vmseq_vx_i64m2_b32
#define VBOOL_T vbool32_t
#define VMERGE_VVM_FLOAT vmerge_vvm_f64m2
#define VFMVVF_FLOAT vfmv_v_f_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define INT_V_T vint64m2_t
#define VID_V_INT __riscv_vid_v_i64m2
#define VADD_VX_INT __riscv_vadd_vx_i64m2
#define VFRSUB_VF_FLOAT __riscv_vfrsub_vf_f64m2
#define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32
#define VMSLT_VX_INT __riscv_vmslt_vx_i64m2_b32
#define VMSEQ_VX_INT __riscv_vmseq_vx_i64m2_b32
#define VBOOL_T vbool32_t
#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2
#endif


@@ -104,13 +104,13 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
vbool_lt0 = VMSLT_VX_INT(vindex, 0, vl);
vbool_eq0 = VMSEQ_VX_INT(vindex, 0, vl);

vb0 = VMERGE_VVM_FLOAT(vbool_gt0, va20, va10, vl);
vb1 = VMERGE_VVM_FLOAT(vbool_gt0, va21, va11, vl);
vb0 = VMERGE_VVM_FLOAT(va20, va10, vbool_gt0, vl);
vb1 = VMERGE_VVM_FLOAT(va21, va11, vbool_gt0, vl);

vb2 = VFRSUB_VF_FLOAT(vb1, ZERO, vl);

vb1 = VMERGE_VVM_FLOAT(vbool_lt0, vb1, vb2, vl);
vb1 = VMERGE_VVM_FLOAT(vbool_eq0, vb1, vzero, vl);
vb1 = VMERGE_VVM_FLOAT(vb1, vb2, vbool_lt0, vl);
vb1 = VMERGE_VVM_FLOAT(vb1, vzero, vbool_eq0, vl);
VSSEG2_FLOAT(b, vb0, vb1, vl);

b += vl * 2;


+ 42
- 42
kernel/riscv64/zhemm_utcopy_rvv_v1.c View File

@@ -28,45 +28,45 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define VSETVL_MAX vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEV_FLOAT vlse32_v_f32m2
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define INT_V_T vint32m2_t
#define VID_V_INT vid_v_i32m2
#define VADD_VX_INT vadd_vx_i32m2
#define VFRSUB_VF_FLOAT vfrsub_vf_f32m2
#define VMSGT_VX_INT vmsgt_vx_i32m2_b16
#define VMSLT_VX_INT vmslt_vx_i32m2_b16
#define VMSEQ_VX_INT vmseq_vx_i32m2_b16
#define VBOOL_T vbool16_t
#define VMERGE_VVM_FLOAT vmerge_vvm_f32m2
#define VFMVVF_FLOAT vfmv_v_f_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define INT_V_T vint32m2_t
#define VID_V_INT __riscv_vid_v_i32m2
#define VADD_VX_INT __riscv_vadd_vx_i32m2
#define VFRSUB_VF_FLOAT __riscv_vfrsub_vf_f32m2
#define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16
#define VMSLT_VX_INT __riscv_vmslt_vx_i32m2_b16
#define VMSEQ_VX_INT __riscv_vmseq_vx_i32m2_b16
#define VBOOL_T vbool16_t
#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define VSETVL_MAX vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEV_FLOAT vlse64_v_f64m2
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define INT_V_T vint64m2_t
#define VID_V_INT vid_v_i64m2
#define VADD_VX_INT vadd_vx_i64m2
#define VFRSUB_VF_FLOAT vfrsub_vf_f64m2
#define VMSGT_VX_INT vmsgt_vx_i64m2_b32
#define VMSLT_VX_INT vmslt_vx_i64m2_b32
#define VMSEQ_VX_INT vmseq_vx_i64m2_b32
#define VBOOL_T vbool32_t
#define VMERGE_VVM_FLOAT vmerge_vvm_f64m2
#define VFMVVF_FLOAT vfmv_v_f_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define INT_V_T vint64m2_t
#define VID_V_INT __riscv_vid_v_i64m2
#define VADD_VX_INT __riscv_vadd_vx_i64m2
#define VFRSUB_VF_FLOAT __riscv_vfrsub_vf_f64m2
#define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32
#define VMSLT_VX_INT __riscv_vmslt_vx_i64m2_b32
#define VMSEQ_VX_INT __riscv_vmseq_vx_i64m2_b32
#define VBOOL_T vbool32_t
#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2
#endif


@@ -101,13 +101,13 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
vbool_gt0 = VMSGT_VX_INT(vindex, 0, vl);
vbool_eq0 = VMSEQ_VX_INT(vindex, 0, vl);

vb0 = VMERGE_VVM_FLOAT(vbool_gt0, va20, va10, vl);
vb1 = VMERGE_VVM_FLOAT(vbool_gt0, va21, va11, vl);
vb0 = VMERGE_VVM_FLOAT(va20, va10, vbool_gt0, vl);
vb1 = VMERGE_VVM_FLOAT(va21, va11, vbool_gt0, vl);

vb2 = VFRSUB_VF_FLOAT(vb1, ZERO, vl);

vb1 = VMERGE_VVM_FLOAT(vbool_gt0, vb1, vb2, vl);
vb1 = VMERGE_VVM_FLOAT(vbool_eq0, vb1, vzero, vl);
vb1 = VMERGE_VVM_FLOAT(vb1, vb2, vbool_gt0, vl);
vb1 = VMERGE_VVM_FLOAT(vb1, vzero, vbool_eq0, vl);
VSSEG2_FLOAT(b, vb0, vb1, vl);

b += vl * 2;


+ 33
- 33
kernel/riscv64/znrm2_rvv.c View File

@@ -28,35 +28,35 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m4(n)
#define VSETVL_MAX vsetvlmax_e32m4()
#define VSETVL_MAX_M1 vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLSEG_FLOAT vlseg2e32_v_f32m4
#define VLSSEG_FLOAT vlsseg2e32_v_f32m4
#define VFREDSUM_FLOAT vfredusum_vs_f32m4_f32m1
#define VFMACCVV_FLOAT vfmacc_vv_f32m4
#define VFMVVF_FLOAT vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFREDMAXVS_FLOAT vfredmax_vs_f32m4_f32m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VFABSV_FLOAT vfabs_v_f32m4
#define VSETVL(n) __riscv_vsetvl_e32m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m4()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f32m4_f32m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f32m4_f32m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#define VFABSV_FLOAT __riscv_vfabs_v_f32m4
#else
#define VSETVL(n) vsetvl_e64m4(n)
#define VSETVL_MAX vsetvlmax_e64m4()
#define VSETVL_MAX_M1 vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLSEG_FLOAT vlseg2e64_v_f64m4
#define VLSSEG_FLOAT vlsseg2e64_v_f64m4
#define VFREDSUM_FLOAT vfredusum_vs_f64m4_f64m1
#define VFMACCVV_FLOAT vfmacc_vv_f64m4
#define VFMVVF_FLOAT vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFREDMAXVS_FLOAT vfredmax_vs_f64m4_f64m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VFABSV_FLOAT vfabs_v_f64m4
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m4()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4
#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4
#define VFREDSUM_FLOAT __riscv_vfredusum_vs_f64m4_f64m1
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFREDMAXVS_FLOAT __riscv_vfredmax_vs_f64m4_f64m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#define VFABSV_FLOAT __riscv_vfabs_v_f64m4
#endif

// TODO: Should single precision use the widening MAC, or perhaps all should be double?
@@ -85,10 +85,10 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
v0 = VFABSV_FLOAT(v0, vl);
v1 = VFABSV_FLOAT(v1, vl);

v_max = VFREDMAXVS_FLOAT(v_max, v0, v_max, vl);
v_max = VFREDMAXVS_FLOAT(v0, v_max, vl);
vr = VFMACCVV_FLOAT(vr, v0, v0, vl);

v_max = VFREDMAXVS_FLOAT(v_max, v1, v_max, vl);
v_max = VFREDMAXVS_FLOAT(v1, v_max, vl);
vr = VFMACCVV_FLOAT(vr, v1, v1, vl);
}

@@ -103,16 +103,16 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
v0 = VFABSV_FLOAT(v0, vl);
v1 = VFABSV_FLOAT(v1, vl);

v_max = VFREDMAXVS_FLOAT(v_max, v0, v_max, vl);
v_max = VFREDMAXVS_FLOAT(v0, v_max, vl);
vr = VFMACCVV_FLOAT(vr, v0, v0, vl);

v_max = VFREDMAXVS_FLOAT(v_max, v1, v_max, vl);
v_max = VFREDMAXVS_FLOAT(v1, v_max, vl);
vr = VFMACCVV_FLOAT(vr, v1, v1, vl);
}

}

v_res = VFREDSUM_FLOAT(v_res, vr, v_res, vlmax);
v_res = VFREDSUM_FLOAT(vr, v_res, vlmax);

ssq = VFMVFS_FLOAT_M1(v_res);
scale = VFMVFS_FLOAT_M1(v_max);


+ 26
- 26
kernel/riscv64/zrot_rvv.c View File

@@ -28,33 +28,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m4(n)
#define FLOAT_V_T vfloat32m4_t
#define VLEV_FLOAT vle32_v_f32m4
#define VLSEV_FLOAT vlse32_v_f32m4
#define VSEV_FLOAT vse32_v_f32m4
#define VSSEV_FLOAT vsse32_v_f32m4
#define VLSEG_FLOAT vlseg2e32_v_f32m4
#define VSSEG_FLOAT vsseg2e32_v_f32m4
#define VLSSEG_FLOAT vlsseg2e32_v_f32m4
#define VSSSEG_FLOAT vssseg2e32_v_f32m4
#define VFMACCVF_FLOAT vfmacc_vf_f32m4
#define VFMULVF_FLOAT vfmul_vf_f32m4
#define VFNMSACVF_FLOAT vfnmsac_vf_f32m4
#define VSETVL(n) __riscv_vsetvl_e32m4(n)
#define FLOAT_V_T vfloat32m4_t
#define VLEV_FLOAT __riscv_vle32_v_f32m4
#define VLSEV_FLOAT __riscv_vlse32_v_f32m4
#define VSEV_FLOAT __riscv_vse32_v_f32m4
#define VSSEV_FLOAT __riscv_vsse32_v_f32m4
#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
#define VSSEG_FLOAT __riscv_vsseg2e32_v_f32m4
#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4
#define VSSSEG_FLOAT __riscv_vssseg2e32_v_f32m4
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4
#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4
#else
#define VSETVL(n) vsetvl_e64m4(n)
#define FLOAT_V_T vfloat64m4_t
#define VLEV_FLOAT vle64_v_f64m4
#define VLSEV_FLOAT vlse64_v_f64m4
#define VSEV_FLOAT vse64_v_f64m4
#define VSSEV_FLOAT vsse64_v_f64m4
#define VLSEG_FLOAT vlseg2e64_v_f64m4
#define VSSEG_FLOAT vsseg2e64_v_f64m4
#define VLSSEG_FLOAT vlsseg2e64_v_f64m4
#define VSSSEG_FLOAT vssseg2e64_v_f64m4
#define VFMACCVF_FLOAT vfmacc_vf_f64m4
#define VFMULVF_FLOAT vfmul_vf_f64m4
#define VFNMSACVF_FLOAT vfnmsac_vf_f64m4
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define FLOAT_V_T vfloat64m4_t
#define VLEV_FLOAT __riscv_vle64_v_f64m4
#define VLSEV_FLOAT __riscv_vlse64_v_f64m4
#define VSEV_FLOAT __riscv_vse64_v_f64m4
#define VSSEV_FLOAT __riscv_vsse64_v_f64m4
#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4
#define VSSEG_FLOAT __riscv_vsseg2e64_v_f64m4
#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4
#define VSSSEG_FLOAT __riscv_vssseg2e64_v_f64m4
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4
#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4
#endif

int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT c, FLOAT s)


+ 22
- 22
kernel/riscv64/zscal_rvv.c View File

@@ -28,29 +28,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m4(n)
#define VSETVL_MAX vsetvlmax_e32m4()
#define FLOAT_V_T vfloat32m4_t
#define VLSEG_FLOAT vlseg2e32_v_f32m4
#define VLSSEG_FLOAT vlsseg2e32_v_f32m4
#define VSSEG_FLOAT vsseg2e32_v_f32m4
#define VSSSEG_FLOAT vssseg2e32_v_f32m4
#define VFMACCVF_FLOAT vfmacc_vf_f32m4
#define VFMULVF_FLOAT vfmul_vf_f32m4
#define VFNMSACVF_FLOAT vfnmsac_vf_f32m4
#define VFMVVF_FLOAT vfmv_v_f_f32m4
#define VSETVL(n) __riscv_vsetvl_e32m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m4()
#define FLOAT_V_T vfloat32m4_t
#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4
#define VSSEG_FLOAT __riscv_vsseg2e32_v_f32m4
#define VSSSEG_FLOAT __riscv_vssseg2e32_v_f32m4
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m4
#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
#else
#define VSETVL(n) vsetvl_e64m4(n)
#define VSETVL_MAX vsetvlmax_e64m4()
#define FLOAT_V_T vfloat64m4_t
#define VLSEG_FLOAT vlseg2e64_v_f64m4
#define VLSSEG_FLOAT vlsseg2e64_v_f64m4
#define VSSEG_FLOAT vsseg2e64_v_f64m4
#define VSSSEG_FLOAT vssseg2e64_v_f64m4
#define VFMACCVF_FLOAT vfmacc_vf_f64m4
#define VFMULVF_FLOAT vfmul_vf_f64m4
#define VFNMSACVF_FLOAT vfnmsac_vf_f64m4
#define VFMVVF_FLOAT vfmv_v_f_f64m4
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m4()
#define FLOAT_V_T vfloat64m4_t
#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4
#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4
#define VSSEG_FLOAT __riscv_vsseg2e64_v_f64m4
#define VSSSEG_FLOAT __riscv_vssseg2e64_v_f64m4
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m4
#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m4
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m4
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
#endif

int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r,FLOAT da_i, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2)


+ 23
- 24
kernel/riscv64/zsum_rvv.c View File

@@ -28,29 +28,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m4(n)
#define VSETVL_MAX vsetvlmax_e32m4()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLSEG_FLOAT vlseg2e32_v_f32m4
#define VLSSEG_FLOAT vlsseg2e32_v_f32m4
#define VFREDSUMVS_FLOAT vfredusum_vs_f32m4_f32m1
#define VFMVVF_FLOAT vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VFADDVV_FLOAT vfadd_vv_f32m4
#define VSETVL(n) __riscv_vsetvl_e32m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m4()
#define FLOAT_V_T vfloat32m4_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4
#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f32m4_f32m1
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#define VFADDVV_FLOAT __riscv_vfadd_vv_f32m4
#else
#define VSETVL(n) vsetvl_e64m4(n)
#define VSETVL_MAX vsetvlmax_e64m4()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLSEG_FLOAT vlseg2e64_v_f64m4
#define VLSSEG_FLOAT vlsseg2e64_v_f64m4
#define VFREDSUMVS_FLOAT vfredusum_vs_f64m4_f64m1
#define VFMVVF_FLOAT vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VFADDVV_FLOAT vfadd_vv_f64m4
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m4()
#define FLOAT_V_T vfloat64m4_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4
#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4
#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f64m4_f64m1
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m4
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#define VFADDVV_FLOAT __riscv_vfadd_vv_f64m4
#endif

FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
@@ -88,9 +88,8 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)

}

FLOAT_V_T_M1 v_z0 = VFMVVF_FLOAT_M1(0, vlmax);
FLOAT_V_T_M1 v_res = VFMVVF_FLOAT_M1(0, vlmax);
v_res = VFREDSUMVS_FLOAT(v_res, v_sum, v_z0, vlmax);
v_res = VFREDSUMVS_FLOAT(v_sum, v_res, vlmax);
sumf += VFMVFS_FLOAT_M1(v_res);

return(sumf);


+ 12
- 12
kernel/riscv64/zswap_rvv.c View File

@@ -28,19 +28,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m4(n)
#define FLOAT_V_T vfloat32m4_t
#define VLSEG_FLOAT vlseg2e32_v_f32m4
#define VLSSEG_FLOAT vlsseg2e32_v_f32m4
#define VSSEG_FLOAT vsseg2e32_v_f32m4
#define VSSSEG_FLOAT vssseg2e32_v_f32m4
#define VSETVL(n) __riscv_vsetvl_e32m4(n)
#define FLOAT_V_T vfloat32m4_t
#define VLSEG_FLOAT __riscv_vlseg2e32_v_f32m4
#define VLSSEG_FLOAT __riscv_vlsseg2e32_v_f32m4
#define VSSEG_FLOAT __riscv_vsseg2e32_v_f32m4
#define VSSSEG_FLOAT __riscv_vssseg2e32_v_f32m4
#else
#define VSETVL(n) vsetvl_e64m4(n)
#define FLOAT_V_T vfloat64m4_t
#define VLSEG_FLOAT vlseg2e64_v_f64m4
#define VLSSEG_FLOAT vlsseg2e64_v_f64m4
#define VSSEG_FLOAT vsseg2e64_v_f64m4
#define VSSSEG_FLOAT vssseg2e64_v_f64m4
#define VSETVL(n) __riscv_vsetvl_e64m4(n)
#define FLOAT_V_T vfloat64m4_t
#define VLSEG_FLOAT __riscv_vlseg2e64_v_f64m4
#define VLSSEG_FLOAT __riscv_vlsseg2e64_v_f64m4
#define VSSEG_FLOAT __riscv_vsseg2e64_v_f64m4
#define VSSSEG_FLOAT __riscv_vssseg2e64_v_f64m4
#endif

int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT dummy3, FLOAT dummy4, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2)


+ 32
- 32
kernel/riscv64/zsymm_lcopy_rvv_v1.c View File

@@ -28,37 +28,37 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define VSETVL_MAX vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEV_FLOAT vlse32_v_f32m2
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define INT_V_T vint32m2_t
#define VID_V_INT vid_v_i32m2
#define VADD_VX_INT vadd_vx_i32m2
#define VMSGT_VX_INT vmsgt_vx_i32m2_b16
#define VBOOL_T vbool16_t
#define VMERGE_VVM_FLOAT vmerge_vvm_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define INT_V_T vint32m2_t
#define VID_V_INT __riscv_vid_v_i32m2
#define VADD_VX_INT __riscv_vadd_vx_i32m2
#define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16
#define VBOOL_T vbool16_t
#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define VSETVL_MAX vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEV_FLOAT vlse64_v_f64m2
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define INT_V_T vint64m2_t
#define VID_V_INT vid_v_i64m2
#define VADD_VX_INT vadd_vx_i64m2
#define VMSGT_VX_INT vmsgt_vx_i64m2_b32
#define VBOOL_T vbool32_t
#define VMERGE_VVM_FLOAT vmerge_vvm_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define INT_V_T vint64m2_t
#define VID_V_INT __riscv_vid_v_i64m2
#define VADD_VX_INT __riscv_vadd_vx_i64m2
#define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32
#define VBOOL_T vbool32_t
#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2
#endif

int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLONG posY, FLOAT *b)
@@ -91,8 +91,8 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
vindex = VADD_VX_INT(vindex_max, offset, vl);
vbool = VMSGT_VX_INT(vindex, 0, vl);

vb0 = VMERGE_VVM_FLOAT(vbool, va20, va10, vl);
vb1 = VMERGE_VVM_FLOAT(vbool, va21, va11, vl);
vb0 = VMERGE_VVM_FLOAT(va20, va10, vbool, vl);
vb1 = VMERGE_VVM_FLOAT(va21, va11, vbool, vl);
VSSEG2_FLOAT(b, vb0, vb1, vl);

b += vl * 2;


+ 32
- 32
kernel/riscv64/zsymm_ucopy_rvv_v1.c View File

@@ -28,37 +28,37 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define VSETVL_MAX vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEV_FLOAT vlse32_v_f32m2
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define INT_V_T vint32m2_t
#define VID_V_INT vid_v_i32m2
#define VADD_VX_INT vadd_vx_i32m2
#define VMSGT_VX_INT vmsgt_vx_i32m2_b16
#define VBOOL_T vbool16_t
#define VMERGE_VVM_FLOAT vmerge_vvm_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m2()
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define INT_V_T vint32m2_t
#define VID_V_INT __riscv_vid_v_i32m2
#define VADD_VX_INT __riscv_vadd_vx_i32m2
#define VMSGT_VX_INT __riscv_vmsgt_vx_i32m2_b16
#define VBOOL_T vbool16_t
#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define VSETVL_MAX vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEV_FLOAT vlse64_v_f64m2
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define INT_V_T vint64m2_t
#define VID_V_INT vid_v_i64m2
#define VADD_VX_INT vadd_vx_i64m2
#define VMSGT_VX_INT vmsgt_vx_i64m2_b32
#define VBOOL_T vbool32_t
#define VMERGE_VVM_FLOAT vmerge_vvm_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m2()
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define INT_V_T vint64m2_t
#define VID_V_INT __riscv_vid_v_i64m2
#define VADD_VX_INT __riscv_vadd_vx_i64m2
#define VMSGT_VX_INT __riscv_vmsgt_vx_i64m2_b32
#define VBOOL_T vbool32_t
#define VMERGE_VVM_FLOAT __riscv_vmerge_vvm_f64m2
#endif


@@ -92,8 +92,8 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
vindex = VADD_VX_INT(vindex_max, offset, vl);
vbool = VMSGT_VX_INT(vindex, 0, vl);

vb0 = VMERGE_VVM_FLOAT(vbool, va20, va10, vl);
vb1 = VMERGE_VVM_FLOAT(vbool, va21, va11, vl);
vb0 = VMERGE_VVM_FLOAT(va20, va10, vbool, vl);
vb1 = VMERGE_VVM_FLOAT(va21, va11, vbool, vl);
VSSEG2_FLOAT(b, vb0, vb1, vl);

b += vl * 2;


+ 32
- 32
kernel/riscv64/ztrmm_lncopy_rvv_v1.c View File

@@ -30,35 +30,35 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEV_FLOAT vlse32_v_f32m2
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vint32m2_t
#define VID_V_UINT vid_v_i32m2
#define VMSGTU_VX_UINT vmsgt_vx_i32m2_b16
#define VMSEQ_VX_UINT vmseq_vx_i32m2_b16
#define VFMERGE_VFM_FLOAT vfmerge_vfm_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vint32m2_t
#define VID_V_UINT __riscv_vid_v_i32m2
#define VMSGTU_VX_UINT __riscv_vmsgt_vx_i32m2_b16
#define VMSEQ_VX_UINT __riscv_vmseq_vx_i32m2_b16
#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEV_FLOAT vlse64_v_f64m2
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT vid_v_u64m2
#define VMSGTU_VX_UINT vmsgtu_vx_u64m2_b32
#define VMSEQ_VX_UINT vmseq_vx_u64m2_b32
#define VFMERGE_VFM_FLOAT vfmerge_vfm_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT __riscv_vid_v_u64m2
#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32
#define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32
#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2
#endif

int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLONG posY, FLOAT *b){
@@ -121,12 +121,12 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
{
VLSSEG2_FLOAT(&va0, &va1, ao, stride_lda, vl);
vbool_cmp = VMSGTU_VX_UINT(vindex, j, vl);
va0 = VFMERGE_VFM_FLOAT(vbool_cmp, va0, ZERO, vl);
va1 = VFMERGE_VFM_FLOAT(vbool_cmp, va1, ZERO, vl);
va0 = VFMERGE_VFM_FLOAT(va0, ZERO, vbool_cmp, vl);
va1 = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_cmp, vl);
#ifdef UNIT
vbool_eq = VMSEQ_VX_UINT(vindex, j, vl);
va0 = VFMERGE_VFM_FLOAT(vbool_eq, va0, ONE, vl);
va1 = VFMERGE_VFM_FLOAT(vbool_eq, va1, ZERO, vl);
va0 = VFMERGE_VFM_FLOAT(va0, ONE, vbool_eq, vl);
va1 = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_eq, vl);
#endif
VSSEG2_FLOAT(b, va0, va1, vl);
ao += 2;


+ 30
- 31
kernel/riscv64/ztrmm_ltcopy_rvv_v1.c View File

@@ -30,33 +30,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT vid_v_u32m2
#define VMSLTU_VX_UINT vmsltu_vx_u32m2_b16
#define VMSEQ_VX_UINT vmseq_vx_u32m2_b16
#define VFMERGE_VFM_FLOAT vfmerge_vfm_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT __riscv_vid_v_u32m2
#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u32m2_b16
#define VMSEQ_VX_UINT __riscv_vmseq_vx_u32m2_b16
#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT vid_v_u64m2
#define VMSLTU_VX_UINT vmsltu_vx_u64m2_b32
#define VMSEQ_VX_UINT vmseq_vx_u64m2_b32
#define VFMERGE_VFM_FLOAT vfmerge_vfm_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT __riscv_vid_v_u64m2
#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u64m2_b32
#define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32
#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2
#endif

int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLONG posY, FLOAT *b){
@@ -117,14 +117,13 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
//va1 = VLEV_FLOAT(ao, vl);
VLSEG2_FLOAT(&va0, &va1, ao, vl);
vbool_cmp = VMSLTU_VX_UINT(vindex, j, vl);
va0 = VFMERGE_VFM_FLOAT(vbool_cmp, va0, ZERO, vl);
va1 = VFMERGE_VFM_FLOAT(vbool_cmp, va1, ZERO, vl);
va0 = VFMERGE_VFM_FLOAT(va0, ZERO, vbool_cmp, vl);
va1 = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_cmp, vl);
#ifdef UNIT
vbool_eq = VMSEQ_VX_UINT(vindex, j, vl);
va0 = VFMERGE_VFM_FLOAT(vbool_eq, va0, ONE, vl);
va1 = VFMERGE_VFM_FLOAT(vbool_eq, va1, ZERO, vl);
va0 = VFMERGE_VFM_FLOAT(va0, ONE, vbool_eq, vl);
va1 = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_eq, vl);
#endif
//VSEV_FLOAT(b, vb, vl);
VSSEG2_FLOAT(b, va0, va1, vl);
ao += lda * 2;
b += vl * 2;


+ 32
- 32
kernel/riscv64/ztrmm_uncopy_rvv_v1.c View File

@@ -30,35 +30,35 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VLSEV_FLOAT vlse32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT vid_v_u32m2
#define VMSLTU_VX_UINT vmsltu_vx_u32m2_b16
#define VMSEQ_VX_UINT vmseq_vx_u32m2_b16
#define VFMERGE_VFM_FLOAT vfmerge_vfm_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VLSEV_FLOAT __riscv_vlse32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT __riscv_vid_v_u32m2
#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u32m2_b16
#define VMSEQ_VX_UINT __riscv_vmseq_vx_u32m2_b16
#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VLSEV_FLOAT vlse64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT vid_v_u64m2
#define VMSLTU_VX_UINT vmsltu_vx_u64m2_b32
#define VMSEQ_VX_UINT vmseq_vx_u64m2_b32
#define VFMERGE_VFM_FLOAT vfmerge_vfm_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VLSEV_FLOAT __riscv_vlse64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT __riscv_vid_v_u64m2
#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u64m2_b32
#define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32
#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2
#endif

int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLONG posY, FLOAT *b){
@@ -120,12 +120,12 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
{
VLSSEG2_FLOAT(&va0, &va1, ao, stride_lda, vl);
vbool_cmp = VMSLTU_VX_UINT(vindex, j, vl);
va0 = VFMERGE_VFM_FLOAT(vbool_cmp, va0, ZERO, vl);
va1 = VFMERGE_VFM_FLOAT(vbool_cmp, va1, ZERO, vl);
va0 = VFMERGE_VFM_FLOAT(va0, ZERO, vbool_cmp, vl);
va1 = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_cmp, vl);
#ifdef UNIT
vbool_eq = VMSEQ_VX_UINT(vindex, j, vl);
va0 = VFMERGE_VFM_FLOAT(vbool_eq, va0, ONE, vl);
va1 = VFMERGE_VFM_FLOAT(vbool_eq, va1, ZERO, vl);
va0 = VFMERGE_VFM_FLOAT(va0, ONE, vbool_eq, vl);
va1 = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_eq, vl);
#endif
VSSEG2_FLOAT(b, va0, va1, vl);
ao += 2;


+ 30
- 30
kernel/riscv64/ztrmm_utcopy_rvv_v1.c View File

@@ -32,33 +32,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT vid_v_u32m2
#define VMSGTU_VX_UINT vmsgtu_vx_u32m2_b16
#define VMSEQ_VX_UINT vmseq_vx_u32m2_b16
#define VFMERGE_VFM_FLOAT vfmerge_vfm_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT __riscv_vid_v_u32m2
#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u32m2_b16
#define VMSEQ_VX_UINT __riscv_vmseq_vx_u32m2_b16
#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT vid_v_u64m2
#define VMSGTU_VX_UINT vmsgtu_vx_u64m2_b32
#define VMSEQ_VX_UINT vmseq_vx_u64m2_b32
#define VFMERGE_VFM_FLOAT vfmerge_vfm_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT __riscv_vid_v_u64m2
#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32
#define VMSEQ_VX_UINT __riscv_vmseq_vx_u64m2_b32
#define VFMERGE_VFM_FLOAT __riscv_vfmerge_vfm_f64m2
#endif

int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLONG posY, FLOAT *b){
@@ -117,12 +117,12 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
{
VLSEG2_FLOAT(&va0, &va1, ao, vl);
vbool_cmp = VMSGTU_VX_UINT(vindex, j, vl);
va0 = VFMERGE_VFM_FLOAT(vbool_cmp, va0, ZERO, vl);
va1 = VFMERGE_VFM_FLOAT(vbool_cmp, va1, ZERO, vl);
va0 = VFMERGE_VFM_FLOAT(va0, ZERO, vbool_cmp, vl);
va1 = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_cmp, vl);
#ifdef UNIT
vbool_eq = VMSEQ_VX_UINT(vindex, j, vl);
va0 = VFMERGE_VFM_FLOAT(vbool_eq, va0, ONE, vl);
va1 = VFMERGE_VFM_FLOAT(vbool_eq, va1, ZERO, vl);
va0 = VFMERGE_VFM_FLOAT(va0, ONE, vbool_eq, vl);
va1 = VFMERGE_VFM_FLOAT(va1, ZERO, vbool_eq, vl);
#endif
VSSEG2_FLOAT(b, va0, va1, vl);
ao += lda * 2;


+ 30
- 30
kernel/riscv64/ztrmmkernel_2x2_rvv.c View File

@@ -28,37 +28,37 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define VSETVL_MAX vsetvlmax_e32m2()
#define VSETVL_MAX_M1 vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m2_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT vle32_v_f32m2
#define VLSEG4_FLOAT vlseg4e32_v_f32m2
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VFMVVF_FLOAT vfmv_v_f_f32m2
#define VFMACCVF_FLOAT vfmacc_vf_f32m2
#define VFMACCVV_FLOAT vfmacc_vv_f32m2
#define VFNMSACVV_FLOAT vfnmsac_vv_f32m2
#define VFREDSUMVS_FLOAT vfredusum_vs_f32m2_f32m1
#define VFMVVF_FLOAT_M1 vfmv_v_f_f32m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f32m1_f32
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e32m2()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e32m1()
#define FLOAT_V_T vfloat32m2_t
#define FLOAT_V_T_M1 vfloat32m1_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VLSEG4_FLOAT __riscv_vlseg4e32_v_f32m2
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f32m2
#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f32m2
#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f32m2_f32m1
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f32m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f32m1_f32
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define VSETVL_MAX vsetvlmax_e64m2()
#define VSETVL_MAX_M1 vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m2_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT vle64_v_f64m2
#define VLSEG4_FLOAT vlseg4e64_v_f64m2
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VFMVVF_FLOAT vfmv_v_f_f64m2
#define VFMACCVF_FLOAT vfmacc_vf_f64m2
#define VFMACCVV_FLOAT vfmacc_vv_f64m2
#define VFNMSACVV_FLOAT vfnmsac_vv_f64m2
#define VFREDSUMVS_FLOAT vfredusum_vs_f64m2_f64m1
#define VFMVVF_FLOAT_M1 vfmv_v_f_f64m1
#define VFMVFS_FLOAT_M1 vfmv_f_s_f64m1_f64
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define VSETVL_MAX __riscv_vsetvlmax_e64m2()
#define VSETVL_MAX_M1 __riscv_vsetvlmax_e64m1()
#define FLOAT_V_T vfloat64m2_t
#define FLOAT_V_T_M1 vfloat64m1_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VLSEG4_FLOAT __riscv_vlseg4e64_v_f64m2
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2
#define VFMACCVV_FLOAT __riscv_vfmacc_vv_f64m2
#define VFNMSACVV_FLOAT __riscv_vfnmsac_vv_f64m2
#define VFREDSUMVS_FLOAT __riscv_vfredusum_vs_f64m2_f64m1
#define VFMVVF_FLOAT_M1 __riscv_vfmv_v_f_f64m1
#define VFMVFS_FLOAT_M1 __riscv_vfmv_f_s_f64m1_f64
#endif

// Optimizes the implementation in ../generic/ztrmmkernel_2x2.c


+ 20
- 20
kernel/riscv64/ztrmmkernel_rvv_v1x4.c View File

@@ -28,27 +28,27 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT vle32_v_f32m2
#define VSEV_FLOAT vse32_v_f32m2
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define VFMVVF_FLOAT vfmv_v_f_f32m2
#define VFMACCVF_FLOAT vfmacc_vf_f32m2
#define VFNMSACVF_FLOAT vfnmsac_vf_f32m2
#define VFMULVF_FLOAT vfmul_vf_f32m2
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLEV_FLOAT __riscv_vle32_v_f32m2
#define VSEV_FLOAT __riscv_vse32_v_f32m2
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f32m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f32m2
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f32m2
#define VFMULVF_FLOAT __riscv_vfmul_vf_f32m2
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT vle64_v_f64m2
#define VSEV_FLOAT vse64_v_f64m2
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define VFMVVF_FLOAT vfmv_v_f_f64m2
#define VFMACCVF_FLOAT vfmacc_vf_f64m2
#define VFNMSACVF_FLOAT vfnmsac_vf_f64m2
#define VFMULVF_FLOAT vfmul_vf_f64m2
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLEV_FLOAT __riscv_vle64_v_f64m2
#define VSEV_FLOAT __riscv_vse64_v_f64m2
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define VFMVVF_FLOAT __riscv_vfmv_v_f_f64m2
#define VFMACCVF_FLOAT __riscv_vfmacc_vf_f64m2
#define VFNMSACVF_FLOAT __riscv_vfnmsac_vf_f64m2
#define VFMULVF_FLOAT __riscv_vfmul_vf_f64m2
#endif

#if defined(NN) || defined(NT) || defined(TN) || defined(TT)


+ 18
- 18
kernel/riscv64/ztrsm_lncopy_rvv_v1.c View File

@@ -29,25 +29,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define VSSEG2_FLOAT_M vsseg2e32_v_f32m2_m
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT vid_v_u32m2
#define VMSLTU_VX_UINT vmsltu_vx_u32m2_b16
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define VSSEG2_FLOAT_M __riscv_vsseg2e32_v_f32m2_m
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT __riscv_vid_v_u32m2
#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u32m2_b16
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define VSSEG2_FLOAT_M vsseg2e64_v_f64m2_m
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT vid_v_u64m2
#define VMSLTU_VX_UINT vmsltu_vx_u64m2_b32
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define VSSEG2_FLOAT_M __riscv_vsseg2e64_v_f64m2_m
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT __riscv_vid_v_u64m2
#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u64m2_b32

#endif



+ 18
- 18
kernel/riscv64/ztrsm_ltcopy_rvv_v1.c View File

@@ -29,25 +29,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define VSSEG2_FLOAT_M vsseg2e32_v_f32m2_m
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT vid_v_u32m2
#define VMSGTU_VX_UINT vmsgtu_vx_u32m2_b16
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define VSSEG2_FLOAT_M __riscv_vsseg2e32_v_f32m2_m
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT __riscv_vid_v_u32m2
#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u32m2_b16
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define VSSEG2_FLOAT_M vsseg2e64_v_f64m2_m
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT vid_v_u64m2
#define VMSGTU_VX_UINT vmsgtu_vx_u64m2_b32
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define VSSEG2_FLOAT_M __riscv_vsseg2e64_v_f64m2_m
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT __riscv_vid_v_u64m2
#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32
#endif

int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT *b){


+ 18
- 18
kernel/riscv64/ztrsm_uncopy_rvv_v1.c View File

@@ -30,25 +30,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLSSEG2_FLOAT vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define VSSEG2_FLOAT_M vsseg2e32_v_f32m2_m
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT vid_v_u32m2
#define VMSGTU_VX_UINT vmsgtu_vx_u32m2_b16
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLSSEG2_FLOAT __riscv_vlsseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define VSSEG2_FLOAT_M __riscv_vsseg2e32_v_f32m2_m
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT __riscv_vid_v_u32m2
#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u32m2_b16
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLSSEG2_FLOAT vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define VSSEG2_FLOAT_M vsseg2e64_v_f64m2_m
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT vid_v_u64m2
#define VMSGTU_VX_UINT vmsgtu_vx_u64m2_b32
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLSSEG2_FLOAT __riscv_vlsseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define VSSEG2_FLOAT_M __riscv_vsseg2e64_v_f64m2_m
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT __riscv_vid_v_u64m2
#define VMSGTU_VX_UINT __riscv_vmsgtu_vx_u64m2_b32
#endif




+ 18
- 18
kernel/riscv64/ztrsm_utcopy_rvv_v1.c View File

@@ -29,25 +29,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "common.h"

#if !defined(DOUBLE)
#define VSETVL(n) vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLSEG2_FLOAT vlseg2e32_v_f32m2
#define VSSEG2_FLOAT vsseg2e32_v_f32m2
#define VSSEG2_FLOAT_M vsseg2e32_v_f32m2_m
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT vid_v_u32m2
#define VMSLTU_VX_UINT vmsltu_vx_u32m2_b16
#define VSETVL(n) __riscv_vsetvl_e32m2(n)
#define FLOAT_V_T vfloat32m2_t
#define VLSEG2_FLOAT __riscv_vlseg2e32_v_f32m2
#define VSSEG2_FLOAT __riscv_vsseg2e32_v_f32m2
#define VSSEG2_FLOAT_M __riscv_vsseg2e32_v_f32m2_m
#define VBOOL_T vbool16_t
#define UINT_V_T vuint32m2_t
#define VID_V_UINT __riscv_vid_v_u32m2
#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u32m2_b16
#else
#define VSETVL(n) vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLSEG2_FLOAT vlseg2e64_v_f64m2
#define VSSEG2_FLOAT vsseg2e64_v_f64m2
#define VSSEG2_FLOAT_M vsseg2e64_v_f64m2_m
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT vid_v_u64m2
#define VMSLTU_VX_UINT vmsltu_vx_u64m2_b32
#define VSETVL(n) __riscv_vsetvl_e64m2(n)
#define FLOAT_V_T vfloat64m2_t
#define VLSEG2_FLOAT __riscv_vlseg2e64_v_f64m2
#define VSSEG2_FLOAT __riscv_vsseg2e64_v_f64m2
#define VSSEG2_FLOAT_M __riscv_vsseg2e64_v_f64m2_m
#define VBOOL_T vbool32_t
#define UINT_V_T vuint64m2_t
#define VID_V_UINT __riscv_vid_v_u64m2
#define VMSLTU_VX_UINT __riscv_vmsltu_vx_u64m2_b32
#endif




Loading…
Cancel
Save