diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index d7e75bb97..f4a93aa1b 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -240,3 +240,5 @@ In chronological order: * Marek Michalowski * [2025-01-21] Add thread throttling profile for SGEMV on `NEOVERSEV1` +* Ye Tao + * [2025-02-03] Optimize SBGEMM kernel on NEOVERSEV1 diff --git a/Makefile.arm64 b/Makefile.arm64 index 2909a83e0..bea905f58 100644 --- a/Makefile.arm64 +++ b/Makefile.arm64 @@ -101,7 +101,7 @@ ifeq ($(CORE), NEOVERSEV1) ifeq (1, $(filter 1,$(GCCVERSIONGTEQ7) $(ISCLANG))) ifeq (1, $(filter 1,$(GCCVERSIONGTEQ10) $(ISCLANG))) ifeq (1, $(filter 1,$(GCCMINORVERSIONGTEQ4) $(GCCVERSIONGTEQ11) $(ISCLANG))) -CCOMMON_OPT += -march=armv8.4-a+sve +CCOMMON_OPT += -march=armv8.4-a+sve+bf16 ifeq (1, $(ISCLANG)) CCOMMON_OPT += -mtune=cortex-x1 else @@ -111,7 +111,7 @@ ifneq ($(F_COMPILER), NAG) FCOMMON_OPT += -march=armv8.4-a -mtune=neoverse-v1 endif else -CCOMMON_OPT += -march=armv8.4-a+sve +CCOMMON_OPT += -march=armv8.4-a+sve+bf16 ifneq ($(CROSS), 1) CCOMMON_OPT += -mtune=native endif diff --git a/kernel/arm64/KERNEL.NEOVERSEV1 b/kernel/arm64/KERNEL.NEOVERSEV1 index 859466409..8845e6860 100644 --- a/kernel/arm64/KERNEL.NEOVERSEV1 +++ b/kernel/arm64/KERNEL.NEOVERSEV1 @@ -2,3 +2,17 @@ include $(KERNELDIR)/KERNEL.ARMV8SVE SGEMVTKERNEL = gemv_t_sve_v1x3.c DGEMVTKERNEL = gemv_t_sve_v1x3.c +ifeq ($(BUILD_BFLOAT16), 1) +SBGEMM_BETA = sbgemm_beta_neoversev1.c +SBGEMMKERNEL = sbgemm_kernel_$(SBGEMM_UNROLL_M)x$(SBGEMM_UNROLL_N)_neoversev1.c +ifneq ($(SBGEMM_UNROLL_M), $(SBGEMM_UNROLL_N)) +SBGEMMINCOPY = sbgemm_ncopy_$(SBGEMM_UNROLL_M)_neoversev1.c +SBGEMMITCOPY = sbgemm_tcopy_$(SBGEMM_UNROLL_M)_neoversev1.c +SBGEMMINCOPYOBJ = sbgemm_incopy$(TSUFFIX).$(SUFFIX) +SBGEMMITCOPYOBJ = sbgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +SBGEMMONCOPY = sbgemm_ncopy_$(SBGEMM_UNROLL_N)_neoversev1.c +SBGEMMOTCOPY = sbgemm_tcopy_$(SBGEMM_UNROLL_N)_neoversev1.c +SBGEMMONCOPYOBJ = sbgemm_oncopy$(TSUFFIX).$(SUFFIX) +SBGEMMOTCOPYOBJ = sbgemm_otcopy$(TSUFFIX).$(SUFFIX) +endif \ No newline at end of file diff --git a/kernel/arm64/sbgemm_beta_neoversev1.c b/kernel/arm64/sbgemm_beta_neoversev1.c new file mode 100644 index 000000000..572d499d7 --- /dev/null +++ b/kernel/arm64/sbgemm_beta_neoversev1.c @@ -0,0 +1,83 @@ +/*************************************************************************** + * Copyright (c) 2024, The OpenBLAS Project + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the OpenBLAS project nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * *****************************************************************************/ + +#include "common.h" + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT beta, IFLOAT *dummy2, + BLASLONG dummy3, IFLOAT *dummy4, BLASLONG dummy5, FLOAT *c, + BLASLONG ldc) { + + BLASLONG i, j; + BLASLONG chunk, remain; + FLOAT *c_offset1, *c_offset; + c_offset = c; + chunk = m >> 3; + remain = m & 7; + if (beta == ZERO) { + for (j = n; j > 0; j--) { + c_offset1 = c_offset; + c_offset += ldc; + for (i = chunk; i > 0; i--) { + *(c_offset1 + 0) = ZERO; + *(c_offset1 + 1) = ZERO; + *(c_offset1 + 2) = ZERO; + *(c_offset1 + 3) = ZERO; + *(c_offset1 + 4) = ZERO; + *(c_offset1 + 5) = ZERO; + *(c_offset1 + 6) = ZERO; + *(c_offset1 + 7) = ZERO; + c_offset1 += 8; + } + for (i = remain; i > 0; i--) { + *c_offset1 = ZERO; + c_offset1++; + } + } + } else { + for (j = n; j > 0; j--) { + c_offset1 = c_offset; + c_offset += ldc; + for (i = chunk; i > 0; i--) { + *(c_offset1 + 0) *= beta; + *(c_offset1 + 1) *= beta; + *(c_offset1 + 2) *= beta; + *(c_offset1 + 3) *= beta; + *(c_offset1 + 4) *= beta; + *(c_offset1 + 5) *= beta; + *(c_offset1 + 6) *= beta; + *(c_offset1 + 7) *= beta; + c_offset1 += 8; + } + for (i = remain; i > 0; i--) { + *c_offset1 *= beta; + c_offset1++; + } + } + } + return 0; +}; diff --git a/kernel/arm64/sbgemm_kernel_4x4_neoversev1.c b/kernel/arm64/sbgemm_kernel_4x4_neoversev1.c new file mode 100644 index 000000000..889b5fc5b --- /dev/null +++ b/kernel/arm64/sbgemm_kernel_4x4_neoversev1.c @@ -0,0 +1,46 @@ +/*************************************************************************** + * Copyright (c) 2024-2025, The OpenBLAS Project + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the OpenBLAS project nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ * *****************************************************************************/
+
+#include <arm_sve.h>
+
+#include "common.h"
+
+#define ALPHA_ONE
+#include "sbgemm_kernel_4x4_neoversev1_impl.c"
+#undef ALPHA_ONE
+#include "sbgemm_kernel_4x4_neoversev1_impl.c"
+
+int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B,
+          FLOAT *C, BLASLONG ldc) {
+  if (alpha == 1.0f)
+    return sbgemm_kernel_neoversev1_alpha_one(m, n, k, alpha, A, B, C, ldc);
+  else
+    return sbgemm_kernel_neoversev1_alpha(m, n, k, alpha, A, B, C, ldc);
+  return 0;
+}
+
diff --git a/kernel/arm64/sbgemm_kernel_4x4_neoversev1_impl.c b/kernel/arm64/sbgemm_kernel_4x4_neoversev1_impl.c
new file mode 100644
index 000000000..b6d9e9816
--- /dev/null
+++ b/kernel/arm64/sbgemm_kernel_4x4_neoversev1_impl.c
@@ -0,0 +1,414 @@
+/***************************************************************************
+ * Copyright (c) 2024-2025, The OpenBLAS Project
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of the OpenBLAS project nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ * *****************************************************************************/ + +#include + +#include "common.h" + +#define INIT_C(M, N) mc##M##N = svdup_f32(0); + +#define MATMUL(M, N) mc##M##N = svbfmmla(mc##M##N, ma##M, mb##N); + +#define INIT_C_4x4 \ + do { \ + INIT_C(0, 0); \ + INIT_C(0, 1); \ + INIT_C(1, 0); \ + INIT_C(1, 1); \ + } while (0); + +#ifdef ALPHA_ONE +#define UPDATE_C(PG, PTR, DST, SRC) \ + do { \ + DST = svld1_f32((PG), (PTR)); \ + DST = svadd_z((PG), SRC, DST); \ + svst1_f32((PG), (PTR), DST); \ + } while (0); +#else +#define UPDATE_C(PG, PTR, DST, SRC) \ + do { \ + DST = svld1_f32((PG), (PTR)); \ + DST = svmad_z((PG), svalpha, SRC, DST); \ + svst1_f32((PG), (PTR), DST); \ + } while (0); +#endif + +#define ZIP_EVEN_ELEMENTS(PG, mc0, mc1, tmp, vc) \ + do { \ + (tmp) = svuzp1_f32((mc0), (mc1)); \ + (vc) = svcompact_f32((PG), (tmp)); \ + } while (0) + +#define ZIP_ODD_ELEMENTS(PG, mc0, mc1, tmp, vc) \ + do { \ + (tmp) = svuzp2_f32((mc0), (mc1)); \ + (vc) = svcompact_f32((PG), (tmp)); \ + } while (0) + +#define ACCUMULATE_LAST4_TO_FIRST4(M, N, TMP) \ + do { \ + TMP = svext_f32(mc##M##N, mc##M##N, 4); \ + mc##M##N = svadd_f32_z(svptrue_b32(), mc##M##N, (TMP)); \ + } while (0) + +#ifdef ALPHA_ONE +int sbgemm_kernel_neoversev1_alpha_one(BLASLONG m, BLASLONG n, BLASLONG k, + FLOAT alpha, IFLOAT *A, IFLOAT *B, + FLOAT *C, BLASLONG ldc) +#else +int sbgemm_kernel_neoversev1_alpha(BLASLONG m, BLASLONG n, BLASLONG k, + FLOAT alpha, IFLOAT *A, IFLOAT *B, FLOAT *C, + BLASLONG ldc) +#endif +{ + + BLASLONG pad_k = (k + 7) & ~7; + svbfloat16_t ma0, ma1, mb0, mb1; + svfloat32_t mc00, mc01, mc10, mc11, vc0, vc1, vc2, vc3, oc0, oc1, oc2, oc3; + svfloat32_t tmp; + svfloat32_t svalpha = svdup_f32(alpha); + + svbool_t pg16_all = svptrue_b16(); + + svbool_t pg32_first_1 = svwhilelt_b32(0, 1); + svbool_t pg32_first_2 = svwhilelt_b32(0, 2); + svbool_t pg32_first_4 = svwhilelt_b32(0, 4); + + svbool_t pg32_select_first_2_per_quadword = svdupq_b32(1, 1, 0, 0); + + bfloat16_t *ptr_a = (bfloat16_t *)A; + bfloat16_t *ptr_b = (bfloat16_t *)B; + FLOAT *ptr_c = C; + + bfloat16_t *ptr_a0; + bfloat16_t *ptr_b0; + FLOAT *ptr_c0, *ptr_c1, *ptr_c2, *ptr_c3; + + for (BLASLONG j = 0; j < n / 4; j++) { + ptr_c0 = ptr_c; + ptr_c1 = ptr_c0 + ldc; + ptr_c2 = ptr_c1 + ldc; + ptr_c3 = ptr_c2 + ldc; + ptr_c += 4 * ldc; + ptr_a = (bfloat16_t *)A; + + for (BLASLONG i = 0; i < m / 4; i++) { + ptr_a0 = ptr_a; + ptr_a += 4 * pad_k; + + ptr_b0 = ptr_b; + + INIT_C_4x4; + + for (BLASLONG p = 0; p < pad_k; p += 8) { + ma0 = svld1_bf16(pg16_all, ptr_a0); + ma1 = svld1_bf16(pg16_all, ptr_a0 + 16); + + mb0 = svld1_bf16(pg16_all, ptr_b0); + mb1 = svld1_bf16(pg16_all, ptr_b0 + 16); + + MATMUL(0, 0); + MATMUL(0, 1); + MATMUL(1, 0); + MATMUL(1, 1); + + ptr_a0 += 32; + ptr_b0 += 32; + } + + ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); + ACCUMULATE_LAST4_TO_FIRST4(0, 1, tmp); + ACCUMULATE_LAST4_TO_FIRST4(1, 0, tmp); + ACCUMULATE_LAST4_TO_FIRST4(1, 1, tmp); + + ZIP_EVEN_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc0); + ZIP_ODD_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc1); + + ZIP_EVEN_ELEMENTS(pg32_select_first_2_per_quadword, mc01, mc11, tmp, vc2); + ZIP_ODD_ELEMENTS(pg32_select_first_2_per_quadword, mc01, mc11, tmp, vc3); + + UPDATE_C(pg32_first_4, ptr_c0, oc0, vc0); + UPDATE_C(pg32_first_4, ptr_c1, oc1, vc1); + UPDATE_C(pg32_first_4, ptr_c2, oc2, vc2) + UPDATE_C(pg32_first_4, ptr_c3, oc3, vc3) + + ptr_c0 += 4; + ptr_c1 += 4; + ptr_c2 += 4; + ptr_c3 += 4; + } + + if (m & 2) { + ptr_a0 = ptr_a; + 
ptr_a += 2 * pad_k; + + ptr_b0 = ptr_b; + INIT_C(0, 0); + INIT_C(0, 1); + for (BLASLONG p = 0; p < pad_k; p += 8) { + ma0 = svld1_bf16(pg16_all, ptr_a0); + mb0 = svld1_bf16(pg16_all, ptr_b0); + mb1 = svld1_bf16(pg16_all, ptr_b0 + 16); + + MATMUL(0, 0); + MATMUL(0, 1); + + ptr_a0 += 16; + ptr_b0 += 32; + } + + ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); + ACCUMULATE_LAST4_TO_FIRST4(0, 1, tmp); + + vc0 = svuzp1(mc00, mc00); + vc1 = svuzp2(mc00, mc00); + vc2 = svuzp1(mc01, mc01); + vc3 = svuzp2(mc01, mc01); + + UPDATE_C(pg32_first_2, ptr_c0, oc0, vc0); + UPDATE_C(pg32_first_2, ptr_c1, oc1, vc1); + UPDATE_C(pg32_first_2, ptr_c2, oc2, vc2); + UPDATE_C(pg32_first_2, ptr_c3, oc3, vc3); + + ptr_c0 += 2; + ptr_c1 += 2; + ptr_c2 += 2; + ptr_c3 += 2; + } + + if (m & 1) { + ptr_a0 = ptr_a; + ptr_b0 = ptr_b; + + INIT_C(0, 0); + INIT_C(0, 1); + for (BLASLONG p = 0; p < pad_k; p += 8) { + ma0 = svld1_bf16(pg16_all, ptr_a0); + mb0 = svld1_bf16(pg16_all, ptr_b0); + mb1 = svld1_bf16(pg16_all, ptr_b0 + 16); + + MATMUL(0, 0); + MATMUL(0, 1); + + ptr_a0 += 16; + ptr_b0 += 32; + } + + ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); + ACCUMULATE_LAST4_TO_FIRST4(0, 1, tmp); + + // use compact is more straightforward + vc1 = svuzp2(mc00, mc00); + vc3 = svuzp2(mc01, mc01); + + UPDATE_C(pg32_first_1, ptr_c0, oc0, mc00); + UPDATE_C(pg32_first_1, ptr_c1, oc1, vc1); + UPDATE_C(pg32_first_1, ptr_c2, oc2, mc01); + UPDATE_C(pg32_first_1, ptr_c3, oc3, vc3); + } + + ptr_b += 4 * pad_k; + } + + if (n & 2) { + ptr_c0 = ptr_c; + ptr_c1 = ptr_c0 + ldc; + ptr_c += 2 * ldc; + ptr_a = (bfloat16_t *)A; + + for (BLASLONG i = 0; i < m / 4; i++) { + ptr_a0 = ptr_a; + ptr_a += 4 * pad_k; + + ptr_b0 = ptr_b; + + INIT_C(0, 0); + INIT_C(1, 0); + + for (BLASLONG p = 0; p < pad_k; p += 8) { + ma0 = svld1_bf16(pg16_all, ptr_a0); + ma1 = svld1_bf16(pg16_all, ptr_a0 + 16); + + mb0 = svld1_bf16(pg16_all, ptr_b0); + + MATMUL(0, 0); + MATMUL(1, 0); + + ptr_a0 += 32; + ptr_b0 += 16; + } + + ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); + ACCUMULATE_LAST4_TO_FIRST4(1, 0, tmp); + + ZIP_EVEN_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc0); + ZIP_ODD_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc2); + + UPDATE_C(pg32_first_4, ptr_c0, oc0, vc0); + UPDATE_C(pg32_first_4, ptr_c1, oc2, vc2); + + ptr_c0 += 4; + ptr_c1 += 4; + } + + if (m & 2) { + ptr_a0 = ptr_a; + ptr_a += 2 * pad_k; + ptr_b0 = ptr_b; + + INIT_C(0, 0); + + for (BLASLONG p = 0; p < pad_k; p += 8) { + ma0 = svld1_bf16(pg16_all, ptr_a0); + mb0 = svld1_bf16(pg16_all, ptr_b0); + + MATMUL(0, 0); + + ptr_a0 += 16; + ptr_b0 += 16; + } + + ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); + vc0 = svuzp1(mc00, mc00); + vc1 = svuzp2(mc00, mc00); + + UPDATE_C(pg32_first_2, ptr_c0, oc0, vc0); + UPDATE_C(pg32_first_2, ptr_c1, oc1, vc1); + + ptr_c0 += 2; + ptr_c1 += 2; + } + + if (m & 1) { + ptr_a0 = ptr_a; + ptr_b0 = ptr_b; + INIT_C(0, 0); + for (BLASLONG p = 0; p < pad_k; p += 8) { + ma0 = svld1_bf16(pg16_all, ptr_a0); + mb0 = svld1_bf16(pg16_all, ptr_b0); + MATMUL(0, 0); + ptr_a0 += 16; + ptr_b0 += 16; + } + + ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); + vc1 = svuzp2(mc00, mc00); + + UPDATE_C(pg32_first_1, ptr_c0, oc0, mc00); + UPDATE_C(pg32_first_1, ptr_c1, oc1, vc1); + } + + ptr_b += 2 * pad_k; + } + + if (n & 1) { // TODO: this case seems a overhead. find out whether it's in our + // case. 
+ ptr_c0 = ptr_c; + ptr_a = (bfloat16_t *)A; + + for (BLASLONG i = 0; i < m / 4; i++) { + ptr_a0 = ptr_a; + ptr_a += 4 * pad_k; + + ptr_b0 = ptr_b; + + INIT_C(0, 0); + INIT_C(1, 0); + + for (BLASLONG p = 0; p < pad_k; p += 8) { + ma0 = svld1_bf16(pg16_all, ptr_a0); + ma1 = svld1_bf16(pg16_all, ptr_a0 + 16); + + mb0 = svld1_bf16(pg16_all, ptr_b0); + + MATMUL(0, 0); + MATMUL(1, 0); + + ptr_a0 += 32; + ptr_b0 += 16; + } + + ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); + ACCUMULATE_LAST4_TO_FIRST4(1, 0, tmp); + + ZIP_EVEN_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc0); + + UPDATE_C(pg32_first_4, ptr_c0, oc0, vc0); + + ptr_c0 += 4; + } + + if (m & 2) { + ptr_a0 = ptr_a; + ptr_a += 2 * pad_k; + ptr_b0 = ptr_b; + + INIT_C(0, 0); + + for (BLASLONG p = 0; p < pad_k; p += 8) { + ma0 = svld1_bf16(pg16_all, ptr_a0); + mb0 = svld1_bf16(pg16_all, ptr_b0); + + MATMUL(0, 0); + + ptr_a0 += 16; + ptr_b0 += 16; + } + + ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); + + vc0 = svuzp1(mc00, mc00); + + UPDATE_C(pg32_first_2, ptr_c0, oc0, vc0); + + ptr_c0 += 2; + } + + if (m & 1) { + ptr_a0 = ptr_a; + ptr_b0 = ptr_b; + + INIT_C(0, 0); + for (BLASLONG p = 0; p < pad_k; p += 8) { + + ma0 = svld1_bf16(pg16_all, ptr_a0); + mb0 = svld1_bf16(pg16_all, ptr_b0); + + MATMUL(0, 0); + ptr_a0 += 16; + ptr_b0 += 16; + } + + ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); + + UPDATE_C(pg32_first_1, ptr_c0, oc0, mc00); + } + } + + return 0; +} diff --git a/kernel/arm64/sbgemm_ncopy_4_neoversev1.c b/kernel/arm64/sbgemm_ncopy_4_neoversev1.c new file mode 100644 index 000000000..100f5c68e --- /dev/null +++ b/kernel/arm64/sbgemm_ncopy_4_neoversev1.c @@ -0,0 +1,148 @@ +/*************************************************************************** + * Copyright (c) 2024-2025, The OpenBLAS Project + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the OpenBLAS project nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * *****************************************************************************/ + +#include + +#include "common.h" + +int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) { + IFLOAT *a_offset; + IFLOAT *a_offsetx[4]; + IFLOAT *b_offset; + a_offset = a; + b_offset = b; + + bfloat16_t zero_value_bf16; + *((uint16_t *)(&zero_value_bf16)) = 0; + + svbool_t pg16_all = svptrue_b16(); // 16 elements for sve-256 machine. + svbool_t pg16_first_8 = svwhilelt_b16(0, 8); + + svbfloat16_t v0, v1, v2, v3; + svuint64_t t0, t1; + + BLASLONG rest = m & 7; + svbool_t pg16_rest = svwhilelt_b16_s32(0, rest); + + for (BLASLONG j = 0; j < n / 4; j++) { + a_offsetx[0] = a_offset; + a_offsetx[1] = a_offsetx[0] + lda; + a_offsetx[2] = a_offsetx[1] + lda; + a_offsetx[3] = a_offsetx[2] + lda; + a_offset += 4 * lda; + + for (BLASLONG i = 0; i < m / 8; i++) { + v0 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[0]); + v1 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[1]); + v2 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[2]); + v3 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[3]); + + t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1)); + t1 = svzip1_u64(svreinterpret_u64_bf16(v2), svreinterpret_u64_bf16(v3)); + + svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0)); + svst1_bf16(pg16_all, (bfloat16_t *)b_offset + 16, + svreinterpret_bf16_u64(t1)); + + a_offsetx[0] += 8; + a_offsetx[1] += 8; + a_offsetx[2] += 8; + a_offsetx[3] += 8; + + b_offset += 32; + } + + if (rest) { // remainder along k dim + v0 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[0]); + v1 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[1]); + v2 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[2]); + v3 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[3]); + + t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1)); + t1 = svzip1_u64(svreinterpret_u64_bf16(v2), svreinterpret_u64_bf16(v3)); + + svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0)); + svst1_bf16(pg16_all, (bfloat16_t *)b_offset + 16, + svreinterpret_bf16_u64(t1)); + + b_offset += 32; + } + } + + if (n & 2) { + a_offsetx[0] = a_offset; + a_offsetx[1] = a_offsetx[0] + lda; + a_offset += 2 * lda; + + for (BLASLONG i = 0; i < m / 8; i++) { + v0 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[0]); + v1 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[1]); + + t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1)); + svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0)); + + b_offset += 16; + a_offsetx[0] += 8; + a_offsetx[1] += 8; + } + + if (rest) { // remainder along k dim + v0 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[0]); + v1 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[1]); + + t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1)); + svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0)); + + b_offset += 16; + } + } + + if (n & 1) { + a_offsetx[0] = a_offset; + + for (BLASLONG i = 0; i < m / 8; i++) { + v0 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[0]); + v1 = svdup_bf16(zero_value_bf16); + + t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1)); + svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0)); + + b_offset += 16; + a_offsetx[0] += 8; + } + + if (rest) { // remainder along k dim + v0 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[0]); + v1 = svdup_bf16(zero_value_bf16); + t0 = svzip1_u64(svreinterpret_u64_bf16(v0), 
svreinterpret_u64_bf16(v1)); + svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0)); + } + } + + return 0; +} diff --git a/kernel/arm64/sbgemm_tcopy_4_neoversev1.c b/kernel/arm64/sbgemm_tcopy_4_neoversev1.c new file mode 100644 index 000000000..140e8f7ed --- /dev/null +++ b/kernel/arm64/sbgemm_tcopy_4_neoversev1.c @@ -0,0 +1,361 @@ +/*************************************************************************** + * Copyright (c) 2024-2025, The OpenBLAS Project + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name of the OpenBLAS project nor the names of + * its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * *****************************************************************************/ +#include "common.h" +#include +#include + +int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) { + BLASLONG pad_m = ((m + 7) & ~7); + BLASLONG rest = (m & 7); // rest along m dim + + IFLOAT *a_offset; + IFLOAT *a_offset0, *a_offset1, *a_offset2, *a_offset3; + IFLOAT *a_offset4, *a_offset5, *a_offset6, *a_offset7; + + IFLOAT *b_offset; + IFLOAT *b_offset0, *b_offset1; + + a_offset = a; + b_offset = b; + + svuint16_t c0, c1, c2, c3, c4, c5, c6, c7; + svuint16_t t0, t1, t2, t3; + svuint32_t m00, m01, m10, m11; + svuint64_t st_offsets_0, st_offsets_1; + + svbool_t pg16_first_4 = svwhilelt_b16(0, 4); + svbool_t pg16_first_8 = svwhilelt_b16(0, 8); + + svbool_t pg64_first_4 = svwhilelt_b64(0, 4); + + u_int32_t sizeof_u64 = 8; + u_int64_t _st_offsets_0[4] = { + 0 * sizeof_u64, + 1 * sizeof_u64, + 4 * sizeof_u64, + 5 * sizeof_u64, + }; + + u_int64_t _st_offsets_1[4] = { + 2 * sizeof_u64, + 3 * sizeof_u64, + 6 * sizeof_u64, + 7 * sizeof_u64, + }; + + st_offsets_0 = svld1_u64(pg64_first_4, _st_offsets_0); + st_offsets_1 = svld1_u64(pg64_first_4, _st_offsets_1); + + for (BLASLONG j = 0; j < n / 8; j++) { + a_offset0 = a_offset; + a_offset1 = a_offset0 + lda; + a_offset2 = a_offset1 + lda; + a_offset3 = a_offset2 + lda; + a_offset4 = a_offset3 + lda; + a_offset5 = a_offset4 + lda; + a_offset6 = a_offset5 + lda; + a_offset7 = a_offset6 + lda; + a_offset += 8; + + b_offset0 = b_offset; + b_offset1 = b_offset0 + 4 * pad_m; + + b_offset += 8 * pad_m; + for (BLASLONG i = 0; i < m / 8; i++) { + // transpose 8x8 matrix and pack into two 4x8 block consists of two 2x4 + // small blocks + c0 = svld1_u16(pg16_first_8, a_offset0); + c1 = svld1_u16(pg16_first_8, a_offset1); + c2 = svld1_u16(pg16_first_8, a_offset2); + c3 = svld1_u16(pg16_first_8, a_offset3); + c4 = svld1_u16(pg16_first_8, a_offset4); + c5 = svld1_u16(pg16_first_8, a_offset5); + c6 = svld1_u16(pg16_first_8, a_offset6); + c7 = svld1_u16(pg16_first_8, a_offset7); + + t0 = svzip1_u16(c0, c1); + t1 = svzip1_u16(c2, c3); + t2 = svzip1_u16(c4, c5); + t3 = svzip1_u16(c6, c7); + + m00 = svzip1_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1)); + m10 = svzip2_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1)); + m01 = svzip1_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3)); + m11 = svzip2_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3)); + + svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset0, + st_offsets_0, svreinterpret_u64_u32(m00)); + svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset0, + st_offsets_1, svreinterpret_u64_u32(m01)); + svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset1, + st_offsets_0, svreinterpret_u64_u32(m10)); + svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset1, + st_offsets_1, svreinterpret_u64_u32(m11)); + + a_offset0 += 8 * lda; + a_offset1 += 8 * lda; + a_offset2 += 8 * lda; + a_offset3 += 8 * lda; + a_offset4 += 8 * lda; + a_offset5 += 8 * lda; + a_offset6 += 8 * lda; + a_offset7 += 8 * lda; + + b_offset0 += 32; + b_offset1 += 32; + } + + if (rest) { + c0 = svld1_u16(pg16_first_8, a_offset0); + c1 = (rest >= 2 ? svld1_u16(pg16_first_8, a_offset1) : svdup_u16(0)); + c2 = (rest >= 3 ? svld1_u16(pg16_first_8, a_offset2) : svdup_u16(0)); + c3 = (rest >= 4 ? svld1_u16(pg16_first_8, a_offset3) : svdup_u16(0)); + c4 = (rest >= 5 ? svld1_u16(pg16_first_8, a_offset4) : svdup_u16(0)); + c5 = (rest >= 6 ? 
svld1_u16(pg16_first_8, a_offset5) : svdup_u16(0)); + c6 = (rest == 7 ? svld1_u16(pg16_first_8, a_offset6) : svdup_u16(0)); + c7 = (svdup_u16(0)); + + t0 = svzip1_u16(c0, c1); + t1 = svzip1_u16(c2, c3); + t2 = svzip1_u16(c4, c5); + t3 = svzip1_u16(c6, c7); + + m00 = svzip1_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1)); + m10 = svzip2_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1)); + m01 = svzip1_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3)); + m11 = svzip2_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3)); + + svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset0, + st_offsets_0, svreinterpret_u64_u32(m00)); + svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset0, + st_offsets_1, svreinterpret_u64_u32(m01)); + svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset1, + st_offsets_0, svreinterpret_u64_u32(m10)); + svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset1, + st_offsets_1, svreinterpret_u64_u32(m11)); + } + } + + if (n & 4) { + a_offset0 = a_offset; + a_offset1 = a_offset0 + lda; + a_offset2 = a_offset1 + lda; + a_offset3 = a_offset2 + lda; + a_offset4 = a_offset3 + lda; + a_offset5 = a_offset4 + lda; + a_offset6 = a_offset5 + lda; + a_offset7 = a_offset6 + lda; + a_offset += 4; + + b_offset0 = b_offset; + b_offset += 4 * pad_m; + + for (BLASLONG i = 0; i < m / 8; i++) { + // transpose 8x8 matrix and pack into two 4x8 block consists of two 2x4 + // small blocks + c0 = svld1_u16(pg16_first_4, a_offset0); + c1 = svld1_u16(pg16_first_4, a_offset1); + c2 = svld1_u16(pg16_first_4, a_offset2); + c3 = svld1_u16(pg16_first_4, a_offset3); + c4 = svld1_u16(pg16_first_4, a_offset4); + c5 = svld1_u16(pg16_first_4, a_offset5); + c6 = svld1_u16(pg16_first_4, a_offset6); + c7 = svld1_u16(pg16_first_4, a_offset7); + + t0 = svzip1_u16(c0, c1); + t1 = svzip1_u16(c2, c3); + t2 = svzip1_u16(c4, c5); + t3 = svzip1_u16(c6, c7); + + m00 = svzip1_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1)); + m01 = svzip1_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3)); + svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset0, + st_offsets_0, svreinterpret_u64_u32(m00)); + svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset0, + st_offsets_1, svreinterpret_u64_u32(m01)); + + a_offset0 += 8 * lda; + a_offset1 += 8 * lda; + a_offset2 += 8 * lda; + a_offset3 += 8 * lda; + a_offset4 += 8 * lda; + a_offset5 += 8 * lda; + a_offset6 += 8 * lda; + a_offset7 += 8 * lda; + + b_offset0 += 32; + } + + if (rest) { + c0 = svld1_u16(pg16_first_4, a_offset0); // rest >= 1 + c1 = (rest >= 2 ? svld1_u16(pg16_first_4, a_offset1) : svdup_u16(0)); + c2 = (rest >= 3 ? svld1_u16(pg16_first_4, a_offset2) : svdup_u16(0)); + c3 = (rest >= 4 ? svld1_u16(pg16_first_4, a_offset3) : svdup_u16(0)); + c4 = (rest >= 5 ? svld1_u16(pg16_first_4, a_offset4) : svdup_u16(0)); + c5 = (rest >= 6 ? svld1_u16(pg16_first_4, a_offset5) : svdup_u16(0)); + c6 = (rest == 7 ? 
svld1_u16(pg16_first_4, a_offset6) : svdup_u16(0)); + c7 = (svdup_u16(0)); + + t0 = svzip1_u16(c0, c1); + t1 = svzip1_u16(c2, c3); + t2 = svzip1_u16(c4, c5); + t3 = svzip1_u16(c6, c7); + + m00 = svzip1_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1)); + m01 = svzip1_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3)); + + svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset0, + st_offsets_0, svreinterpret_u64_u32(m00)); + svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset0, + st_offsets_1, svreinterpret_u64_u32(m01)); + } + } + + if (n & 2) { + a_offset0 = a_offset; + a_offset1 = a_offset0 + lda; + a_offset2 = a_offset1 + lda; + a_offset3 = a_offset2 + lda; + a_offset4 = a_offset3 + lda; + a_offset5 = a_offset4 + lda; + a_offset6 = a_offset5 + lda; + a_offset7 = a_offset6 + lda; + a_offset += 2; + + b_offset0 = b_offset; + b_offset1 = b_offset0 + 8; + + b_offset += 2 * pad_m; + + for (BLASLONG i = 0; i < m / 8; i++) { + for (BLASLONG line = 0; line < 2; line++) { + b_offset0[line * 4] = a_offset0[line]; + b_offset0[line * 4 + 1] = a_offset1[line]; + b_offset0[line * 4 + 2] = a_offset2[line]; + b_offset0[line * 4 + 3] = a_offset3[line]; + + b_offset1[line * 4] = a_offset4[line]; + b_offset1[line * 4 + 1] = a_offset5[line]; + b_offset1[line * 4 + 2] = a_offset6[line]; + b_offset1[line * 4 + 3] = a_offset7[line]; + } + b_offset0 += 16; + b_offset1 += 16; + + a_offset0 += 8 * lda; + a_offset1 += 8 * lda; + a_offset2 += 8 * lda; + a_offset3 += 8 * lda; + a_offset4 += 8 * lda; + a_offset5 += 8 * lda; + a_offset6 += 8 * lda; + a_offset7 += 8 * lda; + } + + if (rest) { + for (BLASLONG line = 0; line < 2; line++) { + b_offset0[line * 4] = a_offset0[line]; + b_offset0[line * 4 + 1] = rest == 1 ? 0 : a_offset1[line]; + b_offset0[line * 4 + 2] = rest <= 2 ? 0 : a_offset2[line]; + b_offset0[line * 4 + 3] = rest <= 3 ? 0 : a_offset3[line]; + + b_offset1[line * 4] = rest <= 4 ? 0 : a_offset4[line]; + b_offset1[line * 4 + 1] = rest <= 5 ? 0 : a_offset5[line]; + b_offset1[line * 4 + 2] = rest <= 6 ? 0 : a_offset6[line]; + b_offset1[line * 4 + 3] = 0; + } + } + } + + if (n & 1) { + a_offset0 = a_offset; + a_offset1 = a_offset0 + lda; + a_offset2 = a_offset1 + lda; + a_offset3 = a_offset2 + lda; + a_offset4 = a_offset3 + lda; + a_offset5 = a_offset4 + lda; + a_offset6 = a_offset5 + lda; + a_offset7 = a_offset6 + lda; + + for (BLASLONG i = 0; i < m / 8; i++) { + b_offset[0] = a_offset0[0]; + b_offset[1] = a_offset1[0]; + b_offset[2] = a_offset2[0]; + b_offset[3] = a_offset3[0]; + + b_offset[4] = 0; + b_offset[5] = 0; + b_offset[6] = 0; + b_offset[7] = 0; + + b_offset[8] = a_offset4[0]; + b_offset[9] = a_offset5[0]; + b_offset[10] = a_offset6[0]; + b_offset[11] = a_offset7[0]; + + b_offset[12] = 0; + b_offset[13] = 0; + b_offset[14] = 0; + b_offset[15] = 0; + + b_offset += 16; + a_offset0 += 8 * lda; + a_offset1 += 8 * lda; + a_offset2 += 8 * lda; + a_offset3 += 8 * lda; + a_offset4 += 8 * lda; + a_offset5 += 8 * lda; + a_offset6 += 8 * lda; + a_offset7 += 8 * lda; + } + + if (rest) { + b_offset[0] = *a_offset0; + b_offset[1] = rest == 1 ? 0 : *a_offset1; + b_offset[2] = rest <= 2 ? 0 : *a_offset2; + b_offset[3] = rest <= 3 ? 0 : *a_offset3; + + b_offset[4] = 0; + b_offset[5] = 0; + b_offset[6] = 0; + b_offset[7] = 0; + + b_offset[8] = rest <= 4 ? 0 : *a_offset4; + b_offset[9] = rest <= 5 ? 0 : *a_offset5; + b_offset[10] = rest <= 6 ? 
0 : *a_offset6; + b_offset[11] = 0; + + b_offset[12] = 0; + b_offset[13] = 0; + b_offset[14] = 0; + b_offset[15] = 0; + } + } + + return 0; +} diff --git a/param.h b/param.h index 27743c6ef..36e6f619f 100644 --- a/param.h +++ b/param.h @@ -3554,6 +3554,13 @@ is a big desktop or server with abundant cache rather than a phone or embedded d #define GEMM_PREFERED_SIZE 8 #endif +#undef SBGEMM_ALIGN_K +#undef SBGEMM_DEFAULT_UNROLL_M +#undef SBGEMM_DEFAULT_UNROLL_N +#define SBGEMM_ALIGN_K 8 +#define SBGEMM_DEFAULT_UNROLL_M 4 +#define SBGEMM_DEFAULT_UNROLL_N 4 + #define SGEMM_DEFAULT_UNROLL_M 16 #define SGEMM_DEFAULT_UNROLL_N 8
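
Usage note (editor's addition, not part of the patch): the new SBGEMM path is only built when OpenBLAS targets Neoverse V1 with bfloat16 support enabled, and the Makefile.arm64 change above additionally requires a compiler that accepts -march=armv8.4-a+sve+bf16 (GCC 10.4+/11+ or clang, per the existing version checks), e.g.

    make TARGET=NEOVERSEV1 BUILD_BFLOAT16=1

Reading the kernel, it consumes K in groups of 8 (matching SBGEMM_ALIGN_K 8 and the zero-padding in the copy routines) and uses the SVE BFMMLA instruction to accumulate 2x2 float blocks, folding the upper 128-bit segment into the lower one (ACCUMULATE_LAST4_TO_FIRST4) before unzipping the results into C.

The sketch below is a minimal caller that exercises the kernel through the CBLAS interface. The cblas_sbgemm prototype and the bfloat16 storage type (a 16-bit value holding the upper half of an IEEE-754 float) are assumed from OpenBLAS's cblas.h, so treat it as illustrative rather than authoritative.

    /* test_sbgemm.c -- e.g. gcc test_sbgemm.c -lopenblas (include/library paths assumed) */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include "cblas.h"

    /* Truncating float -> bfloat16 conversion: keep the top 16 bits of the
     * single-precision bit pattern (exact for values such as 1.0f; real code
     * should round to nearest). */
    static bfloat16 f32_to_bf16(float x) {
      uint32_t u;
      memcpy(&u, &x, sizeof(u));
      return (bfloat16)(u >> 16);
    }

    int main(void) {
      enum { M = 4, N = 4, K = 8 };
      bfloat16 A[M * K], B[K * N];
      float C[M * N];

      for (int i = 0; i < M * K; i++) A[i] = f32_to_bf16(1.0f);
      for (int i = 0; i < K * N; i++) B[i] = f32_to_bf16(1.0f);
      for (int i = 0; i < M * N; i++) C[i] = 0.0f;

      /* C = 1.0 * A * B + 0.0 * C, column-major, no transpose:
       * every entry of C should come out equal to K. */
      cblas_sbgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, M, N, K,
                   1.0f, A, M, B, K, 0.0f, C, M);

      printf("C[0][0] = %.1f (expected %d)\n", (double)C[0], K);
      return 0;
    }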