Browse Source

Merge pull request #5235 from quickwritereader/issue_unaligned_ppc64le

Explicit unaligned vector load/stores in PPC64LE GEMV kernels
tags/v0.3.30
Martin Kroeker GitHub 9 months ago
parent
commit
1df8738f27
No known key found for this signature in database GPG Key ID: B5690EEEBB952194
2 changed files with 477 additions and 591 deletions
  1. +336
    -421
      kernel/power/sgemv_n.c
  2. +141
    -170
      kernel/power/sgemv_t.c

+ 336
- 421
kernel/power/sgemv_n.c View File

@@ -17,454 +17,369 @@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#if !defined(__VEC__) || !defined(__ALTIVEC__)
#include "../arm/gemv_n.c"

#else

#include "common.h"
#include <altivec.h>

#include "common.h"
#define NBMAX 4096

/* y[0:n] += x0*A[:,0] + ... + x3*A[:,3] + x4*A[:,4] + ... + x7*A[:,7],
 * where ap[0..3] point at columns 0..3 and columns 4..7 are reached by
 * adding lda4 (= 4*lda elements) to each pointer.
 *
 * n      number of rows to process; the caller in this file always passes
 *        NB, which is a multiple of 4 (NBMAX or m & -4 derived), so the
 *        4-wide loop covers n exactly.
 * ap     four column pointers into A
 * xo     eight consecutive x values
 * y      contiguous output vector
 * lda4   element offset from column k to column k+4
 * alpha  scalar multiplier (passed by pointer)
 *
 * vec_vsx_ld/vec_vsx_st are used instead of dereferencing casted
 * __vector float pointers: VSX loads/stores tolerate unaligned addresses,
 * whereas a vector-pointer dereference may be compiled to an
 * alignment-sensitive access and break on unaligned y or A.
 */
static void sgemv_kernel_4x8(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y,
                             BLASLONG lda4, FLOAT *alpha) {
  BLASLONG i;
  FLOAT *a0, *a1, *a2, *a3, *b0, *b1, *b2, *b3;
  FLOAT x0, x1, x2, x3, x4, x5, x6, x7;

  a0 = ap[0];
  a1 = ap[1];
  a2 = ap[2];
  a3 = ap[3];
  b0 = a0 + lda4;
  b1 = a1 + lda4;
  b2 = a2 + lda4;
  b3 = a3 + lda4;

  /* Fold alpha into the x values once, outside the row loop. */
  x0 = xo[0] * (*alpha);
  x1 = xo[1] * (*alpha);
  x2 = xo[2] * (*alpha);
  x3 = xo[3] * (*alpha);
  x4 = xo[4] * (*alpha);
  x5 = xo[5] * (*alpha);
  x6 = xo[6] * (*alpha);
  x7 = xo[7] * (*alpha);

  __vector float v_x0 = {x0, x0, x0, x0};
  __vector float v_x1 = {x1, x1, x1, x1};
  __vector float v_x2 = {x2, x2, x2, x2};
  __vector float v_x3 = {x3, x3, x3, x3};
  __vector float v_x4 = {x4, x4, x4, x4};
  __vector float v_x5 = {x5, x5, x5, x5};
  __vector float v_x6 = {x6, x6, x6, x6};
  __vector float v_x7 = {x7, x7, x7, x7};

  for (i = 0; i < n; i += 4) {
    /* Explicit unaligned vector loads of y and the eight columns. */
    __vector float vy = vec_vsx_ld(0, &y[i]);
    __vector float va0 = vec_vsx_ld(0, &a0[i]);
    __vector float va1 = vec_vsx_ld(0, &a1[i]);
    __vector float va2 = vec_vsx_ld(0, &a2[i]);
    __vector float va3 = vec_vsx_ld(0, &a3[i]);
    __vector float vb0 = vec_vsx_ld(0, &b0[i]);
    __vector float vb1 = vec_vsx_ld(0, &b1[i]);
    __vector float vb2 = vec_vsx_ld(0, &b2[i]);
    __vector float vb3 = vec_vsx_ld(0, &b3[i]);
    vy += v_x0 * va0 + v_x1 * va1 + v_x2 * va2 + v_x3 * va3;
    vy += v_x4 * vb0 + v_x5 * vb1 + v_x6 * vb2 + v_x7 * vb3;
    vec_vsx_st(vy, 0, &y[i]);
  }
}
/* y[0:n] += alpha * (x0*A[:,0] + x1*A[:,1] + x2*A[:,2] + x3*A[:,3]).
 *
 * n is a multiple of 4 at every call site in this file (NB), so the
 * 4-wide loop covers it exactly.  Unaligned-safe VSX loads/stores are
 * used so that neither y nor the columns of A need 16-byte alignment.
 */
static void sgemv_kernel_4x4(BLASLONG n, FLOAT **ap, FLOAT *xo, FLOAT *y,
                             FLOAT *alpha) {
  BLASLONG i;
  FLOAT x0, x1, x2, x3;
  FLOAT *a0, *a1, *a2, *a3;
  a0 = ap[0];
  a1 = ap[1];
  a2 = ap[2];
  a3 = ap[3];
  /* Fold alpha into the x values once, outside the row loop. */
  x0 = xo[0] * (*alpha);
  x1 = xo[1] * (*alpha);
  x2 = xo[2] * (*alpha);
  x3 = xo[3] * (*alpha);
  __vector float v_x0 = {x0, x0, x0, x0};
  __vector float v_x1 = {x1, x1, x1, x1};
  __vector float v_x2 = {x2, x2, x2, x2};
  __vector float v_x3 = {x3, x3, x3, x3};

  for (i = 0; i < n; i += 4) {
    __vector float vy = vec_vsx_ld(0, &y[i]);
    __vector float va0 = vec_vsx_ld(0, &a0[i]);
    __vector float va1 = vec_vsx_ld(0, &a1[i]);
    __vector float va2 = vec_vsx_ld(0, &a2[i]);
    __vector float va3 = vec_vsx_ld(0, &a3[i]);
    vy += v_x0 * va0 + v_x1 * va1 + v_x2 * va2 + v_x3 * va3;
    vec_vsx_st(vy, 0, &y[i]);
  }
}

/* y[0:n] += alpha * (x0*A[:,0] + x1*A[:,1]).
 * n is a multiple of 4 at the call site (NB).  Loads/stores go through
 * vec_vsx_ld/vec_vsx_st so unaligned y and A columns are safe.
 */
static void sgemv_kernel_4x2(BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y,
                             FLOAT *alpha) {
  BLASLONG i;
  FLOAT x0, x1;
  FLOAT *a0, *a1;
  a0 = ap[0];
  a1 = ap[1];
  /* Fold alpha into the two x values up front. */
  x0 = x[0] * (*alpha);
  x1 = x[1] * (*alpha);
  __vector float v_x0 = {x0, x0, x0, x0};
  __vector float v_x1 = {x1, x1, x1, x1};

  for (i = 0; i < n; i += 4) {
    __vector float vy = vec_vsx_ld(0, &y[i]);
    __vector float va0 = vec_vsx_ld(0, &a0[i]);
    __vector float va1 = vec_vsx_ld(0, &a1[i]);
    vy += v_x0 * va0 + v_x1 * va1;
    vec_vsx_st(vy, 0, &y[i]);
  }
}
/* y[0:n] += alpha * x[0] * A[:,0] for a single column.
 * n is a multiple of 4 at the call site (NB).  Unaligned-safe VSX
 * loads/stores, so y and the column need no particular alignment.
 */
static void sgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y,
                             FLOAT *alpha) {
  BLASLONG i;
  FLOAT x0 = x[0] * (*alpha); /* fold alpha in once */
  __vector float v_x0 = {x0, x0, x0, x0};

  for (i = 0; i < n; i += 4) {
    __vector float vy = vec_vsx_ld(0, &y[i]);
    __vector float va0 = vec_vsx_ld(0, &ap[i]);
    vy += v_x0 * va0;
    vec_vsx_st(vy, 0, &y[i]);
  }
}
/* Scatter-accumulate the contiguous temporary src[0:n] into the strided
 * destination: dest[i*inc_dest] += src[i].  Used when inc_y != 1 so the
 * vector kernels can work on a contiguous buffer first.
 */
static void add_y(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_dest) {
  BLASLONG i;
  for (i = 0; i < n; i++) {
    *dest += *src;
    src++;
    dest += inc_dest;
  }
}

/* SGEMV, no-transpose: y := alpha*A*x + y (beta already applied by the
 * interface layer; dummy1 is unused).
 *
 * Strategy: process the rows of A in blocks of at most NBMAX, and inside
 * each block consume x 8/4/2/1 columns at a time with the vector kernels
 * above.  When inc_y != 1 the kernels write into the contiguous scratch
 * buffer and add_y() scatters it back; when inc_x != 1 four x values are
 * gathered into xbuffer per 4x4 call.  The final m%4 rows are handled
 * with scalar tail code (m3 == 1, 2 or 3).
 */
int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a,
          BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y,
          FLOAT *buffer) {
  BLASLONG i, n1, m1, m2, m3, n2, lda4, lda8;
  FLOAT *a_ptr, *x_ptr, *y_ptr, *ap[4];

  lda4 = lda << 2;
  lda8 = lda << 3;
  FLOAT xbuffer[8] __attribute__((aligned(16)));
  FLOAT *ybuffer = buffer;

  if (m < 1) return (0);
  if (n < 1) return (0);

  /* With contiguous x we can use the 8-column kernel; otherwise x has to
   * be gathered and only the 4-column kernel is used. */
  if (inc_x == 1) {
    n1 = n >> 3;
    n2 = n & 7;
  } else {
    n1 = n >> 2;
    n2 = n & 3;
  }

  m3 = m & 3;            /* scalar tail rows */
  m1 = m & -4;           /* rows handled by the vector kernels */
  m2 = (m & (NBMAX - 1)) - m3; /* size of the last (partial) block; multiple of 4 */
  y_ptr = y;
  BLASLONG NB = NBMAX;

  while (NB == NBMAX) {
    m1 -= NB;
    if (m1 < 0) {
      if (m2 == 0) break; /* all full-size blocks done, no partial block */
      NB = m2;            /* last, smaller block (still a multiple of 4) */
    }

    a_ptr = a;
    x_ptr = x;

    ap[0] = a_ptr;
    ap[1] = a_ptr + lda;
    ap[2] = ap[1] + lda;
    ap[3] = ap[2] + lda;

    /* Strided y: accumulate into the zeroed scratch buffer, scatter later.
     * NB * 4 bytes == NB floats (single precision file). */
    if (inc_y != 1)
      memset(ybuffer, 0, NB * 4);
    else
      ybuffer = y_ptr;

    if (inc_x == 1) {
      for (i = 0; i < n1; i++) {
        sgemv_kernel_4x8(NB, ap, x_ptr, ybuffer, lda4, &alpha);
        ap[0] += lda8;
        ap[1] += lda8;
        ap[2] += lda8;
        ap[3] += lda8;
        a_ptr += lda8;
        x_ptr += 8;
      }
      if (n2 & 4) {
        sgemv_kernel_4x4(NB, ap, x_ptr, ybuffer, &alpha);
        ap[0] += lda4;
        ap[1] += lda4;
        ap[2] += lda4;
        ap[3] += lda4;
        a_ptr += lda4;
        x_ptr += 4;
      }

      if (n2 & 2) {
        sgemv_kernel_4x2(NB, ap, x_ptr, ybuffer, &alpha);
        a_ptr += lda * 2;
        x_ptr += 2;
      }

      if (n2 & 1) {
        sgemv_kernel_4x1(NB, a_ptr, x_ptr, ybuffer, &alpha);
        a_ptr += lda;
        x_ptr += 1;
      }

    } else {
      /* Strided x: gather four elements at a time into xbuffer. */
      for (i = 0; i < n1; i++) {
        xbuffer[0] = x_ptr[0];
        x_ptr += inc_x;
        xbuffer[1] = x_ptr[0];
        x_ptr += inc_x;
        xbuffer[2] = x_ptr[0];
        x_ptr += inc_x;
        xbuffer[3] = x_ptr[0];
        x_ptr += inc_x;
        sgemv_kernel_4x4(NB, ap, xbuffer, ybuffer, &alpha);
        ap[0] += lda4;
        ap[1] += lda4;
        ap[2] += lda4;
        ap[3] += lda4;
        a_ptr += lda4;
      }

      for (i = 0; i < n2; i++) {
        xbuffer[0] = x_ptr[0];
        x_ptr += inc_x;
        sgemv_kernel_4x1(NB, a_ptr, xbuffer, ybuffer, &alpha);
        a_ptr += lda;
      }
    }

    a += NB;
    if (inc_y != 1) {
      add_y(NB, ybuffer, y_ptr, inc_y);
      y_ptr += NB * inc_y;
    } else
      y_ptr += NB;
  }

  if (m3 == 0) return (0);

  /* Scalar handling of the last m % 4 rows.  Each case has a fast path
   * for the fully-contiguous layout (lda == m3 && inc_x == 1). */
  if (m3 == 3) {
    a_ptr = a;
    x_ptr = x;
    FLOAT temp0 = 0.0;
    FLOAT temp1 = 0.0;
    FLOAT temp2 = 0.0;
    if (lda == 3 && inc_x == 1) {
      for (i = 0; i < (n & -4); i += 4) {
        temp0 += a_ptr[0] * x_ptr[0] + a_ptr[3] * x_ptr[1];
        temp1 += a_ptr[1] * x_ptr[0] + a_ptr[4] * x_ptr[1];
        temp2 += a_ptr[2] * x_ptr[0] + a_ptr[5] * x_ptr[1];

        temp0 += a_ptr[6] * x_ptr[2] + a_ptr[9] * x_ptr[3];
        temp1 += a_ptr[7] * x_ptr[2] + a_ptr[10] * x_ptr[3];
        temp2 += a_ptr[8] * x_ptr[2] + a_ptr[11] * x_ptr[3];

        a_ptr += 12;
        x_ptr += 4;
      }

      for (; i < n; i++) {
        temp0 += a_ptr[0] * x_ptr[0];
        temp1 += a_ptr[1] * x_ptr[0];
        temp2 += a_ptr[2] * x_ptr[0];
        a_ptr += 3;
        x_ptr++;
      }

    } else {
      for (i = 0; i < n; i++) {
        temp0 += a_ptr[0] * x_ptr[0];
        temp1 += a_ptr[1] * x_ptr[0];
        temp2 += a_ptr[2] * x_ptr[0];
        a_ptr += lda;
        x_ptr += inc_x;
      }
    }
    y_ptr[0] += alpha * temp0;
    y_ptr += inc_y;
    y_ptr[0] += alpha * temp1;
    y_ptr += inc_y;
    y_ptr[0] += alpha * temp2;
    return (0);
  }

  if (m3 == 2) {
    a_ptr = a;
    x_ptr = x;
    FLOAT temp0 = 0.0;
    FLOAT temp1 = 0.0;
    if (lda == 2 && inc_x == 1) {
      for (i = 0; i < (n & -4); i += 4) {
        temp0 += a_ptr[0] * x_ptr[0] + a_ptr[2] * x_ptr[1];
        temp1 += a_ptr[1] * x_ptr[0] + a_ptr[3] * x_ptr[1];
        temp0 += a_ptr[4] * x_ptr[2] + a_ptr[6] * x_ptr[3];
        temp1 += a_ptr[5] * x_ptr[2] + a_ptr[7] * x_ptr[3];
        a_ptr += 8;
        x_ptr += 4;
      }

      for (; i < n; i++) {
        temp0 += a_ptr[0] * x_ptr[0];
        temp1 += a_ptr[1] * x_ptr[0];
        a_ptr += 2;
        x_ptr++;
      }

    } else {
      for (i = 0; i < n; i++) {
        temp0 += a_ptr[0] * x_ptr[0];
        temp1 += a_ptr[1] * x_ptr[0];
        a_ptr += lda;
        x_ptr += inc_x;
      }
    }
    y_ptr[0] += alpha * temp0;
    y_ptr += inc_y;
    y_ptr[0] += alpha * temp1;
    return (0);
  }

  if (m3 == 1) {
    a_ptr = a;
    x_ptr = x;
    FLOAT temp = 0.0;
    if (lda == 1 && inc_x == 1) {
      /* Row of A and x are both contiguous: plain dot product. */
      for (i = 0; i < (n & -4); i += 4) {
        temp += a_ptr[i] * x_ptr[i] + a_ptr[i + 1] * x_ptr[i + 1] +
                a_ptr[i + 2] * x_ptr[i + 2] +
                a_ptr[i + 3] * x_ptr[i + 3];
      }

      for (; i < n; i++) {
        temp += a_ptr[i] * x_ptr[i];
      }

    } else {
      for (i = 0; i < n; i++) {
        temp += a_ptr[0] * x_ptr[0];
        a_ptr += lda;
        x_ptr += inc_x;
      }
    }
    y_ptr[0] += alpha * temp;
    return (0);
  }

  return (0);
}

#endif


+ 141
- 170
kernel/power/sgemv_t.c View File

@@ -17,12 +17,12 @@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#if !defined(__VEC__) || !defined(__ALTIVEC__)
#include "../arm/gemv_t.c"
@@ -33,20 +33,20 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#define NBMAX 2048

#include <altivec.h>
static void sgemv_kernel_4x8(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT alpha) {
BLASLONG i;
#include <altivec.h>

static void sgemv_kernel_4x8(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x,
FLOAT *y, FLOAT alpha) {
BLASLONG i;
FLOAT *a0, *a1, *a2, *a3, *a4, *a5, *a6, *a7;
__vector float *va0, *va1, *va2, *va3, *va4, *va5, *va6, *va7, *v_x;
register __vector float temp0 = {0,0,0,0};
register __vector float temp1 = {0,0,0,0};
register __vector float temp2 = {0,0,0,0};
register __vector float temp3 = {0,0,0,0};
register __vector float temp4 = {0,0,0,0};
register __vector float temp5 = {0,0,0,0};
register __vector float temp6 = {0,0,0,0};
register __vector float temp7 = {0,0,0,0};
register __vector float temp0 = {0, 0, 0, 0};
register __vector float temp1 = {0, 0, 0, 0};
register __vector float temp2 = {0, 0, 0, 0};
register __vector float temp3 = {0, 0, 0, 0};
register __vector float temp4 = {0, 0, 0, 0};
register __vector float temp5 = {0, 0, 0, 0};
register __vector float temp6 = {0, 0, 0, 0};
register __vector float temp7 = {0, 0, 0, 0};

a0 = ap;
a1 = ap + lda;
@@ -56,43 +56,42 @@ static void sgemv_kernel_4x8(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOA
a5 = a4 + lda;
a6 = a5 + lda;
a7 = a6 + lda;
va0 = (__vector float*) a0;
va1 = (__vector float*) a1;
va2 = (__vector float*) a2;
va3 = (__vector float*) a3;
va4 = (__vector float*) a4;
va5 = (__vector float*) a5;
va6 = (__vector float*) a6;
va7 = (__vector float*) a7;
v_x = (__vector float*) x;
for (i = 0; i < n/4; i ++) {
temp0 += v_x[i] * va0[i];
temp1 += v_x[i] * va1[i];
temp2 += v_x[i] * va2[i];
temp3 += v_x[i] * va3[i];
temp4 += v_x[i] * va4[i];
temp5 += v_x[i] * va5[i];
temp6 += v_x[i] * va6[i];
temp7 += v_x[i] * va7[i];
}
#if defined(POWER8)
y[0] += alpha * (temp0[0] + temp0[1]+temp0[2] + temp0[3]);
y[1] += alpha * (temp1[0] + temp1[1]+temp1[2] + temp1[3]);
y[2] += alpha * (temp2[0] + temp2[1]+temp2[2] + temp2[3]);
y[3] += alpha * (temp3[0] + temp3[1]+temp3[2] + temp3[3]);

y[4] += alpha * (temp4[0] + temp4[1]+temp4[2] + temp4[3]);
y[5] += alpha * (temp5[0] + temp5[1]+temp5[2] + temp5[3]);
y[6] += alpha * (temp6[0] + temp6[1]+temp6[2] + temp6[3]);
y[7] += alpha * (temp7[0] + temp7[1]+temp7[2] + temp7[3]);
#else
register __vector float t0, t1, t2, t3;
register __vector float a = { alpha, alpha, alpha, alpha };
__vector float *v_y = (__vector float*) y;

for (i = 0; i < n; i += 4) {
__vector float vx = vec_vsx_ld(0, &x[i]);
__vector float vva0 = vec_vsx_ld(0, &a0[i]);
__vector float vva1 = vec_vsx_ld(0, &a1[i]);
__vector float vva2 = vec_vsx_ld(0, &a2[i]);
__vector float vva3 = vec_vsx_ld(0, &a3[i]);
__vector float vva4 = vec_vsx_ld(0, &a4[i]);
__vector float vva5 = vec_vsx_ld(0, &a5[i]);
__vector float vva6 = vec_vsx_ld(0, &a6[i]);
__vector float vva7 = vec_vsx_ld(0, &a7[i]);
temp0 += vx * vva0;
temp1 += vx * vva1;
temp2 += vx * vva2;
temp3 += vx * vva3;
temp4 += vx * vva4;
temp5 += vx * vva5;
temp6 += vx * vva6;
temp7 += vx * vva7;
}

#if defined(POWER8)
y[0] += alpha * (temp0[0] + temp0[1] + temp0[2] + temp0[3]);
y[1] += alpha * (temp1[0] + temp1[1] + temp1[2] + temp1[3]);
y[2] += alpha * (temp2[0] + temp2[1] + temp2[2] + temp2[3]);
y[3] += alpha * (temp3[0] + temp3[1] + temp3[2] + temp3[3]);

y[4] += alpha * (temp4[0] + temp4[1] + temp4[2] + temp4[3]);
y[5] += alpha * (temp5[0] + temp5[1] + temp5[2] + temp5[3]);
y[6] += alpha * (temp6[0] + temp6[1] + temp6[2] + temp6[3]);
y[7] += alpha * (temp7[0] + temp7[1] + temp7[2] + temp7[3]);
#else
register __vector float t0, t1, t2, t3;
register __vector float a = {alpha, alpha, alpha, alpha};
__vector float vy0 = vec_vsx_ld(0, y);
__vector float vy1 = vec_vsx_ld(0, &(y[4]));
t0 = vec_mergeh(temp0, temp2);
t1 = vec_mergel(temp0, temp2);
t2 = vec_mergeh(temp1, temp3);
@@ -113,44 +112,46 @@ static void sgemv_kernel_4x8(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOA
temp7 = vec_mergel(t1, t3);
temp4 += temp5 + temp6 + temp7;

v_y[0] += a * temp0;
v_y[1] += a * temp4;
vy0 += a * temp0;
vy1 += a * temp4;
vec_vsx_st(vy0, 0, y);
vec_vsx_st(vy1, 0, &(y[4]));
#endif
}

static void sgemv_kernel_4x4(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT alpha) {
static void sgemv_kernel_4x4(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x,
FLOAT *y, FLOAT alpha) {
BLASLONG i = 0;
FLOAT *a0, *a1, *a2, *a3;
a0 = ap;
a1 = ap + lda;
a2 = a1 + lda;
a3 = a2 + lda;
__vector float* va0 = (__vector float*) a0;
__vector float* va1 = (__vector float*) a1;
__vector float* va2 = (__vector float*) a2;
__vector float* va3 = (__vector float*) a3;
__vector float* v_x = (__vector float*) x;
register __vector float temp0 = {0,0,0,0};
register __vector float temp1 = {0,0,0,0};
register __vector float temp2 = {0,0,0,0};
register __vector float temp3 = {0,0,0,0};
for (i = 0; i < n / 4; i ++) {
temp0 += v_x[i] * va0[i];
temp1 += v_x[i] * va1[i];
temp2 += v_x[i] * va2[i];
temp3 += v_x[i] * va3[i];
register __vector float temp0 = {0, 0, 0, 0};
register __vector float temp1 = {0, 0, 0, 0};
register __vector float temp2 = {0, 0, 0, 0};
register __vector float temp3 = {0, 0, 0, 0};
for (i = 0; i < n; i += 4) {
__vector float vx = vec_vsx_ld(0, &x[i]);
__vector float vva0 = vec_vsx_ld(0, &a0[i]);
__vector float vva1 = vec_vsx_ld(0, &a1[i]);
__vector float vva2 = vec_vsx_ld(0, &a2[i]);
__vector float vva3 = vec_vsx_ld(0, &a3[i]);
temp0 += vx * vva0;
temp1 += vx * vva1;
temp2 += vx * vva2;
temp3 += vx * vva3;
}
#if defined(POWER8)
y[0] += alpha * (temp0[0] + temp0[1]+temp0[2] + temp0[3]);
y[1] += alpha * (temp1[0] + temp1[1]+temp1[2] + temp1[3]);
y[2] += alpha * (temp2[0] + temp2[1]+temp2[2] + temp2[3]);
y[3] += alpha * (temp3[0] + temp3[1]+temp3[2] + temp3[3]);
#else
#if defined(POWER8)
y[0] += alpha * (temp0[0] + temp0[1] + temp0[2] + temp0[3]);
y[1] += alpha * (temp1[0] + temp1[1] + temp1[2] + temp1[3]);
y[2] += alpha * (temp2[0] + temp2[1] + temp2[2] + temp2[3]);
y[3] += alpha * (temp3[0] + temp3[1] + temp3[2] + temp3[3]);
#else
register __vector float t0, t1, t2, t3;
register __vector float a = { alpha, alpha, alpha, alpha };
__vector float *v_y = (__vector float*) y;
register __vector float a = {alpha, alpha, alpha, alpha};
__vector float vy0 = vec_vsx_ld(0, y);

t0 = vec_mergeh(temp0, temp2);
t1 = vec_mergel(temp0, temp2);
@@ -162,47 +163,42 @@ static void sgemv_kernel_4x4(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x, FLOA
temp3 = vec_mergel(t1, t3);
temp0 += temp1 + temp2 + temp3;

v_y[0] += a * temp0;
vy0 += a * temp0;
vec_vsx_st(vy0, 0, y);
#endif
}

/* Transposed GEMV partial: two dot products over n rows.
 *   y[0]     += alpha * dot(A[:,0], x)
 *   y[inc_y] += alpha * dot(A[:,1], x)
 * n is a multiple of 4 at the call site (NB).  vec_vsx_ld tolerates
 * unaligned x and column addresses; the horizontal sum is done with
 * scalar lane extraction.
 */
static void sgemv_kernel_4x2(BLASLONG n, BLASLONG lda, FLOAT *ap, FLOAT *x,
                             FLOAT *y, FLOAT alpha, BLASLONG inc_y) {
  BLASLONG i;
  FLOAT *a0, *a1;
  a0 = ap;
  a1 = ap + lda;
  __vector float temp0 = {0, 0, 0, 0};
  __vector float temp1 = {0, 0, 0, 0};
  for (i = 0; i < n; i += 4) {
    __vector float vx = vec_vsx_ld(0, &x[i]);
    __vector float vva0 = vec_vsx_ld(0, &a0[i]);
    __vector float vva1 = vec_vsx_ld(0, &a1[i]);
    temp0 += vx * vva0;
    temp1 += vx * vva1;
  }

  y[0] += alpha * (temp0[0] + temp0[1] + temp0[2] + temp0[3]);
  y[inc_y] += alpha * (temp1[0] + temp1[1] + temp1[2] + temp1[3]);
}

/* Transposed GEMV partial for a single column:
 *   y[0] += alpha * dot(A[:,0], x) over n rows.
 * n is a multiple of 4 at the call site (NB); unaligned-safe VSX loads.
 */
static void sgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y,
                             FLOAT alpha) {
  BLASLONG i;
  __vector float temp0 = {0, 0, 0, 0};
  for (i = 0; i < n; i += 4) {
    __vector float vx = vec_vsx_ld(0, &x[i]);
    __vector float vva0 = vec_vsx_ld(0, &ap[i]);
    temp0 += vx * vva0;
  }

  y[0] += alpha * (temp0[0] + temp0[1] + temp0[2] + temp0[3]);
}

static void copy_x(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_src) {
@@ -213,20 +209,14 @@ static void copy_x(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_src) {
}
}

int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer) {
BLASLONG i;
BLASLONG j;
FLOAT *a_ptr;
FLOAT *x_ptr;
FLOAT *y_ptr;

BLASLONG n1;
BLASLONG m1;
BLASLONG m2;
BLASLONG m3;
BLASLONG n2;
int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a,
BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y,
FLOAT *buffer) {

BLASLONG i, j, n1, m1, m2, m3, n2;
FLOAT *a_ptr, *x_ptr, *y_ptr;
FLOAT ybuffer[8] __attribute__((aligned(16)));
FLOAT *xbuffer;
FLOAT *xbuffer;
if (m < 1) return (0);
if (n < 1) return (0);

@@ -242,7 +232,6 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
BLASLONG NB = NBMAX;

while (NB == NBMAX) {

m1 -= NB;
if (m1 < 0) {
if (m2 == 0) break;
@@ -260,20 +249,15 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO

BLASLONG lda8 = lda << 3;


if (inc_y == 1) {

for (i = 0; i < n1; i++) {
sgemv_kernel_4x8(NB, lda, a_ptr, xbuffer, y_ptr, alpha);
y_ptr += 8;
a_ptr += lda8;
}

} else {
for (i = 0; i < n1; i++) {
ybuffer[0] = 0;
ybuffer[1] = 0;
@@ -285,8 +269,6 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
ybuffer[7] = 0;
sgemv_kernel_4x8(NB, lda, a_ptr, xbuffer, ybuffer, alpha);


*y_ptr += ybuffer[0];
y_ptr += inc_y;
*y_ptr += ybuffer[1];
@@ -307,10 +289,8 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO

a_ptr += lda8;
}

}


if (n2 & 4) {
ybuffer[0] = 0;
ybuffer[1] = 0;
@@ -318,7 +298,7 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
ybuffer[3] = 0;
sgemv_kernel_4x4(NB, lda, a_ptr, xbuffer, ybuffer, alpha);

a_ptr += lda<<2;
a_ptr += lda << 2;

*y_ptr += ybuffer[0];
y_ptr += inc_y;
@@ -334,20 +314,16 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
sgemv_kernel_4x2(NB, lda, a_ptr, xbuffer, y_ptr, alpha, inc_y);
a_ptr += lda << 1;
y_ptr += 2 * inc_y;

}

if (n2 & 1) {
sgemv_kernel_4x1(NB, a_ptr, xbuffer, y_ptr, alpha);
a_ptr += lda;
y_ptr += inc_y;

}

a += NB;
x += NB * inc_x;


}

if (m3 == 0) return (0);
@@ -365,13 +341,14 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
y_ptr = y;

if (lda == 3 && inc_y == 1) {

for (j = 0; j < (n & -4); j += 4) {

y_ptr[j] += aj[0] * xtemp0 + aj[1] * xtemp1 + aj[2] * xtemp2;
y_ptr[j + 1] += aj[3] * xtemp0 + aj[4] * xtemp1 + aj[5] * xtemp2;
y_ptr[j + 2] += aj[6] * xtemp0 + aj[7] * xtemp1 + aj[8] * xtemp2;
y_ptr[j + 3] += aj[9] * xtemp0 + aj[10] * xtemp1 + aj[11] * xtemp2;
y_ptr[j + 1] +=
aj[3] * xtemp0 + aj[4] * xtemp1 + aj[5] * xtemp2;
y_ptr[j + 2] +=
aj[6] * xtemp0 + aj[7] * xtemp1 + aj[8] * xtemp2;
y_ptr[j + 3] +=
aj[9] * xtemp0 + aj[10] * xtemp1 + aj[11] * xtemp2;
aj += 12;
}

@@ -381,38 +358,40 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
}

} else {

if (inc_y == 1) {

BLASLONG register lda2 = lda << 1;
BLASLONG register lda4 = lda << 2;
BLASLONG register lda3 = lda2 + lda;

for (j = 0; j < (n & -4); j += 4) {

y_ptr[j] += *aj * xtemp0 + *(aj + 1) * xtemp1 + *(aj + 2) * xtemp2;
y_ptr[j + 1] += *(aj + lda) * xtemp0 + *(aj + lda + 1) * xtemp1 + *(aj + lda + 2) * xtemp2;
y_ptr[j + 2] += *(aj + lda2) * xtemp0 + *(aj + lda2 + 1) * xtemp1 + *(aj + lda2 + 2) * xtemp2;
y_ptr[j + 3] += *(aj + lda3) * xtemp0 + *(aj + lda3 + 1) * xtemp1 + *(aj + lda3 + 2) * xtemp2;
y_ptr[j] +=
*aj * xtemp0 + *(aj + 1) * xtemp1 + *(aj + 2) * xtemp2;
y_ptr[j + 1] += *(aj + lda) * xtemp0 +
*(aj + lda + 1) * xtemp1 +
*(aj + lda + 2) * xtemp2;
y_ptr[j + 2] += *(aj + lda2) * xtemp0 +
*(aj + lda2 + 1) * xtemp1 +
*(aj + lda2 + 2) * xtemp2;
y_ptr[j + 3] += *(aj + lda3) * xtemp0 +
*(aj + lda3 + 1) * xtemp1 +
*(aj + lda3 + 2) * xtemp2;
aj += lda4;
}

for (; j < n; j++) {

y_ptr[j] += *aj * xtemp0 + *(aj + 1) * xtemp1 + *(aj + 2) * xtemp2;
y_ptr[j] +=
*aj * xtemp0 + *(aj + 1) * xtemp1 + *(aj + 2) * xtemp2;
aj += lda;
}

} else {

for (j = 0; j < n; j++) {
*y_ptr += *aj * xtemp0 + *(aj + 1) * xtemp1 + *(aj + 2) * xtemp2;
*y_ptr +=
*aj * xtemp0 + *(aj + 1) * xtemp1 + *(aj + 2) * xtemp2;
y_ptr += inc_y;
aj += lda;
}

}

}
return (0);
}
@@ -426,14 +405,12 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
y_ptr = y;

if (lda == 2 && inc_y == 1) {

for (j = 0; j < (n & -4); j += 4) {
y_ptr[j] += aj[0] * xtemp0 + aj[1] * xtemp1;
y_ptr[j + 1] += aj[2] * xtemp0 + aj[3] * xtemp1;
y_ptr[j + 2] += aj[4] * xtemp0 + aj[5] * xtemp1;
y_ptr[j + 3] += aj[6] * xtemp0 + aj[7] * xtemp1;
aj += 8;

}

for (; j < n; j++) {
@@ -443,22 +420,22 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO

} else {
if (inc_y == 1) {

BLASLONG register lda2 = lda << 1;
BLASLONG register lda4 = lda << 2;
BLASLONG register lda3 = lda2 + lda;

for (j = 0; j < (n & -4); j += 4) {

y_ptr[j] += *aj * xtemp0 + *(aj + 1) * xtemp1;
y_ptr[j + 1] += *(aj + lda) * xtemp0 + *(aj + lda + 1) * xtemp1;
y_ptr[j + 2] += *(aj + lda2) * xtemp0 + *(aj + lda2 + 1) * xtemp1;
y_ptr[j + 3] += *(aj + lda3) * xtemp0 + *(aj + lda3 + 1) * xtemp1;
y_ptr[j + 1] +=
*(aj + lda) * xtemp0 + *(aj + lda + 1) * xtemp1;
y_ptr[j + 2] +=
*(aj + lda2) * xtemp0 + *(aj + lda2 + 1) * xtemp1;
y_ptr[j + 3] +=
*(aj + lda3) * xtemp0 + *(aj + lda3 + 1) * xtemp1;
aj += lda4;
}

for (; j < n; j++) {

y_ptr[j] += *aj * xtemp0 + *(aj + 1) * xtemp1;
aj += lda;
}
@@ -470,10 +447,8 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
aj += lda;
}
}

}
return (0);

}

FLOAT xtemp = *x_ptr * alpha;
@@ -490,10 +465,8 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
y_ptr[j] += aj[j] * xtemp;
}


} else {
if (inc_y == 1) {

BLASLONG register lda2 = lda << 1;
BLASLONG register lda4 = lda << 2;
BLASLONG register lda3 = lda2 + lda;
@@ -516,12 +489,10 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO
y_ptr += inc_y;
aj += lda;
}

}
}

return (0);

}

#endif

Loading…
Cancel
Save