Merge pull request #419 from wernsaar/develop
added optimized sgemv kernels for Sandy Bridge, Haswell, Bulldozer, and Piledriver.
This commit is contained in:
commit 80bf3e6a35

Makefile | 2
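The changes below wire new single-precision gemv kernels into the build system, the benchmark suite, and the interface layer. For orientation, everything here targets the BLAS GEMV operation y := alpha*op(A)*x + beta*y that the new benchmark times. A minimal scalar sketch of the non-transposed, unit-stride case follows; sgemv_n_ref is an illustrative name and not part of the patch.

/* Scalar reference for the operation the optimized kernels accelerate:
 * y := alpha * A * x + beta * y, with A stored column-major
 * (m rows, n columns, leading dimension lda). Unit strides assumed. */
static void sgemv_n_ref(int m, int n, float alpha, const float *a, int lda,
                        const float *x, float beta, float *y)
{
    int i, j;
    for (i = 0; i < m; i++)
        y[i] *= beta;                        /* scale the existing y        */
    for (j = 0; j < n; j++)                  /* one column of A per step    */
        for (i = 0; i < m; i++)
            y[i] += alpha * a[i + j * lda] * x[j];
}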
@@ -247,7 +247,7 @@ ifndef NOFORTRAN
 	-@echo "SUFFIX = $(SUFFIX)" >> $(NETLIB_LAPACK_DIR)/make.inc
 	-@echo "PSUFFIX = $(PSUFFIX)" >> $(NETLIB_LAPACK_DIR)/make.inc
 	-@echo "CEXTRALIB = $(EXTRALIB)" >> $(NETLIB_LAPACK_DIR)/make.inc
-ifeq ($(FC), GFORTRAN)
+ifeq ($(FC), gfortran)
 	-@echo "TIMER = INT_ETIME" >> $(NETLIB_LAPACK_DIR)/make.inc
 ifdef SMP
 	-@echo "LOADER = $(FC) -pthread" >> $(NETLIB_LAPACK_DIR)/make.inc
@@ -2,12 +2,12 @@ TOPDIR = ..
 include $(TOPDIR)/Makefile.system
 
 # ACML standard
-ACML=/opt/acml5.3.1/gfortran64_mp/lib
-LIBACML = -fopenmp $(ACML)/libacml_mp.a -lgfortran -lm
+#ACML=/opt/acml5.3.1/gfortran64_mp/lib
+#LIBACML = -fopenmp $(ACML)/libacml_mp.a -lgfortran -lm
 
 # ACML custom
-#ACML=/opt/pb/acml-5-3-1-gfortran-64bit/gfortran64_fma4_mp/lib
-#LIBACML = -fopenmp $(ACML)/libacml_mp.a -lgfortran -lm
+ACML=/opt/pb/acml-5-3-1-gfortran-64bit/gfortran64_fma4_mp/lib
+LIBACML = -fopenmp $(ACML)/libacml_mp.a -lgfortran -lm
 
 # Atlas Ubuntu
 #ATLAS=/usr/lib/atlas-base
@@ -37,6 +37,7 @@ goto :: slinpack.goto dlinpack.goto clinpack.goto zlinpack.goto \
 	chemm.goto zhemm.goto \
 	cherk.goto zherk.goto \
 	cher2k.goto zher2k.goto \
+	sgemv.goto dgemv.goto cgemv.goto zgemv.goto \
 	ssymm.goto dsymm.goto csymm.goto zsymm.goto
 
 acml :: slinpack.acml dlinpack.acml clinpack.acml zlinpack.acml \
@@ -49,6 +50,7 @@ acml :: slinpack.acml dlinpack.acml clinpack.acml zlinpack.acml \
 	chemm.acml zhemm.acml \
 	cherk.acml zherk.acml \
 	cher2k.acml zher2k.acml \
+	sgemv.acml dgemv.acml cgemv.acml zgemv.acml \
 	ssymm.acml dsymm.acml csymm.acml zsymm.acml
 
 atlas :: slinpack.atlas dlinpack.atlas clinpack.atlas zlinpack.atlas \
@@ -61,6 +63,7 @@ atlas :: slinpack.atlas dlinpack.atlas clinpack.atlas zlinpack.atlas \
 	chemm.atlas zhemm.atlas \
 	cherk.atlas zherk.atlas \
 	cher2k.atlas zher2k.atlas \
+	sgemv.atlas dgemv.atlas cgemv.atlas zgemv.atlas \
 	ssymm.atlas dsymm.atlas csymm.atlas zsymm.atlas
 
 mkl :: slinpack.mkl dlinpack.mkl clinpack.mkl zlinpack.mkl \
@@ -73,6 +76,7 @@ mkl :: slinpack.mkl dlinpack.mkl clinpack.mkl zlinpack.mkl \
 	chemm.mkl zhemm.mkl \
 	cherk.mkl zherk.mkl \
 	cher2k.mkl zher2k.mkl \
+	sgemv.mkl dgemv.mkl cgemv.mkl zgemv.mkl \
 	ssymm.mkl dsymm.mkl csymm.mkl zsymm.mkl
 
 all :: goto atlas acml mkl
@@ -601,6 +605,61 @@ zher2k.atlas : zher2k.$(SUFFIX)
 zher2k.mkl : zher2k.$(SUFFIX)
 	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
 
+##################################### Sgemv ####################################################
+sgemv.goto : sgemv.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) -lm
+
+sgemv.acml : sgemv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+sgemv.atlas : sgemv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+sgemv.mkl : sgemv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Dgemv ####################################################
+dgemv.goto : dgemv.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) -lm
+
+dgemv.acml : dgemv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dgemv.atlas : dgemv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+dgemv.mkl : dgemv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Cgemv ####################################################
+
+cgemv.goto : cgemv.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) -lm
+
+cgemv.acml : cgemv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+cgemv.atlas : cgemv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+cgemv.mkl : cgemv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+##################################### Zgemv ####################################################
+
+zgemv.goto : zgemv.$(SUFFIX) ../$(LIBNAME)
+	$(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) -lm
+
+zgemv.acml : zgemv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zgemv.atlas : zgemv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+zgemv.mkl : zgemv.$(SUFFIX)
+	-$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB)
+
+
 ###################################################################################################
 
 slinpack.$(SUFFIX) : linpack.c
@@ -717,7 +776,17 @@ cher2k.$(SUFFIX) : her2k.c
 zher2k.$(SUFFIX) : her2k.c
 	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
 
+sgemv.$(SUFFIX) : gemv.c
+	$(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^
+
+dgemv.$(SUFFIX) : gemv.c
+	$(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^
+
+cgemv.$(SUFFIX) : gemv.c
+	$(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^
+
+zgemv.$(SUFFIX) : gemv.c
+	$(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^
+
 clean ::
@@ -0,0 +1,229 @@
/***************************************************************************
Copyright (c) 2014, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/

#include <stdio.h>
#include <stdlib.h>
#ifdef __CYGWIN32__
#include <sys/time.h>
#endif
#include "common.h"


#undef GEMV

#ifndef COMPLEX

#ifdef DOUBLE
#define GEMV BLASFUNC(dgemv)
#else
#define GEMV BLASFUNC(sgemv)
#endif

#else

#ifdef DOUBLE
#define GEMV BLASFUNC(zgemv)
#else
#define GEMV BLASFUNC(cgemv)
#endif

#endif

#if defined(__WIN32__) || defined(__WIN64__)

#ifndef DELTA_EPOCH_IN_MICROSECS
#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL
#endif

int gettimeofday(struct timeval *tv, void *tz){

  FILETIME ft;
  unsigned __int64 tmpres = 0;
  static int tzflag;

  if (NULL != tv)
  {
    GetSystemTimeAsFileTime(&ft);

    tmpres |= ft.dwHighDateTime;
    tmpres <<= 32;
    tmpres |= ft.dwLowDateTime;

    /*converting file time to unix epoch*/
    tmpres /= 10;  /*convert into microseconds*/
    tmpres -= DELTA_EPOCH_IN_MICROSECS;
    tv->tv_sec = (long)(tmpres / 1000000UL);
    tv->tv_usec = (long)(tmpres % 1000000UL);
  }

  return 0;
}

#endif

#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0

static void *huge_malloc(BLASLONG size){
  int shmid;
  void *address;

#ifndef SHM_HUGETLB
#define SHM_HUGETLB 04000
#endif

  if ((shmid =shmget(IPC_PRIVATE,
                     (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1),
                     SHM_HUGETLB | IPC_CREAT |0600)) < 0) {
    printf( "Memory allocation failed(shmget).\n");
    exit(1);
  }

  address = shmat(shmid, NULL, SHM_RND);

  if ((BLASLONG)address == -1){
    printf( "Memory allocation failed(shmat).\n");
    exit(1);
  }

  shmctl(shmid, IPC_RMID, 0);

  return address;
}

#define malloc huge_malloc

#endif

int MAIN__(int argc, char *argv[]){

  FLOAT *a, *x, *y;
  FLOAT alpha[] = {1.0, 1.0};
  FLOAT beta [] = {1.0, 1.0};
  char trans='N';
  blasint m, i, j;
  blasint inc_x=1,inc_y=1;
  blasint n=0;
  int has_param_n = 0;
  int loops = 1;
  int l;
  char *p;

  int from = 1;
  int to   = 200;
  int step = 1;

  struct timeval start, stop;
  double time1,timeg;

  argc--;argv++;

  if (argc > 0) { from = atol(*argv); argc--; argv++;}
  if (argc > 0) { to   = MAX(atol(*argv), from); argc--; argv++;}
  if (argc > 0) { step = atol(*argv); argc--; argv++;}

  if ((p = getenv("OPENBLAS_LOOPS")))  loops = atoi(p);
  if ((p = getenv("OPENBLAS_INCX")))   inc_x = atoi(p);
  if ((p = getenv("OPENBLAS_INCY")))   inc_y = atoi(p);
  if ((p = getenv("OPENBLAS_TRANS")))  trans=*p;
  if ((p = getenv("OPENBLAS_PARAM_N"))) {
    n = atoi(p);
    if ((n>0) && (n<=to)) has_param_n = 1;
  }

  if ( has_param_n == 1 )
    fprintf(stderr, "From : %3d  To : %3d Step = %3d Trans = '%c' N = %d Inc_x = %d Inc_y = %d Loops = %d\n", from, to, step,trans,n,inc_x,inc_y,loops);
  else
    fprintf(stderr, "From : %3d  To : %3d Step = %3d Trans = '%c' Inc_x = %d Inc_y = %d Loops = %d\n", from, to, step,trans,inc_x,inc_y,loops);

  if (( a = (FLOAT *)malloc(sizeof(FLOAT) * to * to * COMPSIZE)) == NULL){
    fprintf(stderr,"Out of Memory!!\n");exit(1);
  }

  if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){
    fprintf(stderr,"Out of Memory!!\n");exit(1);
  }

  if (( y = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_y) * COMPSIZE)) == NULL){
    fprintf(stderr,"Out of Memory!!\n");exit(1);
  }

#ifdef linux
  srandom(getpid());
#endif

  fprintf(stderr, "   SIZE       Flops\n");

  for(m = from; m <= to; m += step)
  {

    timeg=0;

    if ( has_param_n == 0 ) n = m;

    fprintf(stderr, " %6dx%d : ", (int)m,(int)n);

    for(j = 0; j < m; j++){
      for(i = 0; i < n * COMPSIZE; i++){
        a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
      }
    }

    for (l=0; l<loops; l++)
    {

      for(i = 0; i < n * COMPSIZE * abs(inc_x); i++){
        x[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
      }

      for(i = 0; i < n * COMPSIZE * abs(inc_y); i++){
        y[i] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5;
      }
      gettimeofday( &start, (struct timezone *)0);

      GEMV (&trans, &m, &n, alpha, a, &m, x, &inc_x, beta, y, &inc_y );

      gettimeofday( &stop, (struct timezone *)0);

      time1 = (double)(stop.tv_sec - start.tv_sec) + (double)((stop.tv_usec - start.tv_usec)) * 1.e-6;

      timeg += time1;

    }

    timeg /= loops;

    fprintf(stderr,
            " %10.2f MFlops\n",
            COMPSIZE * COMPSIZE * 2. * (double)m * (double)n / timeg * 1.e-6);

  }

  return 0;
}

void main(int argc, char *argv[]) __attribute__((weak, alias("MAIN__")));
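The MFlops figure printed by the benchmark above follows its operation-count convention of roughly COMPSIZE^2 * 2 * m * n floating point operations per GEMV call. A standalone sketch of that arithmetic; gemv_mflops is a hypothetical helper, not part of the patch.

#include <stdio.h>

/* Mirrors the benchmark's output line: COMPSIZE is 1 for s/d and 2 for c/z. */
static double gemv_mflops(int compsize, int m, int n, double seconds)
{
    return compsize * compsize * 2.0 * (double)m * (double)n / seconds * 1.e-6;
}

int main(void)
{
    /* e.g. a 2000x2000 sgemv call that took 1.2 milliseconds */
    printf("%10.2f MFlops\n", gemv_mflops(1, 2000, 2000, 1.2e-3));
    return 0;
}

As the benchmark source shows, the size range comes from the command line, while the repeat count, strides, transpose flag, and fixed N are read from OPENBLAS_LOOPS, OPENBLAS_INCX, OPENBLAS_INCY, OPENBLAS_TRANS, and OPENBLAS_PARAM_N.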
@@ -405,11 +405,11 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS
 
 #ifndef COMPLEX
 	double MNK = (double) args.m * (double) args.n * (double) args.k;
-	if ( MNK <= (1024.0 * (double) GEMM_MULTITHREAD_THRESHOLD) )
+	if ( MNK <= (16.0 * 1024.0 * (double) GEMM_MULTITHREAD_THRESHOLD) )
 		nthreads_max = 1;
 	else
 	{
-		if ( MNK <= (65536.0 * (double) GEMM_MULTITHREAD_THRESHOLD) )
+		if ( MNK <= (2.0 * 65536.0 * (double) GEMM_MULTITHREAD_THRESHOLD) )
 		{
 			nthreads_max = 4;
 			if ( args.m < 16 * GEMM_MULTITHREAD_THRESHOLD )
@@ -211,7 +211,18 @@ void CNAME(enum CBLAS_ORDER order,
 	buffer = (FLOAT *)blas_memory_alloc(1);
 
 #ifdef SMP
-	nthreads = num_cpu_avail(2);
+
+	int nthreads_max = num_cpu_avail(2);
+	int nthreads_avail = nthreads_max;
+
+	double MNK = (double) m * (double) n;
+	if ( MNK <= (500.0 * 100.0 * (double) GEMM_MULTITHREAD_THRESHOLD) )
+		nthreads_max = 1;
+
+	if ( nthreads_max > nthreads_avail )
+		nthreads = nthreads_avail;
+	else
+		nthreads = nthreads_max;
 
 	if (nthreads == 1) {
 #endif
@@ -233,7 +233,19 @@ void CNAME(enum CBLAS_ORDER order,
 	buffer = (FLOAT *)blas_memory_alloc(1);
 
 #ifdef SMP
-	nthreads = num_cpu_avail(2);
+
+	int nthreads_max = num_cpu_avail(2);
+	int nthreads_avail = nthreads_max;
+
+	double MNK = (double) m * (double) n;
+	if ( MNK <= (80.0 * 20.0 * (double) GEMM_MULTITHREAD_THRESHOLD) )
+		nthreads_max = 1;
+
+	if ( nthreads_max > nthreads_avail )
+		nthreads = nthreads_avail;
+	else
+		nthreads = nthreads_max;
+
 	if (nthreads == 1) {
 #endif
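Both interface hunks above replace the unconditional nthreads = num_cpu_avail(2) with the same small heuristic: estimate the work as m*n, stay single-threaded below a threshold, and otherwise use at most the available CPUs. A self-contained sketch of that selection; pick_nthreads and the threshold parameter are illustrative names, not from the patch.

/* Thread-count heuristic as added above. threshold stands for a product such as
 * 500.0 * 100.0 * GEMM_MULTITHREAD_THRESHOLD (or 80.0 * 20.0 * ... in the second hunk). */
int pick_nthreads(double m, double n, double threshold, int cpu_avail)
{
    int nthreads_max = cpu_avail;        /* what num_cpu_avail(2) reported      */
    int nthreads_avail = nthreads_max;

    double MNK = m * n;                  /* work estimate for this GEMV call    */
    if (MNK <= threshold)
        nthreads_max = 1;                /* too little work: single-threaded    */

    return (nthreads_max > nthreads_avail) ? nthreads_avail : nthreads_max;
}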
@@ -1,3 +1,11 @@
+ifdef OS_WINDOWS
+SGEMVNKERNEL = ../arm/gemv_n.c
+SGEMVTKERNEL = ../arm/gemv_t.c
+else
+SGEMVNKERNEL = sgemv_n_avx.c
+SGEMVTKERNEL = sgemv_t_avx.c
+endif
+
 
 ZGEMVNKERNEL = zgemv_n_dup.S
 ZGEMVTKERNEL = zgemv_t.S
@@ -1,3 +1,11 @@
+ifdef OS_WINDOWS
+SGEMVNKERNEL = ../arm/gemv_n.c
+SGEMVTKERNEL = ../arm/gemv_t.c
+else
+SGEMVNKERNEL = sgemv_n_avx.c
+SGEMVTKERNEL = sgemv_t_avx.c
+endif
+
 
 SGEMMKERNEL = sgemm_kernel_16x4_haswell.S
 SGEMMINCOPY = ../generic/gemm_ncopy_16.c
@@ -1,3 +1,11 @@
+ifdef OS_WINDOWS
+SGEMVNKERNEL = ../arm/gemv_n.c
+SGEMVTKERNEL = ../arm/gemv_t.c
+else
+SGEMVNKERNEL = sgemv_n_avx.c
+SGEMVTKERNEL = sgemv_t_avx.c
+endif
+
 
 ZGEMVNKERNEL = zgemv_n_dup.S
 ZGEMVTKERNEL = zgemv_t.S
@@ -1,3 +1,11 @@
+ifdef OS_WINDOWS
+SGEMVNKERNEL = ../arm/gemv_n.c
+SGEMVTKERNEL = ../arm/gemv_t.c
+else
+SGEMVNKERNEL = sgemv_n_avx.c
+SGEMVTKERNEL = sgemv_t_avx.c
+endif
+
 
 SGEMMKERNEL = sgemm_kernel_16x4_sandy.S
 SGEMMINCOPY = ../generic/gemm_ncopy_16.c
@@ -0,0 +1,218 @@
/***************************************************************************
Copyright (c) 2014, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/


#include "common.h"

#if defined(BULLDOZER) || defined(PILEDRIVER)
#include "sgemv_n_microk_bulldozer.c"
#elif defined(HASWELL)
#include "sgemv_n_microk_haswell.c"
#else
#include "sgemv_n_microk_sandy.c"
#endif

static void copy_x(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_src)
{
	BLASLONG i;
	for ( i=0; i<n; i++ )
	{
		*dest = *src;
		dest++;
		src += inc_src;
	}
}

static void add_y(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_dest)
{
	BLASLONG i;
	for ( i=0; i<n; i++ )
	{
		*dest += *src;
		src++;
		dest += inc_dest;
	}
}

int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer)
{
	BLASLONG i;
	BLASLONG j;
	FLOAT *a_ptr;
	FLOAT *x_ptr;
	FLOAT *y_ptr;
	BLASLONG n1;
	BLASLONG m1;
	BLASLONG register m2;
	BLASLONG register n2;
	FLOAT *xbuffer,*ybuffer;
	xbuffer = buffer;
	ybuffer = xbuffer + 2048 + 256;

	n1 = n / 512 ;
	n2 = n % 512 ;

	m1 = m / 64;
	m2 = m % 64;

	y_ptr = y;
	x_ptr = x;

	for (j=0; j<n1; j++)
	{

		if ( inc_x == 1 )
			xbuffer = x_ptr;
		else
			copy_x(512,x_ptr,xbuffer,inc_x);

		a_ptr = a + j * 512 * lda;
		y_ptr = y;

		for(i = 0; i<m1; i++ )
		{
			sgemv_kernel_64(512,alpha,a_ptr,lda,xbuffer,ybuffer);
			add_y(64,ybuffer,y_ptr,inc_y);
			y_ptr += 64 * inc_y;
			a_ptr += 64;

		}

		if ( m2 & 32 )
		{
			sgemv_kernel_32(512,alpha,a_ptr,lda,xbuffer,ybuffer);
			add_y(32,ybuffer,y_ptr,inc_y);
			y_ptr += 32 * inc_y;
			a_ptr += 32;

		}

		if ( m2 & 16 )
		{
			sgemv_kernel_16(512,alpha,a_ptr,lda,xbuffer,ybuffer);
			add_y(16,ybuffer,y_ptr,inc_y);
			y_ptr += 16 * inc_y;
			a_ptr += 16;
		}
		if ( m2 & 8 )
		{
			sgemv_kernel_8(512,alpha,a_ptr,lda,xbuffer,ybuffer);
			add_y(8,ybuffer,y_ptr,inc_y);
			y_ptr += 8 * inc_y;
			a_ptr += 8;
		}
		if ( m2 & 4 )
		{
			sgemv_kernel_4(512,alpha,a_ptr,lda,xbuffer,ybuffer);
			add_y(4,ybuffer,y_ptr,inc_y);
			y_ptr += 4 * inc_y;
			a_ptr += 4;
		}
		if ( m2 & 2 )
		{
			sgemv_kernel_2(512,alpha,a_ptr,lda,xbuffer,ybuffer);
			add_y(2,ybuffer,y_ptr,inc_y);
			y_ptr += 2 * inc_y;
			a_ptr += 2;
		}
		if ( m2 & 1 )
		{
			sgemv_kernel_1(512,alpha,a_ptr,lda,xbuffer,ybuffer);
			add_y(1,ybuffer,y_ptr,inc_y);
		}
		x_ptr += 512 * inc_x;

	}

	if ( n2 > 0 )
	{

		if ( inc_x == 1 )
			xbuffer = x_ptr;
		else
			copy_x(n2,x_ptr,xbuffer,inc_x);

		a_ptr = a + n1 * 512 * lda;
		y_ptr = y;

		for(i = 0; i<m1; i++ )
		{
			sgemv_kernel_64(n2,alpha,a_ptr,lda,xbuffer,ybuffer);
			add_y(64,ybuffer,y_ptr,inc_y);
			y_ptr += 64 * inc_y;
			a_ptr += 64;

		}

		if ( m2 & 32 )
		{
			sgemv_kernel_32(n2,alpha,a_ptr,lda,xbuffer,ybuffer);
			add_y(32,ybuffer,y_ptr,inc_y);
			y_ptr += 32 * inc_y;
			a_ptr += 32;

		}
		if ( m2 & 16 )
		{
			sgemv_kernel_16(n2,alpha,a_ptr,lda,xbuffer,ybuffer);
			add_y(16,ybuffer,y_ptr,inc_y);
			y_ptr += 16 * inc_y;
			a_ptr += 16;
		}
		if ( m2 & 8 )
		{
			sgemv_kernel_8(n2,alpha,a_ptr,lda,xbuffer,ybuffer);
			add_y(8,ybuffer,y_ptr,inc_y);
			y_ptr += 8 * inc_y;
			a_ptr += 8;
		}
		if ( m2 & 4 )
		{
			sgemv_kernel_4(n2,alpha,a_ptr,lda,xbuffer,ybuffer);
			add_y(4,ybuffer,y_ptr,inc_y);
			y_ptr += 4 * inc_y;
			a_ptr += 4;
		}
		if ( m2 & 2 )
		{
			sgemv_kernel_2(n2,alpha,a_ptr,lda,xbuffer,ybuffer);
			add_y(2,ybuffer,y_ptr,inc_y);
			y_ptr += 2 * inc_y;
			a_ptr += 2;
		}
		if ( m2 & 1 )
		{
			sgemv_kernel_1(n2,alpha,a_ptr,lda,xbuffer,ybuffer);
			add_y(1,ybuffer,y_ptr,inc_y);
		}


	}
	return(0);
}
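The wrapper above only orchestrates blocking: columns are processed in slices of 512 (staged through xbuffer when inc_x != 1), rows in chunks of 64/32/16/8/4/2/1, and each micro-kernel writes its partial result to ybuffer, which add_y() then folds into y. A portable scalar sketch of the contract each sgemv_kernel_N is expected to fulfil; sgemv_kernel_ref is an illustrative name, not part of the patch.

/* ybuffer[0..N-1] = alpha * A(0..N-1, 0..n-1) * x, column-major, leading dimension lda. */
static void sgemv_kernel_ref(long N, long n, float alpha, const float *a, long lda,
                             const float *x, float *ybuffer)
{
    long i, j;
    for (i = 0; i < N; i++)
        ybuffer[i] = 0.0f;
    for (j = 0; j < n; j++)              /* walk the columns, as the asm loop does  */
        for (i = 0; i < N; i++)          /* N = 64, 32, ... rows per micro-kernel   */
            ybuffer[i] += a[i + j * lda] * x[j];
    for (i = 0; i < N; i++)
        ybuffer[i] *= alpha;             /* scale once at the end, as the asm does  */
}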
@@ -0,0 +1,451 @@
/***************************************************************************
Copyright (c) 2014, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/

static void sgemv_kernel_64( long n, float alpha, float *a, long lda, float *x, float *y)
{

	float *pre = a + lda*3;

	__asm__ __volatile__
	(
	"movq          %0, %%rax\n\t"  // n -> rax
	"vbroadcastss  %1, %%ymm1\n\t" // alpha -> ymm1
	"movq          %2, %%rsi\n\t"  // address of a -> rsi
	"movq          %3, %%rcx\n\t"  // value of lda -> rcx
	"movq          %4, %%rdi\n\t"  // address of x -> rdi
	"movq          %5, %%rdx\n\t"  // address of y -> rdx
	"movq          %6, %%r8\n\t"   // address for prefetch
	"prefetcht0    (%%r8)\n\t"     // Prefetch
	"prefetcht0    64(%%r8)\n\t"   // Prefetch

	"vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero
	"vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero
	"vxorps %%ymm10, %%ymm10, %%ymm10\n\t" // set to zero
	"vxorps %%ymm11, %%ymm11, %%ymm11\n\t" // set to zero
	"vxorps %%ymm12, %%ymm12, %%ymm12\n\t" // set to zero
	"vxorps %%ymm13, %%ymm13, %%ymm13\n\t" // set to zero
	"vxorps %%ymm14, %%ymm14, %%ymm14\n\t" // set to zero
	"vxorps %%ymm15, %%ymm15, %%ymm15\n\t" // set to zero
	".align 16                       \n\t"
	".L01LOOP%=:                     \n\t"
	"vbroadcastss (%%rdi), %%ymm0    \n\t" // load values of c
	"nop                             \n\t"
	"leaq  (%%r8 , %%rcx, 4), %%r8   \n\t" // add lda to pointer for prefetch

	"prefetcht0 (%%r8)\n\t"    // Prefetch
	"vfmaddps %%ymm8 ,  0*4(%%rsi), %%ymm0, %%ymm8 \n\t" // multiply a and c and add to temp
	"prefetcht0 64(%%r8)\n\t"  // Prefetch
	"vfmaddps %%ymm9 ,  8*4(%%rsi), %%ymm0, %%ymm9 \n\t" // multiply a and c and add to temp
	"prefetcht0 128(%%r8)\n\t" // Prefetch
	"vfmaddps %%ymm10, 16*4(%%rsi), %%ymm0, %%ymm10\n\t" // multiply a and c and add to temp
	"vfmaddps %%ymm11, 24*4(%%rsi), %%ymm0, %%ymm11\n\t" // multiply a and c and add to temp
	"prefetcht0 192(%%r8)\n\t" // Prefetch
	"vfmaddps %%ymm12, 32*4(%%rsi), %%ymm0, %%ymm12\n\t" // multiply a and c and add to temp
	"vfmaddps %%ymm13, 40*4(%%rsi), %%ymm0, %%ymm13\n\t" // multiply a and c and add to temp
	"vfmaddps %%ymm14, 48*4(%%rsi), %%ymm0, %%ymm14\n\t" // multiply a and c and add to temp
	"vfmaddps %%ymm15, 56*4(%%rsi), %%ymm0, %%ymm15\n\t" // multiply a and c and add to temp

	"addq  $4      , %%rdi           \n\t" // increment pointer of c
	"leaq  (%%rsi, %%rcx, 4), %%rsi  \n\t" // add lda to pointer of a

	"dec   %%rax                     \n\t" // n = n -1
	"jnz   .L01LOOP%=                \n\t"

	"vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha
	"vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha
	"vmulps %%ymm10, %%ymm1, %%ymm10\n\t" // scale by alpha
	"vmulps %%ymm11, %%ymm1, %%ymm11\n\t" // scale by alpha
	"vmulps %%ymm12, %%ymm1, %%ymm12\n\t" // scale by alpha
	"vmulps %%ymm13, %%ymm1, %%ymm13\n\t" // scale by alpha
	"vmulps %%ymm14, %%ymm1, %%ymm14\n\t" // scale by alpha
	"vmulps %%ymm15, %%ymm1, %%ymm15\n\t" // scale by alpha

	"vmovups %%ymm8 ,     (%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm9 ,  8*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm10, 16*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm11, 24*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm12, 32*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm13, 40*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm14, 48*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm15, 56*4(%%rdx)   \n\t" // store temp -> y

	:
	:
	  "m" (n),      // 0
	  "m" (alpha),  // 1
	  "m" (a),      // 2
	  "m" (lda),    // 3
	  "m" (x),      // 4
	  "m" (y),      // 5
	  "m" (pre)     // 6
	: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8",
	  "%xmm0", "%xmm1",
	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
	  "memory"
	);

}


static void sgemv_kernel_32( long n, float alpha, float *a, long lda, float *x, float *y)
{

	float *pre = a + lda*3;

	__asm__ __volatile__
	(
	"movq          %0, %%rax\n\t"  // n -> rax
	"vbroadcastss  %1, %%xmm1\n\t" // alpha -> xmm1
	"movq          %2, %%rsi\n\t"  // address of a -> rsi
	"movq          %3, %%rcx\n\t"  // value of lda -> rcx
	"movq          %4, %%rdi\n\t"  // address of x -> rdi
	"movq          %5, %%rdx\n\t"  // address of y -> rdx
	"movq          %6, %%r8\n\t"   // address for prefetch
	"prefetcht0    (%%r8)\n\t"     // Prefetch
	"prefetcht0    64(%%r8)\n\t"   // Prefetch

	"vxorps %%xmm8 , %%xmm8 , %%xmm8 \n\t" // set to zero
	"vxorps %%xmm9 , %%xmm9 , %%xmm9 \n\t" // set to zero
	"vxorps %%xmm10, %%xmm10, %%xmm10\n\t" // set to zero
	"vxorps %%xmm11, %%xmm11, %%xmm11\n\t" // set to zero
	"vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero
	"vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero
	"vxorps %%xmm14, %%xmm14, %%xmm14\n\t" // set to zero
	"vxorps %%xmm15, %%xmm15, %%xmm15\n\t" // set to zero
	".align 16                       \n\t"
	".L01LOOP%=:                     \n\t"
	"vbroadcastss (%%rdi), %%xmm0    \n\t" // load values of c
	"nop                             \n\t"
	"leaq  (%%r8 , %%rcx, 4), %%r8   \n\t" // add lda to pointer for prefetch

	"prefetcht0 (%%r8)\n\t"    // Prefetch
	"vfmaddps %%xmm8 ,  0*4(%%rsi), %%xmm0, %%xmm8 \n\t" // multiply a and c and add to temp
	"prefetcht0 64(%%r8)\n\t"  // Prefetch
	"vfmaddps %%xmm9 ,  4*4(%%rsi), %%xmm0, %%xmm9 \n\t" // multiply a and c and add to temp
	"vfmaddps %%xmm10,  8*4(%%rsi), %%xmm0, %%xmm10\n\t" // multiply a and c and add to temp
	"vfmaddps %%xmm11, 12*4(%%rsi), %%xmm0, %%xmm11\n\t" // multiply a and c and add to temp
	"vfmaddps %%xmm12, 16*4(%%rsi), %%xmm0, %%xmm12\n\t" // multiply a and c and add to temp
	"vfmaddps %%xmm13, 20*4(%%rsi), %%xmm0, %%xmm13\n\t" // multiply a and c and add to temp
	"vfmaddps %%xmm14, 24*4(%%rsi), %%xmm0, %%xmm14\n\t" // multiply a and c and add to temp
	"vfmaddps %%xmm15, 28*4(%%rsi), %%xmm0, %%xmm15\n\t" // multiply a and c and add to temp

	"addq  $4      , %%rdi           \n\t" // increment pointer of c
	"leaq  (%%rsi, %%rcx, 4), %%rsi  \n\t" // add lda to pointer of a

	"dec   %%rax                     \n\t" // n = n -1
	"jnz   .L01LOOP%=                \n\t"

	"vmulps %%xmm8 , %%xmm1, %%xmm8 \n\t" // scale by alpha
	"vmulps %%xmm9 , %%xmm1, %%xmm9 \n\t" // scale by alpha
	"vmulps %%xmm10, %%xmm1, %%xmm10\n\t" // scale by alpha
	"vmulps %%xmm11, %%xmm1, %%xmm11\n\t" // scale by alpha
	"vmulps %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha
	"vmulps %%xmm13, %%xmm1, %%xmm13\n\t" // scale by alpha
	"vmulps %%xmm14, %%xmm1, %%xmm14\n\t" // scale by alpha
	"vmulps %%xmm15, %%xmm1, %%xmm15\n\t" // scale by alpha

	"vmovups %%xmm8 ,     (%%rdx)   \n\t" // store temp -> y
	"vmovups %%xmm9 ,  4*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%xmm10,  8*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%xmm11, 12*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%xmm12, 16*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%xmm13, 20*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%xmm14, 24*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%xmm15, 28*4(%%rdx)   \n\t" // store temp -> y

	:
	:
	  "m" (n),      // 0
	  "m" (alpha),  // 1
	  "m" (a),      // 2
	  "m" (lda),    // 3
	  "m" (x),      // 4
	  "m" (y),      // 5
	  "m" (pre)     // 6
	);

}

static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y)
{

	float *pre = a + lda*3;

	__asm__ __volatile__
	(
	"movq          %0, %%rax\n\t"  // n -> rax
	"vbroadcastss  %1, %%ymm1\n\t" // alpha -> ymm1
	"movq          %2, %%rsi\n\t"  // address of a -> rsi
	"movq          %3, %%rcx\n\t"  // value of lda -> rcx
	"movq          %4, %%rdi\n\t"  // address of x -> rdi
	"movq          %5, %%rdx\n\t"  // address of y -> rdx
	"movq          %6, %%r8\n\t"   // address for prefetch
	"prefetcht0    (%%r8)\n\t"     // Prefetch

	"vxorps %%ymm12, %%ymm12, %%ymm12\n\t" // set to zero
	"vxorps %%ymm13, %%ymm13, %%ymm13\n\t" // set to zero

	".L01LOOP%=:                     \n\t"
	"vbroadcastss (%%rdi), %%ymm0    \n\t" // load values of c
	"addq  $4      , %%rdi           \n\t" // increment pointer of c

	"leaq  (%%r8 , %%rcx, 4), %%r8   \n\t" // add lda to pointer for prefetch
	"prefetcht0 (%%r8)\n\t"                // Prefetch

	"vfmaddps %%ymm12, 0*4(%%rsi), %%ymm0, %%ymm12\n\t" // multiply a and c and add to temp
	"vfmaddps %%ymm13, 8*4(%%rsi), %%ymm0, %%ymm13\n\t" // multiply a and c and add to temp

	"leaq  (%%rsi, %%rcx, 4), %%rsi  \n\t" // add lda to pointer of a

	"dec   %%rax                     \n\t" // n = n -1
	"jnz   .L01LOOP%=                \n\t"

	"vmulps %%ymm12, %%ymm1, %%ymm12\n\t" // scale by alpha
	"vmulps %%ymm13, %%ymm1, %%ymm13\n\t" // scale by alpha

	"vmovups %%ymm12,    (%%rdx)    \n\t" // store temp -> y
	"vmovups %%ymm13, 8*4(%%rdx)    \n\t" // store temp -> y

	:
	:
	  "m" (n),      // 0
	  "m" (alpha),  // 1
	  "m" (a),      // 2
	  "m" (lda),    // 3
	  "m" (x),      // 4
	  "m" (y),      // 5
	  "m" (pre)     // 6
	: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8",
	  "%xmm0", "%xmm1",
	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
	  "memory"
	);

}


static void sgemv_kernel_8( long n, float alpha, float *a, long lda, float *x, float *y)
{

	__asm__ __volatile__
	(
	"movq          %0, %%rax\n\t"  // n -> rax
	"vbroadcastss  %1, %%ymm1\n\t" // alpha -> ymm1
	"movq          %2, %%rsi\n\t"  // address of a -> rsi
	"movq          %3, %%rcx\n\t"  // value of lda -> rcx
	"movq          %4, %%rdi\n\t"  // address of x -> rdi
	"movq          %5, %%rdx\n\t"  // address of y -> rdx

	"vxorps %%ymm12, %%ymm12, %%ymm12\n\t" // set to zero

	".L01LOOP%=:                     \n\t"
	"vbroadcastss (%%rdi), %%ymm0    \n\t" // load values of c
	"addq  $4      , %%rdi           \n\t" // increment pointer of c

	"vfmaddps %%ymm12, 0*4(%%rsi), %%ymm0, %%ymm12\n\t" // multiply a and c and add to temp

	"leaq  (%%rsi, %%rcx, 4), %%rsi  \n\t" // add lda to pointer of a

	"dec   %%rax                     \n\t" // n = n -1
	"jnz   .L01LOOP%=                \n\t"

	"vmulps %%ymm12, %%ymm1, %%ymm12\n\t" // scale by alpha

	"vmovups %%ymm12, (%%rdx)       \n\t" // store temp -> y

	:
	:
	  "m" (n),      // 0
	  "m" (alpha),  // 1
	  "m" (a),      // 2
	  "m" (lda),    // 3
	  "m" (x),      // 4
	  "m" (y)       // 5
	: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8",
	  "%xmm0", "%xmm1",
	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
	  "memory"
	);

}


static void sgemv_kernel_4( long n, float alpha, float *a, long lda, float *x, float *y)
{

	__asm__ __volatile__
	(
	"movq          %0, %%rax\n\t"  // n -> rax
	"vbroadcastss  %1, %%xmm1\n\t" // alpha -> xmm1
	"movq          %2, %%rsi\n\t"  // address of a -> rsi
	"movq          %3, %%rcx\n\t"  // value of lda -> rcx
	"movq          %4, %%rdi\n\t"  // address of x -> rdi
	"movq          %5, %%rdx\n\t"  // address of y -> rdx

	"vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero

	".L01LOOP%=:                     \n\t"
	"vbroadcastss (%%rdi), %%xmm0    \n\t" // load values of c
	"addq  $4      , %%rdi           \n\t" // increment pointer of c

	"vfmaddps %%xmm12, 0*4(%%rsi), %%xmm0, %%xmm12\n\t" // multiply a and c and add to temp

	"leaq  (%%rsi, %%rcx, 4), %%rsi  \n\t" // add lda to pointer of a

	"dec   %%rax                     \n\t" // n = n -1
	"jnz   .L01LOOP%=                \n\t"

	"vmulps %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha

	"vmovups %%xmm12, (%%rdx)       \n\t" // store temp -> y

	:
	:
	  "m" (n),      // 0
	  "m" (alpha),  // 1
	  "m" (a),      // 2
	  "m" (lda),    // 3
	  "m" (x),      // 4
	  "m" (y)       // 5
	: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8",
	  "%xmm0", "%xmm1",
	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
	  "memory"
	);

}

static void sgemv_kernel_2( long n, float alpha, float *a, long lda, float *x, float *y)
{

	__asm__ __volatile__
	(
	"movq          %0, %%rax\n\t"  // n -> rax
	"vmovss        %1, %%xmm1\n\t" // alpha -> xmm1
	"movq          %2, %%rsi\n\t"  // address of a -> rsi
	"movq          %3, %%rcx\n\t"  // value of lda -> rcx
	"movq          %4, %%rdi\n\t"  // address of x -> rdi
	"movq          %5, %%rdx\n\t"  // address of y -> rdx

	"vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero
	"vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero

	".L01LOOP%=:                     \n\t"
	"vmovss (%%rdi), %%xmm0          \n\t" // load values of c
	"addq  $4      , %%rdi           \n\t" // increment pointer of c

	"vfmaddss %%xmm12, 0*4(%%rsi), %%xmm0, %%xmm12\n\t" // multiply a and c and add to temp
	"vfmaddss %%xmm13, 1*4(%%rsi), %%xmm0, %%xmm13\n\t" // multiply a and c and add to temp

	"leaq  (%%rsi, %%rcx, 4), %%rsi  \n\t" // add lda to pointer of a

	"dec   %%rax                     \n\t" // n = n -1
	"jnz   .L01LOOP%=                \n\t"

	"vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha
	"vmulss %%xmm13, %%xmm1, %%xmm13\n\t" // scale by alpha

	"vmovss %%xmm12,  (%%rdx)       \n\t" // store temp -> y
	"vmovss %%xmm13, 4(%%rdx)       \n\t" // store temp -> y

	:
	:
	  "m" (n),      // 0
	  "m" (alpha),  // 1
	  "m" (a),      // 2
	  "m" (lda),    // 3
	  "m" (x),      // 4
	  "m" (y)       // 5
	: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8",
	  "%xmm0", "%xmm1",
	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
	  "memory"
	);

}


static void sgemv_kernel_1( long n, float alpha, float *a, long lda, float *x, float *y)
{

	__asm__ __volatile__
	(
	"movq          %0, %%rax\n\t"  // n -> rax
	"vmovss        %1, %%xmm1\n\t" // alpha -> xmm1
	"movq          %2, %%rsi\n\t"  // address of a -> rsi
	"movq          %3, %%rcx\n\t"  // value of lda -> rcx
	"movq          %4, %%rdi\n\t"  // address of x -> rdi
	"movq          %5, %%rdx\n\t"  // address of y -> rdx

	"vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero

	".L01LOOP%=:                     \n\t"
	"vmovss (%%rdi), %%xmm0          \n\t" // load values of c
	"addq  $4      , %%rdi           \n\t" // increment pointer of c

	"vfmaddss %%xmm12, 0*4(%%rsi), %%xmm0, %%xmm12\n\t" // multiply a and c and add to temp

	"leaq  (%%rsi, %%rcx, 4), %%rsi  \n\t" // add lda to pointer of a

	"dec   %%rax                     \n\t" // n = n -1
	"jnz   .L01LOOP%=                \n\t"

	"vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha

	"vmovss %%xmm12, (%%rdx)        \n\t" // store temp -> y

	:
	:
	  "m" (n),      // 0
	  "m" (alpha),  // 1
	  "m" (a),      // 2
	  "m" (lda),    // 3
	  "m" (x),      // 4
	  "m" (y)       // 5
	: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8",
	  "%xmm0", "%xmm1",
	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
	  "memory"
	);

}
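The micro-kernel files differ mainly in which multiply-add form they emit: the file above uses the four-operand vfmaddps/vfmaddss encoding, while the file below uses vfmadd231ps in its widest kernel and a separate vmulps/vaddps pair elsewhere. Per element, every variant performs the same accumulator update; a scalar sketch with an illustrative name (gemv_lane_update is not part of the patch):

#include <math.h>

/* One lane of the accumulator update inside every inner loop: acc += a_ij * x_j.
 * With hardware fused multiply-add this is a single vfmadd* instruction;
 * without it, the separate mul + add pair seen in the last file. */
static inline float gemv_lane_update(float acc, float a_ij, float x_j)
{
    return fmaf(a_ij, x_j, acc);   /* fused multiply-add from <math.h> */
}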
@@ -0,0 +1,461 @@
/***************************************************************************
Copyright (c) 2014, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/

static void sgemv_kernel_64( long n, float alpha, float *a, long lda, float *x, float *y)
{

	float *pre = a + lda*2;

	__asm__ __volatile__
	(
	"movq          %0, %%rax\n\t"  // n -> rax
	"vbroadcastss  %1, %%ymm1\n\t" // alpha -> ymm1
	"movq          %2, %%rsi\n\t"  // address of a -> rsi
	"movq          %3, %%rcx\n\t"  // value of lda -> rcx
	"movq          %4, %%rdi\n\t"  // address of x -> rdi
	"movq          %5, %%rdx\n\t"  // address of y -> rdx
	"movq          %6, %%r8\n\t"   // address for prefetch
	"prefetcht0    (%%r8)\n\t"     // Prefetch
	"prefetcht0    64(%%r8)\n\t"   // Prefetch

	"vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero
	"vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero
	"vxorps %%ymm10, %%ymm10, %%ymm10\n\t" // set to zero
	"vxorps %%ymm11, %%ymm11, %%ymm11\n\t" // set to zero
	"vxorps %%ymm12, %%ymm12, %%ymm12\n\t" // set to zero
	"vxorps %%ymm13, %%ymm13, %%ymm13\n\t" // set to zero
	"vxorps %%ymm14, %%ymm14, %%ymm14\n\t" // set to zero
	"vxorps %%ymm15, %%ymm15, %%ymm15\n\t" // set to zero
	".align 16                       \n\t"
	".L01LOOP%=:                     \n\t"
	"vbroadcastss (%%rdi), %%ymm0    \n\t" // load values of c
	"leaq  (%%r8 , %%rcx, 4), %%r8   \n\t" // add lda to pointer for prefetch

	"prefetcht0 (%%r8)\n\t"    // Prefetch
	"vfmadd231ps  0*4(%%rsi), %%ymm0, %%ymm8 \n\t" // multiply a and c and add to temp
	"vfmadd231ps  8*4(%%rsi), %%ymm0, %%ymm9 \n\t" // multiply a and c and add to temp
	"prefetcht0 64(%%r8)\n\t"  // Prefetch
	"vfmadd231ps 16*4(%%rsi), %%ymm0, %%ymm10\n\t" // multiply a and c and add to temp
	"vfmadd231ps 24*4(%%rsi), %%ymm0, %%ymm11\n\t" // multiply a and c and add to temp
	"prefetcht0 128(%%r8)\n\t" // Prefetch
	"vfmadd231ps 32*4(%%rsi), %%ymm0, %%ymm12\n\t" // multiply a and c and add to temp
	"vfmadd231ps 40*4(%%rsi), %%ymm0, %%ymm13\n\t" // multiply a and c and add to temp
	"prefetcht0 192(%%r8)\n\t" // Prefetch
	"vfmadd231ps 48*4(%%rsi), %%ymm0, %%ymm14\n\t" // multiply a and c and add to temp
	"vfmadd231ps 56*4(%%rsi), %%ymm0, %%ymm15\n\t" // multiply a and c and add to temp

	"addq  $4      , %%rdi           \n\t" // increment pointer of c
	"leaq  (%%rsi, %%rcx, 4), %%rsi  \n\t" // add lda to pointer of a

	"dec   %%rax                     \n\t" // n = n -1
	"jnz   .L01LOOP%=                \n\t"

	"vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha
	"vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha
	"vmulps %%ymm10, %%ymm1, %%ymm10\n\t" // scale by alpha
	"vmulps %%ymm11, %%ymm1, %%ymm11\n\t" // scale by alpha
	"vmulps %%ymm12, %%ymm1, %%ymm12\n\t" // scale by alpha
	"vmulps %%ymm13, %%ymm1, %%ymm13\n\t" // scale by alpha
	"vmulps %%ymm14, %%ymm1, %%ymm14\n\t" // scale by alpha
	"vmulps %%ymm15, %%ymm1, %%ymm15\n\t" // scale by alpha

	"vmovups %%ymm8 ,     (%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm9 ,  8*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm10, 16*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm11, 24*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm12, 32*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm13, 40*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm14, 48*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm15, 56*4(%%rdx)   \n\t" // store temp -> y

	:
	:
	  "m" (n),      // 0
	  "m" (alpha),  // 1
	  "m" (a),      // 2
	  "m" (lda),    // 3
	  "m" (x),      // 4
	  "m" (y),      // 5
	  "m" (pre)     // 6
	: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc",
	  "%xmm0", "%xmm1",
	  "%xmm4", "%xmm5", "%xmm6", "%xmm7",
	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
	  "memory"
	);

}


static void sgemv_kernel_32( long n, float alpha, float *a, long lda, float *x, float *y)
{

	float *pre = a + lda*3;

	__asm__ __volatile__
	(
	"movq          %0, %%rax\n\t"  // n -> rax
	"vbroadcastss  %1, %%ymm1\n\t" // alpha -> ymm1
	"movq          %2, %%rsi\n\t"  // address of a -> rsi
	"movq          %3, %%rcx\n\t"  // value of lda -> rcx
	"movq          %4, %%rdi\n\t"  // address of x -> rdi
	"movq          %5, %%rdx\n\t"  // address of y -> rdx
	"movq          %6, %%r8\n\t"   // address for prefetch
	"prefetcht0    (%%r8)\n\t"     // Prefetch
	"prefetcht0    64(%%r8)\n\t"   // Prefetch

	"vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero
	"vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero
	"vxorps %%ymm10, %%ymm10, %%ymm10\n\t" // set to zero
	"vxorps %%ymm11, %%ymm11, %%ymm11\n\t" // set to zero
	".align 16                       \n\t"
	".L01LOOP%=:                     \n\t"
	"vbroadcastss (%%rdi), %%ymm0    \n\t" // load values of c
	"nop                             \n\t"
	"leaq  (%%r8 , %%rcx, 4), %%r8   \n\t" // add lda to pointer for prefetch

	"prefetcht0 (%%r8)\n\t"    // Prefetch
	"prefetcht0 64(%%r8)\n\t"  // Prefetch

	"vmulps  0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp
	"vmulps  8*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp
	"vmulps 16*4(%%rsi), %%ymm0, %%ymm6 \n\t" // multiply a and c and add to temp
	"vmulps 24*4(%%rsi), %%ymm0, %%ymm7 \n\t" // multiply a and c and add to temp

	"vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp
	"vaddps %%ymm9 , %%ymm5, %%ymm9 \n\t" // multiply a and c and add to temp
	"vaddps %%ymm10, %%ymm6, %%ymm10\n\t" // multiply a and c and add to temp
	"vaddps %%ymm11, %%ymm7, %%ymm11\n\t" // multiply a and c and add to temp


	"addq  $4      , %%rdi           \n\t" // increment pointer of c
	"leaq  (%%rsi, %%rcx, 4), %%rsi  \n\t" // add lda to pointer of a

	"dec   %%rax                     \n\t" // n = n -1
	"jnz   .L01LOOP%=                \n\t"

	"vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha
	"vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha
	"vmulps %%ymm10, %%ymm1, %%ymm10\n\t" // scale by alpha
	"vmulps %%ymm11, %%ymm1, %%ymm11\n\t" // scale by alpha

	"vmovups %%ymm8 ,     (%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm9 ,  8*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm10, 16*4(%%rdx)   \n\t" // store temp -> y
	"vmovups %%ymm11, 24*4(%%rdx)   \n\t" // store temp -> y

	:
	:
	  "m" (n),      // 0
	  "m" (alpha),  // 1
	  "m" (a),      // 2
	  "m" (lda),    // 3
	  "m" (x),      // 4
	  "m" (y),      // 5
	  "m" (pre)     // 6
	: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc",
	  "%xmm0", "%xmm1",
	  "%xmm4", "%xmm5", "%xmm6", "%xmm7",
	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
	  "memory"
	);


}

static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y)
{

	float *pre = a + lda*3;

	__asm__ __volatile__
	(
	"movq          %0, %%rax\n\t"  // n -> rax
	"vbroadcastss  %1, %%ymm1\n\t" // alpha -> ymm1
	"movq          %2, %%rsi\n\t"  // address of a -> rsi
	"movq          %3, %%rcx\n\t"  // value of lda -> rcx
	"movq          %4, %%rdi\n\t"  // address of x -> rdi
	"movq          %5, %%rdx\n\t"  // address of y -> rdx
	"movq          %6, %%r8\n\t"   // address for prefetch
	"prefetcht0    (%%r8)\n\t"     // Prefetch
	"prefetcht0    64(%%r8)\n\t"   // Prefetch

	"vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero
	"vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero
	".align 16                       \n\t"
	".L01LOOP%=:                     \n\t"
	"vbroadcastss (%%rdi), %%ymm0    \n\t" // load values of c
	"nop                             \n\t"
	"leaq  (%%r8 , %%rcx, 4), %%r8   \n\t" // add lda to pointer for prefetch

	"prefetcht0 (%%r8)\n\t"    // Prefetch

	"vmulps  0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp
	"vmulps  8*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp

	"vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp
	"vaddps %%ymm9 , %%ymm5, %%ymm9 \n\t" // multiply a and c and add to temp

	"addq  $4      , %%rdi           \n\t" // increment pointer of c
	"leaq  (%%rsi, %%rcx, 4), %%rsi  \n\t" // add lda to pointer of a

	"dec   %%rax                     \n\t" // n = n -1
	"jnz   .L01LOOP%=                \n\t"

	"vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha
	"vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha

	"vmovups %%ymm8 ,    (%%rdx)    \n\t" // store temp -> y
	"vmovups %%ymm9 , 8*4(%%rdx)    \n\t" // store temp -> y

	:
	:
	  "m" (n),      // 0
	  "m" (alpha),  // 1
	  "m" (a),      // 2
	  "m" (lda),    // 3
	  "m" (x),      // 4
	  "m" (y),      // 5
	  "m" (pre)     // 6
	: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc",
	  "%xmm0", "%xmm1",
	  "%xmm4", "%xmm5", "%xmm6", "%xmm7",
	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
	  "memory"
	);


}


static void sgemv_kernel_8( long n, float alpha, float *a, long lda, float *x, float *y)
{

	__asm__ __volatile__
	(
	"movq          %0, %%rax\n\t"  // n -> rax
	"vbroadcastss  %1, %%ymm1\n\t" // alpha -> ymm1
	"movq          %2, %%rsi\n\t"  // address of a -> rsi
	"movq          %3, %%rcx\n\t"  // value of lda -> rcx
	"movq          %4, %%rdi\n\t"  // address of x -> rdi
	"movq          %5, %%rdx\n\t"  // address of y -> rdx

	"vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero
	".align 16                       \n\t"
	".L01LOOP%=:                     \n\t"
	"vbroadcastss (%%rdi), %%ymm0    \n\t" // load values of c

	"vmulps  0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp
	"vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t"     // multiply a and c and add to temp

	"addq  $4      , %%rdi           \n\t" // increment pointer of c
	"leaq  (%%rsi, %%rcx, 4), %%rsi  \n\t" // add lda to pointer of a

	"dec   %%rax                     \n\t" // n = n -1
	"jnz   .L01LOOP%=                \n\t"

	"vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha
	"vmovups %%ymm8 , (%%rdx)       \n\t" // store temp -> y

	:
	:
	  "m" (n),      // 0
	  "m" (alpha),  // 1
	  "m" (a),      // 2
	  "m" (lda),    // 3
	  "m" (x),      // 4
	  "m" (y)       // 5
	: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc",
	  "%xmm0", "%xmm1",
	  "%xmm4", "%xmm5", "%xmm6", "%xmm7",
	  "%xmm8", "%xmm9", "%xmm10", "%xmm11",
	  "%xmm12", "%xmm13", "%xmm14", "%xmm15",
	  "memory"
	);
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void sgemv_kernel_4( long n, float alpha, float *a, long lda, float *x, float *y)
|
||||||
|
{
|
||||||
|
|
||||||
|
|
||||||
|
__asm__ __volatile__
|
||||||
|
(
|
||||||
|
"movq %0, %%rax\n\t" // n -> rax
|
||||||
|
"vbroadcastss %1, %%xmm1\n\t" // alpha -> xmm1
|
||||||
|
"movq %2, %%rsi\n\t" // adress of a -> rsi
|
||||||
|
"movq %3, %%rcx\n\t" // value of lda > rcx
|
||||||
|
"movq %4, %%rdi\n\t" // adress of x -> rdi
|
||||||
|
"movq %5, %%rdx\n\t" // adress of y -> rdx
|
||||||
|
|
||||||
|
"vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero
|
||||||
|
|
||||||
|
".L01LOOP%=: \n\t"
|
||||||
|
"vbroadcastss (%%rdi), %%xmm0 \n\t" // load values of c
|
||||||
|
|
||||||
|
"vmulps 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp
|
||||||
|
"vaddps %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"addq $4 , %%rdi \n\t" // increment pointer of c
|
||||||
|
"leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a
|
||||||
|
|
||||||
|
"dec %%rax \n\t" // n = n -1
|
||||||
|
"jnz .L01LOOP%= \n\t"
|
||||||
|
|
||||||
|
"vmulps %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha
|
||||||
|
|
||||||
|
"vmovups %%xmm12, (%%rdx) \n\t" // store temp -> y
|
||||||
|
|
||||||
|
:
|
||||||
|
:
|
||||||
|
"m" (n), // 0
|
||||||
|
"m" (alpha), // 1
|
||||||
|
"m" (a), // 2
|
||||||
|
"m" (lda), // 3
|
||||||
|
"m" (x), // 4
|
||||||
|
"m" (y) // 5
|
||||||
|
: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8",
|
||||||
|
"%xmm0", "%xmm1",
|
||||||
|
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
|
||||||
|
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
|
||||||
|
"memory"
|
||||||
|
);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
static void sgemv_kernel_2( long n, float alpha, float *a, long lda, float *x, float *y)
|
||||||
|
{
|
||||||
|
|
||||||
|
|
||||||
|
__asm__ __volatile__
|
||||||
|
(
|
||||||
|
"movq %0, %%rax\n\t" // n -> rax
|
||||||
|
"vmovss %1, %%xmm1\n\t" // alpha -> xmm1
|
||||||
|
"movq %2, %%rsi\n\t" // adress of a -> rsi
|
||||||
|
"movq %3, %%rcx\n\t" // value of lda > rcx
|
||||||
|
"movq %4, %%rdi\n\t" // adress of x -> rdi
|
||||||
|
"movq %5, %%rdx\n\t" // adress of y -> rdx
|
||||||
|
|
||||||
|
"vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero
|
||||||
|
"vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero
|
||||||
|
|
||||||
|
".L01LOOP%=: \n\t"
|
||||||
|
"vmovss (%%rdi), %%xmm0 \n\t" // load values of c
|
||||||
|
|
||||||
|
"vmulps 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp
|
||||||
|
"vmulps 1*4(%%rsi), %%xmm0, %%xmm5 \n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"vaddps %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp
|
||||||
|
"vaddps %%xmm13, %%xmm5, %%xmm13 \n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"addq $4 , %%rdi \n\t" // increment pointer of c
|
||||||
|
"leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a
|
||||||
|
|
||||||
|
"dec %%rax \n\t" // n = n -1
|
||||||
|
"jnz .L01LOOP%= \n\t"
|
||||||
|
|
||||||
|
"vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha
|
||||||
|
"vmulss %%xmm13, %%xmm1, %%xmm13\n\t" // scale by alpha
|
||||||
|
|
||||||
|
"vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y
|
||||||
|
"vmovss %%xmm13, 4(%%rdx) \n\t" // store temp -> y
|
||||||
|
|
||||||
|
:
|
||||||
|
:
|
||||||
|
"m" (n), // 0
|
||||||
|
"m" (alpha), // 1
|
||||||
|
"m" (a), // 2
|
||||||
|
"m" (lda), // 3
|
||||||
|
"m" (x), // 4
|
||||||
|
"m" (y) // 5
|
||||||
|
: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8",
|
||||||
|
"%xmm0", "%xmm1",
|
||||||
|
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
|
||||||
|
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
|
||||||
|
"memory"
|
||||||
|
);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
static void sgemv_kernel_1( long n, float alpha, float *a, long lda, float *x, float *y)
|
||||||
|
{
|
||||||
|
|
||||||
|
|
||||||
|
__asm__ __volatile__
|
||||||
|
(
|
||||||
|
"movq %0, %%rax\n\t" // n -> rax
|
||||||
|
"vmovss %1, %%xmm1\n\t" // alpha -> xmm1
|
||||||
|
"movq %2, %%rsi\n\t" // adress of a -> rsi
|
||||||
|
"movq %3, %%rcx\n\t" // value of lda > rcx
|
||||||
|
"movq %4, %%rdi\n\t" // adress of x -> rdi
|
||||||
|
"movq %5, %%rdx\n\t" // adress of y -> rdx
|
||||||
|
|
||||||
|
"vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero
|
||||||
|
|
||||||
|
".L01LOOP%=: \n\t"
|
||||||
|
"vmovss (%%rdi), %%xmm0 \n\t" // load values of c
|
||||||
|
"addq $4 , %%rdi \n\t" // increment pointer of c
|
||||||
|
|
||||||
|
"vmulss 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp
|
||||||
|
"vaddss %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a
|
||||||
|
|
||||||
|
"dec %%rax \n\t" // n = n -1
|
||||||
|
"jnz .L01LOOP%= \n\t"
|
||||||
|
|
||||||
|
"vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha
|
||||||
|
|
||||||
|
"vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y
|
||||||
|
|
||||||
|
:
|
||||||
|
:
|
||||||
|
"m" (n), // 0
|
||||||
|
"m" (alpha), // 1
|
||||||
|
"m" (a), // 2
|
||||||
|
"m" (lda), // 3
|
||||||
|
"m" (x), // 4
|
||||||
|
"m" (y) // 5
|
||||||
|
: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8",
|
||||||
|
"%xmm0", "%xmm1",
|
||||||
|
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
|
||||||
|
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
|
||||||
|
"memory"
|
||||||
|
);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,473 @@
|
||||||
|
/***************************************************************************
|
||||||
|
Copyright (c) 2014, The OpenBLAS Project
|
||||||
|
All rights reserved.
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
1. Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in
|
||||||
|
the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
3. Neither the name of the OpenBLAS project nor the names of
|
||||||
|
its contributors may be used to endorse or promote products
|
||||||
|
derived from this software without specific prior written permission.
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
||||||
|
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||||
|
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*****************************************************************************/
|
||||||
|
|
||||||
|
static void sgemv_kernel_64( long n, float alpha, float *a, long lda, float *x, float *y)
|
||||||
|
{
|
||||||
|
|
||||||
|
|
||||||
|
float *pre = a + lda*2;
|
||||||
|
|
||||||
|
__asm__ __volatile__
|
||||||
|
(
|
||||||
|
"movq %0, %%rax\n\t" // n -> rax
|
||||||
|
"vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1
|
||||||
|
"movq %2, %%rsi\n\t" // adress of a -> rsi
|
||||||
|
"movq %3, %%rcx\n\t" // value of lda > rcx
|
||||||
|
"movq %4, %%rdi\n\t" // adress of x -> rdi
|
||||||
|
"movq %5, %%rdx\n\t" // adress of y -> rdx
|
||||||
|
"movq %6, %%r8\n\t" // address for prefetch
|
||||||
|
"prefetcht0 (%%r8)\n\t" // Prefetch
|
||||||
|
"prefetcht0 64(%%r8)\n\t" // Prefetch
|
||||||
|
|
||||||
|
"vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero
|
||||||
|
"vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero
|
||||||
|
"vxorps %%ymm10, %%ymm10, %%ymm10\n\t" // set to zero
|
||||||
|
"vxorps %%ymm11, %%ymm11, %%ymm11\n\t" // set to zero
|
||||||
|
"vxorps %%ymm12, %%ymm12, %%ymm12\n\t" // set to zero
|
||||||
|
"vxorps %%ymm13, %%ymm13, %%ymm13\n\t" // set to zero
|
||||||
|
"vxorps %%ymm14, %%ymm14, %%ymm14\n\t" // set to zero
|
||||||
|
"vxorps %%ymm15, %%ymm15, %%ymm15\n\t" // set to zero
|
||||||
|
".align 16 \n\t"
|
||||||
|
".L01LOOP%=: \n\t"
|
||||||
|
"vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c
|
||||||
|
"nop \n\t"
|
||||||
|
"leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch
|
||||||
|
|
||||||
|
"prefetcht0 (%%r8)\n\t" // Prefetch
|
||||||
|
"vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp
|
||||||
|
"vmulps 8*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp
|
||||||
|
"prefetcht0 64(%%r8)\n\t" // Prefetch
|
||||||
|
"vmulps 16*4(%%rsi), %%ymm0, %%ymm6 \n\t" // multiply a and c and add to temp
|
||||||
|
"vmulps 24*4(%%rsi), %%ymm0, %%ymm7 \n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp
|
||||||
|
"vaddps %%ymm9 , %%ymm5, %%ymm9 \n\t" // multiply a and c and add to temp
|
||||||
|
"prefetcht0 128(%%r8)\n\t" // Prefetch
|
||||||
|
"vaddps %%ymm10, %%ymm6, %%ymm10\n\t" // multiply a and c and add to temp
|
||||||
|
"vaddps %%ymm11, %%ymm7, %%ymm11\n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"prefetcht0 192(%%r8)\n\t" // Prefetch
|
||||||
|
"vmulps 32*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp
|
||||||
|
"vmulps 40*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp
|
||||||
|
"vmulps 48*4(%%rsi), %%ymm0, %%ymm6 \n\t" // multiply a and c and add to temp
|
||||||
|
"vmulps 56*4(%%rsi), %%ymm0, %%ymm7 \n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"vaddps %%ymm12, %%ymm4, %%ymm12\n\t" // multiply a and c and add to temp
|
||||||
|
"vaddps %%ymm13, %%ymm5, %%ymm13\n\t" // multiply a and c and add to temp
|
||||||
|
"vaddps %%ymm14, %%ymm6, %%ymm14\n\t" // multiply a and c and add to temp
|
||||||
|
"vaddps %%ymm15, %%ymm7, %%ymm15\n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"addq $4 , %%rdi \n\t" // increment pointer of c
|
||||||
|
"leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a
|
||||||
|
|
||||||
|
"dec %%rax \n\t" // n = n -1
|
||||||
|
"jnz .L01LOOP%= \n\t"
|
||||||
|
|
||||||
|
"vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha
|
||||||
|
"vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha
|
||||||
|
"vmulps %%ymm10, %%ymm1, %%ymm10\n\t" // scale by alpha
|
||||||
|
"vmulps %%ymm11, %%ymm1, %%ymm11\n\t" // scale by alpha
|
||||||
|
"vmulps %%ymm12, %%ymm1, %%ymm12\n\t" // scale by alpha
|
||||||
|
"vmulps %%ymm13, %%ymm1, %%ymm13\n\t" // scale by alpha
|
||||||
|
"vmulps %%ymm14, %%ymm1, %%ymm14\n\t" // scale by alpha
|
||||||
|
"vmulps %%ymm15, %%ymm1, %%ymm15\n\t" // scale by alpha
|
||||||
|
|
||||||
|
"vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y
|
||||||
|
"vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y
|
||||||
|
"vmovups %%ymm10, 16*4(%%rdx) \n\t" // store temp -> y
|
||||||
|
"vmovups %%ymm11, 24*4(%%rdx) \n\t" // store temp -> y
|
||||||
|
"vmovups %%ymm12, 32*4(%%rdx) \n\t" // store temp -> y
|
||||||
|
"vmovups %%ymm13, 40*4(%%rdx) \n\t" // store temp -> y
|
||||||
|
"vmovups %%ymm14, 48*4(%%rdx) \n\t" // store temp -> y
|
||||||
|
"vmovups %%ymm15, 56*4(%%rdx) \n\t" // store temp -> y
|
||||||
|
|
||||||
|
:
|
||||||
|
:
|
||||||
|
"m" (n), // 0
|
||||||
|
"m" (alpha), // 1
|
||||||
|
"m" (a), // 2
|
||||||
|
"m" (lda), // 3
|
||||||
|
"m" (x), // 4
|
||||||
|
"m" (y), // 5
|
||||||
|
"m" (pre) // 6
|
||||||
|
: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc",
|
||||||
|
"%xmm0", "%xmm1",
|
||||||
|
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
|
||||||
|
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
|
||||||
|
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
|
||||||
|
"memory"
|
||||||
|
);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
static void sgemv_kernel_32( long n, float alpha, float *a, long lda, float *x, float *y)
|
||||||
|
{
|
||||||
|
|
||||||
|
|
||||||
|
float *pre = a + lda*3;
|
||||||
|
|
||||||
|
__asm__ __volatile__
|
||||||
|
(
|
||||||
|
"movq %0, %%rax\n\t" // n -> rax
|
||||||
|
"vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1
|
||||||
|
"movq %2, %%rsi\n\t" // adress of a -> rsi
|
||||||
|
"movq %3, %%rcx\n\t" // value of lda > rcx
|
||||||
|
"movq %4, %%rdi\n\t" // adress of x -> rdi
|
||||||
|
"movq %5, %%rdx\n\t" // adress of y -> rdx
|
||||||
|
"movq %6, %%r8\n\t" // address for prefetch
|
||||||
|
"prefetcht0 (%%r8)\n\t" // Prefetch
|
||||||
|
"prefetcht0 64(%%r8)\n\t" // Prefetch
|
||||||
|
|
||||||
|
"vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero
|
||||||
|
"vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero
|
||||||
|
"vxorps %%ymm10, %%ymm10, %%ymm10\n\t" // set to zero
|
||||||
|
"vxorps %%ymm11, %%ymm11, %%ymm11\n\t" // set to zero
|
||||||
|
".align 16 \n\t"
|
||||||
|
".L01LOOP%=: \n\t"
|
||||||
|
"vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c
|
||||||
|
"nop \n\t"
|
||||||
|
"leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch
|
||||||
|
|
||||||
|
"prefetcht0 (%%r8)\n\t" // Prefetch
|
||||||
|
"prefetcht0 64(%%r8)\n\t" // Prefetch
|
||||||
|
|
||||||
|
"vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp
|
||||||
|
"vmulps 8*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp
|
||||||
|
"vmulps 16*4(%%rsi), %%ymm0, %%ymm6 \n\t" // multiply a and c and add to temp
|
||||||
|
"vmulps 24*4(%%rsi), %%ymm0, %%ymm7 \n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp
|
||||||
|
"vaddps %%ymm9 , %%ymm5, %%ymm9 \n\t" // multiply a and c and add to temp
|
||||||
|
"vaddps %%ymm10, %%ymm6, %%ymm10\n\t" // multiply a and c and add to temp
|
||||||
|
"vaddps %%ymm11, %%ymm7, %%ymm11\n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
"addq $4 , %%rdi \n\t" // increment pointer of c
|
||||||
|
"leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a
|
||||||
|
|
||||||
|
"dec %%rax \n\t" // n = n -1
|
||||||
|
"jnz .L01LOOP%= \n\t"
|
||||||
|
|
||||||
|
"vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha
|
||||||
|
"vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha
|
||||||
|
"vmulps %%ymm10, %%ymm1, %%ymm10\n\t" // scale by alpha
|
||||||
|
"vmulps %%ymm11, %%ymm1, %%ymm11\n\t" // scale by alpha
|
||||||
|
|
||||||
|
"vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y
|
||||||
|
"vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y
|
||||||
|
"vmovups %%ymm10, 16*4(%%rdx) \n\t" // store temp -> y
|
||||||
|
"vmovups %%ymm11, 24*4(%%rdx) \n\t" // store temp -> y
|
||||||
|
|
||||||
|
:
|
||||||
|
:
|
||||||
|
"m" (n), // 0
|
||||||
|
"m" (alpha), // 1
|
||||||
|
"m" (a), // 2
|
||||||
|
"m" (lda), // 3
|
||||||
|
"m" (x), // 4
|
||||||
|
"m" (y), // 5
|
||||||
|
"m" (pre) // 6
|
||||||
|
: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc",
|
||||||
|
"%xmm0", "%xmm1",
|
||||||
|
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
|
||||||
|
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
|
||||||
|
"memory"
|
||||||
|
);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y)
|
||||||
|
{
|
||||||
|
|
||||||
|
float *pre = a + lda*3;
|
||||||
|
|
||||||
|
__asm__ __volatile__
|
||||||
|
(
|
||||||
|
"movq %0, %%rax\n\t" // n -> rax
|
||||||
|
"vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1
|
||||||
|
"movq %2, %%rsi\n\t" // adress of a -> rsi
|
||||||
|
"movq %3, %%rcx\n\t" // value of lda > rcx
|
||||||
|
"movq %4, %%rdi\n\t" // adress of x -> rdi
|
||||||
|
"movq %5, %%rdx\n\t" // adress of y -> rdx
|
||||||
|
"movq %6, %%r8\n\t" // address for prefetch
|
||||||
|
"prefetcht0 (%%r8)\n\t" // Prefetch
|
||||||
|
"prefetcht0 64(%%r8)\n\t" // Prefetch
|
||||||
|
|
||||||
|
"vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero
|
||||||
|
"vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero
|
||||||
|
".align 16 \n\t"
|
||||||
|
".L01LOOP%=: \n\t"
|
||||||
|
"vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c
|
||||||
|
"nop \n\t"
|
||||||
|
"leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch
|
||||||
|
|
||||||
|
"prefetcht0 (%%r8)\n\t" // Prefetch
|
||||||
|
|
||||||
|
"vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp
|
||||||
|
"vmulps 8*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp
|
||||||
|
"vaddps %%ymm9 , %%ymm5, %%ymm9 \n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"addq $4 , %%rdi \n\t" // increment pointer of c
|
||||||
|
"leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a
|
||||||
|
|
||||||
|
"dec %%rax \n\t" // n = n -1
|
||||||
|
"jnz .L01LOOP%= \n\t"
|
||||||
|
|
||||||
|
"vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha
|
||||||
|
"vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha
|
||||||
|
|
||||||
|
"vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y
|
||||||
|
"vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y
|
||||||
|
|
||||||
|
:
|
||||||
|
:
|
||||||
|
"m" (n), // 0
|
||||||
|
"m" (alpha), // 1
|
||||||
|
"m" (a), // 2
|
||||||
|
"m" (lda), // 3
|
||||||
|
"m" (x), // 4
|
||||||
|
"m" (y), // 5
|
||||||
|
"m" (pre) // 6
|
||||||
|
: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc",
|
||||||
|
"%xmm0", "%xmm1",
|
||||||
|
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
|
||||||
|
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
|
||||||
|
"memory"
|
||||||
|
);
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void sgemv_kernel_8( long n, float alpha, float *a, long lda, float *x, float *y)
|
||||||
|
{
|
||||||
|
|
||||||
|
__asm__ __volatile__
|
||||||
|
(
|
||||||
|
"movq %0, %%rax\n\t" // n -> rax
|
||||||
|
"vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1
|
||||||
|
"movq %2, %%rsi\n\t" // adress of a -> rsi
|
||||||
|
"movq %3, %%rcx\n\t" // value of lda > rcx
|
||||||
|
"movq %4, %%rdi\n\t" // adress of x -> rdi
|
||||||
|
"movq %5, %%rdx\n\t" // adress of y -> rdx
|
||||||
|
|
||||||
|
"vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero
|
||||||
|
".align 16 \n\t"
|
||||||
|
".L01LOOP%=: \n\t"
|
||||||
|
"vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c
|
||||||
|
|
||||||
|
"vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp
|
||||||
|
"vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"addq $4 , %%rdi \n\t" // increment pointer of c
|
||||||
|
"leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a
|
||||||
|
|
||||||
|
"dec %%rax \n\t" // n = n -1
|
||||||
|
"jnz .L01LOOP%= \n\t"
|
||||||
|
|
||||||
|
"vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha
|
||||||
|
"vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y
|
||||||
|
|
||||||
|
:
|
||||||
|
:
|
||||||
|
"m" (n), // 0
|
||||||
|
"m" (alpha), // 1
|
||||||
|
"m" (a), // 2
|
||||||
|
"m" (lda), // 3
|
||||||
|
"m" (x), // 4
|
||||||
|
"m" (y) // 5
|
||||||
|
: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc",
|
||||||
|
"%xmm0", "%xmm1",
|
||||||
|
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
|
||||||
|
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
|
||||||
|
"memory"
|
||||||
|
);
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void sgemv_kernel_4( long n, float alpha, float *a, long lda, float *x, float *y)
|
||||||
|
{
|
||||||
|
|
||||||
|
|
||||||
|
__asm__ __volatile__
|
||||||
|
(
|
||||||
|
"movq %0, %%rax\n\t" // n -> rax
|
||||||
|
"vbroadcastss %1, %%xmm1\n\t" // alpha -> xmm1
|
||||||
|
"movq %2, %%rsi\n\t" // adress of a -> rsi
|
||||||
|
"movq %3, %%rcx\n\t" // value of lda > rcx
|
||||||
|
"movq %4, %%rdi\n\t" // adress of x -> rdi
|
||||||
|
"movq %5, %%rdx\n\t" // adress of y -> rdx
|
||||||
|
|
||||||
|
"vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero
|
||||||
|
|
||||||
|
".L01LOOP%=: \n\t"
|
||||||
|
"vbroadcastss (%%rdi), %%xmm0 \n\t" // load values of c
|
||||||
|
|
||||||
|
"vmulps 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp
|
||||||
|
"vaddps %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"addq $4 , %%rdi \n\t" // increment pointer of c
|
||||||
|
"leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a
|
||||||
|
|
||||||
|
"dec %%rax \n\t" // n = n -1
|
||||||
|
"jnz .L01LOOP%= \n\t"
|
||||||
|
|
||||||
|
"vmulps %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha
|
||||||
|
|
||||||
|
"vmovups %%xmm12, (%%rdx) \n\t" // store temp -> y
|
||||||
|
|
||||||
|
:
|
||||||
|
:
|
||||||
|
"m" (n), // 0
|
||||||
|
"m" (alpha), // 1
|
||||||
|
"m" (a), // 2
|
||||||
|
"m" (lda), // 3
|
||||||
|
"m" (x), // 4
|
||||||
|
"m" (y) // 5
|
||||||
|
: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8",
|
||||||
|
"%xmm0", "%xmm1",
|
||||||
|
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
|
||||||
|
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
|
||||||
|
"memory"
|
||||||
|
);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
static void sgemv_kernel_2( long n, float alpha, float *a, long lda, float *x, float *y)
|
||||||
|
{
|
||||||
|
|
||||||
|
|
||||||
|
__asm__ __volatile__
|
||||||
|
(
|
||||||
|
"movq %0, %%rax\n\t" // n -> rax
|
||||||
|
"vmovss %1, %%xmm1\n\t" // alpha -> xmm1
|
||||||
|
"movq %2, %%rsi\n\t" // adress of a -> rsi
|
||||||
|
"movq %3, %%rcx\n\t" // value of lda > rcx
|
||||||
|
"movq %4, %%rdi\n\t" // adress of x -> rdi
|
||||||
|
"movq %5, %%rdx\n\t" // adress of y -> rdx
|
||||||
|
|
||||||
|
"vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero
|
||||||
|
"vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero
|
||||||
|
|
||||||
|
".L01LOOP%=: \n\t"
|
||||||
|
"vmovss (%%rdi), %%xmm0 \n\t" // load values of c
|
||||||
|
|
||||||
|
"vmulps 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp
|
||||||
|
"vmulps 1*4(%%rsi), %%xmm0, %%xmm5 \n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"vaddps %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp
|
||||||
|
"vaddps %%xmm13, %%xmm5, %%xmm13 \n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"addq $4 , %%rdi \n\t" // increment pointer of c
|
||||||
|
"leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a
|
||||||
|
|
||||||
|
"dec %%rax \n\t" // n = n -1
|
||||||
|
"jnz .L01LOOP%= \n\t"
|
||||||
|
|
||||||
|
"vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha
|
||||||
|
"vmulss %%xmm13, %%xmm1, %%xmm13\n\t" // scale by alpha
|
||||||
|
|
||||||
|
"vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y
|
||||||
|
"vmovss %%xmm13, 4(%%rdx) \n\t" // store temp -> y
|
||||||
|
|
||||||
|
:
|
||||||
|
:
|
||||||
|
"m" (n), // 0
|
||||||
|
"m" (alpha), // 1
|
||||||
|
"m" (a), // 2
|
||||||
|
"m" (lda), // 3
|
||||||
|
"m" (x), // 4
|
||||||
|
"m" (y) // 5
|
||||||
|
: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8",
|
||||||
|
"%xmm0", "%xmm1",
|
||||||
|
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
|
||||||
|
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
|
||||||
|
"memory"
|
||||||
|
);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
static void sgemv_kernel_1( long n, float alpha, float *a, long lda, float *x, float *y)
|
||||||
|
{
|
||||||
|
|
||||||
|
|
||||||
|
__asm__ __volatile__
|
||||||
|
(
|
||||||
|
"movq %0, %%rax\n\t" // n -> rax
|
||||||
|
"vmovss %1, %%xmm1\n\t" // alpha -> xmm1
|
||||||
|
"movq %2, %%rsi\n\t" // adress of a -> rsi
|
||||||
|
"movq %3, %%rcx\n\t" // value of lda > rcx
|
||||||
|
"movq %4, %%rdi\n\t" // adress of x -> rdi
|
||||||
|
"movq %5, %%rdx\n\t" // adress of y -> rdx
|
||||||
|
|
||||||
|
"vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero
|
||||||
|
|
||||||
|
".L01LOOP%=: \n\t"
|
||||||
|
"vmovss (%%rdi), %%xmm0 \n\t" // load values of c
|
||||||
|
"addq $4 , %%rdi \n\t" // increment pointer of c
|
||||||
|
|
||||||
|
"vmulss 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp
|
||||||
|
"vaddss %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a
|
||||||
|
|
||||||
|
"dec %%rax \n\t" // n = n -1
|
||||||
|
"jnz .L01LOOP%= \n\t"
|
||||||
|
|
||||||
|
"vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha
|
||||||
|
|
||||||
|
"vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y
|
||||||
|
|
||||||
|
:
|
||||||
|
:
|
||||||
|
"m" (n), // 0
|
||||||
|
"m" (alpha), // 1
|
||||||
|
"m" (a), // 2
|
||||||
|
"m" (lda), // 3
|
||||||
|
"m" (x), // 4
|
||||||
|
"m" (y) // 5
|
||||||
|
: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8",
|
||||||
|
"%xmm0", "%xmm1",
|
||||||
|
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
|
||||||
|
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
|
||||||
|
"memory"
|
||||||
|
);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,232 @@
|
||||||
|
/***************************************************************************
|
||||||
|
Copyright (c) 2014, The OpenBLAS Project
|
||||||
|
All rights reserved.
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
1. Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in
|
||||||
|
the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
3. Neither the name of the OpenBLAS project nor the names of
|
||||||
|
its contributors may be used to endorse or promote products
|
||||||
|
derived from this software without specific prior written permission.
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
||||||
|
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||||
|
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*****************************************************************************/
|
||||||
|
|
||||||
|
|
||||||
|
#include "common.h"
|
||||||
|
|
||||||
|
#if defined(BULLDOZER) || defined(PILEDRIVER)
|
||||||
|
#include "sgemv_t_microk_bulldozer.c"
|
||||||
|
#elif defined(HASWELL)
|
||||||
|
#include "sgemv_t_microk_haswell.c"
|
||||||
|
#else
|
||||||
|
#include "sgemv_t_microk_sandy.c"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
static void copy_x(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_src)
|
||||||
|
{
|
||||||
|
BLASLONG i;
|
||||||
|
for ( i=0; i<n; i++ )
|
||||||
|
{
|
||||||
|
*dest = *src;
|
||||||
|
dest++;
|
||||||
|
src += inc_src;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void sgemv_kernel_1( BLASLONG n, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, FLOAT *y)
|
||||||
|
{
|
||||||
|
|
||||||
|
FLOAT register temp0 = 0.0;
|
||||||
|
BLASLONG i;
|
||||||
|
for ( i=0; i<n ; i++)
|
||||||
|
{
|
||||||
|
temp0 += a[i] * x[i];
|
||||||
|
}
|
||||||
|
temp0 *= alpha ;
|
||||||
|
*y += temp0;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLONG lda, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *buffer)
|
||||||
|
{
|
||||||
|
BLASLONG i;
|
||||||
|
BLASLONG j;
|
||||||
|
FLOAT *a_ptr;
|
||||||
|
FLOAT *x_ptr;
|
||||||
|
FLOAT *y_ptr;
|
||||||
|
FLOAT *a_ptrl;
|
||||||
|
BLASLONG m1;
|
||||||
|
BLASLONG register m2;
|
||||||
|
FLOAT *xbuffer;
|
||||||
|
xbuffer = buffer;
|
||||||
|
BLASLONG register Mblock;
|
||||||
|
|
||||||
|
m1 = m / 1024 ;
|
||||||
|
m2 = m % 1024 ;
|
||||||
|
|
||||||
|
x_ptr = x;
|
||||||
|
a_ptr = a;
|
||||||
|
|
||||||
|
for (j=0; j<m1; j++)
|
||||||
|
{
|
||||||
|
|
||||||
|
if ( inc_x == 1 )
|
||||||
|
xbuffer = x_ptr;
|
||||||
|
else
|
||||||
|
copy_x(1024,x_ptr,xbuffer,inc_x);
|
||||||
|
|
||||||
|
y_ptr = y;
|
||||||
|
a_ptrl = a_ptr;
|
||||||
|
|
||||||
|
for(i = 0; i<n; i++ )
|
||||||
|
{
|
||||||
|
sgemv_kernel_16(1024,alpha,a_ptrl,lda,xbuffer,y_ptr);
|
||||||
|
y_ptr += inc_y;
|
||||||
|
a_ptrl += lda;
|
||||||
|
}
|
||||||
|
a_ptr += 1024;
|
||||||
|
x_ptr += 1024 * inc_x;
|
||||||
|
}
|
||||||
|
|
||||||
|
if ( m2 == 0 ) return(0);
|
||||||
|
|
||||||
|
Mblock = 512;
|
||||||
|
while ( Mblock >= 16 )
|
||||||
|
{
|
||||||
|
if ( m2 & Mblock)
|
||||||
|
{
|
||||||
|
|
||||||
|
if ( inc_x == 1 )
|
||||||
|
xbuffer = x_ptr;
|
||||||
|
else
|
||||||
|
copy_x(Mblock,x_ptr,xbuffer,inc_x);
|
||||||
|
|
||||||
|
y_ptr = y;
|
||||||
|
a_ptrl = a_ptr;
|
||||||
|
|
||||||
|
for(i = 0; i<n; i++ )
|
||||||
|
{
|
||||||
|
sgemv_kernel_16(Mblock,alpha,a_ptrl,lda,xbuffer,y_ptr);
|
||||||
|
y_ptr += inc_y;
|
||||||
|
a_ptrl += lda;
|
||||||
|
}
|
||||||
|
a_ptr += Mblock;
|
||||||
|
x_ptr += Mblock * inc_x;
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
Mblock /= 2;
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if ( m2 & Mblock)
|
||||||
|
{
|
||||||
|
|
||||||
|
if ( inc_x == 1 )
|
||||||
|
xbuffer = x_ptr;
|
||||||
|
else
|
||||||
|
copy_x(Mblock,x_ptr,xbuffer,inc_x);
|
||||||
|
|
||||||
|
y_ptr = y;
|
||||||
|
a_ptrl = a_ptr;
|
||||||
|
|
||||||
|
for(i = 0; i<n; i++ )
|
||||||
|
{
|
||||||
|
sgemv_kernel_1(Mblock,alpha,a_ptrl,lda,xbuffer,y_ptr);
|
||||||
|
y_ptr += inc_y;
|
||||||
|
a_ptrl += lda;
|
||||||
|
}
|
||||||
|
a_ptr += Mblock;
|
||||||
|
x_ptr += Mblock * inc_x;
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
Mblock /= 2;
|
||||||
|
|
||||||
|
|
||||||
|
if ( m2 & Mblock)
|
||||||
|
{
|
||||||
|
|
||||||
|
if ( inc_x == 1 )
|
||||||
|
xbuffer = x_ptr;
|
||||||
|
else
|
||||||
|
copy_x(Mblock,x_ptr,xbuffer,inc_x);
|
||||||
|
|
||||||
|
y_ptr = y;
|
||||||
|
a_ptrl = a_ptr;
|
||||||
|
|
||||||
|
for(i = 0; i<n; i++ )
|
||||||
|
{
|
||||||
|
sgemv_kernel_1(Mblock,alpha,a_ptrl,lda,xbuffer,y_ptr);
|
||||||
|
y_ptr += inc_y;
|
||||||
|
a_ptrl += lda;
|
||||||
|
}
|
||||||
|
a_ptr += Mblock;
|
||||||
|
x_ptr += Mblock * inc_x;
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
Mblock /= 2;
|
||||||
|
|
||||||
|
if ( m2 & Mblock)
|
||||||
|
{
|
||||||
|
|
||||||
|
if ( inc_x == 1 )
|
||||||
|
xbuffer = x_ptr;
|
||||||
|
else
|
||||||
|
copy_x(Mblock,x_ptr,xbuffer,inc_x);
|
||||||
|
|
||||||
|
y_ptr = y;
|
||||||
|
a_ptrl = a_ptr;
|
||||||
|
|
||||||
|
for(i = 0; i<n; i++ )
|
||||||
|
{
|
||||||
|
sgemv_kernel_1(Mblock,alpha,a_ptrl,lda,xbuffer,y_ptr);
|
||||||
|
y_ptr += inc_y;
|
||||||
|
a_ptrl += lda;
|
||||||
|
}
|
||||||
|
a_ptr += Mblock;
|
||||||
|
x_ptr += Mblock * inc_x;
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
Mblock /= 2;
|
||||||
|
|
||||||
|
if ( m2 & Mblock)
|
||||||
|
{
|
||||||
|
|
||||||
|
xbuffer = x_ptr;
|
||||||
|
|
||||||
|
y_ptr = y;
|
||||||
|
a_ptrl = a_ptr;
|
||||||
|
|
||||||
|
for(i = 0; i<n; i++ )
|
||||||
|
{
|
||||||
|
sgemv_kernel_1(Mblock,alpha,a_ptrl,lda,xbuffer,y_ptr);
|
||||||
|
y_ptr += inc_y;
|
||||||
|
a_ptrl += lda;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,99 @@
|
||||||
|
/***************************************************************************
|
||||||
|
Copyright (c) 2014, The OpenBLAS Project
|
||||||
|
All rights reserved.
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
1. Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in
|
||||||
|
the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
3. Neither the name of the OpenBLAS project nor the names of
|
||||||
|
its contributors may be used to endorse or promote products
|
||||||
|
derived from this software without specific prior written permission.
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
||||||
|
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||||
|
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*****************************************************************************/
|
||||||
|
|
||||||
|
static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y)
|
||||||
|
{
|
||||||
|
|
||||||
|
//n = n / 16;
|
||||||
|
|
||||||
|
__asm__ __volatile__
|
||||||
|
(
|
||||||
|
"movq %0, %%rax\n\t" // n -> rax
|
||||||
|
"vmovss %1, %%xmm1\n\t" // alpha -> xmm1
|
||||||
|
"movq %2, %%rsi\n\t" // adress of a -> rsi
|
||||||
|
"movq %3, %%rcx\n\t" // value of lda > rcx
|
||||||
|
"movq %4, %%rdi\n\t" // adress of x -> rdi
|
||||||
|
"movq %5, %%rdx\n\t" // adress of y -> rdx
|
||||||
|
|
||||||
|
"leaq (, %%rcx,4), %%rcx \n\t" // scale lda by size of float
|
||||||
|
"leaq (%%rsi,%%rcx,1), %%r8 \n\t" // pointer to next line
|
||||||
|
|
||||||
|
"vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero
|
||||||
|
"vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero
|
||||||
|
"vxorps %%xmm14, %%xmm14, %%xmm14\n\t" // set to zero
|
||||||
|
"vxorps %%xmm15, %%xmm15, %%xmm15\n\t" // set to zero
|
||||||
|
|
||||||
|
"sarq $4, %%rax \n\t" // n = n / 16
|
||||||
|
|
||||||
|
".align 16 \n\t"
|
||||||
|
".L01LOOP%=: \n\t"
|
||||||
|
// "prefetcht0 512(%%rsi) \n\t"
|
||||||
|
"prefetcht0 (%%r8) \n\t" //prefetch next line of a
|
||||||
|
"vmovups (%%rsi), %%xmm4 \n\t"
|
||||||
|
"vmovups 4*4(%%rsi), %%xmm5 \n\t"
|
||||||
|
"vmovups 8*4(%%rsi), %%xmm6 \n\t"
|
||||||
|
"vmovups 12*4(%%rsi), %%xmm7 \n\t"
|
||||||
|
|
||||||
|
"vfmaddps %%xmm12, 0*4(%%rdi), %%xmm4, %%xmm12\n\t" // multiply a and c and add to temp
|
||||||
|
"vfmaddps %%xmm13, 4*4(%%rdi), %%xmm5, %%xmm13\n\t" // multiply a and c and add to temp
|
||||||
|
"vfmaddps %%xmm14, 8*4(%%rdi), %%xmm6, %%xmm14\n\t" // multiply a and c and add to temp
|
||||||
|
"vfmaddps %%xmm15, 12*4(%%rdi), %%xmm7, %%xmm15\n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"addq $16*4 , %%r8 \n\t" // increment prefetch pointer
|
||||||
|
"addq $16*4 , %%rsi \n\t" // increment pointer of a
|
||||||
|
"addq $16*4 , %%rdi \n\t" // increment pointer of c
|
||||||
|
"dec %%rax \n\t" // n = n -1
|
||||||
|
"jnz .L01LOOP%= \n\t"
|
||||||
|
|
||||||
|
"vaddps %%xmm12, %%xmm14, %%xmm12\n\t"
|
||||||
|
"vaddps %%xmm13, %%xmm15, %%xmm13\n\t"
|
||||||
|
"vaddps %%xmm12, %%xmm13, %%xmm12\n\t"
|
||||||
|
"vhaddps %%xmm12, %%xmm12, %%xmm12\n\t"
|
||||||
|
"vhaddps %%xmm12, %%xmm12, %%xmm12\n\t"
|
||||||
|
|
||||||
|
"vfmaddss (%%rdx), %%xmm12, %%xmm1, %%xmm12\n\t"
|
||||||
|
"vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y
|
||||||
|
|
||||||
|
:
|
||||||
|
:
|
||||||
|
"m" (n), // 0
|
||||||
|
"m" (alpha), // 1
|
||||||
|
"m" (a), // 2
|
||||||
|
"m" (lda), // 3
|
||||||
|
"m" (x), // 4
|
||||||
|
"m" (y) // 5
|
||||||
|
: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8",
|
||||||
|
"%xmm0", "%xmm1",
|
||||||
|
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
|
||||||
|
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
|
||||||
|
"memory"
|
||||||
|
);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,100 @@
|
||||||
|
/***************************************************************************
|
||||||
|
Copyright (c) 2014, The OpenBLAS Project
|
||||||
|
All rights reserved.
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
1. Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in
|
||||||
|
the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
3. Neither the name of the OpenBLAS project nor the names of
|
||||||
|
its contributors may be used to endorse or promote products
|
||||||
|
derived from this software without specific prior written permission.
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
||||||
|
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||||
|
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*****************************************************************************/
|
||||||
|
|
||||||
|
static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y)
|
||||||
|
{
|
||||||
|
|
||||||
|
//n = n / 16;
|
||||||
|
|
||||||
|
__asm__ __volatile__
|
||||||
|
(
|
||||||
|
"movq %0, %%rax\n\t" // n -> rax
|
||||||
|
"vmovss %1, %%xmm1\n\t" // alpha -> xmm1
|
||||||
|
"movq %2, %%rsi\n\t" // adress of a -> rsi
|
||||||
|
"movq %3, %%rcx\n\t" // value of lda > rcx
|
||||||
|
"movq %4, %%rdi\n\t" // adress of x -> rdi
|
||||||
|
"movq %5, %%rdx\n\t" // adress of y -> rdx
|
||||||
|
|
||||||
|
"leaq (, %%rcx,4), %%rcx \n\t" // scale lda by size of float
|
||||||
|
"leaq (%%rsi,%%rcx,1), %%r8 \n\t" // pointer to next line
|
||||||
|
|
||||||
|
"vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero
|
||||||
|
"vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero
|
||||||
|
"vxorps %%xmm14, %%xmm14, %%xmm14\n\t" // set to zero
|
||||||
|
"vxorps %%xmm15, %%xmm15, %%xmm15\n\t" // set to zero
|
||||||
|
|
||||||
|
"sarq $4, %%rax \n\t" // n = n / 16
|
||||||
|
|
||||||
|
".align 16 \n\t"
|
||||||
|
".L01LOOP%=: \n\t"
|
||||||
|
// "prefetcht0 512(%%rsi) \n\t"
|
||||||
|
"prefetcht0 (%%r8) \n\t" //prefetch next line of a
|
||||||
|
"vmovups (%%rsi), %%xmm4 \n\t"
|
||||||
|
"vmovups 4*4(%%rsi), %%xmm5 \n\t"
|
||||||
|
"vmovups 8*4(%%rsi), %%xmm6 \n\t"
|
||||||
|
"vmovups 12*4(%%rsi), %%xmm7 \n\t"
|
||||||
|
|
||||||
|
"vfmadd231ps 0*4(%%rdi), %%xmm4, %%xmm12\n\t" // multiply a and c and add to temp
|
||||||
|
"vfmadd231ps 4*4(%%rdi), %%xmm5, %%xmm13\n\t" // multiply a and c and add to temp
|
||||||
|
"vfmadd231ps 8*4(%%rdi), %%xmm6, %%xmm14\n\t" // multiply a and c and add to temp
|
||||||
|
"vfmadd231ps 12*4(%%rdi), %%xmm7, %%xmm15\n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"addq $16*4 , %%r8 \n\t" // increment prefetch pointer
|
||||||
|
"addq $16*4 , %%rsi \n\t" // increment pointer of a
|
||||||
|
"addq $16*4 , %%rdi \n\t" // increment pointer of c
|
||||||
|
"dec %%rax \n\t" // n = n -1
|
||||||
|
"jnz .L01LOOP%= \n\t"
|
||||||
|
|
||||||
|
"vaddps %%xmm12, %%xmm14, %%xmm12\n\t"
|
||||||
|
"vaddps %%xmm13, %%xmm15, %%xmm13\n\t"
|
||||||
|
"vaddps %%xmm12, %%xmm13, %%xmm12\n\t"
|
||||||
|
"vhaddps %%xmm12, %%xmm12, %%xmm12\n\t"
|
||||||
|
"vhaddps %%xmm12, %%xmm12, %%xmm12\n\t"
|
||||||
|
|
||||||
|
"vmulss %%xmm12, %%xmm1, %%xmm12\n\t"
|
||||||
|
"vaddss (%%rdx), %%xmm12,%%xmm12\n\t"
|
||||||
|
"vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y
|
||||||
|
|
||||||
|
:
|
||||||
|
:
|
||||||
|
"m" (n), // 0
|
||||||
|
"m" (alpha), // 1
|
||||||
|
"m" (a), // 2
|
||||||
|
"m" (lda), // 3
|
||||||
|
"m" (x), // 4
|
||||||
|
"m" (y) // 5
|
||||||
|
: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8",
|
||||||
|
"%xmm0", "%xmm1",
|
||||||
|
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
|
||||||
|
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
|
||||||
|
"memory"
|
||||||
|
);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,106 @@
|
||||||
|
/***************************************************************************
|
||||||
|
Copyright (c) 2014, The OpenBLAS Project
|
||||||
|
All rights reserved.
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
1. Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in
|
||||||
|
the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
3. Neither the name of the OpenBLAS project nor the names of
|
||||||
|
its contributors may be used to endorse or promote products
|
||||||
|
derived from this software without specific prior written permission.
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||||
|
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||||
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
||||||
|
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||||
|
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||||
|
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||||
|
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||||
|
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*****************************************************************************/
|
||||||
|
|
||||||
|
static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y)
|
||||||
|
{
|
||||||
|
|
||||||
|
//n = n / 16;
|
||||||
|
|
||||||
|
__asm__ __volatile__
|
||||||
|
(
|
||||||
|
"movq %0, %%rax\n\t" // n -> rax
|
||||||
|
"vmovss %1, %%xmm1\n\t" // alpha -> xmm1
|
||||||
|
"movq %2, %%rsi\n\t" // adress of a -> rsi
|
||||||
|
"movq %3, %%rcx\n\t" // value of lda > rcx
|
||||||
|
"movq %4, %%rdi\n\t" // adress of x -> rdi
|
||||||
|
"movq %5, %%rdx\n\t" // adress of y -> rdx
|
||||||
|
|
||||||
|
"leaq (, %%rcx,4), %%rcx \n\t" // scale lda by size of float
|
||||||
|
"leaq (%%rsi,%%rcx,1), %%r8 \n\t" // pointer to next line
|
||||||
|
|
||||||
|
"vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero
|
||||||
|
"vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero
|
||||||
|
"vxorps %%xmm14, %%xmm14, %%xmm14\n\t" // set to zero
|
||||||
|
"vxorps %%xmm15, %%xmm15, %%xmm15\n\t" // set to zero
|
||||||
|
|
||||||
|
"sarq $4, %%rax \n\t" // n = n / 16
|
||||||
|
|
||||||
|
".align 16 \n\t"
|
||||||
|
".L01LOOP%=: \n\t"
|
||||||
|
// "prefetcht0 512(%%rsi) \n\t"
|
||||||
|
"prefetcht0 (%%r8) \n\t" //prefetch next line of a
|
||||||
|
"vmovups (%%rsi), %%xmm4 \n\t"
|
||||||
|
"vmovups 4*4(%%rsi), %%xmm5 \n\t"
|
||||||
|
"vmovups 8*4(%%rsi), %%xmm6 \n\t"
|
||||||
|
"vmovups 12*4(%%rsi), %%xmm7 \n\t"
|
||||||
|
|
||||||
|
"vmulps 0*4(%%rdi), %%xmm4, %%xmm8 \n\t" // multiply a and c and add to temp
|
||||||
|
"vmulps 4*4(%%rdi), %%xmm5, %%xmm9 \n\t" // multiply a and c and add to temp
|
||||||
|
"vmulps 8*4(%%rdi), %%xmm6, %%xmm10\n\t" // multiply a and c and add to temp
|
||||||
|
"vmulps 12*4(%%rdi), %%xmm7, %%xmm11\n\t" // multiply a and c and add to temp
|
||||||
|
|
||||||
|
"vaddps %%xmm12, %%xmm8 , %%xmm12\n\t"
|
||||||
|
"vaddps %%xmm13, %%xmm9 , %%xmm13\n\t"
|
||||||
|
"vaddps %%xmm14, %%xmm10, %%xmm14\n\t"
|
||||||
|
"vaddps %%xmm15, %%xmm11, %%xmm15\n\t"
|
||||||
|
|
||||||
|
"addq $16*4 , %%r8 \n\t" // increment prefetch pointer
|
||||||
|
"addq $16*4 , %%rsi \n\t" // increment pointer of a
|
||||||
|
"addq $16*4 , %%rdi \n\t" // increment pointer of c
|
||||||
|
"dec %%rax \n\t" // n = n -1
|
||||||
|
"jnz .L01LOOP%= \n\t"
|
||||||
|
|
||||||
|
"vaddps %%xmm12, %%xmm14, %%xmm12\n\t"
|
||||||
|
"vaddps %%xmm13, %%xmm15, %%xmm13\n\t"
|
||||||
|
"vaddps %%xmm12, %%xmm13, %%xmm12\n\t"
|
||||||
|
"vhaddps %%xmm12, %%xmm12, %%xmm12\n\t"
|
||||||
|
"vhaddps %%xmm12, %%xmm12, %%xmm12\n\t"
|
||||||
|
|
||||||
|
"vmulss %%xmm12, %%xmm1, %%xmm12 \n\t"
|
||||||
|
"vaddss (%%rdx), %%xmm12, %%xmm12\n\t"
|
||||||
|
"vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y
|
||||||
|
|
||||||
|
:
|
||||||
|
:
|
||||||
|
"m" (n), // 0
|
||||||
|
"m" (alpha), // 1
|
||||||
|
"m" (a), // 2
|
||||||
|
"m" (lda), // 3
|
||||||
|
"m" (x), // 4
|
||||||
|
"m" (y) // 5
|
||||||
|
: "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc",
|
||||||
|
"%xmm0", "%xmm1",
|
||||||
|
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
|
||||||
|
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
|
||||||
|
"%xmm12", "%xmm13", "%xmm14", "%xmm15",
|
||||||
|
"memory"
|
||||||
|
);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue