From bcb115b55b92b75d862d743629b29ac0b84d2fd0 Mon Sep 17 00:00:00 2001 From: wernsaar Date: Tue, 15 Jul 2014 13:35:36 +0200 Subject: [PATCH 01/15] added benchmark for gemv --- benchmark/Makefile | 77 ++++++++++++++- benchmark/gemv.c | 229 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 302 insertions(+), 4 deletions(-) create mode 100644 benchmark/gemv.c diff --git a/benchmark/Makefile b/benchmark/Makefile index e3910ee96..db183c8ad 100644 --- a/benchmark/Makefile +++ b/benchmark/Makefile @@ -2,12 +2,12 @@ TOPDIR = .. include $(TOPDIR)/Makefile.system # ACML standard -ACML=/opt/acml5.3.1/gfortran64_mp/lib -LIBACML = -fopenmp $(ACML)/libacml_mp.a -lgfortran -lm +#ACML=/opt/acml5.3.1/gfortran64_mp/lib +#LIBACML = -fopenmp $(ACML)/libacml_mp.a -lgfortran -lm # ACML custom -#ACML=/opt/pb/acml-5-3-1-gfortran-64bit/gfortran64_fma4_mp/lib -#LIBACML = -fopenmp $(ACML)/libacml_mp.a -lgfortran -lm +ACML=/opt/pb/acml-5-3-1-gfortran-64bit/gfortran64_fma4_mp/lib +LIBACML = -fopenmp $(ACML)/libacml_mp.a -lgfortran -lm # Atlas Ubuntu #ATLAS=/usr/lib/atlas-base @@ -37,6 +37,7 @@ goto :: slinpack.goto dlinpack.goto clinpack.goto zlinpack.goto \ chemm.goto zhemm.goto \ cherk.goto zherk.goto \ cher2k.goto zher2k.goto \ + sgemv.goto dgemv.goto cgemv.goto zgemv.goto \ ssymm.goto dsymm.goto csymm.goto zsymm.goto acml :: slinpack.acml dlinpack.acml clinpack.acml zlinpack.acml \ @@ -49,6 +50,7 @@ acml :: slinpack.acml dlinpack.acml clinpack.acml zlinpack.acml \ chemm.acml zhemm.acml \ cherk.acml zherk.acml \ cher2k.acml zher2k.acml \ + sgemv.acml dgemv.acml cgemv.acml zgemv.acml \ ssymm.acml dsymm.acml csymm.acml zsymm.acml atlas :: slinpack.atlas dlinpack.atlas clinpack.atlas zlinpack.atlas \ @@ -61,6 +63,7 @@ atlas :: slinpack.atlas dlinpack.atlas clinpack.atlas zlinpack.atlas \ chemm.atlas zhemm.atlas \ cherk.atlas zherk.atlas \ cher2k.atlas zher2k.atlas \ + sgemv.atlas dgemv.atlas cgemv.atlas zgemv.atlas \ ssymm.atlas dsymm.atlas csymm.atlas zsymm.atlas mkl :: slinpack.mkl dlinpack.mkl clinpack.mkl zlinpack.mkl \ @@ -73,6 +76,7 @@ mkl :: slinpack.mkl dlinpack.mkl clinpack.mkl zlinpack.mkl \ chemm.mkl zhemm.mkl \ cherk.mkl zherk.mkl \ cher2k.mkl zher2k.mkl \ + sgemv.mkl dgemv.mkl cgemv.mkl zgemv.mkl \ ssymm.mkl dsymm.mkl csymm.mkl zsymm.mkl all :: goto atlas acml mkl @@ -601,6 +605,61 @@ zher2k.atlas : zher2k.$(SUFFIX) zher2k.mkl : zher2k.$(SUFFIX) -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) +##################################### Sgemv #################################################### +sgemv.goto : sgemv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) -lm + +sgemv.acml : sgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sgemv.atlas : sgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sgemv.mkl : sgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Dgemv #################################################### +dgemv.goto : dgemv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) -lm + +dgemv.acml : dgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dgemv.atlas : dgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dgemv.mkl : dgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + 
+##################################### Cgemv #################################################### + +cgemv.goto : cgemv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) -lm + +cgemv.acml : cgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgemv.atlas : cgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgemv.mkl : cgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zgemv #################################################### + +zgemv.goto : zgemv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) -lm + +zgemv.acml : zgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgemv.atlas : zgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgemv.mkl : zgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + + ################################################################################################### slinpack.$(SUFFIX) : linpack.c @@ -717,7 +776,17 @@ cher2k.$(SUFFIX) : her2k.c zher2k.$(SUFFIX) : her2k.c $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ +sgemv.$(SUFFIX) : gemv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ +dgemv.$(SUFFIX) : gemv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +cgemv.$(SUFFIX) : gemv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zgemv.$(SUFFIX) : gemv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ clean :: diff --git a/benchmark/gemv.c b/benchmark/gemv.c new file mode 100644 index 000000000..e26a36ac1 --- /dev/null +++ b/benchmark/gemv.c @@ -0,0 +1,229 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#include +#include +#ifdef __CYGWIN32__ +#include +#endif +#include "common.h" + + +#undef GEMV + +#ifndef COMPLEX + +#ifdef DOUBLE +#define GEMV BLASFUNC(dgemv) +#else +#define GEMV BLASFUNC(sgemv) +#endif + +#else + +#ifdef DOUBLE +#define GEMV BLASFUNC(zgemv) +#else +#define GEMV BLASFUNC(cgemv) +#endif + +#endif + +#if defined(__WIN32__) || defined(__WIN64__) + +#ifndef DELTA_EPOCH_IN_MICROSECS +#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL +#endif + +int gettimeofday(struct timeval *tv, void *tz){ + + FILETIME ft; + unsigned __int64 tmpres = 0; + static int tzflag; + + if (NULL != tv) + { + GetSystemTimeAsFileTime(&ft); + + tmpres |= ft.dwHighDateTime; + tmpres <<= 32; + tmpres |= ft.dwLowDateTime; + + /*converting file time to unix epoch*/ + tmpres /= 10; /*convert into microseconds*/ + tmpres -= DELTA_EPOCH_IN_MICROSECS; + tv->tv_sec = (long)(tmpres / 1000000UL); + tv->tv_usec = (long)(tmpres % 1000000UL); + } + + return 0; +} + +#endif + +#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 + +static void *huge_malloc(BLASLONG size){ + int shmid; + void *address; + +#ifndef SHM_HUGETLB +#define SHM_HUGETLB 04000 +#endif + + if ((shmid =shmget(IPC_PRIVATE, + (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), + SHM_HUGETLB | IPC_CREAT |0600)) < 0) { + printf( "Memory allocation failed(shmget).\n"); + exit(1); + } + + address = shmat(shmid, NULL, SHM_RND); + + if ((BLASLONG)address == -1){ + printf( "Memory allocation failed(shmat).\n"); + exit(1); + } + + shmctl(shmid, IPC_RMID, 0); + + return address; +} + +#define malloc huge_malloc + +#endif + +int MAIN__(int argc, char *argv[]){ + + FLOAT *a, *x, *y; + FLOAT alpha[] = {1.0, 1.0}; + FLOAT beta [] = {1.0, 1.0}; + char trans='N'; + blasint m, i, j; + blasint inc_x=1,inc_y=1; + blasint n=0; + int has_param_n = 0; + int loops = 1; + int l; + char *p; + + int from = 1; + int to = 200; + int step = 1; + + struct timeval start, stop; + double time1,timeg; + + argc--;argv++; + + if (argc > 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p); + if ((p = getenv("OPENBLAS_INCX"))) inc_x = atoi(p); + if ((p = getenv("OPENBLAS_INCY"))) inc_y = atoi(p); + if ((p = getenv("OPENBLAS_TRANS"))) trans=*p; + if ((p = getenv("OPENBLAS_PARAM_N"))) { + n = atoi(p); + if ((n>0) && (n<=to)) has_param_n = 1; + } + + if ( has_param_n == 1 ) + fprintf(stderr, "From : %3d To : %3d Step = %3d Trans = '%c' N = %d Inc_x = %d Inc_y = %d Loops = %d\n", from, to, step,trans,n,inc_x,inc_y,loops); + else + fprintf(stderr, "From : %3d To : %3d Step = %3d Trans = '%c' Inc_x = %d Inc_y = %d Loops = %d\n", from, to, step,trans,inc_x,inc_y,loops); + + if (( a = (FLOAT *)malloc(sizeof(FLOAT) * to * to * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( y = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_y) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + +#ifdef linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for(m = from; m <= to; m += step) + { + + timeg=0; + + if ( has_param_n == 0 ) n = m; + + fprintf(stderr, " %6dx%d : ", (int)m,(int)n); + + for(j = 0; j < m; j++){ + for(i = 0; i 
< n * COMPSIZE; i++){ + a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + } + + + for (l=0; l Date: Tue, 15 Jul 2014 14:41:35 +0200 Subject: [PATCH 02/15] adjusted number of threads for small size --- interface/gemm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/interface/gemm.c b/interface/gemm.c index 07fea153c..74908e842 100644 --- a/interface/gemm.c +++ b/interface/gemm.c @@ -405,11 +405,11 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS #ifndef COMPLEX double MNK = (double) args.m * (double) args.n * (double) args.k; - if ( MNK <= (1024.0 * (double) GEMM_MULTITHREAD_THRESHOLD) ) + if ( MNK <= (16.0 * 1024.0 * (double) GEMM_MULTITHREAD_THRESHOLD) ) nthreads_max = 1; else { - if ( MNK <= (65536.0 * (double) GEMM_MULTITHREAD_THRESHOLD) ) + if ( MNK <= (2.0 * 65536.0 * (double) GEMM_MULTITHREAD_THRESHOLD) ) { nthreads_max = 4; if ( args.m < 16 * GEMM_MULTITHREAD_THRESHOLD ) From b985cea65dbb4d60b51d204fd6144741fb9a7f0b Mon Sep 17 00:00:00 2001 From: wernsaar Date: Tue, 15 Jul 2014 16:04:46 +0200 Subject: [PATCH 03/15] adjust number of threads for sgemv and dgemv --- interface/gemv.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/interface/gemv.c b/interface/gemv.c index 562ceee9f..08553ad21 100644 --- a/interface/gemv.c +++ b/interface/gemv.c @@ -211,7 +211,18 @@ void CNAME(enum CBLAS_ORDER order, buffer = (FLOAT *)blas_memory_alloc(1); #ifdef SMP - nthreads = num_cpu_avail(2); + + int nthreads_max = num_cpu_avail(2); + int nthreads_avail = nthreads_max; + + double MNK = (double) m * (double) n; + if ( MNK <= (500.0 * 100.0 * (double) GEMM_MULTITHREAD_THRESHOLD) ) + nthreads_max = 1; + + if ( nthreads_max > nthreads_avail ) + nthreads = nthreads_avail; + else + nthreads = nthreads_max; if (nthreads == 1) { #endif From 51413925bdcc1fceec46e58acbd7cf03b7762aa1 Mon Sep 17 00:00:00 2001 From: wernsaar Date: Tue, 15 Jul 2014 16:27:02 +0200 Subject: [PATCH 04/15] adjust number of threads for small size in cgemv and zgemv --- interface/zgemv.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/interface/zgemv.c b/interface/zgemv.c index fcc2fda54..50513a8e4 100644 --- a/interface/zgemv.c +++ b/interface/zgemv.c @@ -233,7 +233,19 @@ void CNAME(enum CBLAS_ORDER order, buffer = (FLOAT *)blas_memory_alloc(1); #ifdef SMP - nthreads = num_cpu_avail(2); + + int nthreads_max = num_cpu_avail(2); + int nthreads_avail = nthreads_max; + + double MNK = (double) m * (double) n; + if ( MNK <= (80.0 * 20.0 * (double) GEMM_MULTITHREAD_THRESHOLD) ) + nthreads_max = 1; + + if ( nthreads_max > nthreads_avail ) + nthreads = nthreads_avail; + else + nthreads = nthreads_max; + if (nthreads == 1) { #endif From 7ceb25d7b370f87f89ee900a47015d33dcaab8bf Mon Sep 17 00:00:00 2001 From: wernsaar Date: Wed, 16 Jul 2014 17:08:43 +0200 Subject: [PATCH 05/15] changed string GFORTRAN to lowercase --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 2e378883b..3aaf092fc 100644 --- a/Makefile +++ b/Makefile @@ -247,7 +247,7 @@ ifndef NOFORTRAN -@echo "SUFFIX = $(SUFFIX)" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "PSUFFIX = $(PSUFFIX)" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "CEXTRALIB = $(EXTRALIB)" >> $(NETLIB_LAPACK_DIR)/make.inc -ifeq ($(FC), GFORTRAN) +ifeq ($(FC), gfortran) -@echo "TIMER = INT_ETIME" >> $(NETLIB_LAPACK_DIR)/make.inc ifdef SMP -@echo "LOADER = $(FC) -pthread" >> $(NETLIB_LAPACK_DIR)/make.inc From 
3c5732615dd01e4d865d6c5d516e75889b165347 Mon Sep 17 00:00:00 2001 From: wernsaar Date: Thu, 17 Jul 2014 23:15:07 +0200 Subject: [PATCH 06/15] added blocked sgemv_n and microkernel for bulldozer and piledriver --- kernel/x86_64/KERNEL.BULLDOZER | 1 + kernel/x86_64/KERNEL.PILEDRIVER | 1 + kernel/x86_64/sgemv_n_avx.c | 194 +++++++++++++ kernel/x86_64/sgemv_n_microk_bulldozer.c | 346 +++++++++++++++++++++++ 4 files changed, 542 insertions(+) create mode 100644 kernel/x86_64/sgemv_n_avx.c create mode 100644 kernel/x86_64/sgemv_n_microk_bulldozer.c diff --git a/kernel/x86_64/KERNEL.BULLDOZER b/kernel/x86_64/KERNEL.BULLDOZER index 70370a73c..55932e69f 100644 --- a/kernel/x86_64/KERNEL.BULLDOZER +++ b/kernel/x86_64/KERNEL.BULLDOZER @@ -1,3 +1,4 @@ +SGEMVNKERNEL = sgemv_n_avx.c ZGEMVNKERNEL = zgemv_n_dup.S ZGEMVTKERNEL = zgemv_t.S diff --git a/kernel/x86_64/KERNEL.PILEDRIVER b/kernel/x86_64/KERNEL.PILEDRIVER index 92b5dc7c9..145d9fb2f 100644 --- a/kernel/x86_64/KERNEL.PILEDRIVER +++ b/kernel/x86_64/KERNEL.PILEDRIVER @@ -1,3 +1,4 @@ +SGEMVNKERNEL = sgemv_n_avx.c ZGEMVNKERNEL = zgemv_n_dup.S ZGEMVTKERNEL = zgemv_t.S diff --git a/kernel/x86_64/sgemv_n_avx.c b/kernel/x86_64/sgemv_n_avx.c new file mode 100644 index 000000000..8c263543c --- /dev/null +++ b/kernel/x86_64/sgemv_n_avx.c @@ -0,0 +1,194 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + + +#include "common.h" + +#if defined(BULLDOZER) || defined(PILEDRIVER) +#include "sgemv_n_microk_bulldozer.c" +#endif + +static void copy_x(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_src) +{ + BLASLONG i; + for ( i=0; i 0 ) + { + + if ( inc_x == 1 ) + xbuffer = x_ptr; + else + copy_x(n2,x_ptr,xbuffer,inc_x); + + y_ptr = y; + + for(i = 0; i rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vxorps %%ymm12, %%ymm12, %%ymm12\n\t" // set to zero + "vxorps %%ymm13, %%ymm13, %%ymm13\n\t" // set to zero + "vxorps %%ymm14, %%ymm14, %%ymm14\n\t" // set to zero + "vxorps %%ymm15, %%ymm15, %%ymm15\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "addq $4 , %%rdi \n\t" // increment pointer of c + + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vfmaddps %%ymm12, 0*4(%%rsi), %%ymm0, %%ymm12\n\t" // multiply a and c and add to temp + "vfmaddps %%ymm13, 8*4(%%rsi), %%ymm0, %%ymm13\n\t" // multiply a and c and add to temp + "vfmaddps %%ymm14, 16*4(%%rsi), %%ymm0, %%ymm14\n\t" // multiply a and c and add to temp + "vfmaddps %%ymm15, 24*4(%%rsi), %%ymm0, %%ymm15\n\t" // multiply a and c and add to temp + + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm12, %%ymm1, %%ymm12\n\t" // scale by alpha + "vmulps %%ymm13, %%ymm1, %%ymm13\n\t" // scale by alpha + "vmulps %%ymm14, %%ymm1, %%ymm14\n\t" // scale by alpha + "vmulps %%ymm15, %%ymm1, %%ymm15\n\t" // scale by alpha + + "vmovups %%ymm12, (%%rdx) \n\t" // store temp -> y + "vmovups %%ymm13, 8*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm14, 16*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm15, 24*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", + "xmm0" , "xmm1", + "xmm12", "xmm13", "xmm14", "xmm15", + "memory" + ); + +} + +static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + float *pre = a + lda*4*3; + + __asm __volatile + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + + "vxorps %%ymm12, %%ymm12, %%ymm12\n\t" // set to zero + "vxorps %%ymm13, %%ymm13, %%ymm13\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "addq $4 , %%rdi \n\t" // increment pointer of c + + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + + "vfmaddps %%ymm12, 0*4(%%rsi), %%ymm0, %%ymm12\n\t" // multiply a and c and add to temp + "vfmaddps %%ymm13, 8*4(%%rsi), %%ymm0, %%ymm13\n\t" // multiply a and c and add to 
temp + + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm12, %%ymm1, %%ymm12\n\t" // scale by alpha + "vmulps %%ymm13, %%ymm1, %%ymm13\n\t" // scale by alpha + + "vmovups %%ymm12, (%%rdx) \n\t" // store temp -> y + "vmovups %%ymm13, 8*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", + "xmm0" , "xmm1", + "xmm12", "xmm13", "xmm14", "xmm15", + "memory" + ); + +} + + +static void sgemv_kernel_8( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm __volatile + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%ymm12, %%ymm12, %%ymm12\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "addq $4 , %%rdi \n\t" // increment pointer of c + + "vfmaddps %%ymm12, 0*4(%%rsi), %%ymm0, %%ymm12\n\t" // multiply a and c and add to temp + + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm12, %%ymm1, %%ymm12\n\t" // scale by alpha + + "vmovups %%ymm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", + "xmm0" , "xmm1", + "xmm12", "xmm13", "xmm14", "xmm15", + "memory" + ); + +} + + +static void sgemv_kernel_4( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm __volatile + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%xmm0 \n\t" // load values of c + "addq $4 , %%rdi \n\t" // increment pointer of c + + "vfmaddps %%xmm12, 0*4(%%rsi), %%xmm0, %%xmm12\n\t" // multiply a and c and add to temp + + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + + "vmovups %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", + "xmm0" , "xmm1", + "xmm12", "xmm13", "xmm14", "xmm15", + "memory" + ); + +} + +static void sgemv_kernel_2( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm __volatile + ( + "movq %0, %%rax\n\t" // n -> rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + "vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vmovss (%%rdi), %%xmm0 \n\t" // load values of c + "addq $4 , %%rdi \n\t" 
// increment pointer of c + + "vfmaddss %%xmm12, 0*4(%%rsi), %%xmm0, %%xmm12\n\t" // multiply a and c and add to temp + "vfmaddss %%xmm13, 1*4(%%rsi), %%xmm0, %%xmm13\n\t" // multiply a and c and add to temp + + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + "vmulss %%xmm13, %%xmm1, %%xmm13\n\t" // scale by alpha + + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + "vmovss %%xmm13, 4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", + "xmm0" , "xmm1", + "xmm12", "xmm13", "xmm14", "xmm15", + "memory" + ); + +} + + + +static void sgemv_kernel_1( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm __volatile + ( + "movq %0, %%rax\n\t" // n -> rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vmovss (%%rdi), %%xmm0 \n\t" // load values of c + "addq $4 , %%rdi \n\t" // increment pointer of c + + "vfmaddss %%xmm12, 0*4(%%rsi), %%xmm0, %%xmm12\n\t" // multiply a and c and add to temp + + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", + "xmm0" , "xmm1", + "xmm12", "xmm13", "xmm14", "xmm15", + "memory" + ); + +} + + From c8a4a561773dcd4b905b7618b2518539d467daaf Mon Sep 17 00:00:00 2001 From: wernsaar Date: Fri, 18 Jul 2014 11:25:21 +0200 Subject: [PATCH 07/15] performance optimizations for sgemv_n --- kernel/x86_64/sgemv_n_avx.c | 32 ++++-- kernel/x86_64/sgemv_n_microk_bulldozer.c | 137 ++++++++++++++++++++--- 2 files changed, 146 insertions(+), 23 deletions(-) diff --git a/kernel/x86_64/sgemv_n_avx.c b/kernel/x86_64/sgemv_n_avx.c index 8c263543c..dc8d015d8 100644 --- a/kernel/x86_64/sgemv_n_avx.c +++ b/kernel/x86_64/sgemv_n_avx.c @@ -70,12 +70,11 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO n1 = n / 512 ; n2 = n % 512 ; - m1 = m / 32; - m2 = m % 32; + m1 = m / 64; + m2 = m % 64; - x_ptr = x; - a_ptr = a; y_ptr = y; + x_ptr = x; for (j=0; j y - "vmovups %%ymm13, 8*4(%%rdx) \n\t" // store temp -> y - "vmovups %%ymm14, 16*4(%%rdx) \n\t" // store temp -> y - "vmovups %%ymm15, 24*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + "vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm10, 16*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm11, 24*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm12, 32*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm13, 40*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm14, 48*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm15, 56*4(%%rdx) \n\t" // store temp -> y : : @@ -88,6 +105,94 @@ static void sgemv_kernel_32( long n, float alpha, float *a, long lda, float *x, "m" (pre) // 6 : "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", 
"xmm0" , "xmm1", + "xmm8", "xmm9", "xmm10", "xmm11", + "xmm12", "xmm13", "xmm14", "xmm15", + "memory" + ); + +} + + + +static void sgemv_kernel_32( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + float *pre = a + lda*3; + + __asm __volatile + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vxorps %%xmm8 , %%xmm8 , %%xmm8 \n\t" // set to zero + "vxorps %%xmm9 , %%xmm9 , %%xmm9 \n\t" // set to zero + "vxorps %%xmm10, %%xmm10, %%xmm10\n\t" // set to zero + "vxorps %%xmm11, %%xmm11, %%xmm11\n\t" // set to zero + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + "vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero + "vxorps %%xmm14, %%xmm14, %%xmm14\n\t" // set to zero + "vxorps %%xmm15, %%xmm15, %%xmm15\n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%xmm0 \n\t" // load values of c + "nop \n\t" + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + + "prefetcht0 (%%r8)\n\t" // Prefetch + "vfmaddps %%xmm8 , 0*4(%%rsi), %%xmm0, %%xmm8 \n\t" // multiply a and c and add to temp + "prefetcht0 64(%%r8)\n\t" // Prefetch + "vfmaddps %%xmm9 , 4*4(%%rsi), %%xmm0, %%xmm9 \n\t" // multiply a and c and add to temp + "vfmaddps %%xmm10, 8*4(%%rsi), %%xmm0, %%xmm10\n\t" // multiply a and c and add to temp + "vfmaddps %%xmm11, 12*4(%%rsi), %%xmm0, %%xmm11\n\t" // multiply a and c and add to temp + "vfmaddps %%xmm12, 16*4(%%rsi), %%xmm0, %%xmm12\n\t" // multiply a and c and add to temp + "vfmaddps %%xmm13, 20*4(%%rsi), %%xmm0, %%xmm13\n\t" // multiply a and c and add to temp + "vfmaddps %%xmm14, 24*4(%%rsi), %%xmm0, %%xmm14\n\t" // multiply a and c and add to temp + "vfmaddps %%xmm15, 28*4(%%rsi), %%xmm0, %%xmm15\n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%xmm8 , %%xmm1, %%xmm8 \n\t" // scale by alpha + "vmulps %%xmm9 , %%xmm1, %%xmm9 \n\t" // scale by alpha + "vmulps %%xmm10, %%xmm1, %%xmm10\n\t" // scale by alpha + "vmulps %%xmm11, %%xmm1, %%xmm11\n\t" // scale by alpha + "vmulps %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + "vmulps %%xmm13, %%xmm1, %%xmm13\n\t" // scale by alpha + "vmulps %%xmm14, %%xmm1, %%xmm14\n\t" // scale by alpha + "vmulps %%xmm15, %%xmm1, %%xmm15\n\t" // scale by alpha + + "vmovups %%xmm8 , (%%rdx) \n\t" // store temp -> y + "vmovups %%xmm9 , 4*4(%%rdx) \n\t" // store temp -> y + "vmovups %%xmm10, 8*4(%%rdx) \n\t" // store temp -> y + "vmovups %%xmm11, 12*4(%%rdx) \n\t" // store temp -> y + "vmovups %%xmm12, 16*4(%%rdx) \n\t" // store temp -> y + "vmovups %%xmm13, 20*4(%%rdx) \n\t" // store temp -> y + "vmovups %%xmm14, 24*4(%%rdx) \n\t" // store temp -> y + "vmovups %%xmm15, 28*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", + "xmm0" , "xmm1", + "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", "memory" ); @@ -97,7 +202,7 @@ static void sgemv_kernel_32( long n, float alpha, 
float *a, long lda, float *x, static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y) { - float *pre = a + lda*4*3; + float *pre = a + lda*1; __asm __volatile ( From b3938fe371d1806233b06eff23eda4456d2f763a Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sat, 19 Jul 2014 07:15:34 +0200 Subject: [PATCH 08/15] don't use this sgemv_n on Windows --- kernel/x86_64/KERNEL.BULLDOZER | 5 ++ kernel/x86_64/KERNEL.PILEDRIVER | 5 ++ kernel/x86_64/sgemv_n_avx.c | 6 +- kernel/x86_64/sgemv_n_microk_bulldozer.c | 78 ++++++++++++------------ 4 files changed, 53 insertions(+), 41 deletions(-) diff --git a/kernel/x86_64/KERNEL.BULLDOZER b/kernel/x86_64/KERNEL.BULLDOZER index 55932e69f..fac8016a6 100644 --- a/kernel/x86_64/KERNEL.BULLDOZER +++ b/kernel/x86_64/KERNEL.BULLDOZER @@ -1,4 +1,9 @@ +ifdef OS_WINDOWS +SGEMVNKERNEL = ../arm/gemv_n.c +else SGEMVNKERNEL = sgemv_n_avx.c +endif + ZGEMVNKERNEL = zgemv_n_dup.S ZGEMVTKERNEL = zgemv_t.S diff --git a/kernel/x86_64/KERNEL.PILEDRIVER b/kernel/x86_64/KERNEL.PILEDRIVER index 145d9fb2f..555c8053d 100644 --- a/kernel/x86_64/KERNEL.PILEDRIVER +++ b/kernel/x86_64/KERNEL.PILEDRIVER @@ -1,4 +1,9 @@ +ifdef OS_WINDOWS +SGEMVNKERNEL = ../arm/gemv_n.c +else SGEMVNKERNEL = sgemv_n_avx.c +endif + ZGEMVNKERNEL = zgemv_n_dup.S ZGEMVTKERNEL = zgemv_t.S diff --git a/kernel/x86_64/sgemv_n_avx.c b/kernel/x86_64/sgemv_n_avx.c index dc8d015d8..91e3ee424 100644 --- a/kernel/x86_64/sgemv_n_avx.c +++ b/kernel/x86_64/sgemv_n_avx.c @@ -61,8 +61,10 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *a, BLASLO FLOAT *a_ptr; FLOAT *x_ptr; FLOAT *y_ptr; - BLASLONG n1,n2; - BLASLONG m1,m2; + BLASLONG n1; + BLASLONG m1; + BLASLONG register m2; + BLASLONG register n2; FLOAT *xbuffer,*ybuffer; xbuffer = buffer; ybuffer = xbuffer + 2048 + 256; diff --git a/kernel/x86_64/sgemv_n_microk_bulldozer.c b/kernel/x86_64/sgemv_n_microk_bulldozer.c index 1cecd96c5..1b07f0291 100644 --- a/kernel/x86_64/sgemv_n_microk_bulldozer.c +++ b/kernel/x86_64/sgemv_n_microk_bulldozer.c @@ -25,13 +25,13 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*****************************************************************************/ -static void sgemv_kernel_64( long n, float alpha, float *a, long lda, float *x, float *y) +static void sgemv_kernel_64( long n, float alpha, float *a, long lda, float *x, float *y) { float *pre = a + lda*3; - __asm __volatile + __asm__ __volatile__ ( "movq %0, %%rax\n\t" // n -> rax "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 @@ -103,10 +103,10 @@ static void sgemv_kernel_64( long n, float alpha, float *a, long lda, float *x, "m" (x), // 4 "m" (y), // 5 "m" (pre) // 6 - : "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", - "xmm0" , "xmm1", - "xmm8", "xmm9", "xmm10", "xmm11", - "xmm12", "xmm13", "xmm14", "xmm15", + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); @@ -114,13 +114,13 @@ static void sgemv_kernel_64( long n, float alpha, float *a, long lda, float *x, -static void sgemv_kernel_32( long n, float alpha, float *a, long lda, float *x, float *y) +static void sgemv_kernel_32( long n, float alpha, float *a, long lda, float *x, float *y) { float *pre = a + lda*3; - __asm __volatile + __asm__ __volatile__ ( "movq %0, %%rax\n\t" // n -> rax "vbroadcastss %1, %%xmm1\n\t" // alpha -> xmm1 @@ -190,21 +190,16 @@ static void sgemv_kernel_32( long n, float alpha, float *a, long lda, float *x, "m" (x), // 4 "m" (y), // 5 "m" (pre) // 6 - : "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", - "xmm0" , "xmm1", - "xmm8", "xmm9", "xmm10", "xmm11", - "xmm12", "xmm13", "xmm14", "xmm15", - "memory" ); } -static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y) +static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y) { - float *pre = a + lda*1; + float *pre = a + lda*3; - __asm __volatile + __asm__ __volatile__ ( "movq %0, %%rax\n\t" // n -> rax "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 @@ -248,20 +243,21 @@ static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, "m" (x), // 4 "m" (y), // 5 "m" (pre) // 6 - : "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", - "xmm0" , "xmm1", - "xmm12", "xmm13", "xmm14", "xmm15", + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); } -static void sgemv_kernel_8( long n, float alpha, float *a, long lda, float *x, float *y) +static void sgemv_kernel_8( long n, float alpha, float *a, long lda, float *x, float *y) { - __asm __volatile + __asm__ __volatile__ ( "movq %0, %%rax\n\t" // n -> rax "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 @@ -295,20 +291,21 @@ static void sgemv_kernel_8( long n, float alpha, float *a, long lda, float *x, f "m" (lda), // 3 "m" (x), // 4 "m" (y) // 5 - : "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", - "xmm0" , "xmm1", - "xmm12", "xmm13", "xmm14", "xmm15", + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); } -static void sgemv_kernel_4( long n, float alpha, float *a, long lda, float *x, float *y) +static void sgemv_kernel_4( long n, float alpha, float *a, long lda, float *x, float *y) { - __asm __volatile + __asm__ __volatile__ ( "movq %0, %%rax\n\t" // n -> rax "vbroadcastss %1, %%xmm1\n\t" // alpha -> xmm1 @@ -342,19 +339,20 @@ static void sgemv_kernel_4( long n, float alpha, float *a, 
long lda, float *x, f "m" (lda), // 3 "m" (x), // 4 "m" (y) // 5 - : "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", - "xmm0" , "xmm1", - "xmm12", "xmm13", "xmm14", "xmm15", + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); } -static void sgemv_kernel_2( long n, float alpha, float *a, long lda, float *x, float *y) +static void sgemv_kernel_2( long n, float alpha, float *a, long lda, float *x, float *y) { - __asm __volatile + __asm__ __volatile__ ( "movq %0, %%rax\n\t" // n -> rax "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 @@ -392,9 +390,10 @@ static void sgemv_kernel_2( long n, float alpha, float *a, long lda, float *x, f "m" (lda), // 3 "m" (x), // 4 "m" (y) // 5 - : "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", - "xmm0" , "xmm1", - "xmm12", "xmm13", "xmm14", "xmm15", + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); @@ -402,11 +401,11 @@ static void sgemv_kernel_2( long n, float alpha, float *a, long lda, float *x, f -static void sgemv_kernel_1( long n, float alpha, float *a, long lda, float *x, float *y) +static void sgemv_kernel_1( long n, float alpha, float *a, long lda, float *x, float *y) { - __asm __volatile + __asm__ __volatile__ ( "movq %0, %%rax\n\t" // n -> rax "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 @@ -440,9 +439,10 @@ static void sgemv_kernel_1( long n, float alpha, float *a, long lda, float *x, f "m" (lda), // 3 "m" (x), // 4 "m" (y) // 5 - : "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", "r11", - "xmm0" , "xmm1", - "xmm12", "xmm13", "xmm14", "xmm15", + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", "memory" ); From 2cce125c795632e4dd6f209e5b9703ed39a7ef10 Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sat, 19 Jul 2014 15:48:07 +0200 Subject: [PATCH 09/15] added optimized sgemv_t for bulldozer and piledriver --- kernel/x86_64/KERNEL.BULLDOZER | 2 + kernel/x86_64/KERNEL.PILEDRIVER | 2 + kernel/x86_64/sgemv_t_avx.c | 228 +++++++++++++++++++++++ kernel/x86_64/sgemv_t_microk_bulldozer.c | 99 ++++++++++ 4 files changed, 331 insertions(+) create mode 100644 kernel/x86_64/sgemv_t_avx.c create mode 100644 kernel/x86_64/sgemv_t_microk_bulldozer.c diff --git a/kernel/x86_64/KERNEL.BULLDOZER b/kernel/x86_64/KERNEL.BULLDOZER index fac8016a6..73a9ad2ec 100644 --- a/kernel/x86_64/KERNEL.BULLDOZER +++ b/kernel/x86_64/KERNEL.BULLDOZER @@ -1,7 +1,9 @@ ifdef OS_WINDOWS SGEMVNKERNEL = ../arm/gemv_n.c +SGEMVTKERNEL = ../arm/gemv_t.c else SGEMVNKERNEL = sgemv_n_avx.c +SGEMVTKERNEL = sgemv_t_avx.c endif diff --git a/kernel/x86_64/KERNEL.PILEDRIVER b/kernel/x86_64/KERNEL.PILEDRIVER index 555c8053d..453e7b762 100644 --- a/kernel/x86_64/KERNEL.PILEDRIVER +++ b/kernel/x86_64/KERNEL.PILEDRIVER @@ -1,7 +1,9 @@ ifdef OS_WINDOWS SGEMVNKERNEL = ../arm/gemv_n.c +SGEMVTKERNEL = ../arm/gemv_t.c else SGEMVNKERNEL = sgemv_n_avx.c +SGEMVTKERNEL = sgemv_t_avx.c endif diff --git a/kernel/x86_64/sgemv_t_avx.c b/kernel/x86_64/sgemv_t_avx.c new file mode 100644 index 000000000..c9cdb60cd --- /dev/null +++ b/kernel/x86_64/sgemv_t_avx.c @@ -0,0 +1,228 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. 
+Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + + +#include "common.h" + +#if defined(BULLDOZER) || defined(PILEDRIVER) +#include "sgemv_t_microk_bulldozer.c" +#endif + +static void copy_x(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_src) +{ + BLASLONG i; + for ( i=0; i= 16 ) + { + if ( m2 & Mblock) + { + + if ( inc_x == 1 ) + xbuffer = x_ptr; + else + copy_x(Mblock,x_ptr,xbuffer,inc_x); + + y_ptr = y; + a_ptrl = a_ptr; + + for(i = 0; i rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "leaq (, %%rcx,4), %%rcx \n\t" // scale lda by size of float + "leaq (%%rsi,%%rcx,1), %%r8 \n\t" // pointer to next line + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + "vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero + "vxorps %%xmm14, %%xmm14, %%xmm14\n\t" // set to zero + "vxorps %%xmm15, %%xmm15, %%xmm15\n\t" // set to zero + + "sarq $4, %%rax \n\t" // n = n / 16 + + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + // "prefetcht0 512(%%rsi) \n\t" + "prefetcht0 (%%r8) \n\t" //prefetch next line of a + "vmovups (%%rsi), %%xmm4 \n\t" + "vmovups 4*4(%%rsi), %%xmm5 \n\t" + "vmovups 8*4(%%rsi), %%xmm6 \n\t" + "vmovups 12*4(%%rsi), %%xmm7 \n\t" + + "vfmaddps %%xmm12, 0*4(%%rdi), %%xmm4, %%xmm12\n\t" // multiply a and c and add to temp + "vfmaddps %%xmm13, 4*4(%%rdi), %%xmm5, %%xmm13\n\t" // multiply a and c and add to temp + "vfmaddps %%xmm14, 8*4(%%rdi), %%xmm6, %%xmm14\n\t" // multiply a and c and add to temp + "vfmaddps %%xmm15, 12*4(%%rdi), %%xmm7, %%xmm15\n\t" // multiply a and c and add to temp + + "addq $16*4 , %%r8 \n\t" // increment prefetch pointer + "addq $16*4 , %%rsi \n\t" // increment pointer of a + "addq $16*4 , %%rdi \n\t" // increment pointer of c + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vaddps %%xmm12, %%xmm14, %%xmm12\n\t" + "vaddps %%xmm13, %%xmm15, %%xmm13\n\t" + "vaddps %%xmm12, %%xmm13, %%xmm12\n\t" + "vhaddps %%xmm12, %%xmm12, 
%%xmm12\n\t" + "vhaddps %%xmm12, %%xmm12, %%xmm12\n\t" + + "vfmaddss (%%rdx), %%xmm12, %%xmm1, %%xmm12\n\t" + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + From c06f9986d449bdaa109f742a3e5f7114b4ff50ad Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sun, 20 Jul 2014 10:21:08 +0200 Subject: [PATCH 10/15] added sgemv_t microkernel for sandybridge --- kernel/x86_64/sgemv_t_avx.c | 2 + kernel/x86_64/sgemv_t_microk_sandy.c | 105 +++++++++++++++++++++++++++ 2 files changed, 107 insertions(+) create mode 100644 kernel/x86_64/sgemv_t_microk_sandy.c diff --git a/kernel/x86_64/sgemv_t_avx.c b/kernel/x86_64/sgemv_t_avx.c index c9cdb60cd..7a9efa35e 100644 --- a/kernel/x86_64/sgemv_t_avx.c +++ b/kernel/x86_64/sgemv_t_avx.c @@ -30,6 +30,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #if defined(BULLDOZER) || defined(PILEDRIVER) #include "sgemv_t_microk_bulldozer.c" +#else +#include "sgemv_t_microk_sandy.c" #endif static void copy_x(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_src) diff --git a/kernel/x86_64/sgemv_t_microk_sandy.c b/kernel/x86_64/sgemv_t_microk_sandy.c new file mode 100644 index 000000000..1745db3a7 --- /dev/null +++ b/kernel/x86_64/sgemv_t_microk_sandy.c @@ -0,0 +1,105 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + //n = n / 16; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "leaq (, %%rcx,4), %%rcx \n\t" // scale lda by size of float + "leaq (%%rsi,%%rcx,1), %%r8 \n\t" // pointer to next line + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + "vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero + "vxorps %%xmm14, %%xmm14, %%xmm14\n\t" // set to zero + "vxorps %%xmm15, %%xmm15, %%xmm15\n\t" // set to zero + + "sarq $4, %%rax \n\t" // n = n / 16 + + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + // "prefetcht0 512(%%rsi) \n\t" + "prefetcht0 (%%r8) \n\t" //prefetch next line of a + "vmovups (%%rsi), %%xmm4 \n\t" + "vmovups 4*4(%%rsi), %%xmm5 \n\t" + "vmovups 8*4(%%rsi), %%xmm6 \n\t" + "vmovups 12*4(%%rsi), %%xmm7 \n\t" + + "vmulps 0*4(%%rdi), %%xmm4, %%xmm8 \n\t" // multiply a and c and add to temp + "vmulps 4*4(%%rdi), %%xmm5, %%xmm9 \n\t" // multiply a and c and add to temp + "vmulps 8*4(%%rdi), %%xmm6, %%xmm10\n\t" // multiply a and c and add to temp + "vmulps 12*4(%%rdi), %%xmm7, %%xmm11\n\t" // multiply a and c and add to temp + + "vaddps %%xmm12, %%xmm8 , %%xmm12\n\t" + "vaddps %%xmm13, %%xmm9 , %%xmm13\n\t" + "vaddps %%xmm14, %%xmm10, %%xmm14\n\t" + "vaddps %%xmm15, %%xmm11, %%xmm15\n\t" + + "addq $16*4 , %%r8 \n\t" // increment prefetch pointer + "addq $16*4 , %%rsi \n\t" // increment pointer of a + "addq $16*4 , %%rdi \n\t" // increment pointer of c + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vaddps %%xmm12, %%xmm14, %%xmm12\n\t" + "vaddps %%xmm13, %%xmm15, %%xmm13\n\t" + "vaddps %%xmm12, %%xmm13, %%xmm12\n\t" + "vhaddps %%xmm12, %%xmm12, %%xmm12\n\t" + "vhaddps %%xmm12, %%xmm12, %%xmm12\n\t" + + "vfmaddss (%%rdx), %%xmm12, %%xmm1, %%xmm12\n\t" + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + From 02eb72ac426226566b6b9d3cffd4beaacde88672 Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sun, 20 Jul 2014 10:48:41 +0200 Subject: [PATCH 11/15] bugfix in sgemv_t_microk_sandy.c --- kernel/x86_64/KERNEL.SANDYBRIDGE | 8 ++++++++ kernel/x86_64/sgemv_t_microk_sandy.c | 3 ++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/kernel/x86_64/KERNEL.SANDYBRIDGE b/kernel/x86_64/KERNEL.SANDYBRIDGE index 7228357ce..7d6b81d54 100644 --- a/kernel/x86_64/KERNEL.SANDYBRIDGE +++ b/kernel/x86_64/KERNEL.SANDYBRIDGE @@ -1,3 +1,11 @@ +ifdef OS_WINDOWS +#SGEMVNKERNEL = ../arm/gemv_n.c +SGEMVTKERNEL = ../arm/gemv_t.c +else +#SGEMVNKERNEL = sgemv_n_avx.c +SGEMVTKERNEL = sgemv_t_avx.c +endif + SGEMMKERNEL = sgemm_kernel_16x4_sandy.S SGEMMINCOPY = ../generic/gemm_ncopy_16.c diff --git a/kernel/x86_64/sgemv_t_microk_sandy.c b/kernel/x86_64/sgemv_t_microk_sandy.c index 1745db3a7..4ecd6d3d0 100644 --- a/kernel/x86_64/sgemv_t_microk_sandy.c +++ b/kernel/x86_64/sgemv_t_microk_sandy.c @@ -80,7 +80,8 @@ static void sgemv_kernel_16( long n, float alpha, 
float *a, long lda, float *x, "vhaddps %%xmm12, %%xmm12, %%xmm12\n\t" "vhaddps %%xmm12, %%xmm12, %%xmm12\n\t" - "vfmaddss (%%rdx), %%xmm12, %%xmm1, %%xmm12\n\t" + "vmulss %%xmm12, %%xmm1, %%xmm12 \n\t" + "vaddss (%%rdx), %%xmm12, %%xmm12\n\t" "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y : From d9d4077c9317b0c283dbce0547ea299dc5f1df82 Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sun, 20 Jul 2014 11:30:32 +0200 Subject: [PATCH 12/15] added sgemv_t microkernel for haswell --- kernel/x86_64/KERNEL.HASWELL | 8 ++ kernel/x86_64/sgemv_t_avx.c | 2 + kernel/x86_64/sgemv_t_microk_haswell.c | 100 +++++++++++++++++++++++++ 3 files changed, 110 insertions(+) create mode 100644 kernel/x86_64/sgemv_t_microk_haswell.c diff --git a/kernel/x86_64/KERNEL.HASWELL b/kernel/x86_64/KERNEL.HASWELL index ae316cff0..288e39537 100644 --- a/kernel/x86_64/KERNEL.HASWELL +++ b/kernel/x86_64/KERNEL.HASWELL @@ -1,3 +1,11 @@ +ifdef OS_WINDOWS +#SGEMVNKERNEL = ../arm/gemv_n.c +SGEMVTKERNEL = ../arm/gemv_t.c +else +#SGEMVNKERNEL = sgemv_n_avx.c +SGEMVTKERNEL = sgemv_t_avx.c +endif + SGEMMKERNEL = sgemm_kernel_16x4_haswell.S SGEMMINCOPY = ../generic/gemm_ncopy_16.c diff --git a/kernel/x86_64/sgemv_t_avx.c b/kernel/x86_64/sgemv_t_avx.c index 7a9efa35e..55fb3d623 100644 --- a/kernel/x86_64/sgemv_t_avx.c +++ b/kernel/x86_64/sgemv_t_avx.c @@ -30,6 +30,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #if defined(BULLDOZER) || defined(PILEDRIVER) #include "sgemv_t_microk_bulldozer.c" +#elif defined(HASWELL) +#include "sgemv_t_microk_haswell.c" #else #include "sgemv_t_microk_sandy.c" #endif diff --git a/kernel/x86_64/sgemv_t_microk_haswell.c b/kernel/x86_64/sgemv_t_microk_haswell.c new file mode 100644 index 000000000..ecb9845bb --- /dev/null +++ b/kernel/x86_64/sgemv_t_microk_haswell.c @@ -0,0 +1,100 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + //n = n / 16; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "leaq (, %%rcx,4), %%rcx \n\t" // scale lda by size of float + "leaq (%%rsi,%%rcx,1), %%r8 \n\t" // pointer to next line + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + "vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero + "vxorps %%xmm14, %%xmm14, %%xmm14\n\t" // set to zero + "vxorps %%xmm15, %%xmm15, %%xmm15\n\t" // set to zero + + "sarq $4, %%rax \n\t" // n = n / 16 + + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + // "prefetcht0 512(%%rsi) \n\t" + "prefetcht0 (%%r8) \n\t" //prefetch next line of a + "vmovups (%%rsi), %%xmm4 \n\t" + "vmovups 4*4(%%rsi), %%xmm5 \n\t" + "vmovups 8*4(%%rsi), %%xmm6 \n\t" + "vmovups 12*4(%%rsi), %%xmm7 \n\t" + + "vfmadd231ps 0*4(%%rdi), %%xmm4, %%xmm12\n\t" // multiply a and c and add to temp + "vfmadd231ps 4*4(%%rdi), %%xmm5, %%xmm13\n\t" // multiply a and c and add to temp + "vfmadd231ps 8*4(%%rdi), %%xmm6, %%xmm14\n\t" // multiply a and c and add to temp + "vfmadd231ps 12*4(%%rdi), %%xmm7, %%xmm15\n\t" // multiply a and c and add to temp + + "addq $16*4 , %%r8 \n\t" // increment prefetch pointer + "addq $16*4 , %%rsi \n\t" // increment pointer of a + "addq $16*4 , %%rdi \n\t" // increment pointer of c + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vaddps %%xmm12, %%xmm14, %%xmm12\n\t" + "vaddps %%xmm13, %%xmm15, %%xmm13\n\t" + "vaddps %%xmm12, %%xmm13, %%xmm12\n\t" + "vhaddps %%xmm12, %%xmm12, %%xmm12\n\t" + "vhaddps %%xmm12, %%xmm12, %%xmm12\n\t" + + "vmulss %%xmm12, %%xmm1, %%xmm12\n\t" + "vaddss (%%rdx), %%xmm12,%%xmm12\n\t" + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + From c0fe95fb725aba06ce08a114a0b79f91d91ec64a Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sun, 20 Jul 2014 13:17:47 +0200 Subject: [PATCH 13/15] added sgemv_n microkernel for sandybridge --- kernel/x86_64/KERNEL.SANDYBRIDGE | 4 +- kernel/x86_64/sgemv_n_avx.c | 2 + kernel/x86_64/sgemv_n_microk_sandy.c | 474 +++++++++++++++++++++++++++ 3 files changed, 478 insertions(+), 2 deletions(-) create mode 100644 kernel/x86_64/sgemv_n_microk_sandy.c diff --git a/kernel/x86_64/KERNEL.SANDYBRIDGE b/kernel/x86_64/KERNEL.SANDYBRIDGE index 7d6b81d54..9d7a49562 100644 --- a/kernel/x86_64/KERNEL.SANDYBRIDGE +++ b/kernel/x86_64/KERNEL.SANDYBRIDGE @@ -1,8 +1,8 @@ ifdef OS_WINDOWS -#SGEMVNKERNEL = ../arm/gemv_n.c +SGEMVNKERNEL = ../arm/gemv_n.c SGEMVTKERNEL = ../arm/gemv_t.c else -#SGEMVNKERNEL = sgemv_n_avx.c +SGEMVNKERNEL = sgemv_n_avx.c SGEMVTKERNEL = sgemv_t_avx.c endif diff --git a/kernel/x86_64/sgemv_n_avx.c b/kernel/x86_64/sgemv_n_avx.c index 91e3ee424..96a03ec57 100644 --- a/kernel/x86_64/sgemv_n_avx.c +++ b/kernel/x86_64/sgemv_n_avx.c @@ -30,6 +30,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#if defined(BULLDOZER) || defined(PILEDRIVER) #include "sgemv_n_microk_bulldozer.c" +#else +#include "sgemv_n_microk_sandy.c" #endif static void copy_x(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_src) diff --git a/kernel/x86_64/sgemv_n_microk_sandy.c b/kernel/x86_64/sgemv_n_microk_sandy.c new file mode 100644 index 000000000..7d9360f94 --- /dev/null +++ b/kernel/x86_64/sgemv_n_microk_sandy.c @@ -0,0 +1,474 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
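Editor's note: the non-transposed ("n") microkernels in this file all share one shape. The loop counter holds the number of columns; each iteration broadcasts one element of x, multiplies it into a block of 8/16/32/64 consecutive rows of the current column of A, and accumulates in ymm registers; after the loop the accumulators are scaled by alpha and stored to y. A hedged C model of the 64-row variant (names are illustrative; the patch implements this in inline AVX assembly):

/* y[0..63] = alpha * sum_j A(:,j)[0..63] * x[j], with A stored column-major
   and lda floats between consecutive columns. */
static void sgemv_n_kernel_64_ref(long n, float alpha, const float *a,
                                  long lda, const float *x, float *y)
{
    float acc[64] = {0.0f};
    for (long j = 0; j < n; j++) {            /* one column of A per iteration */
        float xj = x[j];                      /* vbroadcastss (%%rdi), %%ymm0  */
        for (int i = 0; i < 64; i++)
            acc[i] += a[j * lda + i] * xj;    /* vmulps + vaddps on ymm4..ymm15 */
    }
    for (int i = 0; i < 64; i++)
        y[i] = alpha * acc[i];                /* y is overwritten, not accumulated */
}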
+*****************************************************************************/ + +static void sgemv_kernel_64( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + float *pre = a + lda*3; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + "vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero + "vxorps %%ymm10, %%ymm10, %%ymm10\n\t" // set to zero + "vxorps %%ymm11, %%ymm11, %%ymm11\n\t" // set to zero + "vxorps %%ymm12, %%ymm12, %%ymm12\n\t" // set to zero + "vxorps %%ymm13, %%ymm13, %%ymm13\n\t" // set to zero + "vxorps %%ymm14, %%ymm14, %%ymm14\n\t" // set to zero + "vxorps %%ymm15, %%ymm15, %%ymm15\n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "nop \n\t" + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + "prefetcht0 128(%%r8)\n\t" // Prefetch + "prefetcht0 192(%%r8)\n\t" // Prefetch + + "vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp + "vmulps 8*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp + "vmulps 16*4(%%rsi), %%ymm0, %%ymm6 \n\t" // multiply a and c and add to temp + "vmulps 24*4(%%rsi), %%ymm0, %%ymm7 \n\t" // multiply a and c and add to temp + + "vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp + "vaddps %%ymm9 , %%ymm5, %%ymm9 \n\t" // multiply a and c and add to temp + "vaddps %%ymm10, %%ymm6, %%ymm10\n\t" // multiply a and c and add to temp + "vaddps %%ymm11, %%ymm7, %%ymm11\n\t" // multiply a and c and add to temp + + "vmulps 32*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp + "vmulps 40*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp + "vmulps 48*4(%%rsi), %%ymm0, %%ymm6 \n\t" // multiply a and c and add to temp + "vmulps 56*4(%%rsi), %%ymm0, %%ymm7 \n\t" // multiply a and c and add to temp + + "vaddps %%ymm12, %%ymm4, %%ymm12\n\t" // multiply a and c and add to temp + "vaddps %%ymm13, %%ymm5, %%ymm13\n\t" // multiply a and c and add to temp + "vaddps %%ymm14, %%ymm6, %%ymm14\n\t" // multiply a and c and add to temp + "vaddps %%ymm15, %%ymm7, %%ymm15\n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha + "vmulps %%ymm10, %%ymm1, %%ymm10\n\t" // scale by alpha + "vmulps %%ymm11, %%ymm1, %%ymm11\n\t" // scale by alpha + "vmulps %%ymm12, %%ymm1, %%ymm12\n\t" // scale by alpha + "vmulps %%ymm13, %%ymm1, %%ymm13\n\t" // scale by alpha + "vmulps %%ymm14, %%ymm1, %%ymm14\n\t" // scale by alpha + "vmulps %%ymm15, %%ymm1, %%ymm15\n\t" // scale by alpha + + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + "vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm10, 16*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm11, 24*4(%%rdx) \n\t" // store temp -> y + "vmovups 
%%ymm12, 32*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm13, 40*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm14, 48*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm15, 56*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + +static void sgemv_kernel_32( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + float *pre = a + lda*3; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + "vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero + "vxorps %%ymm10, %%ymm10, %%ymm10\n\t" // set to zero + "vxorps %%ymm11, %%ymm11, %%ymm11\n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "nop \n\t" + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp + "vmulps 8*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp + "vmulps 16*4(%%rsi), %%ymm0, %%ymm6 \n\t" // multiply a and c and add to temp + "vmulps 24*4(%%rsi), %%ymm0, %%ymm7 \n\t" // multiply a and c and add to temp + + "vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp + "vaddps %%ymm9 , %%ymm5, %%ymm9 \n\t" // multiply a and c and add to temp + "vaddps %%ymm10, %%ymm6, %%ymm10\n\t" // multiply a and c and add to temp + "vaddps %%ymm11, %%ymm7, %%ymm11\n\t" // multiply a and c and add to temp + + + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha + "vmulps %%ymm10, %%ymm1, %%ymm10\n\t" // scale by alpha + "vmulps %%ymm11, %%ymm1, %%ymm11\n\t" // scale by alpha + + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + "vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm10, 16*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm11, 24*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "memory" + ); + + + +} + +static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + float *pre = a + lda*3; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq 
%5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + "vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "nop \n\t" + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + + "prefetcht0 (%%r8)\n\t" // Prefetch + + "vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp + "vmulps 8*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp + + "vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp + "vaddps %%ymm9 , %%ymm5, %%ymm9 \n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha + + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + "vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "memory" + ); + + +} + + +static void sgemv_kernel_8( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + + "vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp + "vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "memory" + ); + + +} + + +static void sgemv_kernel_4( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%xmm0 \n\t" // load values of c + + "vmulps 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp + "vaddps %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + 
"leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + + "vmovups %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + +static void sgemv_kernel_2( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + "vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vmovss (%%rdi), %%xmm0 \n\t" // load values of c + + "vmulps 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp + "vmulps 1*4(%%rsi), %%xmm0, %%xmm5 \n\t" // multiply a and c and add to temp + + "vaddps %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp + "vaddps %%xmm13, %%xmm5, %%xmm13 \n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + "vmulss %%xmm13, %%xmm1, %%xmm13\n\t" // scale by alpha + + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + "vmovss %%xmm13, 4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + +static void sgemv_kernel_1( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vmovss (%%rdi), %%xmm0 \n\t" // load values of c + "addq $4 , %%rdi \n\t" // increment pointer of c + + "vmulss 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp + "vaddss %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp + + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + From 5392d11b045abddfe51c45e69848c807121486e8 Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sun, 20 Jul 2014 14:08:04 +0200 Subject: [PATCH 14/15] optimized sgemv_n_microk_sandy.c --- 
kernel/x86_64/sgemv_n_microk_sandy.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/kernel/x86_64/sgemv_n_microk_sandy.c b/kernel/x86_64/sgemv_n_microk_sandy.c index 7d9360f94..9bdb06600 100644 --- a/kernel/x86_64/sgemv_n_microk_sandy.c +++ b/kernel/x86_64/sgemv_n_microk_sandy.c @@ -29,7 +29,7 @@ static void sgemv_kernel_64( long n, float alpha, float *a, long lda, float *x, { - float *pre = a + lda*3; + float *pre = a + lda*2; __asm__ __volatile__ ( @@ -58,20 +58,19 @@ static void sgemv_kernel_64( long n, float alpha, float *a, long lda, float *x, "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch "prefetcht0 (%%r8)\n\t" // Prefetch - "prefetcht0 64(%%r8)\n\t" // Prefetch - "prefetcht0 128(%%r8)\n\t" // Prefetch - "prefetcht0 192(%%r8)\n\t" // Prefetch - "vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp "vmulps 8*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp + "prefetcht0 64(%%r8)\n\t" // Prefetch "vmulps 16*4(%%rsi), %%ymm0, %%ymm6 \n\t" // multiply a and c and add to temp "vmulps 24*4(%%rsi), %%ymm0, %%ymm7 \n\t" // multiply a and c and add to temp "vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp "vaddps %%ymm9 , %%ymm5, %%ymm9 \n\t" // multiply a and c and add to temp + "prefetcht0 128(%%r8)\n\t" // Prefetch "vaddps %%ymm10, %%ymm6, %%ymm10\n\t" // multiply a and c and add to temp "vaddps %%ymm11, %%ymm7, %%ymm11\n\t" // multiply a and c and add to temp + "prefetcht0 192(%%r8)\n\t" // Prefetch "vmulps 32*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp "vmulps 40*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp "vmulps 48*4(%%rsi), %%ymm0, %%ymm6 \n\t" // multiply a and c and add to temp From 6acbafe45b732b3410e67db5b1a5f05ed5e90f1e Mon Sep 17 00:00:00 2001 From: wernsaar Date: Sun, 20 Jul 2014 14:52:25 +0200 Subject: [PATCH 15/15] added sgemv_n microkernel for haswell --- kernel/x86_64/KERNEL.HASWELL | 4 +- kernel/x86_64/sgemv_n_avx.c | 2 + kernel/x86_64/sgemv_n_microk_haswell.c | 461 +++++++++++++++++++++++++ 3 files changed, 465 insertions(+), 2 deletions(-) create mode 100644 kernel/x86_64/sgemv_n_microk_haswell.c diff --git a/kernel/x86_64/KERNEL.HASWELL b/kernel/x86_64/KERNEL.HASWELL index 288e39537..871a7d490 100644 --- a/kernel/x86_64/KERNEL.HASWELL +++ b/kernel/x86_64/KERNEL.HASWELL @@ -1,8 +1,8 @@ ifdef OS_WINDOWS -#SGEMVNKERNEL = ../arm/gemv_n.c +SGEMVNKERNEL = ../arm/gemv_n.c SGEMVTKERNEL = ../arm/gemv_t.c else -#SGEMVNKERNEL = sgemv_n_avx.c +SGEMVNKERNEL = sgemv_n_avx.c SGEMVTKERNEL = sgemv_t_avx.c endif diff --git a/kernel/x86_64/sgemv_n_avx.c b/kernel/x86_64/sgemv_n_avx.c index 96a03ec57..57aaad4b4 100644 --- a/kernel/x86_64/sgemv_n_avx.c +++ b/kernel/x86_64/sgemv_n_avx.c @@ -30,6 +30,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #if defined(BULLDOZER) || defined(PILEDRIVER) #include "sgemv_n_microk_bulldozer.c" +#elif defined(HASWELL) +#include "sgemv_n_microk_haswell.c" #else #include "sgemv_n_microk_sandy.c" #endif diff --git a/kernel/x86_64/sgemv_n_microk_haswell.c b/kernel/x86_64/sgemv_n_microk_haswell.c new file mode 100644 index 000000000..9db3869d2 --- /dev/null +++ b/kernel/x86_64/sgemv_n_microk_haswell.c @@ -0,0 +1,461 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. 
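Editor's note: the Haswell variant added below has the same structure as the Sandy Bridge kernel but folds each vmulps/vaddps pair into a single vfmadd231ps, roughly halving the arithmetic instruction count per column. A hedged intrinsics sketch of one column update (symbol names are illustrative and require -mfma to compile; the patch itself uses inline assembly):

#include <immintrin.h>

/* Sketch of one column step of the Haswell sgemv_n kernel: eight 8-float
   accumulators cover a 64-row block and are updated with fused multiply-add. */
static inline void sgemv_n_column_step_fma(__m256 acc[8], const float *a_col,
                                           const float *xj)
{
    __m256 x = _mm256_broadcast_ss(xj);      /* vbroadcastss (%%rdi), %%ymm0 */
    for (int k = 0; k < 8; k++)              /* vfmadd231ps k*32(%%rsi), ymm0, acc[k] */
        acc[k] = _mm256_fmadd_ps(_mm256_loadu_ps(a_col + 8 * k), x, acc[k]);
}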
+Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +static void sgemv_kernel_64( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + float *pre = a + lda*2; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + "vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero + "vxorps %%ymm10, %%ymm10, %%ymm10\n\t" // set to zero + "vxorps %%ymm11, %%ymm11, %%ymm11\n\t" // set to zero + "vxorps %%ymm12, %%ymm12, %%ymm12\n\t" // set to zero + "vxorps %%ymm13, %%ymm13, %%ymm13\n\t" // set to zero + "vxorps %%ymm14, %%ymm14, %%ymm14\n\t" // set to zero + "vxorps %%ymm15, %%ymm15, %%ymm15\n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + + "prefetcht0 (%%r8)\n\t" // Prefetch + "vfmadd231ps 0*4(%%rsi), %%ymm0, %%ymm8 \n\t" // multiply a and c and add to temp + "vfmadd231ps 8*4(%%rsi), %%ymm0, %%ymm9 \n\t" // multiply a and c and add to temp + "prefetcht0 64(%%r8)\n\t" // Prefetch + "vfmadd231ps 16*4(%%rsi), %%ymm0, %%ymm10\n\t" // multiply a and c and add to temp + "vfmadd231ps 24*4(%%rsi), %%ymm0, %%ymm11\n\t" // multiply a and c and add to temp + "prefetcht0 128(%%r8)\n\t" // Prefetch + "vfmadd231ps 32*4(%%rsi), %%ymm0, %%ymm12\n\t" // multiply a and c and add to temp + "vfmadd231ps 40*4(%%rsi), %%ymm0, %%ymm13\n\t" // multiply a and c and add to temp + "prefetcht0 192(%%r8)\n\t" // Prefetch + "vfmadd231ps 48*4(%%rsi), %%ymm0, %%ymm14\n\t" // multiply a and c and add to temp + "vfmadd231ps 56*4(%%rsi), %%ymm0, %%ymm15\n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi 
\n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha + "vmulps %%ymm10, %%ymm1, %%ymm10\n\t" // scale by alpha + "vmulps %%ymm11, %%ymm1, %%ymm11\n\t" // scale by alpha + "vmulps %%ymm12, %%ymm1, %%ymm12\n\t" // scale by alpha + "vmulps %%ymm13, %%ymm1, %%ymm13\n\t" // scale by alpha + "vmulps %%ymm14, %%ymm1, %%ymm14\n\t" // scale by alpha + "vmulps %%ymm15, %%ymm1, %%ymm15\n\t" // scale by alpha + + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + "vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm10, 16*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm11, 24*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm12, 32*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm13, 40*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm14, 48*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm15, 56*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + +static void sgemv_kernel_32( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + float *pre = a + lda*3; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + "vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero + "vxorps %%ymm10, %%ymm10, %%ymm10\n\t" // set to zero + "vxorps %%ymm11, %%ymm11, %%ymm11\n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "nop \n\t" + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp + "vmulps 8*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp + "vmulps 16*4(%%rsi), %%ymm0, %%ymm6 \n\t" // multiply a and c and add to temp + "vmulps 24*4(%%rsi), %%ymm0, %%ymm7 \n\t" // multiply a and c and add to temp + + "vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp + "vaddps %%ymm9 , %%ymm5, %%ymm9 \n\t" // multiply a and c and add to temp + "vaddps %%ymm10, %%ymm6, %%ymm10\n\t" // multiply a and c and add to temp + "vaddps %%ymm11, %%ymm7, %%ymm11\n\t" // multiply a and c and add to temp + + + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha + "vmulps %%ymm10, %%ymm1, %%ymm10\n\t" // scale by alpha + "vmulps %%ymm11, %%ymm1, %%ymm11\n\t" // scale by alpha + + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + 
"vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm10, 16*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm11, 24*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "memory" + ); + + + +} + +static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + float *pre = a + lda*3; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + "vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "nop \n\t" + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + + "prefetcht0 (%%r8)\n\t" // Prefetch + + "vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp + "vmulps 8*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp + + "vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp + "vaddps %%ymm9 , %%ymm5, %%ymm9 \n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha + + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + "vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "memory" + ); + + +} + + +static void sgemv_kernel_8( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + + "vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp + "vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", 
"%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "memory" + ); + + +} + + +static void sgemv_kernel_4( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%xmm0 \n\t" // load values of c + + "vmulps 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp + "vaddps %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + + "vmovups %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + +static void sgemv_kernel_2( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + "vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vmovss (%%rdi), %%xmm0 \n\t" // load values of c + + "vmulps 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp + "vmulps 1*4(%%rsi), %%xmm0, %%xmm5 \n\t" // multiply a and c and add to temp + + "vaddps %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp + "vaddps %%xmm13, %%xmm5, %%xmm13 \n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + "vmulss %%xmm13, %%xmm1, %%xmm13\n\t" // scale by alpha + + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + "vmovss %%xmm13, 4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + +static void sgemv_kernel_1( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vmovss (%%rdi), %%xmm0 \n\t" // load values of c + "addq $4 , %%rdi \n\t" // increment pointer of c + + "vmulss 0*4(%%rsi), %%xmm0, 
%%xmm4 \n\t" // multiply a and c and add to temp + "vaddss %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp + + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + +
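Editor's note: the driver in sgemv_n_avx.c is not reproduced in this series, so the following is only an assumed dispatch pattern, shown to illustrate how the block kernels defined above fit together; it is not a copy of the actual OpenBLAS logic.

/* Hedged sketch: tile the row dimension m over the fixed-size block kernels. */
static void sgemv_n_dispatch(long m, long n, float alpha, float *a, long lda,
                             float *x, float *y)
{
    long i = 0;
    while (m - i >= 64) { sgemv_kernel_64(n, alpha, a + i, lda, x, y + i); i += 64; }
    while (m - i >= 32) { sgemv_kernel_32(n, alpha, a + i, lda, x, y + i); i += 32; }
    while (m - i >= 16) { sgemv_kernel_16(n, alpha, a + i, lda, x, y + i); i += 16; }
    while (m - i >=  8) { sgemv_kernel_8 (n, alpha, a + i, lda, x, y + i); i +=  8; }
    while (m - i >=  4) { sgemv_kernel_4 (n, alpha, a + i, lda, x, y + i); i +=  4; }
    while (m - i >=  2) { sgemv_kernel_2 (n, alpha, a + i, lda, x, y + i); i +=  2; }
    while (m - i >=  1) { sgemv_kernel_1 (n, alpha, a + i, lda, x, y + i); i +=  1; }
}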