diff --git a/Makefile b/Makefile index 2e378883b..3aaf092fc 100644 --- a/Makefile +++ b/Makefile @@ -247,7 +247,7 @@ ifndef NOFORTRAN -@echo "SUFFIX = $(SUFFIX)" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "PSUFFIX = $(PSUFFIX)" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "CEXTRALIB = $(EXTRALIB)" >> $(NETLIB_LAPACK_DIR)/make.inc -ifeq ($(FC), GFORTRAN) +ifeq ($(FC), gfortran) -@echo "TIMER = INT_ETIME" >> $(NETLIB_LAPACK_DIR)/make.inc ifdef SMP -@echo "LOADER = $(FC) -pthread" >> $(NETLIB_LAPACK_DIR)/make.inc diff --git a/benchmark/Makefile b/benchmark/Makefile index e3910ee96..db183c8ad 100644 --- a/benchmark/Makefile +++ b/benchmark/Makefile @@ -2,12 +2,12 @@ TOPDIR = .. include $(TOPDIR)/Makefile.system # ACML standard -ACML=/opt/acml5.3.1/gfortran64_mp/lib -LIBACML = -fopenmp $(ACML)/libacml_mp.a -lgfortran -lm +#ACML=/opt/acml5.3.1/gfortran64_mp/lib +#LIBACML = -fopenmp $(ACML)/libacml_mp.a -lgfortran -lm # ACML custom -#ACML=/opt/pb/acml-5-3-1-gfortran-64bit/gfortran64_fma4_mp/lib -#LIBACML = -fopenmp $(ACML)/libacml_mp.a -lgfortran -lm +ACML=/opt/pb/acml-5-3-1-gfortran-64bit/gfortran64_fma4_mp/lib +LIBACML = -fopenmp $(ACML)/libacml_mp.a -lgfortran -lm # Atlas Ubuntu #ATLAS=/usr/lib/atlas-base @@ -37,6 +37,7 @@ goto :: slinpack.goto dlinpack.goto clinpack.goto zlinpack.goto \ chemm.goto zhemm.goto \ cherk.goto zherk.goto \ cher2k.goto zher2k.goto \ + sgemv.goto dgemv.goto cgemv.goto zgemv.goto \ ssymm.goto dsymm.goto csymm.goto zsymm.goto acml :: slinpack.acml dlinpack.acml clinpack.acml zlinpack.acml \ @@ -49,6 +50,7 @@ acml :: slinpack.acml dlinpack.acml clinpack.acml zlinpack.acml \ chemm.acml zhemm.acml \ cherk.acml zherk.acml \ cher2k.acml zher2k.acml \ + sgemv.acml dgemv.acml cgemv.acml zgemv.acml \ ssymm.acml dsymm.acml csymm.acml zsymm.acml atlas :: slinpack.atlas dlinpack.atlas clinpack.atlas zlinpack.atlas \ @@ -61,6 +63,7 @@ atlas :: slinpack.atlas dlinpack.atlas clinpack.atlas zlinpack.atlas \ chemm.atlas zhemm.atlas \ cherk.atlas zherk.atlas \ cher2k.atlas zher2k.atlas \ + sgemv.atlas dgemv.atlas cgemv.atlas zgemv.atlas \ ssymm.atlas dsymm.atlas csymm.atlas zsymm.atlas mkl :: slinpack.mkl dlinpack.mkl clinpack.mkl zlinpack.mkl \ @@ -73,6 +76,7 @@ mkl :: slinpack.mkl dlinpack.mkl clinpack.mkl zlinpack.mkl \ chemm.mkl zhemm.mkl \ cherk.mkl zherk.mkl \ cher2k.mkl zher2k.mkl \ + sgemv.mkl dgemv.mkl cgemv.mkl zgemv.mkl \ ssymm.mkl dsymm.mkl csymm.mkl zsymm.mkl all :: goto atlas acml mkl @@ -601,6 +605,61 @@ zher2k.atlas : zher2k.$(SUFFIX) zher2k.mkl : zher2k.$(SUFFIX) -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) +##################################### Sgemv #################################################### +sgemv.goto : sgemv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) -lm + +sgemv.acml : sgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sgemv.atlas : sgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +sgemv.mkl : sgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Dgemv #################################################### +dgemv.goto : dgemv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) -lm + +dgemv.acml : dgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +dgemv.atlas : dgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) 
$(FEXTRALIB) + +dgemv.mkl : dgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Cgemv #################################################### + +cgemv.goto : cgemv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) -lm + +cgemv.acml : cgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgemv.atlas : cgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +cgemv.mkl : cgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +##################################### Zgemv #################################################### + +zgemv.goto : zgemv.$(SUFFIX) ../$(LIBNAME) + $(CC) $(CFLAGS) -o $(@F) $^ $(CEXTRALIB) $(EXTRALIB) -lm + +zgemv.acml : zgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBACML) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgemv.atlas : zgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBATLAS) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + +zgemv.mkl : zgemv.$(SUFFIX) + -$(CC) $(CFLAGS) -o $(@F) $^ $(LIBMKL) $(CEXTRALIB) $(EXTRALIB) $(FEXTRALIB) + + ################################################################################################### slinpack.$(SUFFIX) : linpack.c @@ -717,7 +776,17 @@ cher2k.$(SUFFIX) : her2k.c zher2k.$(SUFFIX) : her2k.c $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ +sgemv.$(SUFFIX) : gemv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -UDOUBLE -o $(@F) $^ +dgemv.$(SUFFIX) : gemv.c + $(CC) $(CFLAGS) -c -UCOMPLEX -DDOUBLE -o $(@F) $^ + +cgemv.$(SUFFIX) : gemv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -UDOUBLE -o $(@F) $^ + +zgemv.$(SUFFIX) : gemv.c + $(CC) $(CFLAGS) -c -DCOMPLEX -DDOUBLE -o $(@F) $^ clean :: diff --git a/benchmark/gemv.c b/benchmark/gemv.c new file mode 100644 index 000000000..e26a36ac1 --- /dev/null +++ b/benchmark/gemv.c @@ -0,0 +1,229 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +#include +#include +#ifdef __CYGWIN32__ +#include +#endif +#include "common.h" + + +#undef GEMV + +#ifndef COMPLEX + +#ifdef DOUBLE +#define GEMV BLASFUNC(dgemv) +#else +#define GEMV BLASFUNC(sgemv) +#endif + +#else + +#ifdef DOUBLE +#define GEMV BLASFUNC(zgemv) +#else +#define GEMV BLASFUNC(cgemv) +#endif + +#endif + +#if defined(__WIN32__) || defined(__WIN64__) + +#ifndef DELTA_EPOCH_IN_MICROSECS +#define DELTA_EPOCH_IN_MICROSECS 11644473600000000ULL +#endif + +int gettimeofday(struct timeval *tv, void *tz){ + + FILETIME ft; + unsigned __int64 tmpres = 0; + static int tzflag; + + if (NULL != tv) + { + GetSystemTimeAsFileTime(&ft); + + tmpres |= ft.dwHighDateTime; + tmpres <<= 32; + tmpres |= ft.dwLowDateTime; + + /*converting file time to unix epoch*/ + tmpres /= 10; /*convert into microseconds*/ + tmpres -= DELTA_EPOCH_IN_MICROSECS; + tv->tv_sec = (long)(tmpres / 1000000UL); + tv->tv_usec = (long)(tmpres % 1000000UL); + } + + return 0; +} + +#endif + +#if !defined(__WIN32__) && !defined(__WIN64__) && !defined(__CYGWIN32__) && 0 + +static void *huge_malloc(BLASLONG size){ + int shmid; + void *address; + +#ifndef SHM_HUGETLB +#define SHM_HUGETLB 04000 +#endif + + if ((shmid =shmget(IPC_PRIVATE, + (size + HUGE_PAGESIZE) & ~(HUGE_PAGESIZE - 1), + SHM_HUGETLB | IPC_CREAT |0600)) < 0) { + printf( "Memory allocation failed(shmget).\n"); + exit(1); + } + + address = shmat(shmid, NULL, SHM_RND); + + if ((BLASLONG)address == -1){ + printf( "Memory allocation failed(shmat).\n"); + exit(1); + } + + shmctl(shmid, IPC_RMID, 0); + + return address; +} + +#define malloc huge_malloc + +#endif + +int MAIN__(int argc, char *argv[]){ + + FLOAT *a, *x, *y; + FLOAT alpha[] = {1.0, 1.0}; + FLOAT beta [] = {1.0, 1.0}; + char trans='N'; + blasint m, i, j; + blasint inc_x=1,inc_y=1; + blasint n=0; + int has_param_n = 0; + int loops = 1; + int l; + char *p; + + int from = 1; + int to = 200; + int step = 1; + + struct timeval start, stop; + double time1,timeg; + + argc--;argv++; + + if (argc > 0) { from = atol(*argv); argc--; argv++;} + if (argc > 0) { to = MAX(atol(*argv), from); argc--; argv++;} + if (argc > 0) { step = atol(*argv); argc--; argv++;} + + if ((p = getenv("OPENBLAS_LOOPS"))) loops = atoi(p); + if ((p = getenv("OPENBLAS_INCX"))) inc_x = atoi(p); + if ((p = getenv("OPENBLAS_INCY"))) inc_y = atoi(p); + if ((p = getenv("OPENBLAS_TRANS"))) trans=*p; + if ((p = getenv("OPENBLAS_PARAM_N"))) { + n = atoi(p); + if ((n>0) && (n<=to)) has_param_n = 1; + } + + if ( has_param_n == 1 ) + fprintf(stderr, "From : %3d To : %3d Step = %3d Trans = '%c' N = %d Inc_x = %d Inc_y = %d Loops = %d\n", from, to, step,trans,n,inc_x,inc_y,loops); + else + fprintf(stderr, "From : %3d To : %3d Step = %3d Trans = '%c' Inc_x = %d Inc_y = %d Loops = %d\n", from, to, step,trans,inc_x,inc_y,loops); + + if (( a = (FLOAT *)malloc(sizeof(FLOAT) * to * to * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( x = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_x) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + + if (( y = (FLOAT *)malloc(sizeof(FLOAT) * to * abs(inc_y) * COMPSIZE)) == NULL){ + fprintf(stderr,"Out of Memory!!\n");exit(1); + } + +#ifdef linux + srandom(getpid()); +#endif + + fprintf(stderr, " SIZE Flops\n"); + + for(m = from; m <= to; m += step) + { + + timeg=0; + + if ( has_param_n == 0 ) n = m; + + fprintf(stderr, " %6dx%d : ", (int)m,(int)n); + + for(j = 0; j < m; j++){ + for(i = 0; i 
< n * COMPSIZE; i++){ + a[i + j * m * COMPSIZE] = ((FLOAT) rand() / (FLOAT) RAND_MAX) - 0.5; + } + } + + + for (l=0; l nthreads_avail ) + nthreads = nthreads_avail; + else + nthreads = nthreads_max; if (nthreads == 1) { #endif diff --git a/interface/zgemv.c b/interface/zgemv.c index fcc2fda54..50513a8e4 100644 --- a/interface/zgemv.c +++ b/interface/zgemv.c @@ -233,7 +233,19 @@ void CNAME(enum CBLAS_ORDER order, buffer = (FLOAT *)blas_memory_alloc(1); #ifdef SMP - nthreads = num_cpu_avail(2); + + int nthreads_max = num_cpu_avail(2); + int nthreads_avail = nthreads_max; + + double MNK = (double) m * (double) n; + if ( MNK <= (80.0 * 20.0 * (double) GEMM_MULTITHREAD_THRESHOLD) ) + nthreads_max = 1; + + if ( nthreads_max > nthreads_avail ) + nthreads = nthreads_avail; + else + nthreads = nthreads_max; + if (nthreads == 1) { #endif diff --git a/kernel/x86_64/KERNEL.BULLDOZER b/kernel/x86_64/KERNEL.BULLDOZER index 70370a73c..73a9ad2ec 100644 --- a/kernel/x86_64/KERNEL.BULLDOZER +++ b/kernel/x86_64/KERNEL.BULLDOZER @@ -1,3 +1,11 @@ +ifdef OS_WINDOWS +SGEMVNKERNEL = ../arm/gemv_n.c +SGEMVTKERNEL = ../arm/gemv_t.c +else +SGEMVNKERNEL = sgemv_n_avx.c +SGEMVTKERNEL = sgemv_t_avx.c +endif + ZGEMVNKERNEL = zgemv_n_dup.S ZGEMVTKERNEL = zgemv_t.S diff --git a/kernel/x86_64/KERNEL.HASWELL b/kernel/x86_64/KERNEL.HASWELL index ae316cff0..871a7d490 100644 --- a/kernel/x86_64/KERNEL.HASWELL +++ b/kernel/x86_64/KERNEL.HASWELL @@ -1,3 +1,11 @@ +ifdef OS_WINDOWS +SGEMVNKERNEL = ../arm/gemv_n.c +SGEMVTKERNEL = ../arm/gemv_t.c +else +SGEMVNKERNEL = sgemv_n_avx.c +SGEMVTKERNEL = sgemv_t_avx.c +endif + SGEMMKERNEL = sgemm_kernel_16x4_haswell.S SGEMMINCOPY = ../generic/gemm_ncopy_16.c diff --git a/kernel/x86_64/KERNEL.PILEDRIVER b/kernel/x86_64/KERNEL.PILEDRIVER index 92b5dc7c9..453e7b762 100644 --- a/kernel/x86_64/KERNEL.PILEDRIVER +++ b/kernel/x86_64/KERNEL.PILEDRIVER @@ -1,3 +1,11 @@ +ifdef OS_WINDOWS +SGEMVNKERNEL = ../arm/gemv_n.c +SGEMVTKERNEL = ../arm/gemv_t.c +else +SGEMVNKERNEL = sgemv_n_avx.c +SGEMVTKERNEL = sgemv_t_avx.c +endif + ZGEMVNKERNEL = zgemv_n_dup.S ZGEMVTKERNEL = zgemv_t.S diff --git a/kernel/x86_64/KERNEL.SANDYBRIDGE b/kernel/x86_64/KERNEL.SANDYBRIDGE index 7228357ce..9d7a49562 100644 --- a/kernel/x86_64/KERNEL.SANDYBRIDGE +++ b/kernel/x86_64/KERNEL.SANDYBRIDGE @@ -1,3 +1,11 @@ +ifdef OS_WINDOWS +SGEMVNKERNEL = ../arm/gemv_n.c +SGEMVTKERNEL = ../arm/gemv_t.c +else +SGEMVNKERNEL = sgemv_n_avx.c +SGEMVTKERNEL = sgemv_t_avx.c +endif + SGEMMKERNEL = sgemm_kernel_16x4_sandy.S SGEMMINCOPY = ../generic/gemm_ncopy_16.c diff --git a/kernel/x86_64/sgemv_n_avx.c b/kernel/x86_64/sgemv_n_avx.c new file mode 100644 index 000000000..57aaad4b4 --- /dev/null +++ b/kernel/x86_64/sgemv_n_avx.c @@ -0,0 +1,218 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. 
Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + + +#include "common.h" + +#if defined(BULLDOZER) || defined(PILEDRIVER) +#include "sgemv_n_microk_bulldozer.c" +#elif defined(HASWELL) +#include "sgemv_n_microk_haswell.c" +#else +#include "sgemv_n_microk_sandy.c" +#endif + +static void copy_x(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_src) +{ + BLASLONG i; + for ( i=0; i 0 ) + { + + if ( inc_x == 1 ) + xbuffer = x_ptr; + else + copy_x(n2,x_ptr,xbuffer,inc_x); + + a_ptr = a + n1 * 512 * lda; + y_ptr = y; + + for(i = 0; i rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + "vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero + "vxorps %%ymm10, %%ymm10, %%ymm10\n\t" // set to zero + "vxorps %%ymm11, %%ymm11, %%ymm11\n\t" // set to zero + "vxorps %%ymm12, %%ymm12, %%ymm12\n\t" // set to zero + "vxorps %%ymm13, %%ymm13, %%ymm13\n\t" // set to zero + "vxorps %%ymm14, %%ymm14, %%ymm14\n\t" // set to zero + "vxorps %%ymm15, %%ymm15, %%ymm15\n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "nop \n\t" + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + + "prefetcht0 (%%r8)\n\t" // Prefetch + "vfmaddps %%ymm8 , 0*4(%%rsi), %%ymm0, %%ymm8 \n\t" // multiply a and c and add to temp + "prefetcht0 64(%%r8)\n\t" // Prefetch + "vfmaddps %%ymm9 , 8*4(%%rsi), %%ymm0, %%ymm9 \n\t" // multiply a and c and add to temp + "prefetcht0 128(%%r8)\n\t" // Prefetch + "vfmaddps %%ymm10, 16*4(%%rsi), %%ymm0, %%ymm10\n\t" // multiply a and c and add to temp + "vfmaddps %%ymm11, 24*4(%%rsi), %%ymm0, %%ymm11\n\t" // multiply a and c and add to temp + "prefetcht0 192(%%r8)\n\t" // Prefetch + "vfmaddps %%ymm12, 32*4(%%rsi), %%ymm0, %%ymm12\n\t" // multiply a and c and add to temp + "vfmaddps %%ymm13, 40*4(%%rsi), %%ymm0, %%ymm13\n\t" // multiply a and c and add to temp + "vfmaddps %%ymm14, 48*4(%%rsi), %%ymm0, %%ymm14\n\t" // multiply a and c and add to temp + "vfmaddps %%ymm15, 56*4(%%rsi), %%ymm0, %%ymm15\n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + 
"jnz .L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha + "vmulps %%ymm10, %%ymm1, %%ymm10\n\t" // scale by alpha + "vmulps %%ymm11, %%ymm1, %%ymm11\n\t" // scale by alpha + "vmulps %%ymm12, %%ymm1, %%ymm12\n\t" // scale by alpha + "vmulps %%ymm13, %%ymm1, %%ymm13\n\t" // scale by alpha + "vmulps %%ymm14, %%ymm1, %%ymm14\n\t" // scale by alpha + "vmulps %%ymm15, %%ymm1, %%ymm15\n\t" // scale by alpha + + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + "vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm10, 16*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm11, 24*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm12, 32*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm13, 40*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm14, 48*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm15, 56*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + +static void sgemv_kernel_32( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + float *pre = a + lda*3; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vxorps %%xmm8 , %%xmm8 , %%xmm8 \n\t" // set to zero + "vxorps %%xmm9 , %%xmm9 , %%xmm9 \n\t" // set to zero + "vxorps %%xmm10, %%xmm10, %%xmm10\n\t" // set to zero + "vxorps %%xmm11, %%xmm11, %%xmm11\n\t" // set to zero + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + "vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero + "vxorps %%xmm14, %%xmm14, %%xmm14\n\t" // set to zero + "vxorps %%xmm15, %%xmm15, %%xmm15\n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%xmm0 \n\t" // load values of c + "nop \n\t" + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + + "prefetcht0 (%%r8)\n\t" // Prefetch + "vfmaddps %%xmm8 , 0*4(%%rsi), %%xmm0, %%xmm8 \n\t" // multiply a and c and add to temp + "prefetcht0 64(%%r8)\n\t" // Prefetch + "vfmaddps %%xmm9 , 4*4(%%rsi), %%xmm0, %%xmm9 \n\t" // multiply a and c and add to temp + "vfmaddps %%xmm10, 8*4(%%rsi), %%xmm0, %%xmm10\n\t" // multiply a and c and add to temp + "vfmaddps %%xmm11, 12*4(%%rsi), %%xmm0, %%xmm11\n\t" // multiply a and c and add to temp + "vfmaddps %%xmm12, 16*4(%%rsi), %%xmm0, %%xmm12\n\t" // multiply a and c and add to temp + "vfmaddps %%xmm13, 20*4(%%rsi), %%xmm0, %%xmm13\n\t" // multiply a and c and add to temp + "vfmaddps %%xmm14, 24*4(%%rsi), %%xmm0, %%xmm14\n\t" // multiply a and c and add to temp + "vfmaddps %%xmm15, 28*4(%%rsi), %%xmm0, %%xmm15\n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%xmm8 , %%xmm1, %%xmm8 \n\t" // scale by alpha + "vmulps %%xmm9 , %%xmm1, %%xmm9 \n\t" // scale by alpha + "vmulps %%xmm10, %%xmm1, 
%%xmm10\n\t" // scale by alpha + "vmulps %%xmm11, %%xmm1, %%xmm11\n\t" // scale by alpha + "vmulps %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + "vmulps %%xmm13, %%xmm1, %%xmm13\n\t" // scale by alpha + "vmulps %%xmm14, %%xmm1, %%xmm14\n\t" // scale by alpha + "vmulps %%xmm15, %%xmm1, %%xmm15\n\t" // scale by alpha + + "vmovups %%xmm8 , (%%rdx) \n\t" // store temp -> y + "vmovups %%xmm9 , 4*4(%%rdx) \n\t" // store temp -> y + "vmovups %%xmm10, 8*4(%%rdx) \n\t" // store temp -> y + "vmovups %%xmm11, 12*4(%%rdx) \n\t" // store temp -> y + "vmovups %%xmm12, 16*4(%%rdx) \n\t" // store temp -> y + "vmovups %%xmm13, 20*4(%%rdx) \n\t" // store temp -> y + "vmovups %%xmm14, 24*4(%%rdx) \n\t" // store temp -> y + "vmovups %%xmm15, 28*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + ); + +} + +static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + float *pre = a + lda*3; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + + "vxorps %%ymm12, %%ymm12, %%ymm12\n\t" // set to zero + "vxorps %%ymm13, %%ymm13, %%ymm13\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "addq $4 , %%rdi \n\t" // increment pointer of c + + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + + "vfmaddps %%ymm12, 0*4(%%rsi), %%ymm0, %%ymm12\n\t" // multiply a and c and add to temp + "vfmaddps %%ymm13, 8*4(%%rsi), %%ymm0, %%ymm13\n\t" // multiply a and c and add to temp + + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm12, %%ymm1, %%ymm12\n\t" // scale by alpha + "vmulps %%ymm13, %%ymm1, %%ymm13\n\t" // scale by alpha + + "vmovups %%ymm12, (%%rdx) \n\t" // store temp -> y + "vmovups %%ymm13, 8*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + +static void sgemv_kernel_8( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%ymm12, %%ymm12, %%ymm12\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "addq $4 , %%rdi \n\t" // increment pointer of c + + "vfmaddps %%ymm12, 0*4(%%rsi), %%ymm0, %%ymm12\n\t" // multiply a and c and add to temp + + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm12, %%ymm1, %%ymm12\n\t" // scale by alpha + + "vmovups %%ymm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + 
"m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + +static void sgemv_kernel_4( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%xmm0 \n\t" // load values of c + "addq $4 , %%rdi \n\t" // increment pointer of c + + "vfmaddps %%xmm12, 0*4(%%rsi), %%xmm0, %%xmm12\n\t" // multiply a and c and add to temp + + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + + "vmovups %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + +static void sgemv_kernel_2( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + "vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vmovss (%%rdi), %%xmm0 \n\t" // load values of c + "addq $4 , %%rdi \n\t" // increment pointer of c + + "vfmaddss %%xmm12, 0*4(%%rsi), %%xmm0, %%xmm12\n\t" // multiply a and c and add to temp + "vfmaddss %%xmm13, 1*4(%%rsi), %%xmm0, %%xmm13\n\t" // multiply a and c and add to temp + + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + "vmulss %%xmm13, %%xmm1, %%xmm13\n\t" // scale by alpha + + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + "vmovss %%xmm13, 4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + +static void sgemv_kernel_1( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vmovss (%%rdi), %%xmm0 \n\t" // load values of c + "addq $4 , %%rdi \n\t" // increment pointer of c + + "vfmaddss %%xmm12, 0*4(%%rsi), %%xmm0, %%xmm12\n\t" // multiply a and c and add 
to temp + + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + diff --git a/kernel/x86_64/sgemv_n_microk_haswell.c b/kernel/x86_64/sgemv_n_microk_haswell.c new file mode 100644 index 000000000..9db3869d2 --- /dev/null +++ b/kernel/x86_64/sgemv_n_microk_haswell.c @@ -0,0 +1,461 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +static void sgemv_kernel_64( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + float *pre = a + lda*2; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + "vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero + "vxorps %%ymm10, %%ymm10, %%ymm10\n\t" // set to zero + "vxorps %%ymm11, %%ymm11, %%ymm11\n\t" // set to zero + "vxorps %%ymm12, %%ymm12, %%ymm12\n\t" // set to zero + "vxorps %%ymm13, %%ymm13, %%ymm13\n\t" // set to zero + "vxorps %%ymm14, %%ymm14, %%ymm14\n\t" // set to zero + "vxorps %%ymm15, %%ymm15, %%ymm15\n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + + "prefetcht0 (%%r8)\n\t" // Prefetch + "vfmadd231ps 0*4(%%rsi), %%ymm0, %%ymm8 \n\t" // multiply a and c and add to temp + "vfmadd231ps 8*4(%%rsi), %%ymm0, %%ymm9 \n\t" // multiply a and c and add to temp + "prefetcht0 64(%%r8)\n\t" // Prefetch + "vfmadd231ps 16*4(%%rsi), %%ymm0, %%ymm10\n\t" // multiply a and c and add to temp + "vfmadd231ps 24*4(%%rsi), %%ymm0, %%ymm11\n\t" // multiply a and c and add to temp + "prefetcht0 128(%%r8)\n\t" // Prefetch + "vfmadd231ps 32*4(%%rsi), %%ymm0, %%ymm12\n\t" // multiply a and c and add to temp + "vfmadd231ps 40*4(%%rsi), %%ymm0, %%ymm13\n\t" // multiply a and c and add to temp + "prefetcht0 192(%%r8)\n\t" // Prefetch + "vfmadd231ps 48*4(%%rsi), %%ymm0, %%ymm14\n\t" // multiply a and c and add to temp + "vfmadd231ps 56*4(%%rsi), %%ymm0, %%ymm15\n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha + "vmulps %%ymm10, %%ymm1, %%ymm10\n\t" // scale by alpha + "vmulps %%ymm11, %%ymm1, %%ymm11\n\t" // scale by alpha + "vmulps %%ymm12, %%ymm1, %%ymm12\n\t" // scale by alpha + "vmulps %%ymm13, %%ymm1, %%ymm13\n\t" // scale by alpha + "vmulps %%ymm14, %%ymm1, %%ymm14\n\t" // scale by alpha + "vmulps %%ymm15, %%ymm1, %%ymm15\n\t" // scale by alpha + + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + "vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm10, 16*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm11, 24*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm12, 32*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm13, 40*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm14, 48*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm15, 56*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + +static void 
sgemv_kernel_32( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + float *pre = a + lda*3; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + "vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero + "vxorps %%ymm10, %%ymm10, %%ymm10\n\t" // set to zero + "vxorps %%ymm11, %%ymm11, %%ymm11\n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "nop \n\t" + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp + "vmulps 8*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp + "vmulps 16*4(%%rsi), %%ymm0, %%ymm6 \n\t" // multiply a and c and add to temp + "vmulps 24*4(%%rsi), %%ymm0, %%ymm7 \n\t" // multiply a and c and add to temp + + "vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp + "vaddps %%ymm9 , %%ymm5, %%ymm9 \n\t" // multiply a and c and add to temp + "vaddps %%ymm10, %%ymm6, %%ymm10\n\t" // multiply a and c and add to temp + "vaddps %%ymm11, %%ymm7, %%ymm11\n\t" // multiply a and c and add to temp + + + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha + "vmulps %%ymm10, %%ymm1, %%ymm10\n\t" // scale by alpha + "vmulps %%ymm11, %%ymm1, %%ymm11\n\t" // scale by alpha + + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + "vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm10, 16*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm11, 24*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "memory" + ); + + + +} + +static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + float *pre = a + lda*3; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + "vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "nop \n\t" + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + + "prefetcht0 (%%r8)\n\t" // Prefetch + + "vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp + 
"vmulps 8*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp + + "vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp + "vaddps %%ymm9 , %%ymm5, %%ymm9 \n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha + + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + "vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "memory" + ); + + +} + + +static void sgemv_kernel_8( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + + "vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp + "vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "memory" + ); + + +} + + +static void sgemv_kernel_4( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%xmm0 \n\t" // load values of c + + "vmulps 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp + "vaddps %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + + "vmovups %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + +static void sgemv_kernel_2( long n, float alpha, float *a, 
long lda, float *x, float *y) +{ + + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + "vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vmovss (%%rdi), %%xmm0 \n\t" // load values of c + + "vmulps 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp + "vmulps 1*4(%%rsi), %%xmm0, %%xmm5 \n\t" // multiply a and c and add to temp + + "vaddps %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp + "vaddps %%xmm13, %%xmm5, %%xmm13 \n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + "vmulss %%xmm13, %%xmm1, %%xmm13\n\t" // scale by alpha + + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + "vmovss %%xmm13, 4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + +static void sgemv_kernel_1( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vmovss (%%rdi), %%xmm0 \n\t" // load values of c + "addq $4 , %%rdi \n\t" // increment pointer of c + + "vmulss 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp + "vaddss %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp + + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + diff --git a/kernel/x86_64/sgemv_n_microk_sandy.c b/kernel/x86_64/sgemv_n_microk_sandy.c new file mode 100644 index 000000000..9bdb06600 --- /dev/null +++ b/kernel/x86_64/sgemv_n_microk_sandy.c @@ -0,0 +1,473 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +static void sgemv_kernel_64( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + float *pre = a + lda*2; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + "vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero + "vxorps %%ymm10, %%ymm10, %%ymm10\n\t" // set to zero + "vxorps %%ymm11, %%ymm11, %%ymm11\n\t" // set to zero + "vxorps %%ymm12, %%ymm12, %%ymm12\n\t" // set to zero + "vxorps %%ymm13, %%ymm13, %%ymm13\n\t" // set to zero + "vxorps %%ymm14, %%ymm14, %%ymm14\n\t" // set to zero + "vxorps %%ymm15, %%ymm15, %%ymm15\n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "nop \n\t" + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + + "prefetcht0 (%%r8)\n\t" // Prefetch + "vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp + "vmulps 8*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp + "prefetcht0 64(%%r8)\n\t" // Prefetch + "vmulps 16*4(%%rsi), %%ymm0, %%ymm6 \n\t" // multiply a and c and add to temp + "vmulps 24*4(%%rsi), %%ymm0, %%ymm7 \n\t" // multiply a and c and add to temp + + "vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp + "vaddps %%ymm9 , %%ymm5, %%ymm9 \n\t" // multiply a and c and add to temp + "prefetcht0 128(%%r8)\n\t" // Prefetch + "vaddps %%ymm10, %%ymm6, %%ymm10\n\t" // multiply a and c and add to temp + "vaddps %%ymm11, %%ymm7, %%ymm11\n\t" // multiply a and c and add to temp + + "prefetcht0 192(%%r8)\n\t" // Prefetch + "vmulps 32*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp + "vmulps 40*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp + "vmulps 48*4(%%rsi), %%ymm0, %%ymm6 \n\t" // multiply a and c and add to temp + "vmulps 56*4(%%rsi), %%ymm0, %%ymm7 \n\t" // multiply a and c and add to temp + + "vaddps %%ymm12, 
%%ymm4, %%ymm12\n\t" // multiply a and c and add to temp + "vaddps %%ymm13, %%ymm5, %%ymm13\n\t" // multiply a and c and add to temp + "vaddps %%ymm14, %%ymm6, %%ymm14\n\t" // multiply a and c and add to temp + "vaddps %%ymm15, %%ymm7, %%ymm15\n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha + "vmulps %%ymm10, %%ymm1, %%ymm10\n\t" // scale by alpha + "vmulps %%ymm11, %%ymm1, %%ymm11\n\t" // scale by alpha + "vmulps %%ymm12, %%ymm1, %%ymm12\n\t" // scale by alpha + "vmulps %%ymm13, %%ymm1, %%ymm13\n\t" // scale by alpha + "vmulps %%ymm14, %%ymm1, %%ymm14\n\t" // scale by alpha + "vmulps %%ymm15, %%ymm1, %%ymm15\n\t" // scale by alpha + + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + "vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm10, 16*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm11, 24*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm12, 32*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm13, 40*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm14, 48*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm15, 56*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + +static void sgemv_kernel_32( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + float *pre = a + lda*3; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + "vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero + "vxorps %%ymm10, %%ymm10, %%ymm10\n\t" // set to zero + "vxorps %%ymm11, %%ymm11, %%ymm11\n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "nop \n\t" + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp + "vmulps 8*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp + "vmulps 16*4(%%rsi), %%ymm0, %%ymm6 \n\t" // multiply a and c and add to temp + "vmulps 24*4(%%rsi), %%ymm0, %%ymm7 \n\t" // multiply a and c and add to temp + + "vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp + "vaddps %%ymm9 , %%ymm5, %%ymm9 \n\t" // multiply a and c and add to temp + "vaddps %%ymm10, %%ymm6, %%ymm10\n\t" // multiply a and c and add to temp + "vaddps %%ymm11, %%ymm7, %%ymm11\n\t" // multiply a and c and add to temp + + + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz 
.L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha + "vmulps %%ymm10, %%ymm1, %%ymm10\n\t" // scale by alpha + "vmulps %%ymm11, %%ymm1, %%ymm11\n\t" // scale by alpha + + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + "vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm10, 16*4(%%rdx) \n\t" // store temp -> y + "vmovups %%ymm11, 24*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "memory" + ); + + + +} + +static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + float *pre = a + lda*3; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + "movq %6, %%r8\n\t" // address for prefetch + "prefetcht0 (%%r8)\n\t" // Prefetch + "prefetcht0 64(%%r8)\n\t" // Prefetch + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + "vxorps %%ymm9 , %%ymm9 , %%ymm9 \n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + "nop \n\t" + "leaq (%%r8 , %%rcx, 4), %%r8 \n\t" // add lda to pointer for prefetch + + "prefetcht0 (%%r8)\n\t" // Prefetch + + "vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp + "vmulps 8*4(%%rsi), %%ymm0, %%ymm5 \n\t" // multiply a and c and add to temp + + "vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp + "vaddps %%ymm9 , %%ymm5, %%ymm9 \n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmulps %%ymm9 , %%ymm1, %%ymm9 \n\t" // scale by alpha + + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + "vmovups %%ymm9 , 8*4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y), // 5 + "m" (pre) // 6 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "memory" + ); + + +} + + +static void sgemv_kernel_8( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%ymm1\n\t" // alpha -> ymm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%ymm8 , %%ymm8 , %%ymm8 \n\t" // set to zero + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%ymm0 \n\t" // load values of c + + "vmulps 0*4(%%rsi), %%ymm0, %%ymm4 \n\t" // multiply a and c and add to temp + "vaddps %%ymm8 , %%ymm4, %%ymm8 \n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz 
.L01LOOP%= \n\t" + + "vmulps %%ymm8 , %%ymm1, %%ymm8 \n\t" // scale by alpha + "vmovups %%ymm8 , (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "memory" + ); + + +} + + +static void sgemv_kernel_4( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vbroadcastss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vbroadcastss (%%rdi), %%xmm0 \n\t" // load values of c + + "vmulps 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp + "vaddps %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulps %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + + "vmovups %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + +static void sgemv_kernel_2( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + "vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vmovss (%%rdi), %%xmm0 \n\t" // load values of c + + "vmulps 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp + "vmulps 1*4(%%rsi), %%xmm0, %%xmm5 \n\t" // multiply a and c and add to temp + + "vaddps %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp + "vaddps %%xmm13, %%xmm5, %%xmm13 \n\t" // multiply a and c and add to temp + + "addq $4 , %%rdi \n\t" // increment pointer of c + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + "vmulss %%xmm13, %%xmm1, %%xmm13\n\t" // scale by alpha + + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + "vmovss %%xmm13, 4(%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + +static void sgemv_kernel_1( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > 
rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + + ".L01LOOP%=: \n\t" + "vmovss (%%rdi), %%xmm0 \n\t" // load values of c + "addq $4 , %%rdi \n\t" // increment pointer of c + + "vmulss 0*4(%%rsi), %%xmm0, %%xmm4 \n\t" // multiply a and c and add to temp + "vaddss %%xmm12, %%xmm4, %%xmm12 \n\t" // multiply a and c and add to temp + + "leaq (%%rsi, %%rcx, 4), %%rsi \n\t" // add lda to pointer of a + + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vmulss %%xmm12, %%xmm1, %%xmm12\n\t" // scale by alpha + + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + diff --git a/kernel/x86_64/sgemv_t_avx.c b/kernel/x86_64/sgemv_t_avx.c new file mode 100644 index 000000000..55fb3d623 --- /dev/null +++ b/kernel/x86_64/sgemv_t_avx.c @@ -0,0 +1,232 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + + +#include "common.h" + +#if defined(BULLDOZER) || defined(PILEDRIVER) +#include "sgemv_t_microk_bulldozer.c" +#elif defined(HASWELL) +#include "sgemv_t_microk_haswell.c" +#else +#include "sgemv_t_microk_sandy.c" +#endif + +static void copy_x(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_src) +{ + BLASLONG i; + for ( i=0; i= 16 ) + { + if ( m2 & Mblock) + { + + if ( inc_x == 1 ) + xbuffer = x_ptr; + else + copy_x(Mblock,x_ptr,xbuffer,inc_x); + + y_ptr = y; + a_ptrl = a_ptr; + + for(i = 0; i rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "leaq (, %%rcx,4), %%rcx \n\t" // scale lda by size of float + "leaq (%%rsi,%%rcx,1), %%r8 \n\t" // pointer to next line + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + "vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero + "vxorps %%xmm14, %%xmm14, %%xmm14\n\t" // set to zero + "vxorps %%xmm15, %%xmm15, %%xmm15\n\t" // set to zero + + "sarq $4, %%rax \n\t" // n = n / 16 + + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + // "prefetcht0 512(%%rsi) \n\t" + "prefetcht0 (%%r8) \n\t" //prefetch next line of a + "vmovups (%%rsi), %%xmm4 \n\t" + "vmovups 4*4(%%rsi), %%xmm5 \n\t" + "vmovups 8*4(%%rsi), %%xmm6 \n\t" + "vmovups 12*4(%%rsi), %%xmm7 \n\t" + + "vfmaddps %%xmm12, 0*4(%%rdi), %%xmm4, %%xmm12\n\t" // multiply a and c and add to temp + "vfmaddps %%xmm13, 4*4(%%rdi), %%xmm5, %%xmm13\n\t" // multiply a and c and add to temp + "vfmaddps %%xmm14, 8*4(%%rdi), %%xmm6, %%xmm14\n\t" // multiply a and c and add to temp + "vfmaddps %%xmm15, 12*4(%%rdi), %%xmm7, %%xmm15\n\t" // multiply a and c and add to temp + + "addq $16*4 , %%r8 \n\t" // increment prefetch pointer + "addq $16*4 , %%rsi \n\t" // increment pointer of a + "addq $16*4 , %%rdi \n\t" // increment pointer of c + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vaddps %%xmm12, %%xmm14, %%xmm12\n\t" + "vaddps %%xmm13, %%xmm15, %%xmm13\n\t" + "vaddps %%xmm12, %%xmm13, %%xmm12\n\t" + "vhaddps %%xmm12, %%xmm12, %%xmm12\n\t" + "vhaddps %%xmm12, %%xmm12, %%xmm12\n\t" + + "vfmaddss (%%rdx), %%xmm12, %%xmm1, %%xmm12\n\t" + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + diff --git a/kernel/x86_64/sgemv_t_microk_haswell.c b/kernel/x86_64/sgemv_t_microk_haswell.c new file mode 100644 index 000000000..ecb9845bb --- /dev/null +++ b/kernel/x86_64/sgemv_t_microk_haswell.c @@ -0,0 +1,100 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. 
Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + //n = n / 16; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "leaq (, %%rcx,4), %%rcx \n\t" // scale lda by size of float + "leaq (%%rsi,%%rcx,1), %%r8 \n\t" // pointer to next line + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + "vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero + "vxorps %%xmm14, %%xmm14, %%xmm14\n\t" // set to zero + "vxorps %%xmm15, %%xmm15, %%xmm15\n\t" // set to zero + + "sarq $4, %%rax \n\t" // n = n / 16 + + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + // "prefetcht0 512(%%rsi) \n\t" + "prefetcht0 (%%r8) \n\t" //prefetch next line of a + "vmovups (%%rsi), %%xmm4 \n\t" + "vmovups 4*4(%%rsi), %%xmm5 \n\t" + "vmovups 8*4(%%rsi), %%xmm6 \n\t" + "vmovups 12*4(%%rsi), %%xmm7 \n\t" + + "vfmadd231ps 0*4(%%rdi), %%xmm4, %%xmm12\n\t" // multiply a and c and add to temp + "vfmadd231ps 4*4(%%rdi), %%xmm5, %%xmm13\n\t" // multiply a and c and add to temp + "vfmadd231ps 8*4(%%rdi), %%xmm6, %%xmm14\n\t" // multiply a and c and add to temp + "vfmadd231ps 12*4(%%rdi), %%xmm7, %%xmm15\n\t" // multiply a and c and add to temp + + "addq $16*4 , %%r8 \n\t" // increment prefetch pointer + "addq $16*4 , %%rsi \n\t" // increment pointer of a + "addq $16*4 , %%rdi \n\t" // increment pointer of c + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vaddps %%xmm12, %%xmm14, %%xmm12\n\t" + "vaddps %%xmm13, %%xmm15, %%xmm13\n\t" + "vaddps %%xmm12, %%xmm13, %%xmm12\n\t" + "vhaddps %%xmm12, %%xmm12, %%xmm12\n\t" + "vhaddps %%xmm12, %%xmm12, %%xmm12\n\t" + + "vmulss %%xmm12, %%xmm1, %%xmm12\n\t" + "vaddss (%%rdx), %%xmm12,%%xmm12\n\t" + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + + diff --git a/kernel/x86_64/sgemv_t_microk_sandy.c b/kernel/x86_64/sgemv_t_microk_sandy.c new file mode 100644 index 000000000..4ecd6d3d0 --- /dev/null +++ b/kernel/x86_64/sgemv_t_microk_sandy.c @@ -0,0 +1,106 @@ 
+/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +static void sgemv_kernel_16( long n, float alpha, float *a, long lda, float *x, float *y) +{ + + //n = n / 16; + + __asm__ __volatile__ + ( + "movq %0, %%rax\n\t" // n -> rax + "vmovss %1, %%xmm1\n\t" // alpha -> xmm1 + "movq %2, %%rsi\n\t" // adress of a -> rsi + "movq %3, %%rcx\n\t" // value of lda > rcx + "movq %4, %%rdi\n\t" // adress of x -> rdi + "movq %5, %%rdx\n\t" // adress of y -> rdx + + "leaq (, %%rcx,4), %%rcx \n\t" // scale lda by size of float + "leaq (%%rsi,%%rcx,1), %%r8 \n\t" // pointer to next line + + "vxorps %%xmm12, %%xmm12, %%xmm12\n\t" // set to zero + "vxorps %%xmm13, %%xmm13, %%xmm13\n\t" // set to zero + "vxorps %%xmm14, %%xmm14, %%xmm14\n\t" // set to zero + "vxorps %%xmm15, %%xmm15, %%xmm15\n\t" // set to zero + + "sarq $4, %%rax \n\t" // n = n / 16 + + ".align 16 \n\t" + ".L01LOOP%=: \n\t" + // "prefetcht0 512(%%rsi) \n\t" + "prefetcht0 (%%r8) \n\t" //prefetch next line of a + "vmovups (%%rsi), %%xmm4 \n\t" + "vmovups 4*4(%%rsi), %%xmm5 \n\t" + "vmovups 8*4(%%rsi), %%xmm6 \n\t" + "vmovups 12*4(%%rsi), %%xmm7 \n\t" + + "vmulps 0*4(%%rdi), %%xmm4, %%xmm8 \n\t" // multiply a and c and add to temp + "vmulps 4*4(%%rdi), %%xmm5, %%xmm9 \n\t" // multiply a and c and add to temp + "vmulps 8*4(%%rdi), %%xmm6, %%xmm10\n\t" // multiply a and c and add to temp + "vmulps 12*4(%%rdi), %%xmm7, %%xmm11\n\t" // multiply a and c and add to temp + + "vaddps %%xmm12, %%xmm8 , %%xmm12\n\t" + "vaddps %%xmm13, %%xmm9 , %%xmm13\n\t" + "vaddps %%xmm14, %%xmm10, %%xmm14\n\t" + "vaddps %%xmm15, %%xmm11, %%xmm15\n\t" + + "addq $16*4 , %%r8 \n\t" // increment prefetch pointer + "addq $16*4 , %%rsi \n\t" // increment pointer of a + "addq $16*4 , %%rdi \n\t" // increment pointer of c + "dec %%rax \n\t" // n = n -1 + "jnz .L01LOOP%= \n\t" + + "vaddps %%xmm12, %%xmm14, %%xmm12\n\t" + "vaddps %%xmm13, %%xmm15, %%xmm13\n\t" + "vaddps %%xmm12, %%xmm13, 
%%xmm12\n\t" + "vhaddps %%xmm12, %%xmm12, %%xmm12\n\t" + "vhaddps %%xmm12, %%xmm12, %%xmm12\n\t" + + "vmulss %%xmm12, %%xmm1, %%xmm12 \n\t" + "vaddss (%%rdx), %%xmm12, %%xmm12\n\t" + "vmovss %%xmm12, (%%rdx) \n\t" // store temp -> y + + : + : + "m" (n), // 0 + "m" (alpha), // 1 + "m" (a), // 2 + "m" (lda), // 3 + "m" (x), // 4 + "m" (y) // 5 + : "%rax", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "cc", + "%xmm0", "%xmm1", + "%xmm4", "%xmm5", "%xmm6", "%xmm7", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", + "%xmm12", "%xmm13", "%xmm14", "%xmm15", + "memory" + ); + +} + + +