From d74dc39b0faeebb7aeb97e4099dcb50a1fcc7533 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sat, 6 Oct 2018 13:47:20 +0000 Subject: [PATCH 1/6] Add optimized *copy versions for skylakex Add optimized n/t copy versions for skylakex; in the patch the tcopy is also rewritten using intrinsics; the ncopy file will be worked on in a future commit --- kernel/x86_64/KERNEL.SKYLAKEX | 8 +- kernel/x86_64/dgemm_ncopy_8_skylakex.c | 422 +++++++++++++++++++++++++ kernel/x86_64/dgemm_tcopy_8_skylakex.c | 417 ++++++++++++++++++++++++ 3 files changed, 843 insertions(+), 4 deletions(-) create mode 100644 kernel/x86_64/dgemm_ncopy_8_skylakex.c create mode 100644 kernel/x86_64/dgemm_tcopy_8_skylakex.c diff --git a/kernel/x86_64/KERNEL.SKYLAKEX b/kernel/x86_64/KERNEL.SKYLAKEX index ba149512d..e34cda770 100644 --- a/kernel/x86_64/KERNEL.SKYLAKEX +++ b/kernel/x86_64/KERNEL.SKYLAKEX @@ -4,10 +4,10 @@ SGEMMKERNEL = sgemm_kernel_16x4_skylakex.S DGEMMKERNEL = dgemm_kernel_4x8_skylakex.c -DGEMMINCOPY = ../generic/gemm_ncopy_8.c -DGEMMITCOPY = ../generic/gemm_tcopy_8.c -DGEMMONCOPY = ../generic/gemm_ncopy_8.c -DGEMMOTCOPY = ../generic/gemm_tcopy_8.c +DGEMMINCOPY = dgemm_ncopy_8_skylakex.c +DGEMMITCOPY = dgemm_tcopy_8_skylakex.c +DGEMMONCOPY = dgemm_ncopy_8_skylakex.c +DGEMMOTCOPY = dgemm_tcopy_8_skylakex.c SGEMM_BETA = ../generic/gemm_beta.c DGEMM_BETA = ../generic/gemm_beta.c diff --git a/kernel/x86_64/dgemm_ncopy_8_skylakex.c b/kernel/x86_64/dgemm_ncopy_8_skylakex.c new file mode 100644 index 000000000..3bc55b8cc --- /dev/null +++ b/kernel/x86_64/dgemm_ncopy_8_skylakex.c @@ -0,0 +1,422 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include +#include "common.h" + +int CNAME(BLASLONG m, BLASLONG n, FLOAT * __restrict a, BLASLONG lda, FLOAT * __restrict b){ + BLASLONG i, j; + + FLOAT *aoffset; + FLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4; + FLOAT *aoffset5, *aoffset6, *aoffset7, *aoffset8; + + FLOAT *boffset; + FLOAT ctemp01, ctemp02, ctemp03, ctemp04; + FLOAT ctemp05, ctemp06, ctemp07, ctemp08; + FLOAT ctemp09, ctemp10, ctemp11, ctemp12; + FLOAT ctemp13, ctemp14, ctemp15, ctemp16; + FLOAT ctemp17, ctemp18, ctemp19, ctemp20; + FLOAT ctemp21, ctemp22, ctemp23, ctemp24; + FLOAT ctemp25, ctemp26, ctemp27, ctemp28; + FLOAT ctemp29, ctemp30, ctemp31, ctemp32; + FLOAT ctemp33, ctemp34, ctemp35, ctemp36; + FLOAT ctemp37, ctemp38, ctemp39, ctemp40; + FLOAT ctemp41, ctemp42, ctemp43, ctemp44; + FLOAT ctemp45, ctemp46, ctemp47, ctemp48; + FLOAT ctemp49, ctemp50, ctemp51, ctemp52; + FLOAT ctemp53, ctemp54, ctemp55, ctemp56; + FLOAT ctemp57, ctemp58, ctemp59, ctemp60; + FLOAT ctemp61, ctemp62, ctemp63, ctemp64; + + + aoffset = a; + boffset = b; + + j = (n >> 3); + if (j > 0){ + do{ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset5 = aoffset4 + lda; + aoffset6 = aoffset5 + lda; + aoffset7 = aoffset6 + lda; + aoffset8 = aoffset7 + lda; + aoffset += 8 * lda; + + i = (m >> 3); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + ctemp05 = *(aoffset1 + 4); + ctemp06 = *(aoffset1 + 5); + ctemp07 = *(aoffset1 + 6); + ctemp08 = *(aoffset1 + 7); + + ctemp09 = *(aoffset2 + 0); + ctemp10 = *(aoffset2 + 1); + ctemp11 = *(aoffset2 + 2); + ctemp12 = *(aoffset2 + 3); + ctemp13 = *(aoffset2 + 4); + ctemp14 = *(aoffset2 + 5); + ctemp15 = *(aoffset2 + 6); + ctemp16 = *(aoffset2 + 7); + + ctemp17 = *(aoffset3 + 0); + ctemp18 = *(aoffset3 + 1); + ctemp19 = *(aoffset3 + 2); + ctemp20 = *(aoffset3 + 3); + ctemp21 = *(aoffset3 + 4); + ctemp22 = *(aoffset3 + 5); + ctemp23 = *(aoffset3 + 6); + ctemp24 = *(aoffset3 + 7); + + ctemp25 = *(aoffset4 + 0); + ctemp26 = *(aoffset4 + 1); + ctemp27 = *(aoffset4 + 2); + ctemp28 = *(aoffset4 + 3); + ctemp29 = *(aoffset4 + 4); + ctemp30 = *(aoffset4 + 5); + ctemp31 = *(aoffset4 + 6); + ctemp32 = *(aoffset4 + 7); + + ctemp33 = *(aoffset5 + 0); + ctemp34 = *(aoffset5 + 1); + ctemp35 = *(aoffset5 + 2); + ctemp36 = *(aoffset5 + 3); + ctemp37 = *(aoffset5 + 4); + ctemp38 = *(aoffset5 + 5); + ctemp39 = *(aoffset5 + 6); + ctemp40 = *(aoffset5 + 7); + + ctemp41 = *(aoffset6 + 0); + ctemp42 = *(aoffset6 + 1); + ctemp43 = *(aoffset6 + 2); + ctemp44 = *(aoffset6 + 3); + ctemp45 = *(aoffset6 + 4); + ctemp46 = *(aoffset6 + 5); + ctemp47 = *(aoffset6 + 6); + ctemp48 = *(aoffset6 + 7); + + ctemp49 = *(aoffset7 + 0); + ctemp50 = *(aoffset7 + 1); + ctemp51 = *(aoffset7 + 2); + ctemp52 = *(aoffset7 + 3); + ctemp53 = *(aoffset7 + 4); + ctemp54 = *(aoffset7 + 5); + ctemp55 = *(aoffset7 + 6); + ctemp56 = *(aoffset7 + 7); + + ctemp57 = *(aoffset8 + 0); + ctemp58 = *(aoffset8 + 1); + ctemp59 = *(aoffset8 + 2); + ctemp60 = *(aoffset8 + 3); + ctemp61 = *(aoffset8 + 4); + ctemp62 = *(aoffset8 + 5); + ctemp63 = *(aoffset8 + 6); + ctemp64 = *(aoffset8 + 7); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp09; + *(boffset + 2) = ctemp17; + *(boffset + 3) = ctemp25; + *(boffset + 4) = ctemp33; + *(boffset + 5) = ctemp41; + *(boffset + 6) = ctemp49; + *(boffset + 7) = ctemp57; + + *(boffset + 8) = ctemp02; + 
*(boffset + 9) = ctemp10; + *(boffset + 10) = ctemp18; + *(boffset + 11) = ctemp26; + *(boffset + 12) = ctemp34; + *(boffset + 13) = ctemp42; + *(boffset + 14) = ctemp50; + *(boffset + 15) = ctemp58; + + *(boffset + 16) = ctemp03; + *(boffset + 17) = ctemp11; + *(boffset + 18) = ctemp19; + *(boffset + 19) = ctemp27; + *(boffset + 20) = ctemp35; + *(boffset + 21) = ctemp43; + *(boffset + 22) = ctemp51; + *(boffset + 23) = ctemp59; + + *(boffset + 24) = ctemp04; + *(boffset + 25) = ctemp12; + *(boffset + 26) = ctemp20; + *(boffset + 27) = ctemp28; + *(boffset + 28) = ctemp36; + *(boffset + 29) = ctemp44; + *(boffset + 30) = ctemp52; + *(boffset + 31) = ctemp60; + + *(boffset + 32) = ctemp05; + *(boffset + 33) = ctemp13; + *(boffset + 34) = ctemp21; + *(boffset + 35) = ctemp29; + *(boffset + 36) = ctemp37; + *(boffset + 37) = ctemp45; + *(boffset + 38) = ctemp53; + *(boffset + 39) = ctemp61; + + *(boffset + 40) = ctemp06; + *(boffset + 41) = ctemp14; + *(boffset + 42) = ctemp22; + *(boffset + 43) = ctemp30; + *(boffset + 44) = ctemp38; + *(boffset + 45) = ctemp46; + *(boffset + 46) = ctemp54; + *(boffset + 47) = ctemp62; + + *(boffset + 48) = ctemp07; + *(boffset + 49) = ctemp15; + *(boffset + 50) = ctemp23; + *(boffset + 51) = ctemp31; + *(boffset + 52) = ctemp39; + *(boffset + 53) = ctemp47; + *(boffset + 54) = ctemp55; + *(boffset + 55) = ctemp63; + + *(boffset + 56) = ctemp08; + *(boffset + 57) = ctemp16; + *(boffset + 58) = ctemp24; + *(boffset + 59) = ctemp32; + *(boffset + 60) = ctemp40; + *(boffset + 61) = ctemp48; + *(boffset + 62) = ctemp56; + *(boffset + 63) = ctemp64; + + aoffset1 += 8; + aoffset2 += 8; + aoffset3 += 8; + aoffset4 += 8; + aoffset5 += 8; + aoffset6 += 8; + aoffset7 += 8; + aoffset8 += 8; + boffset += 64; + i --; + }while(i > 0); + } + + i = (m & 7); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp09 = *(aoffset2 + 0); + ctemp17 = *(aoffset3 + 0); + ctemp25 = *(aoffset4 + 0); + ctemp33 = *(aoffset5 + 0); + ctemp41 = *(aoffset6 + 0); + ctemp49 = *(aoffset7 + 0); + ctemp57 = *(aoffset8 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp09; + *(boffset + 2) = ctemp17; + *(boffset + 3) = ctemp25; + *(boffset + 4) = ctemp33; + *(boffset + 5) = ctemp41; + *(boffset + 6) = ctemp49; + *(boffset + 7) = ctemp57; + + aoffset1 ++; + aoffset2 ++; + aoffset3 ++; + aoffset4 ++; + aoffset5 ++; + aoffset6 ++; + aoffset7 ++; + aoffset8 ++; + + boffset += 8; + i --; + }while(i > 0); + } + j--; + }while(j > 0); + } /* end of if(j > 0) */ + + if (n & 4){ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset += 4 * lda; + + i = (m >> 2); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + + ctemp05 = *(aoffset2 + 0); + ctemp06 = *(aoffset2 + 1); + ctemp07 = *(aoffset2 + 2); + ctemp08 = *(aoffset2 + 3); + + ctemp09 = *(aoffset3 + 0); + ctemp10 = *(aoffset3 + 1); + ctemp11 = *(aoffset3 + 2); + ctemp12 = *(aoffset3 + 3); + + ctemp13 = *(aoffset4 + 0); + ctemp14 = *(aoffset4 + 1); + ctemp15 = *(aoffset4 + 2); + ctemp16 = *(aoffset4 + 3); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp05; + *(boffset + 2) = ctemp09; + *(boffset + 3) = ctemp13; + + *(boffset + 4) = ctemp02; + *(boffset + 5) = ctemp06; + *(boffset + 6) = ctemp10; + *(boffset + 7) = ctemp14; + + *(boffset + 8) = ctemp03; + *(boffset + 9) = ctemp07; + *(boffset + 10) = ctemp11; + *(boffset + 11) = ctemp15; + + *(boffset + 12) = ctemp04; + *(boffset + 13) = 
ctemp08; + *(boffset + 14) = ctemp12; + *(boffset + 15) = ctemp16; + + aoffset1 += 4; + aoffset2 += 4; + aoffset3 += 4; + aoffset4 += 4; + boffset += 16; + i --; + }while(i > 0); + } + + i = (m & 3); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset2 + 0); + ctemp03 = *(aoffset3 + 0); + ctemp04 = *(aoffset4 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + *(boffset + 2) = ctemp03; + *(boffset + 3) = ctemp04; + + aoffset1 ++; + aoffset2 ++; + aoffset3 ++; + aoffset4 ++; + + boffset += 4; + i --; + }while(i > 0); + } + } /* end of if(j > 0) */ + + if (n & 2){ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset += 2 * lda; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset2 + 0); + ctemp04 = *(aoffset2 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp03; + *(boffset + 2) = ctemp02; + *(boffset + 3) = ctemp04; + + aoffset1 += 2; + aoffset2 += 2; + boffset += 4; + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset2 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + + aoffset1 ++; + aoffset2 ++; + boffset += 2; + } + } /* end of if(j > 0) */ + + if (n & 1){ + aoffset1 = aoffset; + + i = m; + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + + *(boffset + 0) = ctemp01; + + aoffset1 ++; + boffset ++; + i --; + }while(i > 0); + } + + } /* end of if(j > 0) */ + + return 0; +} diff --git a/kernel/x86_64/dgemm_tcopy_8_skylakex.c b/kernel/x86_64/dgemm_tcopy_8_skylakex.c new file mode 100644 index 000000000..472ad6349 --- /dev/null +++ b/kernel/x86_64/dgemm_tcopy_8_skylakex.c @@ -0,0 +1,417 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include +#include "common.h" +#include + +int CNAME(BLASLONG m, BLASLONG n, FLOAT * __restrict a, BLASLONG lda, FLOAT * __restrict b){ + + BLASLONG i, j; + + FLOAT *aoffset; + FLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4; + FLOAT *aoffset5, *aoffset6, *aoffset7, *aoffset8; + + FLOAT *boffset, *boffset1, *boffset2, *boffset3, *boffset4; + + FLOAT ctemp01, ctemp02, ctemp03, ctemp04; + FLOAT ctemp05, ctemp06, ctemp07, ctemp08; + + aoffset = a; + boffset = b; + +#if 0 + fprintf(stderr, "M = %d N = %d\n", m, n); +#endif + + boffset2 = b + m * (n & ~7); + boffset3 = b + m * (n & ~3); + boffset4 = b + m * (n & ~1); + + j = (m >> 3); + if (j > 0){ + do{ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset5 = aoffset4 + lda; + aoffset6 = aoffset5 + lda; + aoffset7 = aoffset6 + lda; + aoffset8 = aoffset7 + lda; + aoffset += 8 * lda; + + boffset1 = boffset; + boffset += 64; + + i = (n >> 3); + if (i > 0){ + do{ + __m512d row1, row2, row3, row4, row5, row6, row7, row8; + row1 = _mm512_loadu_pd(aoffset1); + aoffset1 += 8; + row2 = _mm512_loadu_pd(aoffset2); + aoffset2 += 8; + row3 = _mm512_loadu_pd(aoffset3); + aoffset3 += 8; + row4 = _mm512_loadu_pd(aoffset4); + aoffset4 += 8; + row5 = _mm512_loadu_pd(aoffset5); + aoffset5 += 8; + row6 = _mm512_loadu_pd(aoffset6); + aoffset6 += 8; + row7 = _mm512_loadu_pd(aoffset7); + aoffset7 += 8; + row8 = _mm512_loadu_pd(aoffset8); + aoffset8 += 8; + + _mm512_storeu_pd(boffset1 + 0, row1); + _mm512_storeu_pd(boffset1 + 8, row2); + _mm512_storeu_pd(boffset1 + 16, row3); + _mm512_storeu_pd(boffset1 + 24, row4); + _mm512_storeu_pd(boffset1 + 32, row5); + _mm512_storeu_pd(boffset1 + 40, row6); + _mm512_storeu_pd(boffset1 + 48, row7); + _mm512_storeu_pd(boffset1 + 56, row8); + boffset1 += m * 8; + i --; + }while(i > 0); + } + + if (n & 4){ + __m256d row1, row2, row3, row4, row5, row6, row7, row8; + row1 = _mm256_loadu_pd(aoffset1); + aoffset1 += 4; + row2 = _mm256_loadu_pd(aoffset2); + aoffset2 += 4; + row3 = _mm256_loadu_pd(aoffset3); + aoffset3 += 4; + row4 = _mm256_loadu_pd(aoffset4); + aoffset4 += 4; + row5 = _mm256_loadu_pd(aoffset5); + aoffset5 += 4; + row6 = _mm256_loadu_pd(aoffset6); + aoffset6 += 4; + row7 = _mm256_loadu_pd(aoffset7); + aoffset7 += 4; + row8 = _mm256_loadu_pd(aoffset8); + aoffset8 += 4; + + _mm256_storeu_pd(boffset2 + 0, row1); + _mm256_storeu_pd(boffset2 + 4, row2); + _mm256_storeu_pd(boffset2 + 8, row3); + _mm256_storeu_pd(boffset2 + 12, row4); + _mm256_storeu_pd(boffset2 + 16, row5); + _mm256_storeu_pd(boffset2 + 20, row6); + _mm256_storeu_pd(boffset2 + 24, row7); + _mm256_storeu_pd(boffset2 + 28, row8); + boffset2 += 32; + } + + if (n & 2){ + __m128d row1, row2, row3, row4, row5, row6, row7, row8; + row1 = _mm_loadu_pd(aoffset1); + aoffset1 += 2; + + row2 = _mm_loadu_pd(aoffset2); + aoffset2 += 2; + + row3 = _mm_loadu_pd(aoffset3); + aoffset3 += 2; + + row4 = _mm_loadu_pd(aoffset4); + aoffset4 += 2; + + row5 = _mm_loadu_pd(aoffset5); + aoffset5 += 2; + + row6 = _mm_loadu_pd(aoffset6); + aoffset6 += 2; + + row7 = _mm_loadu_pd(aoffset7); + aoffset7 += 2; + + row8 = _mm_loadu_pd(aoffset8); + aoffset8 += 2; + + _mm_storeu_pd(boffset3 + 0, row1); + _mm_storeu_pd(boffset3 + 2, row2); + _mm_storeu_pd(boffset3 + 4, row3); + _mm_storeu_pd(boffset3 + 6, row4); + _mm_storeu_pd(boffset3 + 8, row5); + _mm_storeu_pd(boffset3 + 10, row6); + _mm_storeu_pd(boffset3 + 12, row7); + _mm_storeu_pd(boffset3 + 14, 
row8); + boffset3 += 16; + } + + if (n & 1){ + ctemp01 = *(aoffset1 + 0); + aoffset1 ++; + ctemp02 = *(aoffset2 + 0); + aoffset2 ++; + ctemp03 = *(aoffset3 + 0); + aoffset3 ++; + ctemp04 = *(aoffset4 + 0); + aoffset4 ++; + ctemp05 = *(aoffset5 + 0); + aoffset5 ++; + ctemp06 = *(aoffset6 + 0); + aoffset6 ++; + ctemp07 = *(aoffset7 + 0); + aoffset7 ++; + ctemp08 = *(aoffset8 + 0); + aoffset8 ++; + + *(boffset4 + 0) = ctemp01; + *(boffset4 + 1) = ctemp02; + *(boffset4 + 2) = ctemp03; + *(boffset4 + 3) = ctemp04; + *(boffset4 + 4) = ctemp05; + *(boffset4 + 5) = ctemp06; + *(boffset4 + 6) = ctemp07; + *(boffset4 + 7) = ctemp08; + boffset4 += 8; + } + + j--; + }while(j > 0); + } + + if (m & 4){ + + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset += 4 * lda; + + boffset1 = boffset; + boffset += 32; + + i = (n >> 3); + if (i > 0){ + + do{ + __m512d row1, row2, row3, row4; + row1 = _mm512_loadu_pd(aoffset1); + aoffset1 += 8; + row2 = _mm512_loadu_pd(aoffset2); + aoffset2 += 8; + row3 = _mm512_loadu_pd(aoffset3); + aoffset3 += 8; + row4 = _mm512_loadu_pd(aoffset4); + aoffset4 += 8; + + _mm512_storeu_pd(boffset1 + 0, row1); + _mm512_storeu_pd(boffset1 + 8, row2); + _mm512_storeu_pd(boffset1 + 16, row3); + _mm512_storeu_pd(boffset1 + 24, row4); + + boffset1 += 8 * m; + i --; + }while(i > 0); + } + + if (n & 4) { + __m256d row1, row2, row3, row4; + row1 = _mm256_loadu_pd(aoffset1); + aoffset1 += 4; + row2 = _mm256_loadu_pd(aoffset2); + aoffset2 += 4; + row3 = _mm256_loadu_pd(aoffset3); + aoffset3 += 4; + row4 = _mm256_loadu_pd(aoffset4); + aoffset4 += 4; + _mm256_storeu_pd(boffset2 + 0, row1); + _mm256_storeu_pd(boffset2 + 4, row2); + _mm256_storeu_pd(boffset2 + 8, row3); + _mm256_storeu_pd(boffset2 + 12, row4); + boffset2 += 16; + } + + if (n & 2){ + __m128d row1, row2, row3, row4; + row1 = _mm_loadu_pd(aoffset1); + aoffset1 += 2; + + row2 = _mm_loadu_pd(aoffset2); + aoffset2 += 2; + + row3 = _mm_loadu_pd(aoffset3); + aoffset3 += 2; + + row4 = _mm_loadu_pd(aoffset4); + aoffset4 += 2; + + + _mm_storeu_pd(boffset3 + 0, row1); + _mm_storeu_pd(boffset3 + 2, row2); + _mm_storeu_pd(boffset3 + 4, row3); + _mm_storeu_pd(boffset3 + 6, row4); + boffset3 += 8; + } + + if (n & 1){ + ctemp01 = *(aoffset1 + 0); + aoffset1 ++; + ctemp02 = *(aoffset2 + 0); + aoffset2 ++; + ctemp03 = *(aoffset3 + 0); + aoffset3 ++; + ctemp04 = *(aoffset4 + 0); + aoffset4 ++; + + *(boffset4 + 0) = ctemp01; + *(boffset4 + 1) = ctemp02; + *(boffset4 + 2) = ctemp03; + *(boffset4 + 3) = ctemp04; + boffset4 += 4; + } + } + + if (m & 2){ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset += 2 * lda; + + boffset1 = boffset; + boffset += 16; + + i = (n >> 3); + if (i > 0){ + do{ + __m512d row1, row2; + row1 = _mm512_loadu_pd(aoffset1); + aoffset1 += 8; + row2 = _mm512_loadu_pd(aoffset2); + aoffset2 += 8; + + _mm512_storeu_pd(boffset1 + 0, row1); + _mm512_storeu_pd(boffset1 + 8, row2); + boffset1 += 8 * m; + i --; + }while(i > 0); + } + + if (n & 4){ + __m256d row1, row2; + row1 = _mm256_loadu_pd(aoffset1); + aoffset1 += 4; + row2 = _mm256_loadu_pd(aoffset2); + aoffset2 += 4; + _mm256_storeu_pd(boffset2 + 0, row1); + _mm256_storeu_pd(boffset2 + 4, row2); + boffset2 += 8; + } + + if (n & 2){ + __m128d row1, row2; + row1 = _mm_loadu_pd(aoffset1); + aoffset1 += 2; + + row2 = _mm_loadu_pd(aoffset2); + aoffset2 += 2; + + + _mm_storeu_pd(boffset3 + 0, row1); + _mm_storeu_pd(boffset3 + 2, row2); + boffset3 += 4; + } + + if (n & 1){ + ctemp01 = *(aoffset1 + 0); + aoffset1 
++; + ctemp02 = *(aoffset2 + 0); + aoffset2 ++; + + *(boffset4 + 0) = ctemp01; + *(boffset4 + 1) = ctemp02; + boffset4 += 2; + } + } + + if (m & 1){ + aoffset1 = aoffset; + // aoffset += lda; + + boffset1 = boffset; + // boffset += 8; + + i = (n >> 3); + if (i > 0){ + do{ + __m512d row1; + row1 = _mm512_loadu_pd(aoffset1); + aoffset1 += 8; + + _mm512_storeu_pd(boffset1 + 0, row1); + boffset1 += 8 * m; + i --; + }while(i > 0); + } + + if (n & 4){ + __m256d row1; + row1 = _mm256_loadu_pd(aoffset1); + aoffset1 += 4; + _mm256_storeu_pd(boffset2 + 0, row1); + // boffset2 += 4; + } + + if (n & 2){ + __m128d row1; + row1 = _mm_loadu_pd(aoffset1); + aoffset1 += 2; + + _mm_storeu_pd(boffset3 + 0, row1); + + // boffset3 += 2; + } + + if (n & 1){ + ctemp01 = *(aoffset1 + 0); + aoffset1 ++; + *(boffset4 + 0) = ctemp01; + boffset4 ++; + } + } + + return 0; +} From 6d43c51ccf7de3d0f41c2e2b382ada07159cf599 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sat, 6 Oct 2018 14:00:37 +0000 Subject: [PATCH 2/6] undo slow dgemm/skylake microoptimization the compare is more costly than the work --- kernel/x86_64/dgemm_kernel_4x8_skylakex.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/kernel/x86_64/dgemm_kernel_4x8_skylakex.c b/kernel/x86_64/dgemm_kernel_4x8_skylakex.c index 293bd4a99..b5693ea2c 100644 --- a/kernel/x86_64/dgemm_kernel_4x8_skylakex.c +++ b/kernel/x86_64/dgemm_kernel_4x8_skylakex.c @@ -647,11 +647,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define SAVE2x2(ALPHA) \ - if (ALPHA != 1.0) { \ - xmm0 = _mm_set1_pd(ALPHA); \ - xmm4 *= xmm0; \ - xmm6 *= xmm0; \ - } \ + xmm0 = _mm_set1_pd(ALPHA); \ + xmm4 *= xmm0; \ + xmm6 *= xmm0; \ \ xmm4 += _mm_loadu_pd(CO1); \ xmm6 += _mm_loadu_pd(CO1 + ldc); \ From 20c5d668fe316d6f431a34f8734600194644e736 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sat, 6 Oct 2018 14:12:32 +0000 Subject: [PATCH 3/6] dgemm/avx512 simplify and speed up the 4x4 kernel --- kernel/x86_64/dgemm_kernel_4x8_skylakex.c | 26 ++++------------------- 1 file changed, 4 insertions(+), 22 deletions(-) diff --git a/kernel/x86_64/dgemm_kernel_4x8_skylakex.c b/kernel/x86_64/dgemm_kernel_4x8_skylakex.c index b5693ea2c..bb121ca69 100644 --- a/kernel/x86_64/dgemm_kernel_4x8_skylakex.c +++ b/kernel/x86_64/dgemm_kernel_4x8_skylakex.c @@ -333,17 +333,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define KERNEL4x4_SUB() \ ymm0 = _mm256_loadu_pd(AO - 16); \ - ymm1 = _mm256_loadu_pd(BO - 12); \ + ymm1 = _mm256_broadcastsd_pd(_mm_load_sd(BO - 12)); \ \ ymm4 += ymm0 * ymm1; \ \ - ymm0 = _mm256_permute4x64_pd(ymm0, 0xb1); \ + ymm1 = _mm256_broadcastsd_pd(_mm_load_sd(BO - 11)); \ ymm5 += ymm0 * ymm1; \ \ - ymm0 = _mm256_permute4x64_pd(ymm0, 0x1b); \ + ymm1 = _mm256_broadcastsd_pd(_mm_load_sd(BO - 10)); \ ymm6 += ymm0 * ymm1; \ \ - ymm0 = _mm256_permute4x64_pd(ymm0, 0xb1); \ + ymm1 = _mm256_broadcastsd_pd(_mm_load_sd(BO - 9)); \ ymm7 += ymm0 * ymm1; \ AO += 4; \ BO += 4; @@ -356,24 +356,6 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
ymm6 *= ymm0; \ ymm7 *= ymm0; \ \ - ymm5 = _mm256_permute4x64_pd(ymm5, 0xb1); \ - ymm7 = _mm256_permute4x64_pd(ymm7, 0xb1); \ - \ - ymm0 = _mm256_blend_pd(ymm4, ymm5, 0x0a); \ - ymm1 = _mm256_blend_pd(ymm4, ymm5, 0x05); \ - ymm2 = _mm256_blend_pd(ymm6, ymm7, 0x0a); \ - ymm3 = _mm256_blend_pd(ymm6, ymm7, 0x05); \ - \ - ymm2 = _mm256_permute4x64_pd(ymm2, 0x1b); \ - ymm3 = _mm256_permute4x64_pd(ymm3, 0x1b); \ - ymm2 = _mm256_permute4x64_pd(ymm2, 0xb1); \ - ymm3 = _mm256_permute4x64_pd(ymm3, 0xb1); \ - \ - ymm4 = _mm256_blend_pd(ymm2, ymm0, 0x03); \ - ymm5 = _mm256_blend_pd(ymm3, ymm1, 0x03); \ - ymm6 = _mm256_blend_pd(ymm0, ymm2, 0x03); \ - ymm7 = _mm256_blend_pd(ymm1, ymm3, 0x03); \ - \ ymm4 += _mm256_loadu_pd(CO1 + (0 * ldc)); \ ymm5 += _mm256_loadu_pd(CO1 + (1 * ldc)); \ ymm6 += _mm256_loadu_pd(CO1 + (2 * ldc)); \ From 32bec8afbbdb94df4e5a4b127fa8aa5857fccc54 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sat, 6 Oct 2018 16:36:26 +0000 Subject: [PATCH 4/6] add a skylakex optimized dgemm beta function --- kernel/x86_64/KERNEL.SKYLAKEX | 2 +- kernel/x86_64/dgemm_beta_skylakex.c | 150 ++++++++++++++++++++++++++++ 2 files changed, 151 insertions(+), 1 deletion(-) create mode 100644 kernel/x86_64/dgemm_beta_skylakex.c diff --git a/kernel/x86_64/KERNEL.SKYLAKEX b/kernel/x86_64/KERNEL.SKYLAKEX index e34cda770..48c81e80b 100644 --- a/kernel/x86_64/KERNEL.SKYLAKEX +++ b/kernel/x86_64/KERNEL.SKYLAKEX @@ -10,4 +10,4 @@ DGEMMONCOPY = dgemm_ncopy_8_skylakex.c DGEMMOTCOPY = dgemm_tcopy_8_skylakex.c SGEMM_BETA = ../generic/gemm_beta.c -DGEMM_BETA = ../generic/gemm_beta.c +DGEMM_BETA = dgemm_beta_skylakex.c diff --git a/kernel/x86_64/dgemm_beta_skylakex.c b/kernel/x86_64/dgemm_beta_skylakex.c new file mode 100644 index 000000000..384e9f60b --- /dev/null +++ b/kernel/x86_64/dgemm_beta_skylakex.c @@ -0,0 +1,150 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. 
*/ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" + +#include + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT beta, + FLOAT *dummy2, BLASLONG dummy3, FLOAT *dummy4, BLASLONG dummy5, + FLOAT *c, BLASLONG ldc){ + + BLASLONG i, j; + FLOAT *c_offset1, *c_offset; + FLOAT ctemp1, ctemp2, ctemp3, ctemp4; + FLOAT ctemp5, ctemp6, ctemp7, ctemp8; + + /* fast path.. just zero the whole matrix */ + if (m == ldc && (unsigned long)beta == (unsigned long)ZERO) { + memset(c, 0, m * n * sizeof(FLOAT)); + return 0; + } + + + c_offset = c; + + if (beta == ZERO){ + __m512d z_zero; + + z_zero = _mm512_setzero_pd(); + j = n; + do { + c_offset1 = c_offset; + c_offset += ldc; + + i = m; + + while (i > 32) { + _mm512_storeu_pd(c_offset1, z_zero); + _mm512_storeu_pd(c_offset1 + 8, z_zero); + _mm512_storeu_pd(c_offset1 + 16, z_zero); + _mm512_storeu_pd(c_offset1 + 24 , z_zero); + c_offset1 += 32; + i -= 32; + } + while (i > 8) { + _mm512_storeu_pd(c_offset1, z_zero); + c_offset1 += 8; + i -= 8; + } + + while (i > 0) { + *c_offset1 = ZERO; + c_offset1 ++; + i --; + } + j --; + } while (j > 0); + + } else { + + j = n; + do { + c_offset1 = c_offset; + c_offset += ldc; + + i = (m >> 3); + if (i > 0){ + do { + ctemp1 = *(c_offset1 + 0); + ctemp2 = *(c_offset1 + 1); + ctemp3 = *(c_offset1 + 2); + ctemp4 = *(c_offset1 + 3); + ctemp5 = *(c_offset1 + 4); + ctemp6 = *(c_offset1 + 5); + ctemp7 = *(c_offset1 + 6); + ctemp8 = *(c_offset1 + 7); + + ctemp1 *= beta; + ctemp2 *= beta; + ctemp3 *= beta; + ctemp4 *= beta; + ctemp5 *= beta; + ctemp6 *= beta; + ctemp7 *= beta; + ctemp8 *= beta; + + *(c_offset1 + 0) = ctemp1; + *(c_offset1 + 1) = ctemp2; + *(c_offset1 + 2) = ctemp3; + *(c_offset1 + 3) = ctemp4; + *(c_offset1 + 4) = ctemp5; + *(c_offset1 + 5) = ctemp6; + *(c_offset1 + 6) = ctemp7; + *(c_offset1 + 7) = ctemp8; + c_offset1 += 8; + i --; + } while (i > 0); + } + + i = (m & 7); + if (i > 0){ + do { + ctemp1 = *c_offset1; + ctemp1 *= beta; + *c_offset1 = ctemp1; + c_offset1 ++; + i --; + } while (i > 0); + } + j --; + } while (j > 0); + + } + return 0; +}; From adbf6afa25ca5383d48df296262bb4f2bfc0e311 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sat, 6 Oct 2018 21:18:12 +0000 Subject: [PATCH 5/6] Add vector optimizations for ncopy as well for dgemm/skylakex --- kernel/x86_64/dgemm_ncopy_8_skylakex.c | 201 ++++++++++++------------- 1 file changed, 100 insertions(+), 101 deletions(-) diff --git a/kernel/x86_64/dgemm_ncopy_8_skylakex.c b/kernel/x86_64/dgemm_ncopy_8_skylakex.c index 3bc55b8cc..74b336f3d 100644 --- a/kernel/x86_64/dgemm_ncopy_8_skylakex.c +++ b/kernel/x86_64/dgemm_ncopy_8_skylakex.c @@ -38,6 +38,7 @@ #include #include "common.h" +#include int CNAME(BLASLONG m, BLASLONG n, FLOAT * __restrict a, BLASLONG lda, FLOAT * __restrict b){ BLASLONG i, j; @@ -84,131 +85,129 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT * __restrict a, BLASLONG lda, FLOAT * __ i = (m >> 3); if (i > 0){ do{ - ctemp01 = *(aoffset1 + 0); - ctemp02 = *(aoffset1 + 1); - ctemp03 = *(aoffset1 + 2); - ctemp04 = *(aoffset1 + 3); - ctemp05 = *(aoffset1 + 4); - ctemp06 = *(aoffset1 + 5); + __m128d xmm0, xmm1; + xmm0 = _mm_load_pd1(aoffset2 + 0); + xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 0); + _mm_storeu_pd(boffset + 0, xmm0); + 
ctemp07 = *(aoffset1 + 6); ctemp08 = *(aoffset1 + 7); - ctemp09 = *(aoffset2 + 0); - ctemp10 = *(aoffset2 + 1); - ctemp11 = *(aoffset2 + 2); - ctemp12 = *(aoffset2 + 3); - ctemp13 = *(aoffset2 + 4); - ctemp14 = *(aoffset2 + 5); + xmm1 = _mm_load_pd1(aoffset4 + 0); + xmm1 = _mm_loadl_pd(xmm1, aoffset3 + 0); + _mm_storeu_pd(boffset + 2, xmm1); + + xmm0 = _mm_load_pd1(aoffset6 + 0); + xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 0); + _mm_storeu_pd(boffset + 4, xmm0); + + xmm0 = _mm_load_pd1(aoffset8 + 0); + xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 0); + _mm_storeu_pd(boffset + 6, xmm0); + ctemp15 = *(aoffset2 + 6); ctemp16 = *(aoffset2 + 7); - ctemp17 = *(aoffset3 + 0); - ctemp18 = *(aoffset3 + 1); - ctemp19 = *(aoffset3 + 2); - ctemp20 = *(aoffset3 + 3); - ctemp21 = *(aoffset3 + 4); - ctemp22 = *(aoffset3 + 5); + xmm0 = _mm_load_pd1(aoffset2 + 1); + xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 1); + _mm_storeu_pd(boffset + 8, xmm0); + + xmm0 = _mm_load_pd1(aoffset4 + 1); + xmm0 = _mm_loadl_pd(xmm0, aoffset3 + 1); + _mm_storeu_pd(boffset + 10, xmm0); + + xmm0 = _mm_load_pd1(aoffset6 + 1); + xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 1); + _mm_storeu_pd(boffset + 12, xmm0); + + xmm0 = _mm_load_pd1(aoffset8 + 1); + xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 1); + _mm_storeu_pd(boffset + 14, xmm0); + + xmm0 = _mm_load_pd1(aoffset2 + 2); + xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 2); + _mm_storeu_pd(boffset + 16, xmm0); + + xmm0 = _mm_load_pd1(aoffset4 + 2); + xmm0 = _mm_loadl_pd(xmm0, aoffset3 + 2); + _mm_storeu_pd(boffset + 18, xmm0); + + xmm0 = _mm_load_pd1(aoffset6 + 2); + xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 2); + _mm_storeu_pd(boffset + 20, xmm0); + + xmm0 = _mm_load_pd1(aoffset8 + 2); + xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 2); + _mm_storeu_pd(boffset + 22, xmm0); + ctemp23 = *(aoffset3 + 6); ctemp24 = *(aoffset3 + 7); - ctemp25 = *(aoffset4 + 0); - ctemp26 = *(aoffset4 + 1); - ctemp27 = *(aoffset4 + 2); - ctemp28 = *(aoffset4 + 3); - ctemp29 = *(aoffset4 + 4); - ctemp30 = *(aoffset4 + 5); + xmm0 = _mm_load_pd1(aoffset2 + 3); + xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 3); + _mm_storeu_pd(boffset + 24, xmm0); + + xmm0 = _mm_load_pd1(aoffset4 + 3); + xmm0 = _mm_loadl_pd(xmm0, aoffset3 + 3); + _mm_storeu_pd(boffset + 26, xmm0); + + xmm0 = _mm_load_pd1(aoffset6 + 3); + xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 3); + _mm_storeu_pd(boffset + 28, xmm0); + + xmm0 = _mm_load_pd1(aoffset8 + 3); + xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 3); + _mm_storeu_pd(boffset + 30, xmm0); + ctemp31 = *(aoffset4 + 6); ctemp32 = *(aoffset4 + 7); - ctemp33 = *(aoffset5 + 0); - ctemp34 = *(aoffset5 + 1); - ctemp35 = *(aoffset5 + 2); - ctemp36 = *(aoffset5 + 3); - ctemp37 = *(aoffset5 + 4); - ctemp38 = *(aoffset5 + 5); + + xmm0 = _mm_load_pd1(aoffset2 + 4); + xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 4); + _mm_storeu_pd(boffset + 32, xmm0); + + xmm0 = _mm_load_pd1(aoffset4 + 4); + xmm0 = _mm_loadl_pd(xmm0, aoffset3 + 4); + _mm_storeu_pd(boffset + 34, xmm0); + + xmm0 = _mm_load_pd1(aoffset6 + 4); + xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 4); + _mm_storeu_pd(boffset + 36, xmm0); + + xmm0 = _mm_load_pd1(aoffset8 + 4); + xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 4); + _mm_storeu_pd(boffset + 38, xmm0); + ctemp39 = *(aoffset5 + 6); ctemp40 = *(aoffset5 + 7); - ctemp41 = *(aoffset6 + 0); - ctemp42 = *(aoffset6 + 1); - ctemp43 = *(aoffset6 + 2); - ctemp44 = *(aoffset6 + 3); - ctemp45 = *(aoffset6 + 4); - ctemp46 = *(aoffset6 + 5); + xmm0 = _mm_load_pd1(aoffset2 + 5); + xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 5); + _mm_storeu_pd(boffset + 40, xmm0); + + xmm0 = _mm_load_pd1(aoffset4 + 5); 
+ xmm0 = _mm_loadl_pd(xmm0, aoffset3 + 5); + _mm_storeu_pd(boffset + 42, xmm0); + + xmm0 = _mm_load_pd1(aoffset6 + 5); + xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 5); + _mm_storeu_pd(boffset + 44, xmm0); + + xmm0 = _mm_load_pd1(aoffset8 + 5); + xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 5); + _mm_storeu_pd(boffset + 46, xmm0); + + ctemp47 = *(aoffset6 + 6); ctemp48 = *(aoffset6 + 7); - ctemp49 = *(aoffset7 + 0); - ctemp50 = *(aoffset7 + 1); - ctemp51 = *(aoffset7 + 2); - ctemp52 = *(aoffset7 + 3); - ctemp53 = *(aoffset7 + 4); - ctemp54 = *(aoffset7 + 5); ctemp55 = *(aoffset7 + 6); ctemp56 = *(aoffset7 + 7); - ctemp57 = *(aoffset8 + 0); - ctemp58 = *(aoffset8 + 1); - ctemp59 = *(aoffset8 + 2); - ctemp60 = *(aoffset8 + 3); - ctemp61 = *(aoffset8 + 4); - ctemp62 = *(aoffset8 + 5); ctemp63 = *(aoffset8 + 6); ctemp64 = *(aoffset8 + 7); - *(boffset + 0) = ctemp01; - *(boffset + 1) = ctemp09; - *(boffset + 2) = ctemp17; - *(boffset + 3) = ctemp25; - *(boffset + 4) = ctemp33; - *(boffset + 5) = ctemp41; - *(boffset + 6) = ctemp49; - *(boffset + 7) = ctemp57; - - *(boffset + 8) = ctemp02; - *(boffset + 9) = ctemp10; - *(boffset + 10) = ctemp18; - *(boffset + 11) = ctemp26; - *(boffset + 12) = ctemp34; - *(boffset + 13) = ctemp42; - *(boffset + 14) = ctemp50; - *(boffset + 15) = ctemp58; - - *(boffset + 16) = ctemp03; - *(boffset + 17) = ctemp11; - *(boffset + 18) = ctemp19; - *(boffset + 19) = ctemp27; - *(boffset + 20) = ctemp35; - *(boffset + 21) = ctemp43; - *(boffset + 22) = ctemp51; - *(boffset + 23) = ctemp59; - - *(boffset + 24) = ctemp04; - *(boffset + 25) = ctemp12; - *(boffset + 26) = ctemp20; - *(boffset + 27) = ctemp28; - *(boffset + 28) = ctemp36; - *(boffset + 29) = ctemp44; - *(boffset + 30) = ctemp52; - *(boffset + 31) = ctemp60; - - *(boffset + 32) = ctemp05; - *(boffset + 33) = ctemp13; - *(boffset + 34) = ctemp21; - *(boffset + 35) = ctemp29; - *(boffset + 36) = ctemp37; - *(boffset + 37) = ctemp45; - *(boffset + 38) = ctemp53; - *(boffset + 39) = ctemp61; - - *(boffset + 40) = ctemp06; - *(boffset + 41) = ctemp14; - *(boffset + 42) = ctemp22; - *(boffset + 43) = ctemp30; - *(boffset + 44) = ctemp38; - *(boffset + 45) = ctemp46; - *(boffset + 46) = ctemp54; - *(boffset + 47) = ctemp62; *(boffset + 48) = ctemp07; *(boffset + 49) = ctemp15; From 582c589727302938e99bf594bf072d3d9913575e Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sat, 6 Oct 2018 23:13:26 +0000 Subject: [PATCH 6/6] dgemm/skylakex: replace discrete mul/add with fma very minor gains since it's not super hot code, but general principles --- kernel/x86_64/dgemm_kernel_4x8_skylakex.c | 155 +++++++--------------- 1 file changed, 49 insertions(+), 106 deletions(-) diff --git a/kernel/x86_64/dgemm_kernel_4x8_skylakex.c b/kernel/x86_64/dgemm_kernel_4x8_skylakex.c index bb121ca69..a83ca98fa 100644 --- a/kernel/x86_64/dgemm_kernel_4x8_skylakex.c +++ b/kernel/x86_64/dgemm_kernel_4x8_skylakex.c @@ -927,39 +927,15 @@ CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A, "jg .label24\n" /* multiply the result by alpha */ "vbroadcastsd (%[alpha]), %%zmm9\n" - "vmulpd %%zmm9, %%zmm1, %%zmm1\n" - "vmulpd %%zmm9, %%zmm2, %%zmm2\n" - "vmulpd %%zmm9, %%zmm3, %%zmm3\n" - "vmulpd %%zmm9, %%zmm4, %%zmm4\n" - "vmulpd %%zmm9, %%zmm5, %%zmm5\n" - "vmulpd %%zmm9, %%zmm6, %%zmm6\n" - "vmulpd %%zmm9, %%zmm7, %%zmm7\n" - "vmulpd %%zmm9, %%zmm8, %%zmm8\n" - "vmulpd %%zmm9, %%zmm11, %%zmm11\n" - "vmulpd %%zmm9, %%zmm12, %%zmm12\n" - "vmulpd %%zmm9, %%zmm13, %%zmm13\n" - "vmulpd %%zmm9, %%zmm14, %%zmm14\n" - "vmulpd %%zmm9, 
%%zmm15, %%zmm15\n" - "vmulpd %%zmm9, %%zmm16, %%zmm16\n" - "vmulpd %%zmm9, %%zmm17, %%zmm17\n" - "vmulpd %%zmm9, %%zmm18, %%zmm18\n" - "vmulpd %%zmm9, %%zmm21, %%zmm21\n" - "vmulpd %%zmm9, %%zmm22, %%zmm22\n" - "vmulpd %%zmm9, %%zmm23, %%zmm23\n" - "vmulpd %%zmm9, %%zmm24, %%zmm24\n" - "vmulpd %%zmm9, %%zmm25, %%zmm25\n" - "vmulpd %%zmm9, %%zmm26, %%zmm26\n" - "vmulpd %%zmm9, %%zmm27, %%zmm27\n" - "vmulpd %%zmm9, %%zmm28, %%zmm28\n" /* And store additively in C */ - "vaddpd (%[C0]), %%zmm1, %%zmm1\n" - "vaddpd (%[C1]), %%zmm2, %%zmm2\n" - "vaddpd (%[C2]), %%zmm3, %%zmm3\n" - "vaddpd (%[C3]), %%zmm4, %%zmm4\n" - "vaddpd (%[C4]), %%zmm5, %%zmm5\n" - "vaddpd (%[C5]), %%zmm6, %%zmm6\n" - "vaddpd (%[C6]), %%zmm7, %%zmm7\n" - "vaddpd (%[C7]), %%zmm8, %%zmm8\n" + "vfmadd213pd (%[C0]), %%zmm9, %%zmm1\n" + "vfmadd213pd (%[C1]), %%zmm9, %%zmm2\n" + "vfmadd213pd (%[C2]), %%zmm9, %%zmm3\n" + "vfmadd213pd (%[C3]), %%zmm9, %%zmm4\n" + "vfmadd213pd (%[C4]), %%zmm9, %%zmm5\n" + "vfmadd213pd (%[C5]), %%zmm9, %%zmm6\n" + "vfmadd213pd (%[C6]), %%zmm9, %%zmm7\n" + "vfmadd213pd (%[C7]), %%zmm9, %%zmm8\n" "vmovupd %%zmm1, (%[C0])\n" "vmovupd %%zmm2, (%[C1])\n" "vmovupd %%zmm3, (%[C2])\n" @@ -969,14 +945,14 @@ CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A, "vmovupd %%zmm7, (%[C6])\n" "vmovupd %%zmm8, (%[C7])\n" - "vaddpd 64(%[C0]), %%zmm11, %%zmm11\n" - "vaddpd 64(%[C1]), %%zmm12, %%zmm12\n" - "vaddpd 64(%[C2]), %%zmm13, %%zmm13\n" - "vaddpd 64(%[C3]), %%zmm14, %%zmm14\n" - "vaddpd 64(%[C4]), %%zmm15, %%zmm15\n" - "vaddpd 64(%[C5]), %%zmm16, %%zmm16\n" - "vaddpd 64(%[C6]), %%zmm17, %%zmm17\n" - "vaddpd 64(%[C7]), %%zmm18, %%zmm18\n" + "vfmadd213pd 64(%[C0]), %%zmm9, %%zmm11\n" + "vfmadd213pd 64(%[C1]), %%zmm9, %%zmm12\n" + "vfmadd213pd 64(%[C2]), %%zmm9, %%zmm13\n" + "vfmadd213pd 64(%[C3]), %%zmm9, %%zmm14\n" + "vfmadd213pd 64(%[C4]), %%zmm9, %%zmm15\n" + "vfmadd213pd 64(%[C5]), %%zmm9, %%zmm16\n" + "vfmadd213pd 64(%[C6]), %%zmm9, %%zmm17\n" + "vfmadd213pd 64(%[C7]), %%zmm9, %%zmm18\n" "vmovupd %%zmm11, 64(%[C0])\n" "vmovupd %%zmm12, 64(%[C1])\n" "vmovupd %%zmm13, 64(%[C2])\n" @@ -986,14 +962,14 @@ CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A, "vmovupd %%zmm17, 64(%[C6])\n" "vmovupd %%zmm18, 64(%[C7])\n" - "vaddpd 128(%[C0]), %%zmm21, %%zmm21\n" - "vaddpd 128(%[C1]), %%zmm22, %%zmm22\n" - "vaddpd 128(%[C2]), %%zmm23, %%zmm23\n" - "vaddpd 128(%[C3]), %%zmm24, %%zmm24\n" - "vaddpd 128(%[C4]), %%zmm25, %%zmm25\n" - "vaddpd 128(%[C5]), %%zmm26, %%zmm26\n" - "vaddpd 128(%[C6]), %%zmm27, %%zmm27\n" - "vaddpd 128(%[C7]), %%zmm28, %%zmm28\n" + "vfmadd213pd 128(%[C0]), %%zmm9, %%zmm21\n" + "vfmadd213pd 128(%[C1]), %%zmm9, %%zmm22\n" + "vfmadd213pd 128(%[C2]), %%zmm9, %%zmm23\n" + "vfmadd213pd 128(%[C3]), %%zmm9, %%zmm24\n" + "vfmadd213pd 128(%[C4]), %%zmm9, %%zmm25\n" + "vfmadd213pd 128(%[C5]), %%zmm9, %%zmm26\n" + "vfmadd213pd 128(%[C6]), %%zmm9, %%zmm27\n" + "vfmadd213pd 128(%[C7]), %%zmm9, %%zmm28\n" "vmovupd %%zmm21, 128(%[C0])\n" "vmovupd %%zmm22, 128(%[C1])\n" "vmovupd %%zmm23, 128(%[C2])\n" @@ -1108,31 +1084,15 @@ CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A, "jg .label16\n" /* multiply the result by alpha */ "vbroadcastsd (%[alpha]), %%zmm9\n" - "vmulpd %%zmm9, %%zmm1, %%zmm1\n" - "vmulpd %%zmm9, %%zmm2, %%zmm2\n" - "vmulpd %%zmm9, %%zmm3, %%zmm3\n" - "vmulpd %%zmm9, %%zmm4, %%zmm4\n" - "vmulpd %%zmm9, %%zmm5, %%zmm5\n" - "vmulpd %%zmm9, %%zmm6, %%zmm6\n" - "vmulpd %%zmm9, %%zmm7, %%zmm7\n" - "vmulpd %%zmm9, 
%%zmm8, %%zmm8\n" - "vmulpd %%zmm9, %%zmm11, %%zmm11\n" - "vmulpd %%zmm9, %%zmm12, %%zmm12\n" - "vmulpd %%zmm9, %%zmm13, %%zmm13\n" - "vmulpd %%zmm9, %%zmm14, %%zmm14\n" - "vmulpd %%zmm9, %%zmm15, %%zmm15\n" - "vmulpd %%zmm9, %%zmm16, %%zmm16\n" - "vmulpd %%zmm9, %%zmm17, %%zmm17\n" - "vmulpd %%zmm9, %%zmm18, %%zmm18\n" /* And store additively in C */ - "vaddpd (%[C0]), %%zmm1, %%zmm1\n" - "vaddpd (%[C1]), %%zmm2, %%zmm2\n" - "vaddpd (%[C2]), %%zmm3, %%zmm3\n" - "vaddpd (%[C3]), %%zmm4, %%zmm4\n" - "vaddpd (%[C4]), %%zmm5, %%zmm5\n" - "vaddpd (%[C5]), %%zmm6, %%zmm6\n" - "vaddpd (%[C6]), %%zmm7, %%zmm7\n" - "vaddpd (%[C7]), %%zmm8, %%zmm8\n" + "vfmadd213pd (%[C0]), %%zmm9, %%zmm1\n" + "vfmadd213pd (%[C1]), %%zmm9, %%zmm2\n" + "vfmadd213pd (%[C2]), %%zmm9, %%zmm3\n" + "vfmadd213pd (%[C3]), %%zmm9, %%zmm4\n" + "vfmadd213pd (%[C4]), %%zmm9, %%zmm5\n" + "vfmadd213pd (%[C5]), %%zmm9, %%zmm6\n" + "vfmadd213pd (%[C6]), %%zmm9, %%zmm7\n" + "vfmadd213pd (%[C7]), %%zmm9, %%zmm8\n" "vmovupd %%zmm1, (%[C0])\n" "vmovupd %%zmm2, (%[C1])\n" "vmovupd %%zmm3, (%[C2])\n" @@ -1142,14 +1102,14 @@ CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A, "vmovupd %%zmm7, (%[C6])\n" "vmovupd %%zmm8, (%[C7])\n" - "vaddpd 64(%[C0]), %%zmm11, %%zmm11\n" - "vaddpd 64(%[C1]), %%zmm12, %%zmm12\n" - "vaddpd 64(%[C2]), %%zmm13, %%zmm13\n" - "vaddpd 64(%[C3]), %%zmm14, %%zmm14\n" - "vaddpd 64(%[C4]), %%zmm15, %%zmm15\n" - "vaddpd 64(%[C5]), %%zmm16, %%zmm16\n" - "vaddpd 64(%[C6]), %%zmm17, %%zmm17\n" - "vaddpd 64(%[C7]), %%zmm18, %%zmm18\n" + "vfmadd213pd 64(%[C0]), %%zmm9, %%zmm11\n" + "vfmadd213pd 64(%[C1]), %%zmm9, %%zmm12\n" + "vfmadd213pd 64(%[C2]), %%zmm9, %%zmm13\n" + "vfmadd213pd 64(%[C3]), %%zmm9, %%zmm14\n" + "vfmadd213pd 64(%[C4]), %%zmm9, %%zmm15\n" + "vfmadd213pd 64(%[C5]), %%zmm9, %%zmm16\n" + "vfmadd213pd 64(%[C6]), %%zmm9, %%zmm17\n" + "vfmadd213pd 64(%[C7]), %%zmm9, %%zmm18\n" "vmovupd %%zmm11, 64(%[C0])\n" "vmovupd %%zmm12, 64(%[C1])\n" "vmovupd %%zmm13, 64(%[C2])\n" @@ -1221,24 +1181,15 @@ CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A, "add $64, %[BO]\n" "subl $1, %[kloop]\n" "jg .label1\n" - /* multiply the result by alpha */ - "vmulpd %%zmm9, %%zmm1, %%zmm1\n" - "vmulpd %%zmm9, %%zmm2, %%zmm2\n" - "vmulpd %%zmm9, %%zmm3, %%zmm3\n" - "vmulpd %%zmm9, %%zmm4, %%zmm4\n" - "vmulpd %%zmm9, %%zmm5, %%zmm5\n" - "vmulpd %%zmm9, %%zmm6, %%zmm6\n" - "vmulpd %%zmm9, %%zmm7, %%zmm7\n" - "vmulpd %%zmm9, %%zmm8, %%zmm8\n" - /* And store additively in C */ - "vaddpd (%[C0]), %%zmm1, %%zmm1\n" - "vaddpd (%[C1]), %%zmm2, %%zmm2\n" - "vaddpd (%[C2]), %%zmm3, %%zmm3\n" - "vaddpd (%[C3]), %%zmm4, %%zmm4\n" - "vaddpd (%[C4]), %%zmm5, %%zmm5\n" - "vaddpd (%[C5]), %%zmm6, %%zmm6\n" - "vaddpd (%[C6]), %%zmm7, %%zmm7\n" - "vaddpd (%[C7]), %%zmm8, %%zmm8\n" + /* multiply the result by alpha and add to the memory */ + "vfmadd213pd (%[C0]), %%zmm9, %%zmm1\n" + "vfmadd213pd (%[C1]), %%zmm9, %%zmm2\n" + "vfmadd213pd (%[C2]), %%zmm9, %%zmm3\n" + "vfmadd213pd (%[C3]), %%zmm9, %%zmm4\n" + "vfmadd213pd (%[C4]), %%zmm9, %%zmm5\n" + "vfmadd213pd (%[C5]), %%zmm9, %%zmm6\n" + "vfmadd213pd (%[C6]), %%zmm9, %%zmm7\n" + "vfmadd213pd (%[C7]), %%zmm9, %%zmm8\n" "vmovupd %%zmm1, (%[C0])\n" "vmovupd %%zmm2, (%[C1])\n" "vmovupd %%zmm3, (%[C2])\n" @@ -1247,14 +1198,6 @@ CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A, "vmovupd %%zmm6, (%[C5])\n" "vmovupd %%zmm7, (%[C6])\n" "vmovupd %%zmm8, (%[C7])\n" - "prefetchw 64(%[C0])\n" - "prefetchw 64(%[C1])\n" - 
"prefetchw 64(%[C2])\n" - "prefetchw 64(%[C3])\n" - "prefetchw 64(%[C4])\n" - "prefetchw 64(%[C5])\n" - "prefetchw 64(%[C6])\n" - "prefetchw 64(%[C7])\n" : [AO] "+r" (AO), [BO] "+r" (BO),