diff --git a/.travis.yml b/.travis.yml index 0b280c2fc..51679af62 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,10 +4,10 @@ dist: precise sudo: true language: c -jobs: +matrix: include: - &test-ubuntu - stage: test + os: linux compiler: gcc addons: apt: @@ -57,7 +57,7 @@ jobs: - TARGET_BOX=LINUX32 - BTYPE="BINARY=32" - - stage: test + - os: linux compiler: gcc addons: apt: @@ -77,13 +77,13 @@ jobs: # which is slower than container-based infrastructure used for jobs # that don't require sudo. - &test-alpine - stage: test + os: linux dist: trusty sudo: true language: minimal before_install: - - "wget 'https://raw.githubusercontent.com/alpinelinux/alpine-chroot-install/v0.6.0/alpine-chroot-install' \ - && echo 'a827a4ba3d0817e7c88bae17fe34e50204983d1e alpine-chroot-install' | sha1sum -c || exit 1" + - "wget 'https://raw.githubusercontent.com/alpinelinux/alpine-chroot-install/v0.9.0/alpine-chroot-install' \ + && echo 'e5dfbbdc0c4b3363b99334510976c86bfa6cb251 alpine-chroot-install' | sha1sum -c || exit 1" - alpine() { /alpine/enter-chroot -u "$USER" "$@"; } install: - sudo sh alpine-chroot-install -p 'build-base gfortran perl linux-headers' @@ -117,10 +117,10 @@ jobs: - <<: *test-alpine env: - TARGET_BOX=LINUX64_MUSL - - BTYPE="BINARY=64 NO_AFFINITY=1 USE_OPENMP=0 NO_LAPACK=0 TARGET=core2" + - BTYPE="BINARY=64 NO_AFFINITY=1 USE_OPENMP=0 NO_LAPACK=0 TARGET=CORE2" - &test-cmake - stage: test + os: linux compiler: clang addons: apt: @@ -147,6 +147,58 @@ jobs: env: - CMAKE=1 + - &test-macos + os: osx + osx_image: xcode8.3 + before_script: + - COMMON_FLAGS="DYNAMIC_ARCH=1 TARGET=NEHALEM NUM_THREADS=32" + - brew update + - brew install gcc # for gfortran + script: + - travis_wait 45 make QUIET_MAKE=1 $COMMON_FLAGS $BTYPE + env: + - BTYPE="BINARY=64 INTERFACE64=1" + + - <<: *test-macos + env: + - BTYPE="BINARY=32" + + - &emulated-arm + dist: trusty + sudo: required + services: docker + env: IMAGE_ARCH=arm32 TARGET_ARCH=ARMV6 COMPILER=gcc + name: "Emulated Build for ARMV6 with gcc" + before_install: sudo docker run --rm --privileged multiarch/qemu-user-static:register --reset + script: | + echo "FROM openblas/alpine:${IMAGE_ARCH} + COPY . /tmp/openblas + RUN mkdir /tmp/openblas/build && \ + cd /tmp/openblas/build && \ + CC=${COMPILER} cmake -D DYNAMIC_ARCH=OFF \ + -D TARGET=${TARGET_ARCH} \ + -D BUILD_SHARED_LIBS=ON \ + -D BUILD_WITHOUT_LAPACK=ON \ + -D BUILD_WITHOUT_CBLAS=ON \ + -D CMAKE_BUILD_TYPE=Release ../ && \ + cmake --build ." > Dockerfile + docker build . 
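[Editorial note: the emulated job above can be reproduced locally on any x86_64 host with Docker installed. The following is a minimal sketch, not part of the patch: it mirrors the job's IMAGE_ARCH=arm32, TARGET_ARCH=ARMV6, COMPILER=gcc settings and assumes the `openblas/alpine:arm32` image referenced by the job is available.]

```sh
# Register QEMU binfmt handlers so ARM binaries run transparently on x86,
# same as the job's before_install step.
sudo docker run --rm --privileged multiarch/qemu-user-static:register --reset
# Run the same CMake build the generated Dockerfile performs, from an OpenBLAS checkout.
docker run --rm -v "$PWD":/tmp/openblas -w /tmp/openblas openblas/alpine:arm32 \
  sh -c 'mkdir -p build && cd build && \
         CC=gcc cmake -D DYNAMIC_ARCH=OFF -D TARGET=ARMV6 \
               -D BUILD_SHARED_LIBS=ON -D BUILD_WITHOUT_LAPACK=ON \
               -D BUILD_WITHOUT_CBLAS=ON -D CMAKE_BUILD_TYPE=Release .. && \
         cmake --build .'
```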
+ - <<: *emulated-arm + env: IMAGE_ARCH=arm32 TARGET_ARCH=ARMV6 COMPILER=clang + name: "Emulated Build for ARMV6 with clang" + - <<: *emulated-arm + env: IMAGE_ARCH=arm64 TARGET_ARCH=ARMV8 COMPILER=gcc + name: "Emulated Build for ARMV8 with gcc" + - <<: *emulated-arm + env: IMAGE_ARCH=arm64 TARGET_ARCH=ARMV8 COMPILER=clang + name: "Emulated Build for ARMV8 with clang" + + allow_failures: + - env: IMAGE_ARCH=arm32 TARGET_ARCH=ARMV6 COMPILER=gcc + - env: IMAGE_ARCH=arm32 TARGET_ARCH=ARMV6 COMPILER=clang + - env: IMAGE_ARCH=arm64 TARGET_ARCH=ARMV8 COMPILER=gcc + - env: IMAGE_ARCH=arm64 TARGET_ARCH=ARMV8 COMPILER=clang + # whitelist branches: only: diff --git a/CMakeLists.txt b/CMakeLists.txt index b5789119a..812e6bf6f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,21 +6,30 @@ cmake_minimum_required(VERSION 2.8.5) project(OpenBLAS C ASM) set(OpenBLAS_MAJOR_VERSION 0) set(OpenBLAS_MINOR_VERSION 3) -set(OpenBLAS_PATCH_VERSION 0.dev) +set(OpenBLAS_PATCH_VERSION 6.dev) set(OpenBLAS_VERSION "${OpenBLAS_MAJOR_VERSION}.${OpenBLAS_MINOR_VERSION}.${OpenBLAS_PATCH_VERSION}") # Adhere to GNU filesystem layout conventions include(GNUInstallDirs) -set(OpenBLAS_LIBNAME openblas) +include(CMakePackageConfigHelpers) + ####### if(MSVC) -option(BUILD_WITHOUT_LAPACK "Without LAPACK and LAPACKE (Only BLAS or CBLAS)" ON) +option(BUILD_WITHOUT_LAPACK "Do not build LAPACK and LAPACKE (Only BLAS or CBLAS)" ON) endif() -option(BUILD_WITHOUT_CBLAS "Without CBLAS" OFF) -option(DYNAMIC_ARCH "Build with DYNAMIC_ARCH" OFF) -option(BUILD_RELAPACK "Build with ReLAPACK (recursive LAPACK" OFF) +option(BUILD_WITHOUT_CBLAS "Do not build the C interface (CBLAS) to the BLAS functions" OFF) +option(DYNAMIC_ARCH "Include support for multiple CPU targets, with automatic selection at runtime (x86/x86_64 only)" OFF) +option(DYNAMIC_OLDER "Include specific support for older cpu models (Penryn,Dunnington,Atom,Nano,Opteron) with DYNAMIC_ARCH" OFF) +option(BUILD_RELAPACK "Build with ReLAPACK (recursive implementation of several LAPACK functions on top of standard LAPACK)" OFF) + +# Add a prefix or suffix to all exported symbol names in the shared library. +# Avoids conflicts with other BLAS libraries, especially when using +# 64 bit integer interfaces in OpenBLAS. + +set(SYMBOLPREFIX "" CACHE STRING "Add a prefix to all exported symbol names in the shared library to avoid conflicts with other BLAS libraries" ) +set(SYMBOLSUFFIX "" CACHE STRING "Add a suffix to all exported symbol names in the shared library, e.g. _64 for INTERFACE64 builds" ) ####### if(BUILD_WITHOUT_LAPACK) set(NO_LAPACK 1) @@ -34,11 +43,13 @@ endif() ####### -message(WARNING "CMake support is experimental. This will not produce the same Makefiles that OpenBLAS ships with. Only x86 support is currently available.") +message(WARNING "CMake support is experimental. 
It does not yet support all build options and may not produce the same Makefiles that OpenBLAS ships with.")
 include("${PROJECT_SOURCE_DIR}/cmake/utils.cmake")
 include("${PROJECT_SOURCE_DIR}/cmake/system.cmake")
+set(OpenBLAS_LIBNAME openblas${SUFFIX64_UNDERSCORE})
+
 set(BLASDIRS interface driver/level2 driver/level3 driver/others)
 if (NOT DYNAMIC_ARCH)
@@ -146,6 +157,7 @@ endif()
 # add objects to the openblas lib
 add_library(${OpenBLAS_LIBNAME} ${LA_SOURCES} ${LAPACKE_SOURCES} ${RELA_SOURCES} ${TARGET_OBJS} ${OpenBLAS_DEF_FILE})
+target_include_directories(${OpenBLAS_LIBNAME} INTERFACE $<INSTALL_INTERFACE:include>)
 # Android needs to explicitly link against libm
 if(ANDROID)
@@ -165,6 +177,7 @@ endif()
 # Set output for libopenblas
 set_target_properties( ${OpenBLAS_LIBNAME} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/lib)
 set_target_properties( ${OpenBLAS_LIBNAME} PROPERTIES LIBRARY_OUTPUT_NAME_DEBUG "${OpenBLAS_LIBNAME}_d")
+set_target_properties( ${OpenBLAS_LIBNAME} PROPERTIES EXPORT_NAME "OpenBLAS")
 foreach (OUTPUTCONFIG ${CMAKE_CONFIGURATION_TYPES})
 string( TOUPPER ${OUTPUTCONFIG} OUTPUTCONFIG )
@@ -204,14 +217,84 @@ set_target_properties(${OpenBLAS_LIBNAME} PROPERTIES SOVERSION ${OpenBLAS_MAJOR_VERSION} )
+if (BUILD_SHARED_LIBS AND NOT ${SYMBOLPREFIX}${SYMBOLSUFFIX} STREQUAL "")
+if (NOT DEFINED ARCH)
+  set(ARCH_IN "x86_64")
+else()
+  set(ARCH_IN ${ARCH})
+endif()
+
+if (${CORE} STREQUAL "generic")
+  set(ARCH_IN "GENERIC")
+endif ()
+
+if (NOT DEFINED EXPRECISION)
+  set(EXPRECISION_IN 0)
+else()
+  set(EXPRECISION_IN ${EXPRECISION})
+endif()
+
+if (NOT DEFINED NO_CBLAS)
+  set(NO_CBLAS_IN 0)
+else()
+  set(NO_CBLAS_IN ${NO_CBLAS})
+endif()
+
+if (NOT DEFINED NO_LAPACK)
+  set(NO_LAPACK_IN 0)
+else()
+  set(NO_LAPACK_IN ${NO_LAPACK})
+endif()
+
+if (NOT DEFINED NO_LAPACKE)
+  set(NO_LAPACKE_IN 0)
+else()
+  set(NO_LAPACKE_IN ${NO_LAPACKE})
+endif()
+
+if (NOT DEFINED NEED2UNDERSCORES)
+  set(NEED2UNDERSCORES_IN 0)
+else()
+  set(NEED2UNDERSCORES_IN ${NEED2UNDERSCORES})
+endif()
+
+if (NOT DEFINED ONLY_CBLAS)
+  set(ONLY_CBLAS_IN 0)
+else()
+  set(ONLY_CBLAS_IN ${ONLY_CBLAS})
+endif()
+
+if (NOT DEFINED BU)
+  set(BU _)
+endif()
+
+if (NOT ${SYMBOLPREFIX} STREQUAL "")
+message(STATUS "adding prefix ${SYMBOLPREFIX} to names of exported symbols in ${OpenBLAS_LIBNAME}")
+endif()
+if (NOT ${SYMBOLSUFFIX} STREQUAL "")
+message(STATUS "adding suffix ${SYMBOLSUFFIX} to names of exported symbols in ${OpenBLAS_LIBNAME}")
+endif()
+
+add_custom_command(TARGET ${OpenBLAS_LIBNAME} POST_BUILD
+  COMMAND perl ${PROJECT_SOURCE_DIR}/exports/gensymbol "objcopy" "${ARCH}" "${BU}" "${EXPRECISION_IN}" "${NO_CBLAS_IN}" "${NO_LAPACK_IN}" "${NO_LAPACKE_IN}" "${NEED2UNDERSCORES_IN}" "${ONLY_CBLAS_IN}" \"${SYMBOLPREFIX}\" \"${SYMBOLSUFFIX}\" "${BUILD_LAPACK_DEPRECATED}" > ${PROJECT_BINARY_DIR}/objcopy.def
+  COMMAND objcopy -v --redefine-syms ${PROJECT_BINARY_DIR}/objcopy.def ${PROJECT_BINARY_DIR}/lib/lib${OpenBLAS_LIBNAME}.so
+  COMMENT "renaming symbols"
+  )
+endif()
+
+
 # Install project
 # Install libraries
 install(TARGETS ${OpenBLAS_LIBNAME}
+  EXPORT "OpenBLAS${SUFFIX64}Targets"
   RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
   ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
   LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} )
+# Install headers
+set(CMAKE_INSTALL_INCLUDEDIR ${CMAKE_INSTALL_INCLUDEDIR}/openblas${SUFFIX64})
+set(CMAKE_INSTALL_FULL_INCLUDEDIR ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR})
+
 message(STATUS "Generating openblas_config.h in ${CMAKE_INSTALL_INCLUDEDIR}")
 set(OPENBLAS_CONFIG_H ${CMAKE_BINARY_DIR}/openblas_config.h)
@@ -259,11 +342,31 @@ if(NOT NO_LAPACKE)
 ADD_CUSTOM_TARGET(genlapacke
 COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/lapack-netlib/LAPACKE/include/lapacke_mangling_with_flags.h.in "${CMAKE_BINARY_DIR}/lapacke_mangling.h"
 )
- install (FILES ${CMAKE_BINARY_DIR}/lapacke_mangling.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
+ install (FILES ${CMAKE_BINARY_DIR}/lapacke_mangling.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/openblas${SUFFIX64})
 endif()
 include(FindPkgConfig QUIET)
 if(PKG_CONFIG_FOUND)
- configure_file(${PROJECT_SOURCE_DIR}/cmake/openblas.pc.in ${PROJECT_BINARY_DIR}/openblas.pc @ONLY)
- install (FILES ${PROJECT_BINARY_DIR}/openblas.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig/)
+ configure_file(${PROJECT_SOURCE_DIR}/cmake/openblas.pc.in ${PROJECT_BINARY_DIR}/openblas${SUFFIX64}.pc @ONLY)
+ install (FILES ${PROJECT_BINARY_DIR}/openblas${SUFFIX64}.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig/)
 endif()
+
+
+# GNUInstallDirs "DATADIR" wrong here; CMake search path wants "share".
+set(PN OpenBLAS)
+set(CMAKECONFIG_INSTALL_DIR "share/cmake/${PN}${SUFFIX64}")
+configure_package_config_file(cmake/${PN}Config.cmake.in
+  "${CMAKE_CURRENT_BINARY_DIR}/${PN}${SUFFIX64}Config.cmake"
+  INSTALL_DESTINATION ${CMAKECONFIG_INSTALL_DIR})
+write_basic_package_version_file(${CMAKE_CURRENT_BINARY_DIR}/${PN}ConfigVersion.cmake
+  VERSION ${${PN}_VERSION}
+  COMPATIBILITY AnyNewerVersion)
+install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${PN}${SUFFIX64}Config.cmake
+  DESTINATION ${CMAKECONFIG_INSTALL_DIR})
+install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${PN}ConfigVersion.cmake
+  RENAME ${PN}${SUFFIX64}ConfigVersion.cmake
+  DESTINATION ${CMAKECONFIG_INSTALL_DIR})
+install(EXPORT "${PN}${SUFFIX64}Targets"
+  NAMESPACE "${PN}${SUFFIX64}::"
+  DESTINATION ${CMAKECONFIG_INSTALL_DIR})
+
diff --git a/Changelog.txt b/Changelog.txt
index cb6fee70a..49b26873a 100644
--- a/Changelog.txt
+++ b/Changelog.txt
@@ -1,4 +1,247 @@
 OpenBLAS ChangeLog
+====================================================================
+Version 0.3.5
+31-Dec-2018
+
+common:
+  * loop unrolling in TRMV has been enabled again.
+  * A domain error in the thread workload distribution for SYRK
+    has been fixed.
+  * gmake builds will now automatically add -fPIC to the build
+    options if the platform requires it.
+  * a pthreads key leakage (and associated crash on dlclose) in
+    the USE_TLS codepath was fixed.
+  * building of the utest cases on systems that do not provide
+    an implementation of complex.h was fixed.
+
+x86_64:
+  * the SkylakeX code was changed to compile on OSX.
+  * unwanted application of the -march=skylake-avx512 option
+    to the common code parts of a DYNAMIC_ARCH build was fixed.
+  * improved performance of SGEMM for small workloads on Skylake X.
+  * performance of SGEMM and DGEMM was improved on Haswell.
+
+ARMV8:
+  * a configuration error that broke the CNRM2 kernel was corrected.
+  * compilation of the GEMM kernels with CMAKE was fixed.
+  * DYNAMIC_ARCH builds are now available with CMAKE as well.
+  * using CMAKE for cross-compilation to the new cpu TARGETs
+    introduced in 0.3.4 now works.
+
+POWER:
+  * a problem in cpu autodetection for AIX has been corrected.
+
+====================================================================
+Version 0.3.4
+02-Dec-2018
+
+common:
+  * the new, experimental thread-local memory allocation had
+    inadvertently been left enabled for gmake builds in 0.3.3
+    despite the announcement. It is now disabled by default, and
+    single-threaded builds will keep using the old allocator even
+    if the USE_TLS option is turned on.
+  * OpenBLAS will now provide enough buffer space for at least 50
+    threads by default.
+  * The output of openblas_get_config() now contains the version
+    number.
+  * A serious thread safety bug in GEMV operation with small M and
+    large N size has been fixed.
+  * The code will now automatically call blas_thread_init after a
+    fork if needed before handling a call to openblas_set_num_threads.
+  * Accesses to parallelized level3 functions from multiple callers
+    are now serialized to avoid thread races (unless using OpenMP).
+    This should provide better performance than the known-threadsafe
+    (but non-default) USE_SIMPLE_THREADED_LEVEL3 option.
+  * When building LAPACK with gfortran, -frecursive is now (again)
+    enabled by default to ensure correct behaviour.
+  * The OpenBLAS version of cblas.h now supports both CBLAS_ORDER and
+    CBLAS_LAYOUT as the name of the matrix row/column order option.
+  * Externally set LDFLAGS are now passed through to the final compile/link
+    steps to facilitate setting platform-specific linker flags.
+  * A potential race condition during the build of LAPACK (that would
+    usually manifest itself as a failure to build TESTING/MATGEN) has been
+    fixed.
+  * xHEMV has been changed to stay single-threaded for small input sizes
+    where the overhead of multithreading exceeds any possible gains.
+  * CSWAP and ZSWAP have been limited to a single thread except on ARMV8 or
+    ThunderX hardware with sizable input.
+  * Linker flags for the PGI compiler have been updated.
+  * Behaviour of AXPY with zero increments is now handled in the C interface,
+    correcting the result on at least Intel Atom.
+  * The result matrix from calling SGELSS with an all-zero input matrix is
+    now zeroed completely.
+
+x86_64:
+  * Autodetection of AMD Ryzen2 has been fixed (again).
+  * CMAKE builds now support labeling of an INTERFACE64=1 build of
+    the library with the _64 suffix.
+  * AVX512 version of DGEMM has been added and the AVX512 SGEMM kernel
+    has been sped up by rewriting with C intrinsics.
+  * Fixed compilation on RHEL5/CENTOS5 (issue with typename __WAIT_STATUS)
+
+POWER:
+  * added support for building on AIX (with gcc and GNU tools from AIX Toolbox).
+  * CPU type detection has been implemented for AIX.
+  * CPU type detection has been fixed for NETBSD.
+
+MIPS64:
+  * AXPY on LOONGSON3A has been corrected to pass "zero increment" utest.
+  * DSDOT on LOONGSON3A has been fixed.
+  * the SGEMM microkernel has been hardened against potential data loss.
+
+ARMV8:
+  * DYNAMIC_ARCH support is now available for 64bit ARM
+  * cross-compiling for ARMV8 under iOS now works.
+  * cpu-specific code has been rearranged to make better use of both
+    hardware commonalities and model-specific compiler optimizations.
+  * XGENE1 has been removed as a TARGET, superseded by the improved generic
+    ARMV8 support.
+
+ARMV7:
+  * Older assembly mnemonics have been converted to UAL form to allow
+    building with clang 7.0.
+  * Cross compiling LAPACKE for Android has been fixed again (broken by
+    update to LAPACK 3.7.0 some while ago).
+
+====================================================================
+Version 0.3.3
+31-Aug-2018
+
+common:
+  * thread memory allocation has been switched back to the method
+    used before version 0.3.1 due to unexpected problems caused by
+    the new code under some circumstances. A new compile-time option
+    USE_TLS has been added to enable the new code, and it is hoped
+    that this can become the default again in the next version.
+  * LAPACK PR272 has been integrated, which fixes spurious errors
+    in DSYEVR and related functions caused by missing conversion
+    from ILAENV to ILAENV_2STAGE in several _2stage routines.
+  * the cmake-generated OpenBLASConfig.cmake now uses correct case
+    for the name of the library.
+  * added support for Haiku OS.
+
+x86_64:
+  * added AVX512 implementations of SDOT, DDOT, SAXPY, DAXPY,
+    DSCAL, DGEMVN and DSYMVL
+  * added a workaround for a cygwin issue that prevented compilation
+    of AVX512 code
+
+IBM Z:
+  * added autodetection of Z14
+  * fixed TRMM errors in the generic target
+
+====================================================================
+Version 0.3.2
+30-Jul-2018
+
+common:
+  * fixes for regressions caused by the rewrite of the thread
+    initialization code in 0.3.1
+
+POWER:
+  * fixed cpu autodetection for the BSDs
+
+MIPS64:
+  * fixed utest errors in AXPY, DSDOT, ROT and SWAP
+
+x86_64:
+  * added autodetection of AMD Ryzen 2
+  * fixed build with older versions of MSVC
+
+====================================================================
+Version 0.3.1
+01-Jul-2018
+
+common:
+  * rewritten thread initialization code with significantly reduced overhead
+  * added CBLAS interfaces to the IxAMIN BLAS extension functions
+  * fixed the lapack-test target
+  * CMAKE builds now create an OpenBLASConfig.cmake file
+  * ZAXPY now uses a single thread for small input sizes
+  * the LAPACK code was updated from Reference-LAPACK/lapack#253
+    (fixing LAPACKE interfaces to Aasen's functions)
+
+POWER:
+  * corrected CROT and ZROT behaviour with zero INC_X
+
+ARMV7:
+  * corrected xDOT behaviour with zero INC_X or INC_Y
+
+x86_64:
+  * retired some older targets of DYNAMIC_ARCH builds to a new option DYNAMIC_OLDER,
+    this affects PENRYN,DUNNINGTON,OPTERON,OPTERON_SSE3,BOBCAT,ATOM and NANO
+    (which will still be supported via the slower PRESCOTT kernels when this option is not set)
+  * added an option DYNAMIC_LIST that (used in conjunction with DYNAMIC_ARCH) allows
+    specifying the list of x86_64 targets to include. Any target not on the list will be supported
+    by the Sandybridge or Nehalem kernels if available, or by Prescott.
+ * improved SWITCH_RATIO on Haswell for increased GEMM throughput + * added initial support for Intel Skylake X, including an AVX512 SGEMM kernel + * added autodetection of Intel Cannon Lake series as Skylake X + * added a default L2 cache size for hypervisors that return zero here (Chromebook) + * fixed a name clash with recent Windows10 headers that broke the build with (at least) + recent mingw from MSYS2 + * fixed a link error in mixed clang/gfortran builds with OpenMP + * updated the OSX deployment target to 10.8 + * switched on parallel make for builds on MS Windows by default + +x86: + * fixed SSWAP and DSWAP behaviour with zero INC_X and INC_Y + +==================================================================== +Version 0.3.0 +23-May-2108 + +common: + * fixed some more thread race and locking bugs + * added preliminary support for calling an OpenMP build of the library from multiple threads + * removed performance impact of thread locks added in 0.2.20 on OpenMP code + * general code cleanup + * optimized DSDOT implementation + * improved thread distribution for GEMM + * corrected IMATCOPY/OMATCOPY implementation + * fixed out-of-bounds accesses in the multithreaded xBMV/xPMV and SYMV implementations + * cmake build improvements + * pkgconfig file now contains build options + * openblas_get_config() now reports USE_OPENMP and NUM_THREADS settings used for the build + * corrections and improvements for systems with more than 64 cpus + * LAPACK code updated to 3.8.0 including later fixes + * added ReLAPACK, a recursive implementation of several LAPACK functions + * Rewrote ROTMG to handle cases that the netlib code failed to address + * Disabled (broken) multithreading code for xTRMV + * corrected prototypes of complex CBLAS functions to make our cblas.h match the generally accepted standard + * shared memory access failures on startup are now handled more gracefully + * restored utests from earlier releases (and made them pass on all affected systems) + +SPARC: + * several fixes for cpu autodetection + +POWER: + * corrected vector register overwriting in several Power8 kernels + * optimized additional BLAS functions + +ARM: + * added support for CortexA53 and A72 + * added autodetection for ThunderX2T99 + * made most optimized kernels the default for generic ARMv8 targets + +x86_64: + * parallelized DDOT kernel for Haswell + * changed alignment directives in assembly kernels to boost performance on OSX + * fixed register handling in the GEMV microkernels (bug exposed by gcc7) + * added support for building on OpenBSD and Dragonfly + * updated compiler options to work with Intel release 2018 + * support fully optimized build with clang/flang on Microsoft Windows + * fixed building on AIX + +IBM Z: + * added optimized BLAS 1/2 functions + +MIPS: + * fixed cpu autodetection helper code + * added mips32 1004K cpu (Mediatek MT7621 and similar SoC) + * added mips64 I6500 cpu + ==================================================================== Version 0.2.20 24-Jul-2017 diff --git a/Makefile b/Makefile index 5198f9e2b..21096f893 100644 --- a/Makefile +++ b/Makefile @@ -21,6 +21,17 @@ ifeq ($(BUILD_RELAPACK), 1) RELA = re_lapack endif +ifeq ($(NO_FORTRAN), 1) +define NOFORTRAN +1 +endef +define NO_LAPACK +1 +endef +export NOFORTRAN +export NO_LAPACK +endif + LAPACK_NOOPT := $(filter-out -O0 -O1 -O2 -O3 -Ofast,$(LAPACK_FFLAGS)) SUBDIRS_ALL = $(SUBDIRS) test ctest utest exports benchmark ../laswp ../bench @@ -47,7 +58,7 @@ endif endif @echo " C compiler ... 
$(C_COMPILER) (command line : $(CC))" -ifndef NOFORTRAN +ifeq ($(NOFORTRAN), $(filter 0,$(NOFORTRAN))) @echo " Fortran compiler ... $(F_COMPILER) (command line : $(FC))" endif ifneq ($(OSNAME), AIX) @@ -86,16 +97,12 @@ endif shared : ifndef NO_SHARED -ifeq ($(OSNAME), $(filter $(OSNAME),Linux SunOS Android)) +ifeq ($(OSNAME), $(filter $(OSNAME),Linux SunOS Android Haiku)) @$(MAKE) -C exports so @ln -fs $(LIBSONAME) $(LIBPREFIX).so @ln -fs $(LIBSONAME) $(LIBPREFIX).so.$(MAJOR_VERSION) endif -ifeq ($(OSNAME), FreeBSD) - @$(MAKE) -C exports so - @ln -fs $(LIBSONAME) $(LIBPREFIX).so -endif -ifeq ($(OSNAME), NetBSD) +ifeq ($(OSNAME), $(filter $(OSNAME),FreeBSD OpenBSD NetBSD DragonFly)) @$(MAKE) -C exports so @ln -fs $(LIBSONAME) $(LIBPREFIX).so endif @@ -112,7 +119,7 @@ endif endif tests : -ifndef NOFORTRAN +ifeq ($(NOFORTRAN), $(filter 0,$(NOFORTRAN))) touch $(LIBNAME) ifndef NO_FBLAS $(MAKE) -C test all @@ -124,7 +131,7 @@ endif endif libs : -ifeq ($(CORE), UNKOWN) +ifeq ($(CORE), UNKNOWN) $(error OpenBLAS: Detecting CPU failed. Please set TARGET explicitly, e.g. make TARGET=your_cpu_target. Please read README for the detail.) endif ifeq ($(NOFORTRAN), 1) @@ -157,6 +164,9 @@ ifeq ($(DYNAMIC_ARCH), 1) do $(MAKE) GOTOBLAS_MAKEFILE= -C kernel TARGET_CORE=$$d kernel || exit 1 ;\ done @echo DYNAMIC_ARCH=1 >> Makefile.conf_last +ifeq ($(DYNAMIC_OLDER), 1) + @echo DYNAMIC_OLDER=1 >> Makefile.conf_last +endif endif ifdef USE_THREAD @echo USE_THREAD=$(USE_THREAD) >> Makefile.conf_last @@ -211,7 +221,7 @@ netlib : else netlib : lapack_prebuild -ifndef NOFORTRAN +ifeq ($(NOFORTRAN), $(filter 0,$(NOFORTRAN))) @$(MAKE) -C $(NETLIB_LAPACK_DIR) lapacklib @$(MAKE) -C $(NETLIB_LAPACK_DIR) tmglib endif @@ -232,7 +242,7 @@ prof_lapack : lapack_prebuild @$(MAKE) -C $(NETLIB_LAPACK_DIR) lapack_prof lapack_prebuild : -ifndef NOFORTRAN +ifeq ($(NOFORTRAN), $(filter 0,$(NOFORTRAN))) -@echo "FORTRAN = $(FC)" > $(NETLIB_LAPACK_DIR)/make.inc -@echo "OPTS = $(LAPACK_FFLAGS)" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "POPTS = $(LAPACK_FPFLAGS)" >> $(NETLIB_LAPACK_DIR)/make.inc @@ -241,7 +251,7 @@ ifndef NOFORTRAN -@echo "LOADOPTS = $(FFLAGS) $(EXTRALIB)" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "CC = $(CC)" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "override CFLAGS = $(LAPACK_CFLAGS)" >> $(NETLIB_LAPACK_DIR)/make.inc - -@echo "ARCH = $(AR)" >> $(NETLIB_LAPACK_DIR)/make.inc + -@echo "override ARCH = $(AR)" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "ARCHFLAGS = $(ARFLAGS) -ru" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "RANLIB = $(RANLIB)" >> $(NETLIB_LAPACK_DIR)/make.inc -@echo "LAPACKLIB = ../$(LIBNAME)" >> $(NETLIB_LAPACK_DIR)/make.inc @@ -257,6 +267,8 @@ ifeq ($(F_COMPILER), GFORTRAN) ifdef SMP ifeq ($(OSNAME), WINNT) -@echo "LOADER = $(FC)" >> $(NETLIB_LAPACK_DIR)/make.inc +else ifeq ($(OSNAME), Haiku) + -@echo "LOADER = $(FC)" >> $(NETLIB_LAPACK_DIR)/make.inc else -@echo "LOADER = $(FC) -pthread" >> $(NETLIB_LAPACK_DIR)/make.inc endif @@ -275,21 +287,21 @@ endif endif large.tgz : -ifndef NOFORTRAN +ifeq ($(NOFORTRAN), $(filter 0,$(NOFORTRAN))) if [ ! -a $< ]; then -wget http://www.netlib.org/lapack/timing/large.tgz; fi endif timing.tgz : -ifndef NOFORTRAN +ifeq ($(NOFORTRAN), $(filter 0,$(NOFORTRAN))) if [ ! 
-a $< ]; then -wget http://www.netlib.org/lapack/timing/timing.tgz; fi endif lapack-timing : large.tgz timing.tgz -ifndef NOFORTRAN +ifeq ($(NOFORTRAN), $(filter 0,$(NOFORTRAN))) (cd $(NETLIB_LAPACK_DIR); $(TAR) zxf ../timing.tgz TIMING) (cd $(NETLIB_LAPACK_DIR)/TIMING; $(TAR) zxf ../../large.tgz ) $(MAKE) -C $(NETLIB_LAPACK_DIR)/TIMING @@ -298,9 +310,10 @@ endif lapack-test : (cd $(NETLIB_LAPACK_DIR)/TESTING && rm -f x* *.out) - $(MAKE) -j 1 -C $(NETLIB_LAPACK_DIR)/TESTING xeigtstc xeigtstd xeigtsts xeigtstz xlintstc xlintstd xlintstds xlintstrfd xlintstrfz xlintsts xlintstz xlintstzc xlintstrfs xlintstrfc + $(MAKE) -j 1 -C $(NETLIB_LAPACK_DIR)/TESTING/EIG xeigtstc xeigtstd xeigtsts xeigtstz + $(MAKE) -j 1 -C $(NETLIB_LAPACK_DIR)/TESTING/LIN xlintstc xlintstd xlintstds xlintstrfd xlintstrfz xlintsts xlintstz xlintstzc xlintstrfs xlintstrfc ifneq ($(CROSS), 1) - ( cd $(NETLIB_LAPACK_DIR)/INSTALL; ./testlsame; ./testslamch; ./testdlamch; \ + ( cd $(NETLIB_LAPACK_DIR)/INSTALL; make all; ./testlsame; ./testslamch; ./testdlamch; \ ./testsecond; ./testdsecnd; ./testieee; ./testversion ) (cd $(NETLIB_LAPACK_DIR); ./lapack_testing.py -r ) endif @@ -312,9 +325,9 @@ lapack-runtest: blas-test: - (cd $(NETLIB_LAPACK_DIR)/BLAS && rm -f x* *.out) + (cd $(NETLIB_LAPACK_DIR)/BLAS/TESTING && rm -f x* *.out) $(MAKE) -j 1 -C $(NETLIB_LAPACK_DIR) blas_testing - (cd $(NETLIB_LAPACK_DIR)/BLAS && cat *.out) + (cd $(NETLIB_LAPACK_DIR)/BLAS/TESTING && cat *.out) dummy : diff --git a/Makefile.arm64 b/Makefile.arm64 index d19e796a5..cd16dbfae 100644 --- a/Makefile.arm64 +++ b/Makefile.arm64 @@ -4,22 +4,37 @@ CCOMMON_OPT += -march=armv8-a FCOMMON_OPT += -march=armv8-a endif -ifeq ($(CORE), CORTEXA57) -CCOMMON_OPT += -march=armv8-a+crc+crypto+fp+simd -mtune=cortex-a57 -FCOMMON_OPT += -march=armv8-a+crc+crypto+fp+simd -mtune=cortex-a57 +ifeq ($(CORE), CORTEXA53) +CCOMMON_OPT += -march=armv8-a -mtune=cortex-a53 +FCOMMON_OPT += -march=armv8-a -mtune=cortex-a53 endif -ifeq ($(CORE), VULCAN) -CCOMMON_OPT += -mtune=vulcan -mcpu=vulcan -FCOMMON_OPT += -mtune=vulcan -mcpu=vulcan +ifeq ($(CORE), CORTEXA57) +CCOMMON_OPT += -march=armv8-a -mtune=cortex-a57 +FCOMMON_OPT += -march=armv8-a -mtune=cortex-a57 +endif + +ifeq ($(CORE), CORTEXA72) +CCOMMON_OPT += -march=armv8-a -mtune=cortex-a72 +FCOMMON_OPT += -march=armv8-a -mtune=cortex-a72 +endif + +ifeq ($(CORE), CORTEXA73) +CCOMMON_OPT += -march=armv8-a -mtune=cortex-a73 +FCOMMON_OPT += -march=armv8-a -mtune=cortex-a73 endif ifeq ($(CORE), THUNDERX) -CCOMMON_OPT += -mtune=thunderx -mcpu=thunderx -FCOMMON_OPT += -mtune=thunderx -mcpu=thunderx +CCOMMON_OPT += -march=armv8-a -mtune=thunderx +FCOMMON_OPT += -march=armv8-a -mtune=thunderx +endif + +ifeq ($(CORE), FALKOR) +CCOMMON_OPT += -march=armv8-a -mtune=falkor +FCOMMON_OPT += -march=armv8-a -mtune=falkor endif ifeq ($(CORE), THUNDERX2T99) -CCOMMON_OPT += -mtune=thunderx2t99 -mcpu=thunderx2t99 -FCOMMON_OPT += -mtune=thunderx2t99 -mcpu=thunderx2t99 +CCOMMON_OPT += -march=armv8.1-a -mtune=thunderx2t99 +FCOMMON_OPT += -march=armv8.1-a -mtune=thunderx2t99 endif diff --git a/Makefile.install b/Makefile.install index 81d097215..069c96c6a 100644 --- a/Makefile.install +++ b/Makefile.install @@ -48,6 +48,7 @@ ifndef NO_CBLAS @sed 's/common/openblas_config/g' cblas.h > "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/cblas.h" endif +ifneq ($(OSNAME), AIX) ifndef NO_LAPACKE @echo Copying LAPACKE header files to $(DESTDIR)$(OPENBLAS_INCLUDE_DIR) @-install -pm644 $(NETLIB_LAPACK_DIR)/LAPACKE/include/lapacke.h "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/lapacke.h" 
@@ -66,18 +67,14 @@ endif #for install shared library ifndef NO_SHARED @echo Copying the shared library to $(DESTDIR)$(OPENBLAS_LIBRARY_DIR) -ifeq ($(OSNAME), $(filter $(OSNAME),Linux SunOS Android)) +ifeq ($(OSNAME), $(filter $(OSNAME),Linux SunOS Android Haiku)) @install -pm755 $(LIBSONAME) "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)" @cd "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)" ; \ ln -fs $(LIBSONAME) $(LIBPREFIX).so ; \ ln -fs $(LIBSONAME) $(LIBPREFIX).so.$(MAJOR_VERSION) endif -ifeq ($(OSNAME), FreeBSD) - @cp $(LIBSONAME) "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)" - @cd "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)" ; \ - ln -fs $(LIBSONAME) $(LIBPREFIX).so -endif -ifeq ($(OSNAME), NetBSD) + +ifeq ($(OSNAME), $(filter $(OSNAME),FreeBSD OpenBSD NetBSD DragonFly)) @cp $(LIBSONAME) "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)" @cd "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)" ; \ ln -fs $(LIBSONAME) $(LIBPREFIX).so @@ -98,11 +95,39 @@ ifeq ($(OSNAME), CYGWIN_NT) endif endif +else +#install on AIX has different options syntax +ifndef NO_LAPACKE + @echo Copying LAPACKE header files to $(DESTDIR)$(OPENBLAS_INCLUDE_DIR) + @-installbsd -c -m 644 $(NETLIB_LAPACK_DIR)/LAPACKE/include/lapacke.h "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/lapacke.h" + @-installbsd -c -m 644 $(NETLIB_LAPACK_DIR)/LAPACKE/include/lapacke_config.h "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/lapacke_config.h" + @-installbsd -c -m 644 $(NETLIB_LAPACK_DIR)/LAPACKE/include/lapacke_mangling_with_flags.h.in "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/lapacke_mangling.h" + @-installbsd -c -m 644 $(NETLIB_LAPACK_DIR)/LAPACKE/include/lapacke_utils.h "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/lapacke_utils.h" +endif + +#for install static library +ifndef NO_STATIC + @echo Copying the static library to $(DESTDIR)$(OPENBLAS_LIBRARY_DIR) + @installbsd -c -m 644 $(LIBNAME) "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)" + @cd "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)" ; \ + ln -fs $(LIBNAME) $(LIBPREFIX).$(LIBSUFFIX) +endif +#for install shared library +ifndef NO_SHARED + @echo Copying the shared library to $(DESTDIR)$(OPENBLAS_LIBRARY_DIR) + @installbsd -c -m 755 $(LIBSONAME) "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)" + @cd "$(DESTDIR)$(OPENBLAS_LIBRARY_DIR)" ; \ + ln -fs $(LIBSONAME) $(LIBPREFIX).so ; \ + ln -fs $(LIBSONAME) $(LIBPREFIX).so.$(MAJOR_VERSION) +endif + +endif #Generating openblas.pc @echo Generating openblas.pc in "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)" - @echo 'libdir='$(OPENBLAS_LIBRARY_DIR) >> "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/openblas.pc" + @echo 'libdir='$(OPENBLAS_LIBRARY_DIR) > "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/openblas.pc" @echo 'includedir='$(OPENBLAS_INCLUDE_DIR) >> "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/openblas.pc" + @echo 'openblas_config= USE_64BITINT='$(USE_64BITINT) 'DYNAMIC_ARCH='$(DYNAMIC_ARCH) 'DYNAMIC_OLDER='$(DYNAMIC_OLDER) 'NO_CBLAS='$(NO_CBLAS) 'NO_LAPACK='$(NO_LAPACK) 'NO_LAPACKE='$(NO_LAPACKE) 'NO_AFFINITY='$(NO_AFFINITY) 'USE_OPENMP='$(USE_OPENMP) $(CORE) 'MAX_THREADS='$(NUM_THREADS)>> "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/openblas.pc" @echo 'version='$(VERSION) >> "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/openblas.pc" @echo 'extralib='$(EXTRALIB) >> "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/openblas.pc" @cat openblas.pc.in >> "$(DESTDIR)$(OPENBLAS_PKGCONFIG_DIR)/openblas.pc" @@ -115,7 +140,7 @@ endif ifndef NO_SHARED #ifeq logical or -ifeq ($(OSNAME), $(filter $(OSNAME),Linux FreeBSD NetBSD)) +ifeq ($(OSNAME), $(filter $(OSNAME),Linux FreeBSD NetBSD OpenBSD DragonFly)) @echo "SET(OpenBLAS_LIBRARIES ${OPENBLAS_LIBRARY_DIR}/$(LIBPREFIX).so)" >> "$(DESTDIR)$(OPENBLAS_CMAKE_DIR)/$(OPENBLAS_CMAKE_CONFIG)" endif 
ifeq ($(OSNAME), $(filter $(OSNAME),WINNT CYGWIN_NT))
diff --git a/Makefile.prebuild b/Makefile.prebuild
index daa556f65..a366004a1 100644
--- a/Makefile.prebuild
+++ b/Makefile.prebuild
@@ -17,6 +17,10 @@ ifdef CPUIDEMU
 EXFLAGS = -DCPUIDEMU -DVENDOR=99
 endif
 
+ifeq ($(TARGET), 1004K)
+TARGET_FLAGS = -mips32r2
+endif
+
 ifeq ($(TARGET), P5600)
 TARGET_FLAGS = -mips32r5
 endif
diff --git a/Makefile.rule b/Makefile.rule
index 718f04090..7c128fb49 100644
--- a/Makefile.rule
+++ b/Makefile.rule
@@ -3,7 +3,7 @@
 #
 
 # This library's version
-VERSION = 0.3.0.dev
+VERSION = 0.3.6.dev
 
 # If you set the suffix, the library name will be libopenblas_$(LIBNAMESUFFIX).a
 # and libopenblas_$(LIBNAMESUFFIX).so. Meanwhile, the soname in shared library
@@ -17,6 +17,11 @@ VERSION = 0.3.0.dev
 # If you want to support multiple architecture in one binary
 # DYNAMIC_ARCH = 1
 
+# If you want the full list of x86_64 architectures supported in DYNAMIC_ARCH
+# mode (including individual optimized codes for PENRYN, DUNNINGTON, OPTERON,
+# OPTERON_SSE3, ATOM and NANO rather than fallbacks to older architectures)
+# DYNAMIC_OLDER = 1
+
 # C compiler including binary type(32bit / 64bit). Default is gcc.
 # Don't use Intel Compiler or PGI, it won't generate right codes as I expect.
 # CC = gcc
@@ -55,11 +60,26 @@ VERSION = 0.3.0.dev
 # This flag is always set for POWER8. Don't modify the flag
 # USE_OPENMP = 1
 
+# The OpenMP scheduler to use - by default this is "static" and you
+# will normally not want to change this unless you know that your main
+# workload will involve tasks that have highly unbalanced running times
+# for individual threads. Changing away from "static" may also adversely
+# affect memory access locality in NUMA systems. Setting to "runtime" will
+# allow you to select the scheduler from the environment variable OMP_SCHEDULE.
+# CCOMMON_OPT += -DOMP_SCHED=dynamic
+
 # You can define maximum number of threads. Basically it should be
 # less than actual number of cores. If you don't specify one, it's
 # automatically detected by the script.
 # NUM_THREADS = 24
 
+# If you have enabled USE_OPENMP and your application would call
+# OpenBLAS's calculation API from multiple threads, please comment it in.
+# This flag defines how many instances of OpenBLAS's calculation API can
+# actually run in parallel. If more threads call OpenBLAS's calculation API,
+# they need to wait for the preceding API calls to finish or risk data corruption.
+# NUM_PARALLEL = 2
+
 # if you don't need to install the static library, please comment it in.
 # NO_STATIC = 1
@@ -89,6 +109,12 @@ BUILD_LAPACK_DEPRECATED = 1
 # If you want to use legacy threaded Level 3 implementation.
 # USE_SIMPLE_THREADED_LEVEL3 = 1
 
+# If you want to use the new, still somewhat experimental code that uses
+# thread-local storage instead of a central memory buffer in memory.c
+# Note that if your system uses GLIBC, it needs to have at least glibc 2.21
+# for this to work.
+# USE_TLS = 1
+
 # If you want to drive whole 64bit region by BLAS. Not all Fortran
 # compiler supports this. It's safe to keep comment it out if you
 # are not sure(equivalent to "-i8" option).
@@ -100,7 +126,7 @@ BUILD_LAPACK_DEPRECATED = 1
 NO_WARMUP = 1
 
 # If you want to disable CPU/Memory affinity on Linux.
-#NO_AFFINITY = 1
+NO_AFFINITY = 1
 
 # if you are compiling for Linux and you have more than 16 numa nodes or more than 256 cpus
 # BIGNUMA = 1
@@ -126,6 +152,9 @@ NO_WARMUP = 1
 # FUNCTION_PROFILE = 1
 
 # Support for IEEE quad precision(it's *real* REAL*16)( under testing)
+# This option should not be used - it is a holdover from unfinished code present
+# in the original GotoBLAS2 library that may be usable as a starting point but
+# is not even expected to compile in its present form.
 # QUAD_PRECISION = 1
 
 # Threads are still working for a while after finishing BLAS operation
@@ -144,8 +173,11 @@ NO_WARMUP = 1
 # CONSISTENT_FPCSR = 1
 
 # If any gemm argument m, n or k is less than or equal to this threshold, gemm will be executed
-# with single thread. You can use this flag to avoid the overhead of multi-threading
-# in small matrix sizes. The default value is 4.
+# with single thread. (Actually in recent versions this is a factor proportional to the
+# number of floating point operations necessary for the given problem size, no longer
+# an individual dimension). You can use this setting to avoid the overhead of multi-
+# threading in small matrix sizes. The default value is 4, but values as high as 50 have
+# been reported to be optimal for certain workloads (50 is the recommended value for Julia).
 # GEMM_MULTITHREAD_THRESHOLD = 4
 
 # If you need a sanity check by comparing reference BLAS. It'll be very
@@ -160,8 +192,8 @@ NO_WARMUP = 1
 # Flags for POWER8 are defined in Makefile.power. Don't modify COMMON_OPT
 # COMMON_OPT = -O2
 
-# gfortran option for LAPACK
-# enable this flag only on 64bit Linux and if you need a thread safe lapack library
+# gfortran option for LAPACK to improve thread-safety
+# It is enabled by default in Makefile.system for gfortran
 # Flags for POWER8 are defined in Makefile.power. Don't modify FCOMMON_OPT
 # FCOMMON_OPT = -frecursive
diff --git a/Makefile.system b/Makefile.system
index 5caad0b03..20d4f6492 100644
--- a/Makefile.system
+++ b/Makefile.system
@@ -9,6 +9,17 @@ ifndef TOPDIR
 TOPDIR = .
 endif
 
+# Catch conflicting usage of ARCH in some BSD environments
+ifeq ($(ARCH), amd64)
+override ARCH=x86_64
+else ifeq ($(ARCH), powerpc64)
+override ARCH=power
+else ifeq ($(ARCH), i386)
+override ARCH=x86
+else ifeq ($(ARCH), aarch64)
+override ARCH=arm64
+endif
+
 NETLIB_LAPACK_DIR = $(TOPDIR)/lapack-netlib
 
 # Default C compiler
@@ -17,15 +28,24 @@ NETLIB_LAPACK_DIR = $(TOPDIR)/lapack-netlib
 # Default C compiler
 # http://stackoverflow.com/questions/4029274/mingw-and-make-variables
 # - Default value is 'cc' which is not always a valid command (e.g. MinGW).
 ifeq ($(origin CC),default)
+
+# Check if $(CC) refers to a valid command and set the value to gcc if not
+ifneq ($(findstring cmd.exe,$(SHELL)),)
+ifeq ($(shell where $(CC) 2>NUL),)
 CC = gcc
-# Change the default compile to clang on Mac OSX.
-# http://stackoverflow.com/questions/714100/os-detecting-makefile
-UNAME_S := $(shell uname -s)
-ifeq ($(UNAME_S),Darwin)
-	CC = clang
-#	EXTRALIB += -Wl,-no_compact_unwind
-endif
 endif
+else # POSIX-ish
+ifeq ($(shell command -v $(CC) 2>/dev/null),)
+ifeq ($(shell uname -s),Darwin)
+CC = clang
+# EXTRALIB += -Wl,-no_compact_unwind
+else
+CC = gcc
+endif # Darwin
+endif # CC exists
+endif # Shell is sane
+
+endif # CC is set to default
 
 # Default Fortran compiler (FC) is selected by f_check.
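[Editorial note: the compiler check added to Makefile.system above reduces, on the POSIX side, to the following plain-shell logic. This is a sketch for illustration only; the cmd.exe branch of the patch uses `where` instead of `command -v`.]

```sh
# Does the inherited default CC resolve to a real command?
CC=cc
if ! command -v "$CC" >/dev/null 2>&1; then
  # Fall back to clang on Darwin and gcc everywhere else,
  # mirroring the make-level conditionals in the hunk above.
  if [ "$(uname -s)" = "Darwin" ]; then
    CC=clang
  else
    CC=gcc
  fi
fi
echo "using CC=$CC"
```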
@@ -45,6 +65,7 @@ endif ifdef TARGET GETARCH_FLAGS := -DFORCE_$(TARGET) +GETARCH_FLAGS += -DUSER_TARGET endif # Force fallbacks for 32bit @@ -53,6 +74,9 @@ ifeq ($(BINARY), 32) ifeq ($(TARGET), HASWELL) GETARCH_FLAGS := -DFORCE_NEHALEM endif +ifeq ($(TARGET), SKYLAKEX) +GETARCH_FLAGS := -DFORCE_NEHALEM +endif ifeq ($(TARGET), SANDYBRIDGE) GETARCH_FLAGS := -DFORCE_NEHALEM endif @@ -86,6 +110,9 @@ ifeq ($(BINARY), 32) ifeq ($(TARGET_CORE), HASWELL) GETARCH_FLAGS := -DFORCE_NEHALEM endif +ifeq ($(TARGET_CORE), SKYLAKEX) +GETARCH_FLAGS := -DFORCE_NEHALEM +endif ifeq ($(TARGET_CORE), SANDYBRIDGE) GETARCH_FLAGS := -DFORCE_NEHALEM endif @@ -132,6 +159,10 @@ ifeq ($(NO_AVX2), 1) GETARCH_FLAGS += -DNO_AVX2 endif +ifeq ($(NO_AVX512), 1) +GETARCH_FLAGS += -DNO_AVX512 +endif + ifeq ($(DEBUG), 1) GETARCH_FLAGS += -g endif @@ -175,6 +206,10 @@ endif endif +ifndef NUM_PARALLEL +NUM_PARALLEL = 1 +endif + ifndef NUM_THREADS NUM_THREADS = $(NUM_CORES) endif @@ -225,12 +260,12 @@ endif ifeq ($(OSNAME), Darwin) ifndef MACOSX_DEPLOYMENT_TARGET -export MACOSX_DEPLOYMENT_TARGET=10.6 +export MACOSX_DEPLOYMENT_TARGET=10.8 endif MD5SUM = md5 -r endif -ifeq ($(OSNAME), FreeBSD) +ifneq (,$(findstring $(OSNAME), FreeBSD OpenBSD DragonFly)) MD5SUM = md5 -r endif @@ -304,6 +339,7 @@ endif ifeq ($(OSNAME), CYGWIN_NT) NEED_PIC = 0 NO_EXPRECISION = 1 +OS_CYGWIN_NT = 1 endif ifneq ($(OSNAME), WINNT) @@ -423,7 +459,7 @@ CCOMMON_OPT += -fopenmp endif ifeq ($(C_COMPILER), INTEL) -CCOMMON_OPT += -openmp +CCOMMON_OPT += -fopenmp endif ifeq ($(C_COMPILER), PGI) @@ -448,13 +484,44 @@ DYNAMIC_CORE = KATMAI COPPERMINE NORTHWOOD PRESCOTT BANIAS \ endif ifeq ($(ARCH), x86_64) -DYNAMIC_CORE = PRESCOTT CORE2 PENRYN DUNNINGTON NEHALEM OPTERON OPTERON_SSE3 BARCELONA BOBCAT ATOM NANO +DYNAMIC_CORE = PRESCOTT CORE2 +ifeq ($(DYNAMIC_OLDER), 1) +DYNAMIC_CORE += PENRYN DUNNINGTON +endif +DYNAMIC_CORE += NEHALEM +ifeq ($(DYNAMIC_OLDER), 1) +DYNAMIC_CORE += OPTERON OPTERON_SSE3 +endif +DYNAMIC_CORE += BARCELONA +ifeq ($(DYNAMIC_OLDER), 1) +DYNAMIC_CORE += BOBCAT ATOM NANO +endif ifneq ($(NO_AVX), 1) DYNAMIC_CORE += SANDYBRIDGE BULLDOZER PILEDRIVER STEAMROLLER EXCAVATOR endif ifneq ($(NO_AVX2), 1) DYNAMIC_CORE += HASWELL ZEN endif +ifneq ($(NO_AVX512), 1) +ifneq ($(NO_AVX2), 1) +DYNAMIC_CORE += SKYLAKEX +endif +endif +endif + +ifdef DYNAMIC_LIST +override DYNAMIC_CORE = PRESCOTT $(DYNAMIC_LIST) +XCCOMMON_OPT = -DDYNAMIC_LIST -DDYN_PRESCOTT +XCCOMMON_OPT += $(foreach dcore,$(DYNAMIC_LIST),-DDYN_$(dcore)) +CCOMMON_OPT += $(XCCOMMON_OPT) +#CCOMMON_OPT += -DDYNAMIC_LIST='$(DYNAMIC_LIST)' +endif + +ifeq ($(ARCH), arm64) +DYNAMIC_CORE = ARMV8 +DYNAMIC_CORE += CORTEXA57 +DYNAMIC_CORE += THUNDERX +DYNAMIC_CORE += THUNDERX2T99 endif # If DYNAMIC_CORE is not set, DYNAMIC_ARCH cannot do anything, so force it to empty @@ -554,9 +621,14 @@ CCOMMON_OPT += -march=mips64 FCOMMON_OPT += -march=mips64 endif +ifeq ($(CORE), 1004K) +CCOMMON_OPT += -mips32r2 $(MSA_FLAGS) +FCOMMON_OPT += -mips32r2 $(MSA_FLAGS) +endif + ifeq ($(CORE), P5600) -CCOMMON_OPT += -mips32r5 -mnan=2008 -mtune=p5600 $(MSA_FLAGS) -FCOMMON_OPT += -mips32r5 -mnan=2008 -mtune=p5600 $(MSA_FLAGS) +CCOMMON_OPT += -mips32r5 -mnan=2008 -mtune=p5600 $(MSA_FLAGS) +FCOMMON_OPT += -mips32r5 -mnan=2008 -mtune=p5600 $(MSA_FLAGS) endif ifeq ($(CORE), I6400) @@ -660,6 +732,8 @@ endif ifeq ($(F_COMPILER), GFORTRAN) CCOMMON_OPT += -DF_INTERFACE_GFORT FCOMMON_OPT += -Wall +# make single-threaded LAPACK calls thread-safe #1847 +FCOMMON_OPT += -frecursive #Don't include -lgfortran, when NO_LAPACK=1 or lsbcc ifneq 
($(NO_LAPACK), 1)
 EXTRALIB += -lgfortran
@@ -703,7 +777,7 @@ FCOMMON_OPT += -i8
 endif
 endif
 ifeq ($(USE_OPENMP), 1)
-FCOMMON_OPT += -openmp
+FCOMMON_OPT += -fopenmp
 endif
 endif
@@ -883,6 +957,10 @@ ifeq ($(DYNAMIC_ARCH), 1)
 CCOMMON_OPT += -DDYNAMIC_ARCH
 endif
 
+ifeq ($(DYNAMIC_OLDER), 1)
+CCOMMON_OPT += -DDYNAMIC_OLDER
+endif
+
 ifeq ($(NO_LAPACK), 1)
 CCOMMON_OPT += -DNO_LAPACK
 #Disable LAPACK C interface
@@ -905,6 +983,10 @@ ifeq ($(NO_AVX2), 1)
 CCOMMON_OPT += -DNO_AVX2
 endif
 
+ifeq ($(NO_AVX512), 1)
+CCOMMON_OPT += -DNO_AVX512
+endif
+
 ifdef SMP
 CCOMMON_OPT += -DSMP_SERVER
@@ -951,10 +1033,18 @@ endif
 
 CCOMMON_OPT += -DMAX_CPU_NUMBER=$(NUM_THREADS)
 
+CCOMMON_OPT += -DMAX_PARALLEL_NUMBER=$(NUM_PARALLEL)
+
 ifdef USE_SIMPLE_THREADED_LEVEL3
 CCOMMON_OPT += -DUSE_SIMPLE_THREADED_LEVEL3
 endif
 
+ifdef USE_TLS
+CCOMMON_OPT += -DUSE_TLS
+endif
+
+CCOMMON_OPT += -DVERSION=\"$(VERSION)\"
+
 ifndef SYMBOLPREFIX
 SYMBOLPREFIX =
 endif
@@ -1065,8 +1155,6 @@ ifndef FCOMMON_OPT
 FCOMMON_OPT = -O2 -frecursive
 endif
 
-
-
 override CFLAGS += $(COMMON_OPT) $(CCOMMON_OPT) -I$(TOPDIR)
 override PFLAGS += $(COMMON_OPT) $(CCOMMON_OPT) -I$(TOPDIR) -DPROFILE $(COMMON_PROF)
@@ -1074,6 +1162,12 @@ override FFLAGS += $(COMMON_OPT) $(FCOMMON_OPT)
 override FPFLAGS += $(FCOMMON_OPT) $(COMMON_PROF)
 #MAKEOVERRIDES =
 
+ifdef NEED_PIC
+ifeq (,$(findstring PIC,$(FFLAGS)))
+override FFLAGS += -fPIC
+endif
+endif
+
 #For LAPACK Fortran codes.
 #Disable -fopenmp for LAPACK Fortran codes on Windows.
 ifdef OS_WINDOWS
@@ -1132,7 +1226,11 @@ endif
 LIBDLLNAME = $(LIBPREFIX).dll
 IMPLIBNAME = lib$(LIBNAMEBASE).dll.a
+ifneq ($(OSNAME), AIX)
 LIBSONAME = $(LIBNAME:.$(LIBSUFFIX)=.so)
+else
+LIBSONAME = $(LIBNAME:.$(LIBSUFFIX)=.a)
+endif
 LIBDYNNAME = $(LIBNAME:.$(LIBSUFFIX)=.dylib)
 LIBDEFNAME = $(LIBNAME:.$(LIBSUFFIX)=.def)
 LIBEXPNAME = $(LIBNAME:.$(LIBSUFFIX)=.exp)
@@ -1209,6 +1307,7 @@ export MSA_FLAGS
 export KERNELDIR
 export FUNCTION_PROFILE
 export TARGET_CORE
+export NO_AVX512
 
 export SGEMM_UNROLL_M
 export SGEMM_UNROLL_N
diff --git a/Makefile.x86_64 b/Makefile.x86_64
index 1ba63278a..1b7fe3ef4 100644
--- a/Makefile.x86_64
+++ b/Makefile.x86_64
@@ -8,6 +8,34 @@ endif
 endif
 endif
 
+ifeq ($(CORE), SKYLAKEX)
+ifndef DYNAMIC_ARCH
+ifndef NO_AVX512
+CCOMMON_OPT += -march=skylake-avx512
+FCOMMON_OPT += -march=skylake-avx512
+ifeq ($(OSNAME), CYGWIN_NT)
+CCOMMON_OPT += -fno-asynchronous-unwind-tables
+endif
+ifeq ($(OSNAME), WINNT)
+ifeq ($(C_COMPILER), GCC)
+CCOMMON_OPT += -fno-asynchronous-unwind-tables
+endif
+endif
+endif
+endif
+endif
+
+ifeq ($(CORE), HASWELL)
+ifndef DYNAMIC_ARCH
+ifndef NO_AVX2
+CCOMMON_OPT += -mavx2
+FCOMMON_OPT += -mavx2
+endif
+endif
+endif
+
+
+
 ifeq ($(OSNAME), Interix)
 ARFLAGS = -m x64
 endif
diff --git a/README.md b/README.md
index ec32c1f60..26055c745 100644
--- a/README.md
+++ b/README.md
@@ -5,175 +5,221 @@
 Travis CI: [![Build Status](https://travis-ci.org/xianyi/OpenBLAS.svg?branch=develop)](https://travis-ci.org/xianyi/OpenBLAS)
 
 AppVeyor: [![Build status](https://ci.appveyor.com/api/projects/status/09sohd35n8nkkx64/branch/develop?svg=true)](https://ci.appveyor.com/project/xianyi/openblas/branch/develop)
 
+
 ## Introduction
+
 OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version.
 
-Please read the documents on OpenBLAS wiki pages <https://github.com/xianyi/OpenBLAS/wiki>.
+Please read the documentation on the OpenBLAS wiki pages: <https://github.com/xianyi/OpenBLAS/wiki>.
 
 ## Binary Packages
 
-We provide binary packages for the following platform.
+
+We provide official binary packages for the following platform:
 
 * Windows x86/x86_64
 
 You can download them from [file hosting on sourceforge.net](https://sourceforge.net/projects/openblas/files/).
 
 ## Installation from Source
 
-Download from project homepage. http://xianyi.github.com/OpenBLAS/
-Or, check out codes from git://github.com/xianyi/OpenBLAS.git
+Download from project homepage, http://xianyi.github.com/OpenBLAS/, or check out the code
+using Git from https://github.com/xianyi/OpenBLAS.git.
+
+### Dependencies
+
+Building OpenBLAS requires the following to be installed:
+
+* GNU Make
+* A C compiler, e.g. GCC or Clang
+* A Fortran compiler (optional, for LAPACK)
+* IBM MASS (optional, see below)
+
 ### Normal compile
-  * type "make" to detect the CPU automatically.
-  or
-  * type "make TARGET=xxx" to set target CPU, e.g. "make TARGET=NEHALEM". The full target list is in file TargetList.txt.
+
+Simply invoking `make` (or `gmake` on BSD) will detect the CPU automatically.
+To set a specific target CPU, use `make TARGET=xxx`, e.g. `make TARGET=NEHALEM`.
+The full target list is in the file `TargetList.txt`.
 
 ### Cross compile
-Please set CC and FC with the cross toolchains. Then, set HOSTCC with your host C compiler. At last, set TARGET explicitly.
+
+Set `CC` and `FC` to point to the cross toolchains, and set `HOSTCC` to your host C compiler.
+The target must be specified explicitly when cross compiling.
 
 Examples:
 
-On X86 box, compile this library for loongson3a CPU.
+* On an x86 box, compile this library for a loongson3a CPU:
+  ```sh
+  make BINARY=64 CC=mips64el-unknown-linux-gnu-gcc FC=mips64el-unknown-linux-gnu-gfortran HOSTCC=gcc TARGET=LOONGSON3A
+  ```
 
-    make BINARY=64 CC=mips64el-unknown-linux-gnu-gcc FC=mips64el-unknown-linux-gnu-gfortran HOSTCC=gcc TARGET=LOONGSON3A
-
-On X86 box, compile this library for loongson3a CPU with loongcc (based on Open64) compiler.
-
-    make CC=loongcc FC=loongf95 HOSTCC=gcc TARGET=LOONGSON3A CROSS=1 CROSS_SUFFIX=mips64el-st-linux-gnu- NO_LAPACKE=1 NO_SHARED=1 BINARY=32
+* On an x86 box, compile this library for a loongson3a CPU with loongcc (based on Open64) compiler:
+  ```sh
+  make CC=loongcc FC=loongf95 HOSTCC=gcc TARGET=LOONGSON3A CROSS=1 CROSS_SUFFIX=mips64el-st-linux-gnu- NO_LAPACKE=1 NO_SHARED=1 BINARY=32
+  ```
 
 ### Debug version
 
-    make DEBUG=1
+A debug version can be built using `make DEBUG=1`.
 
-### Compile with MASS Support on Power CPU (Optional dependency)
+### Compile with MASS support on Power CPU (optional)
 
-[IBM MASS](http://www-01.ibm.com/software/awdtools/mass/linux/mass-linux.html) library consists of a set of mathematical functions for C, C++, and
-Fortran-language applications that are tuned for optimum performance on POWER architectures. OpenBLAS with MASS requires 64-bit, little-endian OS on POWER.
-The library can be installed as below -
+The [IBM MASS](http://www-01.ibm.com/software/awdtools/mass/linux/mass-linux.html) library
+consists of a set of mathematical functions for C, C++, and Fortran applications that
+are tuned for optimum performance on POWER architectures.
+OpenBLAS with MASS requires a 64-bit, little-endian OS on POWER.
+The library can be installed as shown: - * On Ubuntu: +* On Ubuntu: + ```sh + wget -q http://public.dhe.ibm.com/software/server/POWER/Linux/xl-compiler/eval/ppc64le/ubuntu/public.gpg -O- | sudo apt-key add - + echo "deb http://public.dhe.ibm.com/software/server/POWER/Linux/xl-compiler/eval/ppc64le/ubuntu/ trusty main" | sudo tee /etc/apt/sources.list.d/ibm-xl-compiler-eval.list + sudo apt-get update + sudo apt-get install libxlmass-devel.8.1.5 + ``` - wget -q http://public.dhe.ibm.com/software/server/POWER/Linux/xl-compiler/eval/ppc64le/ubuntu/public.gpg -O- | sudo apt-key add -
- echo "deb http://public.dhe.ibm.com/software/server/POWER/Linux/xl-compiler/eval/ppc64le/ubuntu/ trusty main" | sudo tee /etc/apt/sources.list.d/ibm-xl-compiler-eval.list
- sudo apt-get update
- sudo apt-get install libxlmass-devel.8.1.5
+* On RHEL/CentOS: + ```sh + wget http://public.dhe.ibm.com/software/server/POWER/Linux/xl-compiler/eval/ppc64le/rhel7/repodata/repomd.xml.key + sudo rpm --import repomd.xml.key + wget http://public.dhe.ibm.com/software/server/POWER/Linux/xl-compiler/eval/ppc64le/rhel7/ibm-xl-compiler-eval.repo + sudo cp ibm-xl-compiler-eval.repo /etc/yum.repos.d/ + sudo yum install libxlmass-devel.8.1.5 + ``` - * On RHEL/CentOS: +After installing the MASS library, compile OpenBLAS with `USE_MASS=1`. +For example, to compile on Power8 with MASS support: `make USE_MASS=1 TARGET=POWER8`. - wget http://public.dhe.ibm.com/software/server/POWER/Linux/xl-compiler/eval/ppc64le/rhel7/repodata/repomd.xml.key
- sudo rpm --import repomd.xml.key
- wget http://public.dhe.ibm.com/software/server/POWER/Linux/xl-compiler/eval/ppc64le/rhel7/ibm-xl-compiler-eval.repo
- sudo cp ibm-xl-compiler-eval.repo /etc/yum.repos.d/
- sudo yum install libxlmass-devel.8.1.5
+### Install to a specific directory (optional)
 
-After installing MASS library, compile openblas with USE_MASS=1.
+Use `PREFIX=` when invoking `make`, for example
 
-Example:
+```sh
+make install PREFIX=your_installation_directory
+```
 
-Compiling on Power8 with MASS support -
+The default installation directory is `/opt/OpenBLAS`.
 
-    make USE_MASS=1 TARGET=POWER8
+## Supported CPUs and Operating Systems
 
-### Install to the directory (optional)
+Please read `GotoBLAS_01Readme.txt`.
 
-Example:
+### Additional supported CPUs
 
-    make install PREFIX=your_installation_directory
+#### x86/x86-64
 
-The default directory is /opt/OpenBLAS
-
-## Support CPU & OS
-Please read GotoBLAS_01Readme.txt
-
-### Additional support CPU:
-
-#### x86/x86-64:
 - **Intel Xeon 56xx (Westmere)**: Used GotoBLAS2 Nehalem codes.
 - **Intel Sandy Bridge**: Optimized Level-3 and Level-2 BLAS with AVX on x86-64.
 - **Intel Haswell**: Optimized Level-3 and Level-2 BLAS with AVX2 and FMA on x86-64.
+- **Intel Skylake**: Optimized Level-3 and Level-2 BLAS with AVX512 and FMA on x86-64.
 - **AMD Bobcat**: Used GotoBLAS2 Barcelona codes.
-- **AMD Bulldozer**: x86-64 ?GEMM FMA4 kernels. (Thank Werner Saar)
+- **AMD Bulldozer**: x86-64 ?GEMM FMA4 kernels. (Thanks to Werner Saar)
 - **AMD PILEDRIVER**: Uses Bulldozer codes with some optimizations.
 - **AMD STEAMROLLER**: Uses Bulldozer codes with some optimizations.
 
-#### MIPS64:
+#### MIPS64
+
 - **ICT Loongson 3A**: Optimized Level-3 BLAS and the part of Level-1,2.
 - **ICT Loongson 3B**: Experimental
 
-#### ARM:
-- **ARMV6**: Optimized BLAS for vfpv2 and vfpv3-d16 ( e.g. BCM2835, Cortex M0+ )
-- **ARMV7**: Optimized BLAS for vfpv3-d32 ( e.g. Cortex A8, A9 and A15 )
+#### ARM
 
-#### ARM64:
-- **ARMV8**: Experimental
+- **ARMv6**: Optimized BLAS for vfpv2 and vfpv3-d16 (e.g. BCM2835, Cortex M0+)
+- **ARMv7**: Optimized BLAS for vfpv3-d32 (e.g. Cortex A8, A9 and A15)
+
+#### ARM64
+
+- **ARMv8**: Experimental
 - **ARM Cortex-A57**: Experimental
 
 #### PPC/PPC64
-- **POWER8**: Optmized Level-3 BLAS and some Level-1, only with USE_OPENMP=1
 
-#### IBM zEnterprise System:
+- **POWER8**: Optimized Level-3 BLAS and some Level-1, only with `USE_OPENMP=1`
+
+#### IBM zEnterprise System
+
 - **Z13**: Optimized Level-3 BLAS and Level-1,2 (double precision)
-
-### Support OS:
+### Supported OS
+
 - **GNU/Linux**
-- **MingWin or Visual Studio(CMake)/Windows**: Please read <https://github.com/xianyi/OpenBLAS/wiki/How-to-use-OpenBLAS-in-Microsoft-Visual-Studio>.
-- **Darwin/Mac OS X**: Experimental. Although GotoBLAS2 supports Darwin, we are the beginner on Mac OS X.
-- **FreeBSD**: Supported by community. We didn't test the library on this OS.
-- **Android**: Supported by community. Please read <https://github.com/xianyi/OpenBLAS/wiki/How-to-build-OpenBLAS-for-Android>.
+- **MinGW or Visual Studio (CMake)/Windows**: Please read <https://github.com/xianyi/OpenBLAS/wiki/How-to-use-OpenBLAS-in-Microsoft-Visual-Studio>.
+- **Darwin/macOS**: Experimental. Although GotoBLAS2 supports Darwin, we are not macOS experts.
+- **FreeBSD**: Supported by the community. We don't actively test the library on this OS.
+- **OpenBSD**: Supported by the community. We don't actively test the library on this OS.
+- **DragonFly BSD**: Supported by the community. We don't actively test the library on this OS.
+- **Android**: Supported by the community. Please read <https://github.com/xianyi/OpenBLAS/wiki/How-to-build-OpenBLAS-for-Android>.
 
-## Usages
-Link with libopenblas.a or -lopenblas for shared library.
+## Usage
 
-### Set the number of threads with environment variables.
+Statically link with `libopenblas.a` or dynamically link with `-lopenblas` if OpenBLAS was
+compiled as a shared library.
-Examples: +### Setting the number of threads using environment variables - export OPENBLAS_NUM_THREADS=4 +Environment variables are used to specify a maximum number of threads. +For example, - or +```sh +export OPENBLAS_NUM_THREADS=4 +export GOTO_NUM_THREADS=4 +export OMP_NUM_THREADS=4 +``` - export GOTO_NUM_THREADS=4 +The priorities are `OPENBLAS_NUM_THREADS` > `GOTO_NUM_THREADS` > `OMP_NUM_THREADS`. - or +If you compile this library with `USE_OPENMP=1`, you should set the `OMP_NUM_THREADS` +environment variable; OpenBLAS ignores `OPENBLAS_NUM_THREADS` and `GOTO_NUM_THREADS` when +compiled with `USE_OPENMP=1`. - export OMP_NUM_THREADS=4 +### Setting the number of threads at runtime -The priorities are OPENBLAS_NUM_THREADS > GOTO_NUM_THREADS > OMP_NUM_THREADS. +We provide the following functions to control the number of threads at runtime: -If you compile this lib with USE_OPENMP=1, you should set OMP_NUM_THREADS environment variable. OpenBLAS ignores OPENBLAS_NUM_THREADS and GOTO_NUM_THREADS with USE_OPENMP=1. +```c +void goto_set_num_threads(int num_threads); +void openblas_set_num_threads(int num_threads); +``` -### Set the number of threads on runtime. +If you compile this library with `USE_OPENMP=1`, you should use the above functions too. -We provided the below functions to control the number of threads on runtime. +## Reporting bugs - void goto_set_num_threads(int num_threads); - - void openblas_set_num_threads(int num_threads); - -If you compile this lib with USE_OPENMP=1, you should use the above functions, too. - -## Report Bugs -Please add a issue in https://github.com/xianyi/OpenBLAS/issues +Please submit an issue in https://github.com/xianyi/OpenBLAS/issues. ## Contact + * OpenBLAS users mailing list: https://groups.google.com/forum/#!forum/openblas-users * OpenBLAS developers mailing list: https://groups.google.com/forum/#!forum/openblas-dev -## ChangeLog -Please see Changelog.txt to obtain the differences between GotoBLAS2 1.13 BSD version. +## Change log + +Please see Changelog.txt to view the differences between OpenBLAS and GotoBLAS2 1.13 BSD version. ## Troubleshooting -* Please read [Faq](https://github.com/xianyi/OpenBLAS/wiki/Faq) at first. -* Please use gcc version 4.6 and above to compile Sandy Bridge AVX kernels on Linux/MingW/BSD. -* Please use Clang version 3.1 and above to compile the library on Sandy Bridge microarchitecture. The Clang 3.0 will generate the wrong AVX binary code. -* The number of CPUs/Cores should less than or equal to 256. On Linux x86_64(amd64), there is experimental support for up to 1024 CPUs/Cores and 128 numa nodes if you build the library with BIGNUMA=1. -* OpenBLAS does not set processor affinity by default. On Linux, you can enable processor affinity by commenting the line NO_AFFINITY=1 in Makefile.rule. But this may cause [the conflict with R parallel](https://stat.ethz.ch/pipermail/r-sig-hpc/2012-April/001348.html). -* On Loongson 3A. make test would be failed because of pthread_create error. The error code is EAGAIN. However, it will be OK when you run the same testcase on shell. + +* Please read the [FAQ](https://github.com/xianyi/OpenBLAS/wiki/Faq) first. +* Please use GCC version 4.6 and above to compile Sandy Bridge AVX kernels on Linux/MinGW/BSD. +* Please use Clang version 3.1 and above to compile the library on Sandy Bridge microarchitecture. + Clang 3.0 will generate the wrong AVX binary code. +* Please use GCC version 6 or LLVM version 6 and above to compile Skylake AVX512 kernels. 
-We provided the below functions to control the number of threads on runtime. +## Reporting bugs - void goto_set_num_threads(int num_threads); - - void openblas_set_num_threads(int num_threads); - -If you compile this lib with USE_OPENMP=1, you should use the above functions, too. - -## Report Bugs -Please add a issue in https://github.com/xianyi/OpenBLAS/issues +Please submit an issue at https://github.com/xianyi/OpenBLAS/issues. ## Contact + * OpenBLAS users mailing list: https://groups.google.com/forum/#!forum/openblas-users * OpenBLAS developers mailing list: https://groups.google.com/forum/#!forum/openblas-dev -## ChangeLog -Please see Changelog.txt to obtain the differences between GotoBLAS2 1.13 BSD version. +## Change log + +Please see Changelog.txt to view the differences between OpenBLAS and GotoBLAS2 1.13 BSD version. ## Troubleshooting -* Please read [Faq](https://github.com/xianyi/OpenBLAS/wiki/Faq) at first. -* Please use gcc version 4.6 and above to compile Sandy Bridge AVX kernels on Linux/MingW/BSD. -* Please use Clang version 3.1 and above to compile the library on Sandy Bridge microarchitecture. The Clang 3.0 will generate the wrong AVX binary code. -* The number of CPUs/Cores should less than or equal to 256. On Linux x86_64(amd64), there is experimental support for up to 1024 CPUs/Cores and 128 numa nodes if you build the library with BIGNUMA=1. -* OpenBLAS does not set processor affinity by default. On Linux, you can enable processor affinity by commenting the line NO_AFFINITY=1 in Makefile.rule. But this may cause [the conflict with R parallel](https://stat.ethz.ch/pipermail/r-sig-hpc/2012-April/001348.html). -* On Loongson 3A. make test would be failed because of pthread_create error. The error code is EAGAIN. However, it will be OK when you run the same testcase on shell. + +* Please read the [FAQ](https://github.com/xianyi/OpenBLAS/wiki/Faq) first. +* Please use GCC version 4.6 and above to compile Sandy Bridge AVX kernels on Linux/MinGW/BSD. +* Please use Clang version 3.1 and above to compile the library on Sandy Bridge microarchitecture. + Clang 3.0 will generate the wrong AVX binary code. +* Please use GCC version 6 or LLVM version 6 and above to compile Skylake AVX512 kernels. +* The number of CPUs/cores should be less than or equal to 256. On Linux `x86_64` (`amd64`), + there is experimental support for up to 1024 CPUs/cores and 128 numa nodes if you build + the library with `BIGNUMA=1`. +* OpenBLAS does not set processor affinity by default. + On Linux, you can enable processor affinity by commenting out the line `NO_AFFINITY=1` in + Makefile.rule. However, note that this may cause + [a conflict with R parallel](https://stat.ethz.ch/pipermail/r-sig-hpc/2012-April/001348.html). +* On Loongson 3A, `make test` may fail with a `pthread_create` error (`EAGAIN`). + However, it will be okay when you run the same test case on the shell. ## Contributing -1. [Check for open issues](https://github.com/xianyi/OpenBLAS/issues) or open a fresh issue to start a discussion around a feature idea or a bug. -1. Fork the [OpenBLAS](https://github.com/xianyi/OpenBLAS) repository to start making your changes. -1. Write a test which shows that the bug was fixed or that the feature works as expected. -1. Send a pull request. Make sure to add yourself to `CONTRIBUTORS.md`. + +1. [Check for open issues](https://github.com/xianyi/OpenBLAS/issues) or open a fresh issue + to start a discussion around a feature idea or a bug. +2. Fork the [OpenBLAS](https://github.com/xianyi/OpenBLAS) repository to start making your changes. +3. Write a test which shows that the bug was fixed or that the feature works as expected. +4. Send a pull request. Make sure to add yourself to `CONTRIBUTORS.md`. ## Donation + Please read [this wiki page](https://github.com/xianyi/OpenBLAS/wiki/Donation). diff --git a/TargetList.txt b/TargetList.txt index d40545cf8..3d04a57cf 100644 --- a/TargetList.txt +++ b/TargetList.txt @@ -20,6 +20,7 @@ DUNNINGTON NEHALEM SANDYBRIDGE HASWELL +SKYLAKEX ATOM b)AMD CPU: @@ -56,6 +57,7 @@ CELL 3.MIPS CPU: P5600 +1004K 4.MIPS64 CPU: SICORTEX @@ -81,8 +83,11 @@ ARMV5 8.ARM 64-bit CPU: ARMV8 +CORTEXA53 CORTEXA57 -VULCAN +CORTEXA72 +CORTEXA73 +FALKOR THUNDERX THUNDERX2T99 diff --git a/USAGE.md b/USAGE.md index c76ceb324..89f3bba67 100644 --- a/USAGE.md +++ b/USAGE.md @@ -14,6 +14,20 @@ Please build OpenBLAS with larger `NUM_THREADS`. For example, `make NUM_THREADS=32` or `make NUM_THREADS=64`. In `Makefile.system`, we will set `MAX_CPU_NUMBER=NUM_THREADS`. +Despite its name, and due to the use of memory buffers in functions like SGEMM, +the setting of NUM_THREADS can be relevant even for a single-threaded build +of OpenBLAS, if such functions get called by multiple threads of a program +that uses OpenBLAS. In some cases, the affected code may simply crash or throw +a segmentation fault without displaying the above warning first. + +Note that the number of threads used at runtime can be altered to differ from the +value NUM_THREADS was set to at build time. At runtime, the actual number of +threads can be set anywhere from 1 to the build's NUM_THREADS (note however, +that this does not change the number of memory buffers that will be allocated, +which is set at build time). The number of threads for a process can be set by +using the mechanisms described below. + +
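A hypothetical sketch of the scenario described above (not from this patch): a single-threaded OpenBLAS build called concurrently from four application threads. Each in-flight SGEMM call occupies one of the memory buffers whose count is fixed by NUM_THREADS at build time, so the build must anticipate the number of concurrent callers:

```c
/* Hypothetical illustration: four application threads calling into a
 * single-threaded OpenBLAS build at once. Each concurrent SGEMM call
 * needs its own memory buffer, so build-time NUM_THREADS (which sizes
 * the buffer pool) must cover the concurrent callers. */
#include <pthread.h>
#include <cblas.h>

#define N 128
#define CALLERS 4

static float A[N * N], B[N * N], C[CALLERS][N * N];

static void *worker(void *arg) {
    int id = (int)(long)arg;  /* each thread writes its own output block */
    cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                N, N, N, 1.0f, A, N, B, N, 0.0f, C[id], N);
    return NULL;
}

int main(void) {
    pthread_t t[CALLERS];
    for (long i = 0; i < CALLERS; i++) pthread_create(&t[i], NULL, worker, (void *)i);
    for (int i = 0; i < CALLERS; i++) pthread_join(t[i], NULL);
    return 0;
}
```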
#### How can I use OpenBLAS in multi-threaded applications? If your application is already multi-threaded, it will conflict with OpenBLAS diff --git a/benchmark/gemm.c b/benchmark/gemm.c index 809813c92..85bcbc710 100644 --- a/benchmark/gemm.c +++ b/benchmark/gemm.c @@ -237,7 +237,7 @@ int main(int argc, char *argv[]){ timeg = time1/loops; fprintf(stderr, " %10.2f MFlops %10.6f sec\n", - COMPSIZE * COMPSIZE * 2. * (double)m * (double)m * (double)n / timeg * 1.e-6, time1); + COMPSIZE * COMPSIZE * 2. * (double)k * (double)m * (double)n / timeg * 1.e-6, time1); }
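The corrected MFlops line above follows from the standard operation count for GEMM: computing the m×n result of A (m×k) times B (k×n) takes k multiplications and k additions per output entry, so

$$\text{FLOPs}_{\text{GEMM}} = mnk + mnk = 2\,mnk,$$

which is why `(double)m * (double)m * (double)n` was only correct in the square case k = m (the `COMPSIZE * COMPSIZE` factor further scales the count for complex data types).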
diff --git a/benchmark/gemv.c b/benchmark/gemv.c index c06e829d9..b6a42f42f 100644 --- a/benchmark/gemv.c +++ b/benchmark/gemv.c @@ -122,7 +122,7 @@ int main(int argc, char *argv[]){ FLOAT *a, *x, *y; FLOAT alpha[] = {1.0, 1.0}; - FLOAT beta [] = {1.0, 1.0}; + FLOAT beta [] = {1.0, 0.0}; char trans='N'; blasint m, i, j; blasint inc_x=1,inc_y=1; diff --git a/c_check b/c_check index 20da288be..9dc237beb 100644 --- a/c_check +++ b/c_check @@ -54,6 +54,8 @@ $compiler = GCC if ($compiler eq ""); $os = Linux if ($data =~ /OS_LINUX/); $os = FreeBSD if ($data =~ /OS_FREEBSD/); $os = NetBSD if ($data =~ /OS_NETBSD/); +$os = OpenBSD if ($data =~ /OS_OPENBSD/); +$os = DragonFly if ($data =~ /OS_DRAGONFLY/); $os = Darwin if ($data =~ /OS_DARWIN/); $os = SunOS if ($data =~ /OS_SUNOS/); $os = AIX if ($data =~ /OS_AIX/); @@ -62,6 +64,7 @@ $os = WINNT if ($data =~ /OS_WINNT/); $os = CYGWIN_NT if ($data =~ /OS_CYGWIN_NT/); $os = Interix if ($data =~ /OS_INTERIX/); $os = Android if ($data =~ /OS_ANDROID/); +$os = Haiku if ($data =~ /OS_HAIKU/); $architecture = x86 if ($data =~ /ARCH_X86/); $architecture = x86_64 if ($data =~ /ARCH_X86_64/); @@ -199,6 +202,21 @@ $architecture = zarch if ($data =~ /ARCH_ZARCH/); $binformat = bin32; $binformat = bin64 if ($data =~ /BINARY_64/); +$no_avx512= 0; +if (($architecture eq "x86") || ($architecture eq "x86_64")) { + $code = '"vbroadcastss -4 * 4(%rsi), %zmm2"'; + print $tmpf "#include <immintrin.h>\n\nint main(void){ __asm__ volatile($code); }\n"; + $args = " -march=skylake-avx512 -o $tmpf.o -x c $tmpf"; + my @cmd = ("$compiler_name $args >/dev/null 2>/dev/null"); + system(@cmd) == 0; + if ($? != 0) { + $no_avx512 = 1; + } else { + $no_avx512 = 0; + } + unlink("$tmpf.o"); +} + $data = `$compiler_name -S ctest1.c && grep globl ctest1.s | head -n 1 && rm -f ctest1.s`; $data =~ /globl\s([_\.]*)(.*)/; @@ -206,7 +224,6 @@ $data =~ /globl\s([_\.]*)(.*)/; $need_fu = $1; $cross = 0; -$cross = 1 if ($os ne $hostos); if ($architecture ne $hostarch) { $cross = 1; @@ -214,6 +231,8 @@ if ($architecture ne $hostarch) { $cross = 0 if (($hostarch eq "mips64") && ($architecture eq "mips")); } +$cross = 1 if ($os ne $hostos); + $openmp = "" if $ENV{USE_OPENMP} != 1; $linker_L = ""; @@ -286,6 +305,7 @@ print MAKEFILE "CROSS=1\n" if $cross != 0; print MAKEFILE "CEXTRALIB=$linker_L $linker_l $linker_a\n"; print MAKEFILE "HAVE_MSA=1\n" if $have_msa eq 1; print MAKEFILE "MSA_FLAGS=$msa_flags\n" if $have_msa eq 1; +print MAKEFILE "NO_AVX512=1\n" if $no_avx512 eq 1; $os =~ tr/[a-z]/[A-Z]/; $architecture =~ tr/[a-z]/[A-Z]/; diff --git a/cblas.h b/cblas.h index 89f78c133..d340a2037 100644 --- a/cblas.h +++ b/cblas.h @@ -51,7 +51,8 @@ typedef enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=1 typedef enum CBLAS_UPLO {CblasUpper=121, CblasLower=122} CBLAS_UPLO; typedef enum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132} CBLAS_DIAG; typedef enum CBLAS_SIDE {CblasLeft=141, CblasRight=142} CBLAS_SIDE; - +typedef CBLAS_ORDER CBLAS_LAYOUT; + float cblas_sdsdot(OPENBLAS_CONST blasint n, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST float *y, OPENBLAS_CONST blasint incy); double cblas_dsdot (OPENBLAS_CONST blasint n, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST float *y, OPENBLAS_CONST blasint incy); float cblas_sdot(OPENBLAS_CONST blasint n, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST float *y, OPENBLAS_CONST blasint incy); @@ -82,6 +83,11 @@ CBLAS_INDEX cblas_idamax(OPENBLAS_CONST blasint n, OPENBLAS_CONST double *x, OPE CBLAS_INDEX cblas_icamax(OPENBLAS_CONST blasint n, OPENBLAS_CONST void *x, OPENBLAS_CONST blasint incx); CBLAS_INDEX cblas_izamax(OPENBLAS_CONST blasint n, OPENBLAS_CONST void *x, OPENBLAS_CONST blasint incx); +CBLAS_INDEX cblas_isamin(OPENBLAS_CONST blasint n, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx); +CBLAS_INDEX cblas_idamin(OPENBLAS_CONST blasint n, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx); +CBLAS_INDEX cblas_icamin(OPENBLAS_CONST blasint n, OPENBLAS_CONST void *x, OPENBLAS_CONST blasint incx); +CBLAS_INDEX cblas_izamin(OPENBLAS_CONST blasint n, OPENBLAS_CONST void *x, OPENBLAS_CONST blasint incx); + void cblas_saxpy(OPENBLAS_CONST blasint n, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, float *y, OPENBLAS_CONST blasint incy); void cblas_daxpy(OPENBLAS_CONST blasint n, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx, double *y, OPENBLAS_CONST blasint incy); void cblas_caxpy(OPENBLAS_CONST blasint n, OPENBLAS_CONST void *alpha, OPENBLAS_CONST void *x, OPENBLAS_CONST blasint incx, void *y, OPENBLAS_CONST blasint incy); diff --git a/cmake/OpenBLASConfig.cmake.in b/cmake/OpenBLASConfig.cmake.in new file mode 100644 index 000000000..87a1621b4 --- /dev/null +++ b/cmake/OpenBLASConfig.cmake.in @@ -0,0 +1,79 @@ +# OpenBLASConfig.cmake +# -------------------- +# +# OpenBLAS cmake module.
+# This module sets the following variables in your project:: +# +# OpenBLAS_FOUND - true if OpenBLAS and all required components found on the system +# OpenBLAS_VERSION - OpenBLAS version in format Major.Minor.Release +# OpenBLAS_INCLUDE_DIRS - Directory where OpenBLAS header is located. +# OpenBLAS_INCLUDE_DIR - same as DIRS +# OpenBLAS_LIBRARIES - OpenBLAS library to link against. +# OpenBLAS_LIBRARY - same as LIBRARIES +# +# +# Available components:: +# +## shared - search for only shared library +## static - search for only static library +# serial - search for unthreaded library +# pthread - search for native pthread threaded library +# openmp - search for OpenMP threaded library +# +# +# Exported targets:: +# +# If OpenBLAS is found, this module defines the following :prop_tgt:`IMPORTED` +## target. Target is shared _or_ static, so, for both, use separate, not +## overlapping, installations. :: +# +# OpenBLAS::OpenBLAS - the main OpenBLAS library #with header & defs attached. +# +# +# Suggested usage:: +# +# find_package(OpenBLAS) +# find_package(OpenBLAS 0.2.20 EXACT CONFIG REQUIRED COMPONENTS pthread) +# +# +# The following variables can be set to guide the search for this package:: +# +# OpenBLAS_DIR - CMake variable, set to directory containing this Config file +# CMAKE_PREFIX_PATH - CMake variable, set to root directory of this package +# PATH - environment variable, set to bin directory of this package +# CMAKE_DISABLE_FIND_PACKAGE_OpenBLAS - CMake variable, disables +# find_package(OpenBLAS) when not REQUIRED, perhaps to force internal build + +@PACKAGE_INIT@ + +set(PN OpenBLAS) + +# need to check that the @USE_*@ evaluate to something cmake can perform boolean logic upon +if(@USE_OPENMP@) + set(${PN}_openmp_FOUND 1) +elseif(@USE_THREAD@) + set(${PN}_pthread_FOUND 1) +else() + set(${PN}_serial_FOUND 1) +endif() + +check_required_components(${PN}) + +#----------------------------------------------------------------------------- +# Don't include targets if this file is being picked up by another +# project which has already built this as a subproject +#----------------------------------------------------------------------------- +if(NOT TARGET ${PN}::OpenBLAS) + include("${CMAKE_CURRENT_LIST_DIR}/${PN}Targets.cmake") + + get_property(_loc TARGET ${PN}::OpenBLAS PROPERTY LOCATION) + set(${PN}_LIBRARY ${_loc}) + get_property(_ill TARGET ${PN}::OpenBLAS PROPERTY INTERFACE_LINK_LIBRARIES) + set(${PN}_LIBRARIES ${_ill}) + + get_property(_id TARGET ${PN}::OpenBLAS PROPERTY INCLUDE_DIRECTORIES) + set(${PN}_INCLUDE_DIR ${_id}) + get_property(_iid TARGET ${PN}::OpenBLAS PROPERTY INTERFACE_INCLUDE_DIRECTORIES) + set(${PN}_INCLUDE_DIRS ${_iid}) +endif() + diff --git a/cmake/arch.cmake b/cmake/arch.cmake index 798a9ef82..63fb86fa2 100644 --- a/cmake/arch.cmake +++ b/cmake/arch.cmake @@ -44,18 +44,36 @@ endif () if (DYNAMIC_ARCH) + if (ARM64) + set(DYNAMIC_CORE ARMV8 CORTEXA53 CORTEXA57 CORTEXA72 CORTEXA73 FALKOR THUNDERX THUNDERX2T99) + endif () + if (X86) set(DYNAMIC_CORE KATMAI COPPERMINE NORTHWOOD PRESCOTT BANIAS CORE2 PENRYN DUNNINGTON NEHALEM ATHLON OPTERON OPTERON_SSE3 BARCELONA BOBCAT ATOM NANO) endif () if (X86_64) - set(DYNAMIC_CORE PRESCOTT CORE2 PENRYN DUNNINGTON NEHALEM OPTERON OPTERON_SSE3 BARCELONA BOBCAT ATOM NANO) + set(DYNAMIC_CORE PRESCOTT CORE2) + if (DYNAMIC_OLDER) + set (DYNAMIC_CORE ${DYNAMIC_CORE} PENRYN DUNNINGTON) + endif () + set (DYNAMIC_CORE ${DYNAMIC_CORE} NEHALEM) + if (DYNAMIC_OLDER) + set (DYNAMIC_CORE ${DYNAMIC_CORE} OPTERON OPTERON_SSE3) + endif () + set 
(DYNAMIC_CORE ${DYNAMIC_CORE} BARCELONA) + if (DYNAMIC_OLDER) + set (DYNAMIC_CORE ${DYNAMIC_CORE} BOBCAT ATOM NANO) + endif () if (NOT NO_AVX) set(DYNAMIC_CORE ${DYNAMIC_CORE} SANDYBRIDGE BULLDOZER PILEDRIVER STEAMROLLER EXCAVATOR) endif () if (NOT NO_AVX2) set(DYNAMIC_CORE ${DYNAMIC_CORE} HASWELL ZEN) endif () + if (NOT NO_AVX512) + set(DYNAMIC_CORE ${DYNAMIC_CORE} SKYLAKEX) + endif () endif () if (NOT DYNAMIC_CORE) diff --git a/cmake/fc.cmake b/cmake/fc.cmake index 1446a900d..adec28a91 100644 --- a/cmake/fc.cmake +++ b/cmake/fc.cmake @@ -3,6 +3,11 @@ ## Description: Ported from portion of OpenBLAS/Makefile.system ## Sets Fortran related variables. +if (INTERFACE64) + set(SUFFIX64 64) + set(SUFFIX64_UNDERSCORE _64) +endif() + if (${F_COMPILER} STREQUAL "FLANG") set(CCOMMON_OPT "${CCOMMON_OPT} -DF_INTERFACE_FLANG") if (BINARY64 AND INTERFACE64) @@ -39,7 +44,7 @@ endif () if (${F_COMPILER} STREQUAL "GFORTRAN") set(CCOMMON_OPT "${CCOMMON_OPT} -DF_INTERFACE_GFORT") - set(FCOMMON_OPT "${FCOMMON_OPT} -Wall") + set(FCOMMON_OPT "${FCOMMON_OPT} -Wall -frecursive") #Don't include -lgfortran, when NO_LAPACK=1 or lsbcc if (NOT NO_LAPACK) set(EXTRALIB "{EXTRALIB} -lgfortran") diff --git a/cmake/openblas.pc.in b/cmake/openblas.pc.in index 113ba8526..df4b2ab06 100644 --- a/cmake/openblas.pc.in +++ b/cmake/openblas.pc.in @@ -1,9 +1,11 @@ libdir=@CMAKE_INSTALL_FULL_LIBDIR@ +libsuffix=@SUFFIX64_UNDERSCORE@ includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@ +openblas_config=USE_64BITINT=@USE_64BITINT@ NO_CBLAS=@NO_CBLAS@ NO_LAPACK=@NO_LAPACK@ NO_LAPACKE=@NO_LAPACKE@ DYNAMIC_ARCH=@DYNAMIC_ARCH@ DYNAMIC_OLDER=@DYNAMIC_OLDER@ NO_AFFINITY=@NO_AFFINITY@ USE_OPENMP=@USE_OPENMP@ @CORE@ MAX_THREADS=@NUM_THREADS@ Name: OpenBLAS Description: OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version Version: @OPENBLAS_VERSION@ URL: https://github.com/xianyi/OpenBLAS -Libs: -L${libdir} -lopenblas +Libs: -L${libdir} -lopenblas${libsuffix} Cflags: -I${includedir} diff --git a/cmake/prebuild.cmake b/cmake/prebuild.cmake index b783ef90d..a67c44bf5 100644 --- a/cmake/prebuild.cmake +++ b/cmake/prebuild.cmake @@ -85,15 +85,20 @@ if (NOT NOFORTRAN) endif () # Cannot run getarch on target if we are cross-compiling -if (DEFINED CORE AND CMAKE_CROSSCOMPILING) +if (DEFINED CORE AND CMAKE_CROSSCOMPILING AND NOT (${HOST_OS} STREQUAL "WINDOWSSTORE")) # Write to config as getarch would + if (DEFINED TARGET_CORE) + set(TCORE ${TARGET_CORE}) + else() + set(TCORE ${CORE}) + endif() # TODO: Set up defines that getarch sets up based on every other target # Perhaps this should be inside a different file as it grows larger file(APPEND ${TARGET_CONF_TEMP} - "#define ${CORE}\n" - "#define CHAR_CORENAME \"${CORE}\"\n") - if ("${CORE}" STREQUAL "ARMV7") + "#define ${TCORE}\n" + "#define CHAR_CORENAME \"${TCORE}\"\n") + if ("${TCORE}" STREQUAL "ARMV7") file(APPEND ${TARGET_CONF_TEMP} "#define L1_DATA_SIZE\t65536\n" "#define L1_DATA_LINESIZE\t32\n" @@ -108,7 +113,7 @@ if (DEFINED CORE AND CMAKE_CROSSCOMPILING) set(SGEMM_UNROLL_N 4) set(DGEMM_UNROLL_M 4) set(DGEMM_UNROLL_N 4) - elseif ("${CORE}" STREQUAL "ARMV8") + elseif ("${TCORE}" STREQUAL "ARMV8") file(APPEND ${TARGET_CONF_TEMP} "#define L1_DATA_SIZE\t32768\n" "#define L1_DATA_LINESIZE\t64\n" @@ -116,18 +121,26 @@ if (DEFINED CORE AND CMAKE_CROSSCOMPILING) "#define L2_LINESIZE\t64\n" "#define DTB_DEFAULT_ENTRIES\t64\n" "#define DTB_SIZE\t4096\n" - "#define L2_ASSOCIATIVE\t32\n") - set(SGEMM_UNROLL_M 4) + "#define L2_ASSOCIATIVE\t32\n" + "#define ARMV8\n") + set(SGEMM_UNROLL_M 16) 
set(SGEMM_UNROLL_N 4) - elseif ("${CORE}" STREQUAL "CORTEXA57") + set(DGEMM_UNROLL_M 8) + set(DGEMM_UNROLL_N 4) + set(CGEMM_UNROLL_M 8) + set(CGEMM_UNROLL_N 4) + set(ZGEMM_UNROLL_M 4) + set(ZGEMM_UNROLL_N 4) + set(SYMV_P 16) + elseif ("${TCORE}" STREQUAL "CORTEXA57" OR "${TCORE}" STREQUAL "CORTEXA53") file(APPEND ${TARGET_CONF_TEMP} - "#define L1_CODE_SIZE\t49152\n" + "#define L1_CODE_SIZE\t32768\n" "#define L1_CODE_LINESIZE\t64\n" "#define L1_CODE_ASSOCIATIVE\t3\n" "#define L1_DATA_SIZE\t32768\n" "#define L1_DATA_LINESIZE\t64\n" "#define L1_DATA_ASSOCIATIVE\t2\n" - "#define L2_SIZE\t2097152\n" + "#define L2_SIZE\t262144\n" "#define L2_LINESIZE\t64\n" "#define L2_ASSOCIATIVE\t16\n" "#define DTB_DEFAULT_ENTRIES\t64\n" @@ -135,15 +148,124 @@ "#define HAVE_VFPV4\n" "#define HAVE_VFPV3\n" "#define HAVE_VFP\n" - "#define HAVE_NEON\n") + "#define HAVE_NEON\n" + "#define ARMV8\n") set(SGEMM_UNROLL_M 16) set(SGEMM_UNROLL_N 4) set(DGEMM_UNROLL_M 8) set(DGEMM_UNROLL_N 4) set(CGEMM_UNROLL_M 8) set(CGEMM_UNROLL_N 4) - set(ZGEMM_UNROLL_M 8) + set(ZGEMM_UNROLL_M 4) set(ZGEMM_UNROLL_N 4) + set(SYMV_P 16) + elseif ("${TCORE}" STREQUAL "CORTEXA72" OR "${TCORE}" STREQUAL "CORTEXA73") + file(APPEND ${TARGET_CONF_TEMP} + "#define L1_CODE_SIZE\t49152\n" + "#define L1_CODE_LINESIZE\t64\n" + "#define L1_CODE_ASSOCIATIVE\t3\n" + "#define L1_DATA_SIZE\t32768\n" + "#define L1_DATA_LINESIZE\t64\n" + "#define L1_DATA_ASSOCIATIVE\t2\n" + "#define L2_SIZE\t524288\n" + "#define L2_LINESIZE\t64\n" + "#define L2_ASSOCIATIVE\t16\n" + "#define DTB_DEFAULT_ENTRIES\t64\n" + "#define DTB_SIZE\t4096\n" + "#define HAVE_VFPV4\n" + "#define HAVE_VFPV3\n" + "#define HAVE_VFP\n" + "#define HAVE_NEON\n" + "#define ARMV8\n") + set(SGEMM_UNROLL_M 16) + set(SGEMM_UNROLL_N 4) + set(DGEMM_UNROLL_M 8) + set(DGEMM_UNROLL_N 4) + set(CGEMM_UNROLL_M 8) + set(CGEMM_UNROLL_N 4) + set(ZGEMM_UNROLL_M 4) + set(ZGEMM_UNROLL_N 4) + set(SYMV_P 16) + elseif ("${TCORE}" STREQUAL "FALKOR") + file(APPEND ${TARGET_CONF_TEMP} + "#define L1_CODE_SIZE\t65536\n" + "#define L1_CODE_LINESIZE\t64\n" + "#define L1_CODE_ASSOCIATIVE\t3\n" + "#define L1_DATA_SIZE\t32768\n" + "#define L1_DATA_LINESIZE\t128\n" + "#define L1_DATA_ASSOCIATIVE\t2\n" + "#define L2_SIZE\t524288\n" + "#define L2_LINESIZE\t64\n" + "#define L2_ASSOCIATIVE\t16\n" + "#define DTB_DEFAULT_ENTRIES\t64\n" + "#define DTB_SIZE\t4096\n" + "#define HAVE_VFPV4\n" + "#define HAVE_VFPV3\n" + "#define HAVE_VFP\n" + "#define HAVE_NEON\n" + "#define ARMV8\n") + set(SGEMM_UNROLL_M 16) + set(SGEMM_UNROLL_N 4) + set(DGEMM_UNROLL_M 8) + set(DGEMM_UNROLL_N 4) + set(CGEMM_UNROLL_M 8) + set(CGEMM_UNROLL_N 4) + set(ZGEMM_UNROLL_M 4) + set(ZGEMM_UNROLL_N 4) + set(SYMV_P 16) + elseif ("${TCORE}" STREQUAL "THUNDERX") + file(APPEND ${TARGET_CONF_TEMP} + "#define L1_CODE_SIZE\t32768\n" + "#define L1_CODE_LINESIZE\t64\n" + "#define L1_CODE_ASSOCIATIVE\t3\n" + "#define L1_DATA_SIZE\t32768\n" + "#define L1_DATA_LINESIZE\t128\n" + "#define L1_DATA_ASSOCIATIVE\t2\n" + "#define L2_SIZE\t16777216\n" + "#define L2_LINESIZE\t128\n" + "#define L2_ASSOCIATIVE\t16\n" + "#define DTB_DEFAULT_ENTRIES\t64\n" + "#define DTB_SIZE\t4096\n" + "#define HAVE_VFPV4\n" + "#define HAVE_VFPV3\n" + "#define HAVE_VFP\n" + "#define HAVE_NEON\n" + "#define ARMV8\n") + set(SGEMM_UNROLL_M 4) + set(SGEMM_UNROLL_N 4) + set(DGEMM_UNROLL_M 2) + set(DGEMM_UNROLL_N 2) + set(CGEMM_UNROLL_M 2) + set(CGEMM_UNROLL_N 2) + set(ZGEMM_UNROLL_M 2) + set(ZGEMM_UNROLL_N 2) + set(SYMV_P 16) + elseif ("${TCORE}" STREQUAL
"THUNDERX2T99") + file(APPEND ${TARGET_CONF_TEMP} + "#define L1_CODE_SIZE\t32768\n" + "#define L1_CODE_LINESIZE\t64\n" + "#define L1_CODE_ASSOCIATIVE\t8\n" + "#define L1_DATA_SIZE\t32768\n" + "#define L1_DATA_LINESIZE\t64\n" + "#define L1_DATA_ASSOCIATIVE\t8\n" + "#define L2_SIZE\t262144\n" + "#define L2_LINESIZE\t64\n" + "#define L2_ASSOCIATIVE\t8\n" + "#define L3_SIZE\t33554432\n" + "#define L3_LINESIZE\t64\n" + "#define L3_ASSOCIATIVE\t32\n" + "#define DTB_DEFAULT_ENTRIES\t64\n" + "#define DTB_SIZE\t4096\n" + "#define ARMV8\n") + set(SGEMM_UNROLL_M 16) + set(SGEMM_UNROLL_N 4) + set(DGEMM_UNROLL_M 8) + set(DGEMM_UNROLL_N 4) + set(CGEMM_UNROLL_M 8) + set(CGEMM_UNROLL_N 4) + set(ZGEMM_UNROLL_M 4) + set(ZGEMM_UNROLL_N 4) + set(SYMV_P 16) endif() # Or should this actually be NUM_CORES? @@ -163,6 +285,7 @@ if (DEFINED CORE AND CMAKE_CROSSCOMPILING) file(APPEND ${TARGET_CONF_TEMP} "#define GEMM_MULTITHREAD_THRESHOLD\t${GEMM_MULTITHREAD_THRESHOLD}\n") # Move to where gen_config_h would place it + file(MAKE_DIRECTORY ${TARGET_CONF_DIR}) file(RENAME ${TARGET_CONF_TEMP} "${TARGET_CONF_DIR}/${TARGET_CONF}") else(NOT CMAKE_CROSSCOMPILING) diff --git a/cmake/system.cmake b/cmake/system.cmake index 064e7e4f2..a060d98cb 100644 --- a/cmake/system.cmake +++ b/cmake/system.cmake @@ -33,7 +33,7 @@ endif () if (DEFINED BINARY AND DEFINED TARGET AND BINARY EQUAL 32) message(STATUS "Compiling a ${BINARY}-bit binary.") set(NO_AVX 1) - if (${TARGET} STREQUAL "HASWELL" OR ${TARGET} STREQUAL "SANDYBRIDGE") + if (${TARGET} STREQUAL "HASWELL" OR ${TARGET} STREQUAL "SANDYBRIDGE" OR ${TARGET} STREQUAL "SKYLAKEX") set(TARGET "NEHALEM") endif () if (${TARGET} STREQUAL "BULLDOZER" OR ${TARGET} STREQUAL "PILEDRIVER" OR ${TARGET} STREQUAL "ZEN") @@ -41,6 +41,22 @@ if (DEFINED BINARY AND DEFINED TARGET AND BINARY EQUAL 32) endif () endif () +if (DEFINED TARGET) + if (${TARGET} STREQUAL "SKYLAKEX" AND NOT NO_AVX512) + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=skylake-avx512") + endif() + if (${TARGET} STREQUAL "HASWELL" AND NOT NO_AVX2) + if (${CMAKE_C_COMPILER_ID} STREQUAL "GNU") + execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION) + if (${GCC_VERSION} VERSION_GREATER 4.7 OR ${GCC_VERSION} VERSION_EQUAL 4.7) + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mavx2") + endif() + elseif (${CMAKE_C_COMPILER_ID} STREQUAL "CLANG") + set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mavx2") + endif() + endif() +endif() + if (DEFINED TARGET) message(STATUS "Targeting the ${TARGET} architecture.") set(GETARCH_FLAGS "-DFORCE_${TARGET}") @@ -96,8 +112,12 @@ if (NOT CMAKE_CROSSCOMPILING) endif() +if (NOT DEFINED NUM_PARALLEL) + set(NUM_PARALLEL 1) +endif() + if (NOT DEFINED NUM_THREADS) - if (NOT NUM_CORES EQUAL 0) + if (DEFINED NUM_CORES AND NOT NUM_CORES EQUAL 0) # HT? 
set(NUM_THREADS ${NUM_CORES}) else () @@ -159,6 +179,9 @@ endif () if (DYNAMIC_ARCH) set(CCOMMON_OPT "${CCOMMON_OPT} -DDYNAMIC_ARCH") + if (DYNAMIC_OLDER) + set(CCOMMON_OPT "${CCOMMON_OPT} -DDYNAMIC_OLDER") + endif () endif () if (NO_LAPACK) @@ -207,6 +230,10 @@ if (CONSISTENT_FPCSR) set(CCOMMON_OPT "${CCOMMON_OPT} -DCONSISTENT_FPCSR") endif () +if (USE_TLS) + set(CCOMMON_OPT "${CCOMMON_OPT} -DUSE_TLS") +endif () + # Only for development # set(CCOMMON_OPT "${CCOMMON_OPT} -DPARAMTEST") # set(CCOMMON_OPT "${CCOMMON_OPT} -DPREFETCHTEST") @@ -224,6 +251,8 @@ endif () set(CCOMMON_OPT "${CCOMMON_OPT} -DMAX_CPU_NUMBER=${NUM_THREADS}") +set(CCOMMON_OPT "${CCOMMON_OPT} -DMAX_PARALLEL_NUMBER=${NUM_PARALLEL}") + if (USE_SIMPLE_THREADED_LEVEL3) set(CCOMMON_OPT "${CCOMMON_OPT} -DUSE_SIMPLE_THREADED_LEVEL3") endif () @@ -291,6 +320,8 @@ if (MIXED_MEMORY_ALLOCATION) set(CCOMMON_OPT "${CCOMMON_OPT} -DMIXED_MEMORY_ALLOCATION") endif () +set(CCOMMON_OPT "${CCOMMON_OPT} -DVERSION=\"\\\"${OpenBLAS_VERSION}\\\"\"") + set(REVISION "-r${OpenBLAS_VERSION}") set(MAJOR_VERSION ${OpenBLAS_MAJOR_VERSION}) diff --git a/cmake/system_check.cmake b/cmake/system_check.cmake index d47c38cdd..6b602c1b0 100644 --- a/cmake/system_check.cmake +++ b/cmake/system_check.cmake @@ -10,6 +10,16 @@ if (${HOST_OS} STREQUAL "WINDOWS") set(HOST_OS WINNT) endif () +if (${HOST_OS} STREQUAL "LINUX") +# check if we're building natively on Android (TERMUX) + EXECUTE_PROCESS( COMMAND uname -o COMMAND tr -d '\n' OUTPUT_VARIABLE OPERATING_SYSTEM) + if(${OPERATING_SYSTEM} MATCHES "Android") + set(HOST_OS ANDROID) + endif(${OPERATING_SYSTEM} MATCHES "Android") +endif() + + + if(CMAKE_COMPILER_IS_GNUCC AND WIN32) execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpmachine OUTPUT_VARIABLE OPENBLAS_GCC_TARGET_MACHINE @@ -66,3 +76,12 @@ else() set(BINARY32 1) endif() +if (X86_64 OR X86) + file(WRITE ${PROJECT_BINARY_DIR}/avx512.tmp "#include <immintrin.h>\n\nint main(void){ __asm__ volatile(\"vbroadcastss -4 * 4(%rsi), %zmm2\"); }") +execute_process(COMMAND ${CMAKE_C_COMPILER} -march=skylake-avx512 -v -o ${PROJECT_BINARY_DIR}/avx512.o -x c ${PROJECT_BINARY_DIR}/avx512.tmp OUTPUT_QUIET ERROR_QUIET RESULT_VARIABLE NO_AVX512) +if (NO_AVX512 EQUAL 1) +set (CCOMMON_OPT "${CCOMMON_OPT} -DNO_AVX512") +endif() + file(REMOVE "avx512.tmp" "avx512.o") +endif() + diff --git a/common.h b/common.h index ae98279ef..7fcd5e316 100644 --- a/common.h +++ b/common.h @@ -93,7 +93,7 @@ extern "C" { #include <sched.h> #endif -#if defined(OS_DARWIN) || defined(OS_FREEBSD) || defined(OS_NETBSD) || defined(OS_ANDROID) +#if defined(OS_DARWIN) || defined(OS_FREEBSD) || defined(OS_NETBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_ANDROID) #include <sched.h> #endif @@ -105,6 +105,10 @@ extern "C" { #endif #endif +#ifdef OS_HAIKU +#define NO_SYSV_IPC +#endif + #ifdef OS_WINDOWS #ifdef ATOM #define GOTO_ATOM ATOM @@ -179,7 +183,7 @@ extern "C" { #define ALLOCA_ALIGN 63UL -#define NUM_BUFFERS (MAX_CPU_NUMBER * 2) +#define NUM_BUFFERS MAX(50,(MAX_CPU_NUMBER * 2 * MAX_PARALLEL_NUMBER)) #ifdef NEEDBUNDERSCORE #define BLASFUNC(FUNC) FUNC##_ @@ -253,8 +257,14 @@ typedef unsigned long BLASULONG; #ifdef USE64BITINT typedef BLASLONG blasint; +#if defined(OS_WINDOWS) && defined(__64BIT__) +#define blasabs(x) llabs(x) +#else +#define blasabs(x) labs(x) +#endif #else typedef int blasint; +#define blasabs(x) abs(x) #endif #else #ifdef USE64BITINT @@ -642,6 +652,7 @@ void gotoblas_profile_init(void); void gotoblas_profile_quit(void); #ifdef USE_OPENMP + #ifndef C_MSVC int omp_in_parallel(void); int
omp_get_num_procs(void); @@ -649,6 +660,21 @@ int omp_get_num_procs(void); __declspec(dllimport) int __cdecl omp_in_parallel(void); __declspec(dllimport) int __cdecl omp_get_num_procs(void); #endif + +#if (__STDC_VERSION__ >= 201112L) +#if defined(C_GCC) && ( __GNUC__ < 7) +// workaround for GCC bug 65467 +#ifndef _Atomic +#define _Atomic volatile +#endif +#endif +#include <stdatomic.h> +#else +#ifndef _Atomic +#define _Atomic volatile +#endif +#endif + #else #ifdef __ELF__ int omp_in_parallel (void) __attribute__ ((weak)); diff --git a/common_level3.h b/common_level3.h index 1f5490baa..6fa902be8 100644 --- a/common_level3.h +++ b/common_level3.h @@ -47,6 +47,14 @@ __global__ void cuda_dgemm_kernel(int, int, int, double *, double *, double *); extern "C" { #endif +extern void sgemm_kernel_direct(BLASLONG M, BLASLONG N, BLASLONG K, + float * A, BLASLONG strideA, + float * B, BLASLONG strideB, + float * R, BLASLONG strideR); + +extern int sgemm_kernel_direct_performant(BLASLONG M, BLASLONG N, BLASLONG K); + + int sgemm_beta(BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG); int dgemm_beta(BLASLONG, BLASLONG, BLASLONG, double, diff --git a/common_mips64.h b/common_mips64.h index 93bc7e519..1163413dc 100644 --- a/common_mips64.h +++ b/common_mips64.h @@ -94,7 +94,7 @@ static inline unsigned int rpcc(void){ #define RPCC_DEFINED #ifndef NO_AFFINITY -#define WHEREAMI +//#define WHEREAMI static inline int WhereAmI(void){ int ret=0; __asm__ __volatile__(".set push \n" diff --git a/common_stackalloc.h b/common_stackalloc.h index 71fb1a477..ec0fa1611 100644 --- a/common_stackalloc.h +++ b/common_stackalloc.h @@ -47,14 +47,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * - large enough to support all architectures and kernel * Chosing a too small SIZE will lead to a stack smashing. */ -#define STACK_ALLOC(SIZE, TYPE, BUFFER) \ - /* make it volatile because some function (ex: dgemv_n.S) */ \ - /* do not restore all register */ \ - volatile int stack_alloc_size = SIZE; \ - if(stack_alloc_size > MAX_STACK_ALLOC / sizeof(TYPE)) \ - stack_alloc_size = 0; \ - STACK_ALLOC_PROTECT_SET \ - TYPE stack_buffer[stack_alloc_size] __attribute__((aligned(0x20))); \ +#define STACK_ALLOC(SIZE, TYPE, BUFFER) \ + /* make it volatile because some function (ex: dgemv_n.S) */ \ + /* do not restore all register */ \ + volatile int stack_alloc_size = SIZE; \ + if (stack_alloc_size > MAX_STACK_ALLOC / sizeof(TYPE)) stack_alloc_size = 0; \ + STACK_ALLOC_PROTECT_SET \ + /* Avoid declaring an array of length 0 */ \ + TYPE stack_buffer[stack_alloc_size ? stack_alloc_size : 1] \ + __attribute__((aligned(0x20))); \ BUFFER = stack_alloc_size ? stack_buffer : (TYPE *)blas_memory_alloc(1); #else //Original OpenBLAS/GotoBLAS codes.
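The ternary added to `STACK_ALLOC` above exists because a variable-length array of length 0 is not valid C. A standalone sketch of the same pattern (a hypothetical helper, not OpenBLAS code): clamp the declared length to at least 1, and take the buffer from the heap whenever the stack array is not used.

```c
/* Standalone sketch of the STACK_ALLOC pattern above (hypothetical helper).
 * Declaring a VLA of length 0 is undefined behavior, so the declared size
 * is clamped to 1; the buffer actually used comes from the stack for small
 * sizes and from the heap otherwise. */
#include <stdlib.h>

#define MAX_STACK_ELEMS 1024

void process(const double *src, size_t n) {
    size_t stack_n = (n <= MAX_STACK_ELEMS) ? n : 0;
    double stack_buf[stack_n ? stack_n : 1];  /* never a length-0 array */
    double *buf = stack_n ? stack_buf : malloc(n * sizeof *buf);
    if (buf == NULL) return;
    for (size_t i = 0; i < n; i++)
        buf[i] = 2.0 * src[i];                /* ...real work would go here... */
    if (!stack_n) free(buf);
}
```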
diff --git a/common_x86.h b/common_x86.h index 4363fb2f4..4f538c948 100644 --- a/common_x86.h +++ b/common_x86.h @@ -178,7 +178,13 @@ static __inline int blas_quickdivide(unsigned int x, unsigned int y){ result = x/y; return result; #else - +#if (MAX_CPU_NUMBER > 64) + if ( y > 64) { + result = x/y; + return result; + } +#endif + y = blas_quick_divide_table[y]; __asm__ __volatile__ ("mull %0" :"=d" (result) :"a"(x), "0" (y)); @@ -327,7 +333,7 @@ REALNAME: #endif #endif -#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_NETBSD) || defined(__ELF__) +#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_NETBSD) || defined(OS_OPENBSD) || defined(__ELF__) #define PROLOGUE \ .text; \ .align 16; \ diff --git a/common_x86_64.h b/common_x86_64.h index bee88d3ce..f27c1e9be 100644 --- a/common_x86_64.h +++ b/common_x86_64.h @@ -60,8 +60,13 @@ #endif */ -#define MB -#define WMB +#ifdef __GNUC__ +#define MB do { __asm__ __volatile__("": : :"memory"); } while (0) +#define WMB do { __asm__ __volatile__("": : :"memory"); } while (0) +#else +#define MB do {} while (0) +#define WMB do {} while (0) +#endif static void __inline blas_lock(volatile BLASULONG *address){ @@ -129,7 +134,7 @@ static __inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx){ "=b" (*ebx), "=c" (*ecx), "=d" (*edx) - : "0" (op)); + : "0" (op), "c"(0)); #endif } @@ -196,6 +201,13 @@ static __inline int blas_quickdivide(unsigned int x, unsigned int y){ if (y <= 1) return x; +#if (MAX_CPU_NUMBER > 64) + if (y > 64) { + result = x / y; + return result; + } +#endif + y = blas_quick_divide_table[y]; __asm__ __volatile__ ("mull %0" :"=d" (result) :"a"(x), "0" (y)); @@ -403,7 +415,7 @@ REALNAME: #define EPILOGUE .end #endif -#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_NETBSD) || defined(__ELF__) || defined(C_PGI) +#if defined(OS_LINUX) || defined(OS_FREEBSD) || defined(OS_NETBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(__ELF__) || defined(C_PGI) #define PROLOGUE \ .text; \ .align 512; \ diff --git a/cpuid.h b/cpuid.h index 1dacc49ba..697f43133 100644 --- a/cpuid.h +++ b/cpuid.h @@ -53,6 +53,7 @@ #define VENDOR_SIS 8 #define VENDOR_TRANSMETA 9 #define VENDOR_NSC 10 +#define VENDOR_HYGON 11 #define VENDOR_UNKNOWN 99 #define BITMASK(a, b, c) ((((a) >> (b)) & (c))) @@ -115,6 +116,8 @@ #define CORE_STEAMROLLER 25 #define CORE_EXCAVATOR 26 #define CORE_ZEN 27 +#define CORE_SKYLAKEX 28 +#define CORE_DHYANA 29 #define HAVE_SSE (1 << 0) #define HAVE_SSE2 (1 << 1) @@ -137,6 +140,8 @@ #define HAVE_AVX (1 << 18) #define HAVE_FMA4 (1 << 19) #define HAVE_FMA3 (1 << 20) +#define HAVE_AVX512VL (1 << 21) +#define HAVE_AVX2 (1 << 22) #define CACHE_INFO_L1_I 1 #define CACHE_INFO_L1_D 2 @@ -211,5 +216,9 @@ typedef struct { #define CPUTYPE_STEAMROLLER 49 #define CPUTYPE_EXCAVATOR 50 #define CPUTYPE_ZEN 51 +#define CPUTYPE_SKYLAKEX 52 +#define CPUTYPE_DHYANA 53 + +#define CPUTYPE_HYGON_UNKNOWN 54 #endif diff --git a/cpuid_arm.c b/cpuid_arm.c index 2f8959242..19aa90718 100644 --- a/cpuid_arm.c +++ b/cpuid_arm.c @@ -34,7 +34,7 @@ #define CPU_CORTEXA15 4 static char *cpuname[] = { - "UNKOWN", + "UNKNOWN", "ARMV6", "ARMV7", "CORTEXA9", diff --git a/cpuid_arm64.c b/cpuid_arm64.c index 3a7aec7be..5077d7b11 100644 --- a/cpuid_arm64.c +++ b/cpuid_arm64.c @@ -29,25 +29,37 @@ #define CPU_UNKNOWN 0 #define CPU_ARMV8 1 -#define CPU_CORTEXA57 2 -#define CPU_VULCAN 3 -#define CPU_THUNDERX 4 -#define CPU_THUNDERX2T99 5 +// Arm +#define CPU_CORTEXA53 2 +#define CPU_CORTEXA57 3 +#define CPU_CORTEXA72 4 +#define CPU_CORTEXA73 
5 +// Qualcomm +#define CPU_FALKOR 6 +// Cavium +#define CPU_THUNDERX 7 +#define CPU_THUNDERX2T99 8 static char *cpuname[] = { "UNKNOWN", "ARMV8" , + "CORTEXA53", "CORTEXA57", - "VULCAN", + "CORTEXA72", + "CORTEXA73", + "FALKOR", "THUNDERX", "THUNDERX2T99" }; static char *cpuname_lower[] = { "unknown", - "armv8" , + "armv8", + "cortexa53", "cortexa57", - "vulcan", + "cortexa72", + "cortexa73", + "falkor", "thunderx", "thunderx2t99" }; @@ -114,13 +126,24 @@ int detect(void) fclose(infile); if(cpu_part != NULL && cpu_implementer != NULL) { - if (strstr(cpu_part, "0xd07") && strstr(cpu_implementer, "0x41")) - return CPU_CORTEXA57; - else if (strstr(cpu_part, "0x516") && strstr(cpu_implementer, "0x42")) - return CPU_VULCAN; - else if (strstr(cpu_part, "0x0a1") && strstr(cpu_implementer, "0x43")) + // Arm + if (strstr(cpu_implementer, "0x41")) { + if (strstr(cpu_part, "0xd03")) + return CPU_CORTEXA53; + else if (strstr(cpu_part, "0xd07")) + return CPU_CORTEXA57; + else if (strstr(cpu_part, "0xd08")) + return CPU_CORTEXA72; + else if (strstr(cpu_part, "0xd09")) + return CPU_CORTEXA73; + } + // Qualcomm + else if (strstr(cpu_implementer, "0x51") && strstr(cpu_part, "0xc00")) + return CPU_FALKOR; + // Cavium + else if (strstr(cpu_implementer, "0x43") && strstr(cpu_part, "0x0a1")) return CPU_THUNDERX; - else if (strstr(cpu_part, "0xFFF") && strstr(cpu_implementer, "0x43")) /* TODO */ + else if (strstr(cpu_implementer, "0x43") && strstr(cpu_part, "0x0af")) return CPU_THUNDERX2T99; } @@ -179,64 +202,63 @@ void get_subdirname(void) void get_cpuconfig(void) { + // All arches should define ARMv8 + printf("#define ARMV8\n"); + printf("#define HAVE_NEON\n"); // This shouldn't be necessary + printf("#define HAVE_VFPV4\n"); // This shouldn't be necessary + int d = detect(); switch (d) { + case CPU_CORTEXA53: + printf("#define %s\n", cpuname[d]); + // Fall-through case CPU_ARMV8: - printf("#define ARMV8\n"); - printf("#define L1_DATA_SIZE 32768\n"); - printf("#define L1_DATA_LINESIZE 64\n"); - printf("#define L2_SIZE 262144\n"); - printf("#define L2_LINESIZE 64\n"); - printf("#define DTB_DEFAULT_ENTRIES 64\n"); - printf("#define DTB_SIZE 4096\n"); - printf("#define L2_ASSOCIATIVE 4\n"); - break; - - case CPU_VULCAN: - printf("#define VULCAN \n"); - printf("#define HAVE_VFP \n"); - printf("#define HAVE_VFPV3 \n"); - printf("#define HAVE_NEON \n"); - printf("#define HAVE_VFPV4 \n"); - printf("#define L1_CODE_SIZE 32768 \n"); - printf("#define L1_CODE_LINESIZE 64 \n"); - printf("#define L1_CODE_ASSOCIATIVE 8 \n"); - printf("#define L1_DATA_SIZE 32768 \n"); - printf("#define L1_DATA_LINESIZE 64 \n"); - printf("#define L1_DATA_ASSOCIATIVE 8 \n"); - printf("#define L2_SIZE 262144 \n"); - printf("#define L2_LINESIZE 64 \n"); - printf("#define L2_ASSOCIATIVE 8 \n"); - printf("#define L3_SIZE 33554432 \n"); - printf("#define L3_LINESIZE 64 \n"); - printf("#define L3_ASSOCIATIVE 32 \n"); - printf("#define DTB_DEFAULT_ENTRIES 64 \n"); - printf("#define DTB_SIZE 4096 \n"); + // Minimum parameters for ARMv8 (based on A53) + printf("#define L1_DATA_SIZE 32768\n"); + printf("#define L1_DATA_LINESIZE 64\n"); + printf("#define L2_SIZE 262144\n"); + printf("#define L2_LINESIZE 64\n"); + printf("#define DTB_DEFAULT_ENTRIES 64\n"); + printf("#define DTB_SIZE 4096\n"); + printf("#define L2_ASSOCIATIVE 4\n"); break; case CPU_CORTEXA57: - printf("#define CORTEXA57\n"); - printf("#define HAVE_VFP\n"); - printf("#define HAVE_VFPV3\n"); - printf("#define HAVE_NEON\n"); - printf("#define HAVE_VFPV4\n"); + case CPU_CORTEXA72: + case 
CPU_CORTEXA73: + // Common minimum settings for these Arm cores + // Can change a lot, but we need to be conservative + // TODO: detect info from /sys if possible + printf("#define %s\n", cpuname[d]); printf("#define L1_CODE_SIZE 49152\n"); printf("#define L1_CODE_LINESIZE 64\n"); printf("#define L1_CODE_ASSOCIATIVE 3\n"); printf("#define L1_DATA_SIZE 32768\n"); printf("#define L1_DATA_LINESIZE 64\n"); printf("#define L1_DATA_ASSOCIATIVE 2\n"); - printf("#define L2_SIZE 2097152\n"); + printf("#define L2_SIZE 524288\n"); printf("#define L2_LINESIZE 64\n"); printf("#define L2_ASSOCIATIVE 16\n"); printf("#define DTB_DEFAULT_ENTRIES 64\n"); printf("#define DTB_SIZE 4096\n"); break; + case CPU_FALKOR: + printf("#define FALKOR\n"); + printf("#define L1_CODE_SIZE 65536\n"); + printf("#define L1_CODE_LINESIZE 64\n"); + printf("#define L1_DATA_SIZE 32768\n"); + printf("#define L1_DATA_LINESIZE 128\n"); + printf("#define L2_SIZE 524288\n"); + printf("#define L2_LINESIZE 64\n"); + printf("#define DTB_DEFAULT_ENTRIES 64\n"); + printf("#define DTB_SIZE 4096\n"); + printf("#define L2_ASSOCIATIVE 16\n"); + break; + case CPU_THUNDERX: - printf("#define ARMV8\n"); printf("#define THUNDERX\n"); printf("#define L1_DATA_SIZE 32768\n"); printf("#define L1_DATA_LINESIZE 128\n"); @@ -248,11 +270,7 @@ void get_cpuconfig(void) break; case CPU_THUNDERX2T99: - printf("#define VULCAN \n"); - printf("#define HAVE_VFP \n"); - printf("#define HAVE_VFPV3 \n"); - printf("#define HAVE_NEON \n"); - printf("#define HAVE_VFPV4 \n"); + printf("#define THUNDERX2T99 \n"); printf("#define L1_CODE_SIZE 32768 \n"); printf("#define L1_CODE_LINESIZE 64 \n"); printf("#define L1_CODE_ASSOCIATIVE 8 \n"); diff --git a/cpuid_mips.c b/cpuid_mips.c index 15c58959e..6f2932c94 100644 --- a/cpuid_mips.c +++ b/cpuid_mips.c @@ -72,10 +72,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define CPU_UNKNOWN 0 #define CPU_P5600 1 +#define CPU_1004K 2 static char *cpuname[] = { - "UNKOWN", - "P5600" + "UNKNOWN", + "P5600", + "1004K" }; int detect(void){ @@ -90,7 +92,7 @@ int detect(void){ if (!strncmp("cpu", buffer, 3)){ p = strchr(buffer, ':') + 2; #if 0 - fprintf(stderr, "%s\n", p); + fprintf(stderr, "%s \n", p); #endif break; } @@ -99,43 +101,13 @@ fclose(infile); if(p != NULL){ - if (strstr(p, "Loongson-3A")){ - return CPU_LOONGSON3A; - }else if(strstr(p, "Loongson-3B")){ - return CPU_LOONGSON3B; - }else if (strstr(p, "Loongson-3")){ - infile = fopen("/proc/cpuinfo", "r"); - p = (char *)NULL; - while (fgets(buffer, sizeof(buffer), infile)){ - if (!strncmp("system type", buffer, 11)){ - p = strchr(buffer, ':') + 2; - break; - } - } - fclose(infile); - if (strstr(p, "loongson3a")) - return CPU_LOONGSON3A; - }else{ + if (strstr(p, "5600")) { + return CPU_P5600; + } else if (strstr(p, "1004K")) { + return CPU_1004K; + } else + return CPU_UNKNOWN; } - } - //Check model name for Loongson3 - infile = fopen("/proc/cpuinfo", "r"); - p = (char *)NULL; - while (fgets(buffer, sizeof(buffer), infile)){ - if (!strncmp("model name", buffer, 10)){ - p = strchr(buffer, ':') + 2; - break; - } - } - fclose(infile); - if(p != NULL){ - if (strstr(p, "Loongson-3A")){ - return CPU_LOONGSON3A; - }else if(strstr(p, "Loongson-3B")){ - return CPU_LOONGSON3B; - } - } #endif return CPU_UNKNOWN; } @@ -149,7 +121,7 @@ void get_architecture(void){ } void get_subarchitecture(void){ - if(detect()==CPU_P5600){ + if(detect()==CPU_P5600|| detect()==CPU_1004K){ printf("P5600"); }else{ printf("UNKNOWN"); @@ -170,6 +142,14 @@ printf("#define DTB_DEFAULT_ENTRIES 64\n"); printf("#define DTB_SIZE 4096\n"); printf("#define L2_ASSOCIATIVE 8\n"); + } else if (detect()==CPU_1004K) { + printf("#define MIPS1004K\n"); + printf("#define L1_DATA_SIZE 32768\n"); + printf("#define L1_DATA_LINESIZE 32\n"); + printf("#define L2_SIZE 262144\n"); + printf("#define DTB_DEFAULT_ENTRIES 8\n"); + printf("#define DTB_SIZE 4096\n"); + printf("#define L2_ASSOCIATIVE 4\n"); }else{ printf("#define UNKNOWN\n"); } @@ -178,6 +158,8 @@ void get_libname(void){ if(detect()==CPU_P5600) { printf("p5600\n"); + } else if (detect()==CPU_1004K) { + printf("1004K\n"); }else{ printf("mips\n"); } diff --git a/cpuid_mips64.c b/cpuid_mips64.c index dcb559a7c..0e32bfc0b 100644 --- a/cpuid_mips64.c +++ b/cpuid_mips64.c @@ -79,7 +79,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define CPU_I6500 6 static char *cpuname[] = { - "UNKOWN", + "UNKNOWN", "SICORTEX", "LOONGSON3A", "LOONGSON3B", diff --git a/cpuid_power.c b/cpuid_power.c index 951204ae9..82a3f4aac 100644 --- a/cpuid_power.c +++ b/cpuid_power.c @@ -56,6 +56,7 @@ #define CPUTYPE_CELL 6 #define CPUTYPE_PPCG4 7 #define CPUTYPE_POWER8 8 +#define CPUTYPE_POWER9 9 char *cpuname[] = { "UNKNOWN", @@ -66,7 +67,8 @@ char *cpuname[] = { "POWER6", "CELL", "PPCG4", - "POWER8" + "POWER8", + "POWER9" }; char *lowercpuname[] = { @@ -78,7 +80,8 @@ char *lowercpuname[] = { "power6", "cell", "ppcg4", - "power8" + "power8", + "power9" }; char *corename[] = { @@ -90,7 +93,8 @@ char *corename[] = { "POWER6", "CELL", "PPCG4", - "POWER8" + "POWER8", + "POWER8" }; int detect(void){ @@ -120,6 +124,7 @@ int detect(void){ if (!strncasecmp(p, "POWER6", 6)) return CPUTYPE_POWER6; if (!strncasecmp(p, "POWER7", 6)) return CPUTYPE_POWER6; if (!strncasecmp(p, "POWER8", 6)) return CPUTYPE_POWER8; + if (!strncasecmp(p, "POWER9", 6)) return CPUTYPE_POWER8; if (!strncasecmp(p, "Cell", 4)) return CPUTYPE_CELL; if (!strncasecmp(p, "7447", 4)) return CPUTYPE_PPCG4; @@ -127,6 +132,33 @@ int detect(void){ #endif #ifdef _AIX + FILE *infile; + char buffer[512], *p; + + p = (char *)NULL; + infile = popen("prtconf|grep 'Processor Type'", "r"); + while (fgets(buffer, sizeof(buffer), infile)){ + if (!strncmp("Pro", buffer, 3)){ + p = strchr(buffer, ':') + 2; +#if 0 + fprintf(stderr, "%s\n", p); +#endif + break; + } + } + + pclose(infile); + + if (!strncasecmp(p, "POWER3", 6)) return CPUTYPE_POWER3; + if (!strncasecmp(p, "POWER4", 6)) return CPUTYPE_POWER4; + if (!strncasecmp(p, "PPC970", 6)) return CPUTYPE_PPC970; + if (!strncasecmp(p, "POWER5", 6)) return CPUTYPE_POWER5; + if (!strncasecmp(p, "POWER6", 6)) return CPUTYPE_POWER6; + if (!strncasecmp(p, "POWER7", 6)) return CPUTYPE_POWER6; + if (!strncasecmp(p, "POWER8", 6)) return CPUTYPE_POWER8; + if (!strncasecmp(p, "POWER9", 6)) return CPUTYPE_POWER8; + if (!strncasecmp(p, "Cell", 4)) return CPUTYPE_CELL; + if (!strncasecmp(p, "7447", 4)) return CPUTYPE_PPCG4; return CPUTYPE_POWER5; #endif @@ -142,6 +174,52 @@ int detect(void){ return CPUTYPE_PPC970; #endif + +#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) +int id; +__asm __volatile("mfpvr %0" : "=r"(id)); +switch ( id >> 16 ) { + case 0x4e: // POWER9 + return CPUTYPE_POWER8; + break; + case 0x4d: + case 0x4b: // POWER8/8E + return CPUTYPE_POWER8; + break; + case 0x4a: + case 0x3f: // POWER7/7E + return CPUTYPE_POWER6; + break; + case 0x3e: + return CPUTYPE_POWER6; + break; + case 0x3a: + return CPUTYPE_POWER5; + break; + case 0x35: + case 0x38: // POWER4 /4+ + return CPUTYPE_POWER4; + break; + case 0x40: + case 0x41: // POWER3 /3+ + return CPUTYPE_POWER3; + break; + case 0x39: + case 0x3c: + case 0x44: + case 0x45: + return CPUTYPE_PPC970; + break; + case 0x70: + return CPUTYPE_CELL; + break; + case 0x8003: + return CPUTYPE_PPCG4; + break; + default: + return CPUTYPE_UNKNOWN; + } +#endif } void get_architecture(void){ diff --git a/cpuid_sparc.c b/cpuid_sparc.c index f390f0d7f..6902a8fa2 100644 --- a/cpuid_sparc.c +++ b/cpuid_sparc.c @@ -57,3 +57,8 @@ void get_cpuconfig(void){ void get_libname(void){ printf("v9\n"); } + +char *get_corename(void){ + return "sparc"; +} + diff --git a/cpuid_x86.c b/cpuid_x86.c index 342c56525..726014033 100644 --- a/cpuid_x86.c +++ b/cpuid_x86.c @@ -50,6 +50,8 @@ #ifdef NO_AVX #define CPUTYPE_HASWELL CPUTYPE_NEHALEM #define CORE_HASWELL CORE_NEHALEM +#define CPUTYPE_SKYLAKEX CPUTYPE_NEHALEM 
+#define CORE_SKYLAKEX CORE_NEHALEM #define CPUTYPE_SANDYBRIDGE CPUTYPE_NEHALEM #define CORE_SANDYBRIDGE CORE_NEHALEM #define CPUTYPE_BULLDOZER CPUTYPE_BARCELONA @@ -95,10 +97,10 @@ static C_INLINE void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx){ ("mov %%ebx, %%edi;" "cpuid;" "xchgl %%ebx, %%edi;" - : "=a" (*eax), "=D" (*ebx), "=c" (*ecx), "=d" (*edx) : "a" (op) : "cc"); + : "=a" (*eax), "=D" (*ebx), "=c" (*ecx), "=d" (*edx) : "a" (op), "c" (0) : "cc"); #else __asm__ __volatile__ - ("cpuid": "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) : "a" (op) : "cc"); + ("cpuid": "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) : "a" (op) , "c" (0) : "cc"); #endif } @@ -209,6 +211,44 @@ int support_avx(){ #endif } +int support_avx2(){ +#ifndef NO_AVX2 + int eax, ebx, ecx=0, edx; + int ret=0; + + if (!support_avx()) + return 0; + cpuid(7, &eax, &ebx, &ecx, &edx); + if((ebx & (1<<7)) != 0) + ret=1; //OS supports AVX2 + return ret; +#else + return 0; +#endif +} + +int support_avx512(){ +#ifndef NO_AVX512 + int eax, ebx, ecx, edx; + int ret=0; + + if (!support_avx()) + return 0; + cpuid(7, &eax, &ebx, &ecx, &edx); + if((ebx & 32) != 32){ + ret=0; //OS does not even support AVX2 + } + if((ebx & (1<<31)) != 0){ + xgetbv(0, &eax, &edx); + if((eax & 0xe0) == 0xe0) + ret=1; //OS supports AVX512VL + } + return ret; +#else + return 0; +#endif +} + int get_vendor(void){ int eax, ebx, ecx, edx; @@ -231,6 +271,7 @@ int get_vendor(void){ if (!strcmp(vendor, " SiS SiS SiS")) return VENDOR_SIS; if (!strcmp(vendor, "GenuineTMx86")) return VENDOR_TRANSMETA; if (!strcmp(vendor, "Geode by NSC")) return VENDOR_NSC; + if (!strcmp(vendor, "HygonGenuine")) return VENDOR_HYGON; if ((eax == 0) || ((eax & 0x500) != 0)) return VENDOR_INTEL; @@ -292,6 +333,8 @@ int get_cputype(int gettype){ if ((ecx & (1 << 20)) != 0) feature |= HAVE_SSE4_2; #ifndef NO_AVX if (support_avx()) feature |= HAVE_AVX; + if (support_avx2()) feature |= HAVE_AVX2; + if (support_avx512()) feature |= HAVE_AVX512VL; if ((ecx & (1 << 12)) != 0) feature |= HAVE_FMA3; #endif @@ -1004,7 +1047,9 @@ int get_cacheinfo(int type, cache_info_t *cacheinfo){ } } - if ((get_vendor() == VENDOR_AMD) || (get_vendor() == VENDOR_CENTAUR)) { + if ((get_vendor() == VENDOR_AMD) || + (get_vendor() == VENDOR_HYGON) || + (get_vendor() == VENDOR_CENTAUR)) { cpuid(0x80000005, &eax, &ebx, &ecx, &edx); LDTB.size = 4096; @@ -1226,22 +1271,18 @@ int get_cpuname(void){ return CPUTYPE_NEHALEM; case 12: case 15: - if(support_avx()) -#ifndef NO_AVX2 + if(support_avx2()) return CPUTYPE_HASWELL; -#else + if(support_avx()) return CPUTYPE_SANDYBRIDGE; -#endif else return CPUTYPE_NEHALEM; case 13: //Broadwell - if(support_avx()) -#ifndef NO_AVX2 + if(support_avx2()) return CPUTYPE_HASWELL; -#else + if(support_avx()) return CPUTYPE_SANDYBRIDGE; -#endif else return CPUTYPE_NEHALEM; } @@ -1250,33 +1291,27 @@ int get_cpuname(void){ switch (model) { case 5: case 6: - if(support_avx()) -#ifndef NO_AVX2 + if(support_avx2()) return CPUTYPE_HASWELL; -#else + if(support_avx()) return CPUTYPE_SANDYBRIDGE; -#endif else return CPUTYPE_NEHALEM; case 7: case 15: //Broadwell - if(support_avx()) -#ifndef NO_AVX2 + if(support_avx2()) return CPUTYPE_HASWELL; -#else + if(support_avx()) return CPUTYPE_SANDYBRIDGE; -#endif else return CPUTYPE_NEHALEM; case 14: //Skylake - if(support_avx()) -#ifndef NO_AVX2 + if(support_avx2()) return CPUTYPE_HASWELL; -#else + if(support_avx()) return CPUTYPE_SANDYBRIDGE; -#endif else return CPUTYPE_NEHALEM; case 12: @@ -1290,33 +1325,36 @@ int get_cpuname(void){ 
switch (model) { case 6: //Broadwell - if(support_avx()) -#ifndef NO_AVX2 + if(support_avx2()) return CPUTYPE_HASWELL; -#else + if(support_avx()) return CPUTYPE_SANDYBRIDGE; -#endif else return CPUTYPE_NEHALEM; case 5: + // Skylake X + if(support_avx512()) + return CPUTYPE_SKYLAKEX; + if(support_avx2()) + return CPUTYPE_HASWELL; + if(support_avx()) + return CPUTYPE_SANDYBRIDGE; + else + return CPUTYPE_NEHALEM; case 14: // Skylake - if(support_avx()) -#ifndef NO_AVX2 + if(support_avx2()) return CPUTYPE_HASWELL; -#else + if(support_avx()) return CPUTYPE_SANDYBRIDGE; -#endif else return CPUTYPE_NEHALEM; case 7: // Xeon Phi Knights Landing - if(support_avx()) -#ifndef NO_AVX2 + if(support_avx2()) return CPUTYPE_HASWELL; -#else + if(support_avx()) return CPUTYPE_SANDYBRIDGE; -#endif else return CPUTYPE_NEHALEM; case 12: @@ -1324,16 +1362,27 @@ int get_cpuname(void){ return CPUTYPE_NEHALEM; } break; + case 6: + switch (model) { + case 6: // Cannon Lake + if(support_avx512()) + return CPUTYPE_SKYLAKEX; + if(support_avx2()) + return CPUTYPE_HASWELL; + if(support_avx()) + return CPUTYPE_SANDYBRIDGE; + else + return CPUTYPE_NEHALEM; + } + break; case 9: case 8: switch (model) { case 14: // Kaby Lake - if(support_avx()) -#ifndef NO_AVX2 + if(support_avx2()) return CPUTYPE_HASWELL; -#else + if(support_avx()) return CPUTYPE_SANDYBRIDGE; -#endif else return CPUTYPE_NEHALEM; } @@ -1420,6 +1469,8 @@ int get_cpuname(void){ switch (model) { case 1: // AMD Ryzen + case 8: + // AMD Ryzen2 if(support_avx()) #ifndef NO_AVX2 return CPUTYPE_ZEN; @@ -1435,6 +1486,26 @@ int get_cpuname(void){ return CPUTYPE_AMD_UNKNOWN; } + if (vendor == VENDOR_HYGON){ + switch (family) { + case 0xf: + switch (exfamily) { + case 9: + //Hygon Dhyana + if(support_avx()) +#ifndef NO_AVX2 + return CPUTYPE_ZEN; +#else + return CPUTYPE_SANDYBRIDGE; // closer in architecture to Sandy Bridge than to Excavator +#endif + else + return CPUTYPE_BARCELONA; + } + break; + } + return CPUTYPE_HYGON_UNKNOWN; + } + if (vendor == VENDOR_CYRIX){ switch (family) { case 0x4: @@ -1556,6 +1627,8 @@ static char *cpuname[] = { "STEAMROLLER", "EXCAVATOR", "ZEN", + "SKYLAKEX", + "DHYANA" }; static char *lowercpuname[] = { @@ -1610,10 +1683,12 @@ static char *lowercpuname[] = { "steamroller", "excavator", "zen", + "skylakex", + "dhyana" }; static char *corename[] = { - "UNKOWN", + "UNKNOWN", "80486", "P5", "P6", @@ -1641,6 +1716,8 @@ static char *corename[] = { "STEAMROLLER", "EXCAVATOR", "ZEN", + "SKYLAKEX", + "DHYANA" }; static char *corename_lower[] = { @@ -1672,6 +1749,8 @@ static char *corename_lower[] = { "steamroller", "excavator", "zen", + "skylakex", + "dhyana" }; @@ -1860,6 +1939,19 @@ int get_coretype(void){ else return CORE_NEHALEM; case 5: + // Skylake X +#ifndef NO_AVX512 + return CORE_SKYLAKEX; +#else + if(support_avx()) +#ifndef NO_AVX2 + return CORE_HASWELL; +#else + return CORE_SANDYBRIDGE; +#endif + else + return CORE_NEHALEM; +#endif case 14: // Skylake if(support_avx()) @@ -1958,6 +2050,8 @@ int get_coretype(void){ switch (model) { case 1: // AMD Ryzen + case 8: + // Ryzen 2 if(support_avx()) #ifndef NO_AVX2 return CORE_ZEN; @@ -1973,6 +2067,23 @@ int get_coretype(void){ } } + if (vendor == VENDOR_HYGON){ + if (family == 0xf){ + if (exfamily == 9) { + if(support_avx()) +#ifndef NO_AVX2 + return CORE_ZEN; +#else + return CORE_SANDYBRIDGE; // closer in architecture to Sandy Bridge than to Excavator +#endif + else + return CORE_BARCELONA; + } else { + return CORE_BARCELONA; + } + } + } + if (vendor == VENDOR_CENTAUR) { switch (family) { case 
0x6: @@ -2059,6 +2170,8 @@ void get_cpuconfig(void){ if (features & HAVE_SSE4A) printf("#define HAVE_SSE4A\n"); if (features & HAVE_SSE5 ) printf("#define HAVE_SSSE5\n"); if (features & HAVE_AVX ) printf("#define HAVE_AVX\n"); + if (features & HAVE_AVX2 ) printf("#define HAVE_AVX2\n"); + if (features & HAVE_AVX512VL ) printf("#define HAVE_AVX512VL\n"); if (features & HAVE_3DNOWEX) printf("#define HAVE_3DNOWEX\n"); if (features & HAVE_3DNOW) printf("#define HAVE_3DNOW\n"); if (features & HAVE_FMA4 ) printf("#define HAVE_FMA4\n"); @@ -2127,6 +2240,8 @@ void get_sse(void){ if (features & HAVE_SSE4A) printf("HAVE_SSE4A=1\n"); if (features & HAVE_SSE5 ) printf("HAVE_SSSE5=1\n"); if (features & HAVE_AVX ) printf("HAVE_AVX=1\n"); + if (features & HAVE_AVX2 ) printf("HAVE_AVX2=1\n"); + if (features & HAVE_AVX512VL ) printf("HAVE_AVX512VL=1\n"); if (features & HAVE_3DNOWEX) printf("HAVE_3DNOWEX=1\n"); if (features & HAVE_3DNOW) printf("HAVE_3DNOW=1\n"); if (features & HAVE_FMA4 ) printf("HAVE_FMA4=1\n"); diff --git a/cpuid_zarch.c b/cpuid_zarch.c index 4e1935429..e0d9221f3 100644 --- a/cpuid_zarch.c +++ b/cpuid_zarch.c @@ -29,15 +29,18 @@ #define CPU_GENERIC 0 #define CPU_Z13 1 +#define CPU_Z14 2 static char *cpuname[] = { "ZARCH_GENERIC", - "Z13" + "Z13", + "Z14" }; static char *cpuname_lower[] = { "zarch_generic", - "z13" + "z13", + "z14" }; int detect(void) @@ -62,6 +65,10 @@ int detect(void) if (strstr(p, "2964")) return CPU_Z13; if (strstr(p, "2965")) return CPU_Z13; + /* detect z14, but fall back to z13 */ + if (strstr(p, "3906")) return CPU_Z13; + if (strstr(p, "3907")) return CPU_Z13; + return CPU_GENERIC; } @@ -107,5 +114,9 @@ void get_cpuconfig(void) printf("#define Z13\n"); printf("#define DTB_DEFAULT_ENTRIES 64\n"); break; + case CPU_Z14: + printf("#define Z14\n"); + printf("#define DTB_DEFAULT_ENTRIES 64\n"); + break; } } diff --git a/ctest.c b/ctest.c index 27d3b473a..0571e9e02 100644 --- a/ctest.c +++ b/ctest.c @@ -60,6 +60,14 @@ OS_FREEBSD OS_NETBSD #endif +#if defined(__OpenBSD__) +OS_OPENBSD +#endif + +#if defined(__DragonFly__) +OS_DRAGONFLY +#endif + #if defined(__sun) OS_SUNOS #endif @@ -93,6 +101,10 @@ OS_INTERIX OS_LINUX #endif +#if defined(__HAIKU__) +OS_HAIKU +#endif + #if defined(__i386) || defined(_X86) ARCH_X86 #endif diff --git a/ctest/Makefile b/ctest/Makefile index 6eda43863..569a5dda3 100644 --- a/ctest/Makefile +++ b/ctest/Makefile @@ -102,7 +102,13 @@ clean :: rm -f x* FLDFLAGS = $(FFLAGS:-fPIC=) $(LDFLAGS) -CEXTRALIB = +ifeq ($(USE_OPENMP), 1) +ifeq ($(F_COMPILER), GFORTRAN) +ifeq ($(C_COMPILER), CLANG) +CEXTRALIB = -lomp +endif +endif +endif # Single real xscblat1: $(stestl1o) c_sblat1.o $(TOPDIR)/$(LIBNAME) diff --git a/driver/level2/gemv_thread.c b/driver/level2/gemv_thread.c index 061454848..d57740314 100644 --- a/driver/level2/gemv_thread.c +++ b/driver/level2/gemv_thread.c @@ -62,9 +62,36 @@ #endif #endif -#ifndef TRANSA +#ifndef thread_local +# if __STDC_VERSION__ >= 201112 && !defined __STDC_NO_THREADS__ +# define thread_local _Thread_local +# elif defined _WIN32 && ( \ + defined _MSC_VER || \ + defined __ICL || \ + defined __DMC__ || \ + defined __BORLANDC__ ) +# define thread_local __declspec(thread) +/* note that ICC (linux) and Clang are covered by __GNUC__ */ +# elif defined __GNUC__ || \ + defined __SUNPRO_C || \ + defined __xlC__ +# define thread_local __thread +# else +# define UNSAFE +#endif +#endif +#if defined USE_OPENMP +#undef UNSAFE +#endif + +#if !defined(TRANSA) && !defined(UNSAFE) #define Y_DUMMY_NUM 1024 +#if defined(USE_OPENMP) static 
FLOAT y_dummy[Y_DUMMY_NUM]; +#pragma omp threadprivate(y_dummy) +# else +static thread_local FLOAT y_dummy[Y_DUMMY_NUM]; +# endif #endif static int gemv_kernel(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *dummy1, FLOAT *buffer, BLASLONG pos){ @@ -105,10 +132,12 @@ static int gemv_kernel(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, F #ifdef TRANSA y += n_from * incy * COMPSIZE; #else +# ifndef UNSAFE //for split matrix row (n) direction and vector x of gemv_n x += n_from * incx * COMPSIZE; //store partial result for every thread y += (m_to - m_from) * 1 * COMPSIZE * pos; +# endif #endif } @@ -136,7 +165,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *alpha, FLOAT *a, BLASLONG lda, FLOAT *x BLASLONG width, i, num_cpu; -#ifndef TRANSA +#if !defined(TRANSA) && !defined(UNSAFE) int split_x=0; #endif @@ -212,7 +241,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *alpha, FLOAT *a, BLASLONG lda, FLOAT *x i -= width; } -#ifndef TRANSA +#if !defined(TRANSA) && !defined(UNSAFE) //try to split matrix on row direction and x. //Then, reduction. if (num_cpu < nthreads) { @@ -272,7 +301,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *alpha, FLOAT *a, BLASLONG lda, FLOAT *x exec_blas(num_cpu, queue); } -#ifndef TRANSA +#if !defined(TRANSA) && !defined(UNSAFE) if(split_x==1){ //reduction for(i=0; i 0){ -fprintf(stderr,"WARNING unrolling of the trmv_U loop may give wrong results\n"); + if (is > 0){ GEMV_N(is, min_i, 0, dp1, a + is * lda, lda, B + is, 1, diff --git a/driver/level3/Makefile b/driver/level3/Makefile index 352225206..e320092e3 100644 --- a/driver/level3/Makefile +++ b/driver/level3/Makefile @@ -362,7 +362,7 @@ cgemm_ct.$(SUFFIX) : gemm.c level3.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -UDOUBLE -DCOMPLEX -DCT $< -o $(@F) cgemm_cr.$(SUFFIX) : gemm.c level3.c ../../param.h - $(CC) $(CFLAGS) $(BLOCKS) -c -UDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(CFLAGS) $(BLOCKS) -c -UDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) cgemm_cc.$(SUFFIX) : gemm.c level3.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -UDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -410,7 +410,7 @@ zgemm_ct.$(SUFFIX) : gemm.c level3.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DDOUBLE -DCOMPLEX -DCT $< -o $(@F) zgemm_cr.$(SUFFIX) : gemm.c level3.c ../../param.h - $(CC) $(CFLAGS) $(BLOCKS) -c -DDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(CFLAGS) $(BLOCKS) -c -DDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) zgemm_cc.$(SUFFIX) : gemm.c level3.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -458,7 +458,7 @@ xgemm_ct.$(SUFFIX) : gemm.c level3.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DXDOUBLE -DCOMPLEX -DCT $< -o $(@F) xgemm_cr.$(SUFFIX) : gemm.c level3.c ../../param.h - $(CC) $(CFLAGS) $(BLOCKS) -c -DXDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(CFLAGS) $(BLOCKS) -c -DXDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) xgemm_cc.$(SUFFIX) : gemm.c level3.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DXDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -558,7 +558,7 @@ cgemm_thread_ct.$(SUFFIX) : gemm.c level3_thread.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -UDOUBLE -DCOMPLEX -DCT $< -o $(@F) cgemm_thread_cr.$(SUFFIX) : gemm.c level3_thread.c ../../param.h - $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -UDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -UDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) cgemm_thread_cc.$(SUFFIX) : gemm.c level3_thread.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -UDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -606,7 +606,7 @@ 
zgemm_thread_ct.$(SUFFIX) : gemm.c level3_thread.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DDOUBLE -DCOMPLEX -DCT $< -o $(@F) zgemm_thread_cr.$(SUFFIX) : gemm.c level3_thread.c ../../param.h - $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) zgemm_thread_cc.$(SUFFIX) : gemm.c level3_thread.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -654,7 +654,7 @@ xgemm_thread_ct.$(SUFFIX) : gemm.c level3_thread.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DXDOUBLE -DCOMPLEX -DCT $< -o $(@F) xgemm_thread_cr.$(SUFFIX) : gemm.c level3_thread.c ../../param.h - $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DXDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DXDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) xgemm_thread_cc.$(SUFFIX) : gemm.c level3_thread.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DXDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -1821,7 +1821,7 @@ cgemm3m_ct.$(SUFFIX) : gemm3m.c gemm3m_level3.c $(CC) $(CFLAGS) $(BLOCKS) -c -UDOUBLE -DCOMPLEX -DCT $< -o $(@F) cgemm3m_cr.$(SUFFIX) : gemm3m.c gemm3m_level3.c - $(CC) $(CFLAGS) $(BLOCKS) -c -UDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(CFLAGS) $(BLOCKS) -c -UDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) cgemm3m_cc.$(SUFFIX) : gemm3m.c gemm3m_level3.c $(CC) $(CFLAGS) $(BLOCKS) -c -UDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -1869,7 +1869,7 @@ zgemm3m_ct.$(SUFFIX) : gemm3m.c gemm3m_level3.c $(CC) $(CFLAGS) $(BLOCKS) -c -DDOUBLE -DCOMPLEX -DCT $< -o $(@F) zgemm3m_cr.$(SUFFIX) : gemm3m.c gemm3m_level3.c - $(CC) $(CFLAGS) $(BLOCKS) -c -DDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(CFLAGS) $(BLOCKS) -c -DDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) zgemm3m_cc.$(SUFFIX) : gemm3m.c gemm3m_level3.c $(CC) $(CFLAGS) $(BLOCKS) -c -DDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -1917,7 +1917,7 @@ xgemm3m_ct.$(SUFFIX) : gemm3m.c gemm3m_level3.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DXDOUBLE -DCOMPLEX -DCT $< -o $(@F) xgemm3m_cr.$(SUFFIX) : gemm3m.c gemm3m_level3.c ../../param.h - $(CC) $(CFLAGS) $(BLOCKS) -c -DXDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(CFLAGS) $(BLOCKS) -c -DXDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) xgemm3m_cc.$(SUFFIX) : gemm3m.c gemm3m_level3.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DXDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -1974,7 +1974,7 @@ cgemm3m_thread_ct.$(SUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -UDOUBLE -DCOMPLEX -DCT $< -o $(@F) cgemm3m_thread_cr.$(SUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h - $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -UDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -UDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) cgemm3m_thread_cc.$(SUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -UDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -2022,7 +2022,7 @@ zgemm3m_thread_ct.$(SUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DDOUBLE -DCOMPLEX -DCT $< -o $(@F) zgemm3m_thread_cr.$(SUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h - $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) zgemm3m_thread_cc.$(SUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h $(CC) $(CFLAGS) 
$(BLOCKS) -c -DTHREADED_LEVEL3 -DDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -2070,7 +2070,7 @@ xgemm3m_thread_ct.$(SUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DXDOUBLE -DCOMPLEX -DCT $< -o $(@F) xgemm3m_thread_cr.$(SUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h - $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DXDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DXDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) xgemm3m_thread_cc.$(SUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h $(CC) $(CFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DXDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -2731,7 +2731,7 @@ cgemm_ct.$(PSUFFIX) : gemm.c level3.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -UDOUBLE -DCOMPLEX -DCT $< -o $(@F) cgemm_cr.$(PSUFFIX) : gemm.c level3.c ../../param.h - $(CC) $(PFLAGS) $(BLOCKS) -c -UDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(PFLAGS) $(BLOCKS) -c -UDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) cgemm_cc.$(PSUFFIX) : gemm.c level3.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -UDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -2779,7 +2779,7 @@ zgemm_ct.$(PSUFFIX) : gemm.c level3.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DDOUBLE -DCOMPLEX -DCT $< -o $(@F) zgemm_cr.$(PSUFFIX) : gemm.c level3.c ../../param.h - $(CC) $(PFLAGS) $(BLOCKS) -c -DDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(PFLAGS) $(BLOCKS) -c -DDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) zgemm_cc.$(PSUFFIX) : gemm.c level3.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -2827,7 +2827,7 @@ xgemm_ct.$(PSUFFIX) : gemm.c level3.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DXDOUBLE -DCOMPLEX -DCT $< -o $(@F) xgemm_cr.$(PSUFFIX) : gemm.c level3.c ../../param.h - $(CC) $(PFLAGS) $(BLOCKS) -c -DXDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(PFLAGS) $(BLOCKS) -c -DXDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) xgemm_cc.$(PSUFFIX) : gemm.c level3.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DXDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -2927,7 +2927,7 @@ cgemm_thread_ct.$(PSUFFIX) : gemm.c level3_thread.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -UDOUBLE -DCOMPLEX -DCT $< -o $(@F) cgemm_thread_cr.$(PSUFFIX) : gemm.c level3_thread.c ../../param.h - $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -UDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -UDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) cgemm_thread_cc.$(PSUFFIX) : gemm.c level3_thread.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -UDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -2975,7 +2975,7 @@ zgemm_thread_ct.$(PSUFFIX) : gemm.c level3_thread.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DDOUBLE -DCOMPLEX -DCT $< -o $(@F) zgemm_thread_cr.$(PSUFFIX) : gemm.c level3_thread.c ../../param.h - $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) zgemm_thread_cc.$(PSUFFIX) : gemm.c level3_thread.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -3023,7 +3023,7 @@ xgemm_thread_ct.$(PSUFFIX) : gemm.c level3_thread.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DXDOUBLE -DCOMPLEX -DCT $< -o $(@F) xgemm_thread_cr.$(PSUFFIX) : gemm.c level3_thread.c ../../param.h - $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DXDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DXDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) 
xgemm_thread_cc.$(PSUFFIX) : gemm.c level3_thread.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DXDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -4190,7 +4190,7 @@ cgemm3m_ct.$(PSUFFIX) : gemm3m.c gemm3m_level3.c $(CC) $(PFLAGS) $(BLOCKS) -c -UDOUBLE -DCOMPLEX -DCT $< -o $(@F) cgemm3m_cr.$(PSUFFIX) : gemm3m.c gemm3m_level3.c - $(CC) $(PFLAGS) $(BLOCKS) -c -UDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(PFLAGS) $(BLOCKS) -c -UDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) cgemm3m_cc.$(PSUFFIX) : gemm3m.c gemm3m_level3.c $(CC) $(PFLAGS) $(BLOCKS) -c -UDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -4238,7 +4238,7 @@ zgemm3m_ct.$(PSUFFIX) : gemm3m.c gemm3m_level3.c $(CC) $(PFLAGS) $(BLOCKS) -c -DDOUBLE -DCOMPLEX -DCT $< -o $(@F) zgemm3m_cr.$(PSUFFIX) : gemm3m.c gemm3m_level3.c - $(CC) $(PFLAGS) $(BLOCKS) -c -DDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(PFLAGS) $(BLOCKS) -c -DDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) zgemm3m_cc.$(PSUFFIX) : gemm3m.c gemm3m_level3.c $(CC) $(PFLAGS) $(BLOCKS) -c -DDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -4286,7 +4286,7 @@ xgemm3m_ct.$(PSUFFIX) : gemm3m.c gemm3m_level3.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DXDOUBLE -DCOMPLEX -DCT $< -o $(@F) xgemm3m_cr.$(PSUFFIX) : gemm3m.c gemm3m_level3.c ../../param.h - $(CC) $(PFLAGS) $(BLOCKS) -c -DXDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(PFLAGS) $(BLOCKS) -c -DXDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) xgemm3m_cc.$(PSUFFIX) : gemm3m.c gemm3m_level3.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DXDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -4343,7 +4343,7 @@ cgemm3m_thread_ct.$(PSUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -UDOUBLE -DCOMPLEX -DCT $< -o $(@F) cgemm3m_thread_cr.$(PSUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h - $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -UDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -UDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) cgemm3m_thread_cc.$(PSUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -UDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -4391,7 +4391,7 @@ zgemm3m_thread_ct.$(PSUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DDOUBLE -DCOMPLEX -DCT $< -o $(@F) zgemm3m_thread_cr.$(PSUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h - $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) zgemm3m_thread_cc.$(PSUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DDOUBLE -DCOMPLEX -DCC $< -o $(@F) @@ -4439,7 +4439,7 @@ xgemm3m_thread_ct.$(PSUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DXDOUBLE -DCOMPLEX -DCT $< -o $(@F) xgemm3m_thread_cr.$(PSUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h - $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DXDOUBLE -DCOMPLEX -DCR $< -o $(@F) + $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DXDOUBLE -DCOMPLEX -DCR=CR $< -o $(@F) xgemm3m_thread_cc.$(PSUFFIX) : gemm3m.c level3_gemm3m_thread.c ../../param.h $(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DXDOUBLE -DCOMPLEX -DCC $< -o $(@F) diff --git a/driver/level3/level3_gemm3m_thread.c b/driver/level3/level3_gemm3m_thread.c index bfd991ffb..4903aa5bd 100644 --- a/driver/level3/level3_gemm3m_thread.c +++ b/driver/level3/level3_gemm3m_thread.c @@ -91,7 +91,12 @@ #endif typedef struct { - volatile 
BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE]; +#if __STDC_VERSION__ >= 201112L + _Atomic +#else + volatile +#endif + BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE]; } job_t; diff --git a/driver/level3/level3_syrk_threaded.c b/driver/level3/level3_syrk_threaded.c index 65002ae46..574f825b0 100644 --- a/driver/level3/level3_syrk_threaded.c +++ b/driver/level3/level3_syrk_threaded.c @@ -67,7 +67,12 @@ #endif typedef struct { - volatile BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE]; +#if __STDC_VERSION__ >= 201112L +_Atomic +#else + volatile +#endif + BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE]; } job_t; diff --git a/driver/level3/level3_thread.c b/driver/level3/level3_thread.c index a1ed8bbb1..cfbff7554 100644 --- a/driver/level3/level3_thread.c +++ b/driver/level3/level3_thread.c @@ -48,6 +48,10 @@ #define SWITCH_RATIO 2 #endif +#ifndef GEMM_PREFERED_SIZE +#define GEMM_PREFERED_SIZE 1 +#endif + //The array of job_t may overflow the stack. //Instead, use malloc to alloc job_t. #if MAX_CPU_NUMBER > BLAS3_MEM_ALLOC_THRESHOLD @@ -91,7 +95,8 @@ #endif typedef struct { - volatile BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE]; + volatile + BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE]; } job_t; @@ -346,7 +351,7 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, /* Make sure if no one is using workspace */ START_RPCC(); for (i = 0; i < args -> nthreads; i++) - while (job[mypos].working[i][CACHE_LINE_SIZE * bufferside]) {YIELDING;}; + while (job[mypos].working[i][CACHE_LINE_SIZE * bufferside]) {YIELDING;MB;}; STOP_RPCC(waiting1); #if defined(FUSED_GEMM) && !defined(TIMING) @@ -408,7 +413,7 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, /* Wait until other region of B is initialized */ START_RPCC(); - while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;}; + while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;MB;}; STOP_RPCC(waiting2); /* Apply kernel with local region of A and part of other region of B */ @@ -426,6 +431,7 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, /* Clear synchronization flag if this thread is done with other region of B */ if (m_to - m_from == min_i) { job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0; + WMB; } } } while (current != mypos); @@ -487,7 +493,7 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, START_RPCC(); for (i = 0; i < args -> nthreads; i++) { for (js = 0; js < DIVIDE_RATE; js++) { - while (job[mypos].working[i][CACHE_LINE_SIZE * js] ) {YIELDING;}; + while (job[mypos].working[i][CACHE_LINE_SIZE * js] ) {YIELDING;MB;}; } } STOP_RPCC(waiting3); @@ -508,10 +514,29 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, return 0; } +static int round_up(int remainder, int width, int multiple) +{ + if (multiple > remainder || width <= multiple) + return width; + width = (width + multiple - 1) / multiple; + width = width * multiple; + return width; +} + + static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG nthreads_m, BLASLONG nthreads_n) { +#ifndef USE_OPENMP +#ifndef OS_WINDOWS +static pthread_mutex_t level3_lock = PTHREAD_MUTEX_INITIALIZER; +#else +CRITICAL_SECTION level3_lock; +InitializeCriticalSection((PCRITICAL_SECTION)&level3_lock); +#endif +#endif + blas_arg_t 
newarg; #ifndef USE_ALLOC_HEAP @@ -552,6 +577,14 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG #endif #endif +#ifndef USE_OPENMP +#ifndef OS_WINDOWS +pthread_mutex_lock(&level3_lock); +#else +EnterCriticalSection((PCRITICAL_SECTION)&level3_lock); +#endif +#endif + #ifdef USE_ALLOC_HEAP /* Dynamically allocate workspace */ job = (job_t*)malloc(MAX_CPU_NUMBER * sizeof(job_t)); @@ -599,9 +632,14 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG num_parts = 0; while (m > 0){ width = blas_quickdivide(m + nthreads_m - num_parts - 1, nthreads_m - num_parts); + + width = round_up(m, width, GEMM_PREFERED_SIZE); + m -= width; + if (m < 0) width = width + m; range_M[num_parts + 1] = range_M[num_parts] + width; + num_parts ++; } for (i = num_parts; i < MAX_CPU_NUMBER; i++) { @@ -643,9 +681,12 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG if (width < SWITCH_RATIO) { width = SWITCH_RATIO; } + width = round_up(n, width, GEMM_PREFERED_SIZE); + n -= width; if (n < 0) width = width + n; range_N[num_parts + 1] = range_N[num_parts] + width; + num_parts ++; } for (j = num_parts; j < MAX_CPU_NUMBER; j++) { @@ -653,8 +694,8 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG } /* Clear synchronization flags */ - for (i = 0; i < MAX_CPU_NUMBER; i++) { - for (j = 0; j < MAX_CPU_NUMBER; j++) { + for (i = 0; i < nthreads; i++) { + for (j = 0; j < nthreads; j++) { for (k = 0; k < DIVIDE_RATE; k++) { job[i].working[j][CACHE_LINE_SIZE * k] = 0; } @@ -669,6 +710,14 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG free(job); #endif +#ifndef USE_OPENMP +#ifndef OS_WINDOWS + pthread_mutex_unlock(&level3_lock); +#else + LeaveCriticalSection((PCRITICAL_SECTION)&level3_lock); +#endif +#endif + return 0; } diff --git a/driver/level3/syrk_thread.c b/driver/level3/syrk_thread.c index 5f40853dc..b26d363c4 100644 --- a/driver/level3/syrk_thread.c +++ b/driver/level3/syrk_thread.c @@ -48,7 +48,7 @@ int CNAME(int mode, blas_arg_t *arg, BLASLONG *range_m, BLASLONG *range_n, int ( BLASLONG width, i; BLASLONG n_from, n_to; - double dnum, nf, nt, di; + double dnum, nf, nt, di, dinum; int num_cpu; int mask = 0; @@ -109,7 +109,11 @@ int CNAME(int mode, blas_arg_t *arg, BLASLONG *range_m, BLASLONG *range_n, int ( if (nthreads - num_cpu > 1) { di = (double)i; - width = (BLASLONG)(( sqrt(di * di + dnum) - di + mask)/(mask+1)) * (mask+1); + dinum = di * di +dnum; + if (dinum <0) + width = (BLASLONG)(( - di + mask)/(mask+1)) * (mask+1); + else + width = (BLASLONG)(( sqrt(dinum) - di + mask)/(mask+1)) * (mask+1); if ((width <= 0) || (width > n_to - i)) width = n_to - i; @@ -136,9 +140,7 @@ int CNAME(int mode, blas_arg_t *arg, BLASLONG *range_m, BLASLONG *range_n, int ( nf = (double)(arg -> n - n_from); nt = (double)(arg -> n - n_to); - dnum = (nt * nt - nf * nf) / (double)nthreads; - num_cpu = 0; range[0] = n_from; @@ -149,8 +151,11 @@ int CNAME(int mode, blas_arg_t *arg, BLASLONG *range_m, BLASLONG *range_n, int ( if (nthreads - num_cpu > 1) { di = (double)(arg -> n - i); - width = ((BLASLONG)((-sqrt(di * di + dnum) + di) + mask)/(mask+1)) * (mask+1); - + dinum = di * di + dnum; + if (dinum<0) + width = ((BLASLONG)(di + mask)/(mask+1)) * (mask+1); + else + width = ((BLASLONG)((-sqrt(dinum) + di) + mask)/(mask+1)) * (mask+1); if ((width <= 0) || (width > n_to - i)) width = n_to - i; } else { diff --git a/driver/others/CMakeLists.txt b/driver/others/CMakeLists.txt index e20b14e79..a07e00b3b 100644 --- 
a/driver/others/CMakeLists.txt +++ b/driver/others/CMakeLists.txt @@ -47,7 +47,11 @@ GenerateNamedObjects("abs.c" "DOUBLE" "z_abs" 0 "" "" 1) GenerateNamedObjects("openblas_get_config.c;openblas_get_parallel.c" "" "" 0 "" "" 1) if (DYNAMIC_ARCH) - list(APPEND COMMON_SOURCES dynamic.c) + if (ARM64) + list(APPEND COMMON_SOURCES dynamic_arm64.c) + else () + list(APPEND COMMON_SOURCES dynamic.c) + endif () else () list(APPEND COMMON_SOURCES parameter.c) endif () diff --git a/driver/others/Makefile b/driver/others/Makefile index e61ba7bc8..3dc2e7c1b 100644 --- a/driver/others/Makefile +++ b/driver/others/Makefile @@ -15,7 +15,11 @@ endif # COMMONOBJS += info.$(SUFFIX) ifeq ($(DYNAMIC_ARCH), 1) +ifeq ($(ARCH),arm64) +COMMONOBJS += dynamic_arm64.$(SUFFIX) +else COMMONOBJS += dynamic.$(SUFFIX) +endif else COMMONOBJS += parameter.$(SUFFIX) endif @@ -71,7 +75,11 @@ BLAS_SERVER = blas_server.c endif ifeq ($(DYNAMIC_ARCH), 1) +ifeq ($(ARCH),arm64) +HPLOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) dynamic_arm64.$(SUFFIX) +else HPLOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) dynamic.$(SUFFIX) +endif else HPLOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) parameter.$(SUFFIX) endif diff --git a/driver/others/blas_server.c b/driver/others/blas_server.c index 2e0fe190d..e5db1804f 100644 --- a/driver/others/blas_server.c +++ b/driver/others/blas_server.c @@ -70,7 +70,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /*********************************************************************/ #include "common.h" -#if defined(OS_LINUX) || defined(OS_NETBSD) || defined(OS_DARWIN) || defined(OS_ANDROID) || defined(OS_SUNOS) || defined(OS_FREEBSD) +#if defined(OS_LINUX) || defined(OS_NETBSD) || defined(OS_DARWIN) || defined(OS_ANDROID) || defined(OS_SUNOS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_HAIKU) #include #include #include @@ -582,7 +582,7 @@ int blas_thread_init(void){ if(ret!=0){ struct rlimit rlim; const char *msg = strerror(ret); - fprintf(STDERR, "OpenBLAS blas_thread_init: pthread_create: %s\n", msg); + fprintf(STDERR, "OpenBLAS blas_thread_init: pthread_create failed for thread %ld of %ld: %s\n", i+1,blas_num_threads,msg); #ifdef RLIMIT_NPROC if(0 == getrlimit(RLIMIT_NPROC, &rlim)) { fprintf(STDERR, "OpenBLAS blas_thread_init: RLIMIT_NPROC " @@ -850,6 +850,11 @@ void goto_set_num_threads(int num_threads) { long i; +#ifdef SMP_SERVER + // Handle lazy re-init of the thread-pool after a POSIX fork + if (unlikely(blas_server_avail == 0)) blas_thread_init(); +#endif + if (num_threads < 1) num_threads = blas_num_threads; #ifndef NO_AFFINITY diff --git a/driver/others/blas_server_omp.c b/driver/others/blas_server_omp.c index 8d62a8125..4255852c8 100644 --- a/driver/others/blas_server_omp.c +++ b/driver/others/blas_server_omp.c @@ -36,6 +36,7 @@ /* or implied, of The University of Texas at Austin. 
 */
/*********************************************************************/

+#include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
 //#include <sys/mman.h>
@@ -47,13 +48,22 @@

 #else

+#ifndef OMP_SCHED
+#define OMP_SCHED static
+#endif
+
 int blas_server_avail = 0;

-static void * blas_thread_buffer[MAX_CPU_NUMBER];
+static void * blas_thread_buffer[MAX_PARALLEL_NUMBER][MAX_CPU_NUMBER];
+#if __STDC_VERSION__ >= 201112L
+static atomic_bool blas_buffer_inuse[MAX_PARALLEL_NUMBER];
+#else
+static _Bool blas_buffer_inuse[MAX_PARALLEL_NUMBER];
+#endif

 void goto_set_num_threads(int num_threads) {

-  int i=0;
+  int i=0, j=0;

   if (num_threads < 1) num_threads = blas_num_threads;

@@ -68,15 +78,17 @@ void goto_set_num_threads(int num_threads) {

   omp_set_num_threads(blas_cpu_number);

   //adjust buffer for each thread
-  for(i=0; i mode & BLAS_PTHREAD) == 0)) {

     pos = omp_get_thread_num();
-    buffer = blas_thread_buffer[pos];
+    buffer = blas_thread_buffer[buf_index][pos];

     //fallback
     if(buffer==NULL) {
@@ -291,7 +307,7 @@ static void exec_threads(blas_queue_t *queue){

 int exec_blas(BLASLONG num, blas_queue_t *queue){

-  BLASLONG i;
+  BLASLONG i, buf_index;

   if ((num <= 0) || (queue == NULL)) return 0;

@@ -302,16 +318,39 @@ int exec_blas(BLASLONG num, blas_queue_t *queue){
   }
 #endif

-#pragma omp parallel for schedule(static)
+  while(true) {
+    for(i=0; i < MAX_PARALLEL_NUMBER; i++) {
+#if __STDC_VERSION__ >= 201112L
+      _Bool inuse = false;
+      if(atomic_compare_exchange_weak(&blas_buffer_inuse[i], &inuse, true)) {
+#else
+      if(blas_buffer_inuse[i] == false) {
+        blas_buffer_inuse[i] = true;
+#endif
+        buf_index = i;
+        break;
+      }
+    }
+    if(i != MAX_PARALLEL_NUMBER)
+      break;
+  }
+
+#pragma omp parallel for schedule(OMP_SCHED)
   for (i = 0; i < num; i ++) {

 #ifndef USE_SIMPLE_THREADED_LEVEL3
     queue[i].position = i;
 #endif

-    exec_threads(&queue[i]);
+    exec_threads(&queue[i], buf_index);
   }

+#if __STDC_VERSION__ >= 201112L
+  atomic_store(&blas_buffer_inuse[buf_index], false);
+#else
+  blas_buffer_inuse[buf_index] = false;
+#endif
+
   return 0;
 }
diff --git a/driver/others/blas_server_win32.c b/driver/others/blas_server_win32.c
index cde8ca793..bae344c59 100644
--- a/driver/others/blas_server_win32.c
+++ b/driver/others/blas_server_win32.c
@@ -40,6 +40,14 @@
 #include <windows.h>
 #include "common.h"

+#if defined(OS_CYGWIN_NT) && !defined(unlikely)
+#ifdef __GNUC__
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#else
+#define unlikely(x) (x)
+#endif
+#endif
+
 /* This is a thread implementation for Win32 lazy implementation */

 /* Thread server common infomation */
@@ -53,7 +61,7 @@ typedef struct{

 } blas_pool_t;

-/* We need this grobal for cheking if initialization is finished. */
+/* We need this global for checking if initialization is finished. */
 int blas_server_avail = 0;

 /* Local Variables */
@@ -340,6 +348,11 @@ int blas_thread_init(void){

 int exec_blas_async(BLASLONG pos, blas_queue_t *queue){

+#if defined(SMP_SERVER) && defined(OS_CYGWIN_NT)
+  // Handle lazy re-init of the thread-pool after a POSIX fork
+  if (unlikely(blas_server_avail == 0)) blas_thread_init();
+#endif
+
   blas_queue_t *current;

   current = queue;
@@ -405,6 +418,11 @@ int exec_blas_async_wait(BLASLONG num, blas_queue_t *queue){

 /* Execute Threads */
 int exec_blas(BLASLONG num, blas_queue_t *queue){

+#if defined(SMP_SERVER) && defined(OS_CYGWIN_NT)
+  // Handle lazy re-init of the thread-pool after a POSIX fork
+  if (unlikely(blas_server_avail == 0)) blas_thread_init();
+#endif
+
 #ifndef ALL_THREADED
   int (*routine)(blas_arg_t *, void *, void *, double *, double *, BLASLONG);
 #endif
@@ -460,7 +478,12 @@ int BLASFUNC(blas_thread_shutdown)(void){

 void goto_set_num_threads(int num_threads)
 {
-  long i;
+  long i;
+
+#if defined(SMP_SERVER) && defined(OS_CYGWIN_NT)
+  // Handle lazy re-init of the thread-pool after a POSIX fork
+  if (unlikely(blas_server_avail == 0)) blas_thread_init();
+#endif

   if (num_threads < 1) num_threads = blas_cpu_number;

diff --git a/driver/others/dynamic.c b/driver/others/dynamic.c
index fbf7cd40e..99c9254ac 100644
--- a/driver/others/dynamic.c
+++ b/driver/others/dynamic.c
@@ -49,6 +49,167 @@
 #define EXTERN
 #endif

+#ifdef DYNAMIC_LIST
+extern gotoblas_t gotoblas_PRESCOTT;
+
+#ifdef DYN_ATHLON
+extern gotoblas_t gotoblas_ATHLON;
+#else
+#define gotoblas_ATHLON gotoblas_PRESCOTT
+#endif
+#ifdef DYN_KATMAI
+extern gotoblas_t gotoblas_KATMAI;
+#else
+#define gotoblas_KATMAI gotoblas_PRESCOTT
+#endif
+#ifdef DYN_BANIAS
+extern gotoblas_t gotoblas_BANIAS;
+#else
+#define gotoblas_BANIAS gotoblas_PRESCOTT
+#endif
+#ifdef DYN_COPPERMINE
+extern gotoblas_t gotoblas_COPPERMINE;
+#else
+#define gotoblas_COPPERMINE gotoblas_PRESCOTT
+#endif
+#ifdef DYN_NORTHWOOD
+extern gotoblas_t gotoblas_NORTHWOOD;
+#else
+#define gotoblas_NORTHWOOD gotoblas_PRESCOTT
+#endif
+#ifdef DYN_CORE2
+extern gotoblas_t gotoblas_CORE2;
+#else
+#define gotoblas_CORE2 gotoblas_PRESCOTT
+#endif
+#ifdef DYN_NEHALEM
+extern gotoblas_t gotoblas_NEHALEM;
+#else
+#define gotoblas_NEHALEM gotoblas_PRESCOTT
+#endif
+#ifdef DYN_BARCELONA
+extern gotoblas_t gotoblas_BARCELONA;
+#elif defined(DYN_NEHALEM)
+#define gotoblas_BARCELONA gotoblas_NEHALEM
+#else
+#define gotoblas_BARCELONA gotoblas_PRESCOTT
+#endif
+#ifdef DYN_ATOM
+extern gotoblas_t gotoblas_ATOM;
+#elif defined(DYN_NEHALEM)
+#define gotoblas_ATOM gotoblas_NEHALEM
+#else
+#define gotoblas_ATOM gotoblas_PRESCOTT
+#endif
+#ifdef DYN_NANO
+extern gotoblas_t gotoblas_NANO;
+#else
+#define gotoblas_NANO gotoblas_PRESCOTT
+#endif
+#ifdef DYN_PENRYN
+extern gotoblas_t gotoblas_PENRYN;
+#else
+#define gotoblas_PENRYN gotoblas_PRESCOTT
+#endif
+#ifdef DYN_DUNNINGTON
+extern gotoblas_t gotoblas_DUNNINGTON;
+#else
+#define gotoblas_DUNNINGTON gotoblas_PRESCOTT
+#endif
+#ifdef DYN_OPTERON
+extern gotoblas_t gotoblas_OPTERON;
+#else
+#define gotoblas_OPTERON gotoblas_PRESCOTT
+#endif
+#ifdef DYN_OPTERON_SSE3
+extern gotoblas_t gotoblas_OPTERON_SSE3;
+#else
+#define gotoblas_OPTERON_SSE3 gotoblas_PRESCOTT
+#endif
+#ifdef DYN_BOBCAT
+extern gotoblas_t gotoblas_BOBCAT;
+#elif defined(DYN_NEHALEM)
+#define gotoblas_BOBCAT gotoblas_NEHALEM
+#else
+#define gotoblas_BOBCAT gotoblas_PRESCOTT
+#endif
+#ifdef DYN_SANDYBRIDGE
+extern gotoblas_t gotoblas_SANDYBRIDGE;
+#elif defined(DYN_NEHALEM)
+#define gotoblas_SANDYBRIDGE gotoblas_NEHALEM
+#else
+#define gotoblas_SANDYBRIDGE gotoblas_PRESCOTT +#endif +#ifdef DYN_BULLDOZER +extern gotoblas_t gotoblas_BULLDOZER; +#elif defined(DYN_SANDYBRIDGE) +#define gotoblas_BULLDOZER gotoblas_SANDYBRIDGE +#elif defined(DYN_NEHALEM) +#define gotoblas_BULLDOZER gotoblas_NEHALEM +#else +#define gotoblas_BULLDOZER gotoblas_PRESCOTT +#endif +#ifdef DYN_PILEDRIVER +extern gotoblas_t gotoblas_PILEDRIVER; +#elif defined(DYN_SANDYBRIDGE) +#define gotoblas_PILEDRIVER gotoblas_SANDYBRIDGE +#elif defined(DYN_NEHALEM) +#define gotoblas_PILEDRIVER gotoblas_NEHALEM +#else +#define gotoblas_PILEDRIVER gotoblas_PRESCOTT +#endif +#ifdef DYN_STEAMROLLER +extern gotoblas_t gotoblas_STEAMROLLER; +#elif defined(DYN_SANDYBRIDGE) +#define gotoblas_STEAMROLLER gotoblas_SANDYBRIDGE +#elif defined(DYN_NEHALEM) +#define gotoblas_STEAMROLLER gotoblas_NEHALEM +#else +#define gotoblas_STEAMROLLER gotoblas_PRESCOTT +#endif +#ifdef DYN_EXCAVATOR +extern gotoblas_t gotoblas_EXCAVATOR; +#elif defined(DYN_SANDYBRIDGE) +#define gotoblas_EXCAVATOR gotoblas_SANDYBRIDGE +#elif defined(DYN_NEHALEM) +#define gotoblas_EXCAVATOR gotoblas_NEHALEM +#else +#define gotoblas_EXCAVATOR gotoblas_PRESCOTT +#endif +#ifdef DYN_HASWELL +extern gotoblas_t gotoblas_HASWELL; +#elif defined(DYN_SANDYBRIDGE) +#define gotoblas_HASWELL gotoblas_SANDYBRIDGE +#elif defined(DYN_NEHALEM) +#define gotoblas_HASWELL gotoblas_NEHALEM +#else +#define gotoblas_HASWELL gotoblas_PRESCOTT +#endif +#ifdef DYN_ZEN +extern gotoblas_t gotoblas_ZEN; +#elif defined(DYN_HASWELL) +#define gotoblas_ZEN gotoblas_HASWELL +#elif defined(DYN_SANDYBRIDGE) +#define gotoblas_ZEN gotoblas_SANDYBRIDGE +#elif defined(DYN_NEHALEM) +#define gotoblas_ZEN gotoblas_NEHALEM +#else +#define gotoblas_ZEN gotoblas_PRESCOTT +#endif +#ifdef DYN_SKYLAKEX +extern gotoblas_t gotoblas_SKYLAKEX; +#elif defined(DYN_HASWELL) +#define gotoblas_SKYLAKEX gotoblas_HASWELL +#elif defined(DYN_SANDYBRIDGE) +#define gotoblas_SKYLAKEX gotoblas_SANDYBRIDGE +#elif defined(DYN_NEHALEM) +#define gotoblas_SKYLAKEX gotoblas_NEHALEM +#else +#define gotoblas_SKYLAKEX gotoblas_PRESCOTT +#endif + + +#else // not DYNAMIC_LIST EXTERN gotoblas_t gotoblas_KATMAI; EXTERN gotoblas_t gotoblas_COPPERMINE; EXTERN gotoblas_t gotoblas_NORTHWOOD; @@ -56,16 +217,27 @@ EXTERN gotoblas_t gotoblas_BANIAS; EXTERN gotoblas_t gotoblas_ATHLON; extern gotoblas_t gotoblas_PRESCOTT; +extern gotoblas_t gotoblas_CORE2; +extern gotoblas_t gotoblas_NEHALEM; +extern gotoblas_t gotoblas_BARCELONA; +#ifdef DYNAMIC_OLDER extern gotoblas_t gotoblas_ATOM; extern gotoblas_t gotoblas_NANO; -extern gotoblas_t gotoblas_CORE2; extern gotoblas_t gotoblas_PENRYN; extern gotoblas_t gotoblas_DUNNINGTON; -extern gotoblas_t gotoblas_NEHALEM; extern gotoblas_t gotoblas_OPTERON; extern gotoblas_t gotoblas_OPTERON_SSE3; -extern gotoblas_t gotoblas_BARCELONA; extern gotoblas_t gotoblas_BOBCAT; +#else +#define gotoblas_ATOM gotoblas_NEHALEM +#define gotoblas_NANO gotoblas_NEHALEM +#define gotoblas_PENRYN gotoblas_CORE2 +#define gotoblas_DUNNINGTON gotoblas_CORE2 +#define gotoblas_OPTERON gotoblas_CORE2 +#define gotoblas_OPTERON_SSE3 gotoblas_CORE2 +#define gotoblas_BOBCAT gotoblas_CORE2 +#endif + #ifndef NO_AVX extern gotoblas_t gotoblas_SANDYBRIDGE; extern gotoblas_t gotoblas_BULLDOZER; @@ -74,15 +246,22 @@ extern gotoblas_t gotoblas_STEAMROLLER; extern gotoblas_t gotoblas_EXCAVATOR; #ifdef NO_AVX2 #define gotoblas_HASWELL gotoblas_SANDYBRIDGE +#define gotoblas_SKYLAKEX gotoblas_SANDYBRIDGE #define gotoblas_ZEN gotoblas_SANDYBRIDGE #else extern gotoblas_t 
gotoblas_HASWELL; extern gotoblas_t gotoblas_ZEN; +#ifndef NO_AVX512 +extern gotoblas_t gotoblas_SKYLAKEX; +#else +#define gotoblas_SKYLAKEX gotoblas_HASWELL +#endif #endif #else //Use NEHALEM kernels for sandy bridge #define gotoblas_SANDYBRIDGE gotoblas_NEHALEM #define gotoblas_HASWELL gotoblas_NEHALEM +#define gotoblas_SKYLAKEX gotoblas_NEHALEM #define gotoblas_BULLDOZER gotoblas_BARCELONA #define gotoblas_PILEDRIVER gotoblas_BARCELONA #define gotoblas_STEAMROLLER gotoblas_BARCELONA @@ -90,10 +269,12 @@ extern gotoblas_t gotoblas_ZEN; #define gotoblas_ZEN gotoblas_BARCELONA #endif +#endif // DYNAMIC_LIST #define VENDOR_INTEL 1 #define VENDOR_AMD 2 #define VENDOR_CENTAUR 3 +#define VENDOR_HYGON 4 #define VENDOR_UNKNOWN 99 #define BITMASK(a, b, c) ((((a) >> (b)) & (c))) @@ -124,9 +305,49 @@ int support_avx(){ #endif } +int support_avx2(){ +#ifndef NO_AVX2 + int eax, ebx, ecx=0, edx; + int ret=0; + + if (!support_avx()) + return 0; + cpuid(7, &eax, &ebx, &ecx, &edx); + if((ebx & (1<<7)) != 0) + ret=1; //OS supports AVX2 + return ret; +#else + return 0; +#endif +} + +int support_avx512(){ +#ifndef NO_AVX512 + int eax, ebx, ecx, edx; + int ret=0; + + if (!support_avx()) + return 0; + cpuid(7, &eax, &ebx, &ecx, &edx); + if((ebx & (1<<7)) != 1){ + ret=0; //OS does not even support AVX2 + } + if((ebx & (1<<31)) != 0){ + xgetbv(0, &eax, &edx); + if((eax & 0xe0) == 0xe0) + ret=1; //OS supports AVX512VL + } + return ret; +#else + return 0; +#endif +} + extern void openblas_warning(int verbose, const char * msg); #define FALLBACK_VERBOSE 1 #define NEHALEM_FALLBACK "OpenBLAS : Your OS does not support AVX instructions. OpenBLAS is using Nehalem kernels as a fallback, which may give poorer performance.\n" +#define SANDYBRIDGE_FALLBACK "OpenBLAS : Your OS does not support AVX2 instructions. OpenBLAS is using Sandybridge kernels as a fallback, which may give poorer performance.\n" +#define HASWELL_FALLBACK "OpenBLAS : Your OS does not support AVX512VL instructions. OpenBLAS is using Haswell kernels as a fallback, which may give poorer performance.\n" #define BARCELONA_FALLBACK "OpenBLAS : Your OS does not support AVX instructions. OpenBLAS is using Barcelona kernels as a fallback, which may give poorer performance.\n" static int get_vendor(void){ @@ -149,6 +370,7 @@ static int get_vendor(void){ if (!strcmp(vendor.vchar, "GenuineIntel")) return VENDOR_INTEL; if (!strcmp(vendor.vchar, "AuthenticAMD")) return VENDOR_AMD; if (!strcmp(vendor.vchar, "CentaurHauls")) return VENDOR_CENTAUR; + if (!strcmp(vendor.vchar, "HygonGenuine")) return VENDOR_HYGON; if ((eax == 0) || ((eax & 0x500) != 0)) return VENDOR_INTEL; @@ -223,18 +445,24 @@ static gotoblas_t *get_coretype(void){ } //Intel Haswell if (model == 12 || model == 15) { - if(support_avx()) + if(support_avx2()) return &gotoblas_HASWELL; - else{ + if(support_avx()) { + openblas_warning(FALLBACK_VERBOSE, SANDYBRIDGE_FALLBACK); + return &gotoblas_SANDYBRIDGE; + } else { openblas_warning(FALLBACK_VERBOSE, NEHALEM_FALLBACK); return &gotoblas_NEHALEM; //OS doesn't support AVX. Use old kernels. } } //Intel Broadwell if (model == 13) { - if(support_avx()) + if(support_avx2()) return &gotoblas_HASWELL; - else{ + if(support_avx()) { + openblas_warning(FALLBACK_VERBOSE, SANDYBRIDGE_FALLBACK); + return &gotoblas_SANDYBRIDGE; + } else { openblas_warning(FALLBACK_VERBOSE, NEHALEM_FALLBACK); return &gotoblas_NEHALEM; //OS doesn't support AVX. Use old kernels. 
} @@ -244,27 +472,36 @@ static gotoblas_t *get_coretype(void){ case 4: //Intel Haswell if (model == 5 || model == 6) { - if(support_avx()) + if(support_avx2()) return &gotoblas_HASWELL; - else{ + if(support_avx()) { + openblas_warning(FALLBACK_VERBOSE, SANDYBRIDGE_FALLBACK); + return &gotoblas_SANDYBRIDGE; + } else { openblas_warning(FALLBACK_VERBOSE, NEHALEM_FALLBACK); return &gotoblas_NEHALEM; //OS doesn't support AVX. Use old kernels. } } //Intel Broadwell if (model == 7 || model == 15) { - if(support_avx()) + if(support_avx2()) return &gotoblas_HASWELL; - else{ + if(support_avx()) { + openblas_warning(FALLBACK_VERBOSE, SANDYBRIDGE_FALLBACK); + return &gotoblas_SANDYBRIDGE; + } else { openblas_warning(FALLBACK_VERBOSE, NEHALEM_FALLBACK); return &gotoblas_NEHALEM; //OS doesn't support AVX. Use old kernels. } } //Intel Skylake if (model == 14) { - if(support_avx()) + if(support_avx2()) return &gotoblas_HASWELL; - else{ + if(support_avx()) { + openblas_warning(FALLBACK_VERBOSE, SANDYBRIDGE_FALLBACK); + return &gotoblas_SANDYBRIDGE; + } else { openblas_warning(FALLBACK_VERBOSE, NEHALEM_FALLBACK); return &gotoblas_NEHALEM; //OS doesn't support AVX. Use old kernels. } @@ -277,27 +514,54 @@ static gotoblas_t *get_coretype(void){ case 5: //Intel Broadwell if (model == 6) { - if(support_avx()) + if(support_avx2()) return &gotoblas_HASWELL; - else{ + if(support_avx()) { + openblas_warning(FALLBACK_VERBOSE, SANDYBRIDGE_FALLBACK); + return &gotoblas_SANDYBRIDGE; + } else { openblas_warning(FALLBACK_VERBOSE, NEHALEM_FALLBACK); return &gotoblas_NEHALEM; //OS doesn't support AVX. Use old kernels. } } - //Intel Skylake - if (model == 14 || model == 5) { - if(support_avx()) + if (model == 5) { + // Intel Skylake X + if (support_avx512()) + return &gotoblas_SKYLAKEX; + if(support_avx2()){ + openblas_warning(FALLBACK_VERBOSE, HASWELL_FALLBACK); return &gotoblas_HASWELL; - else{ + } + if(support_avx()) { + openblas_warning(FALLBACK_VERBOSE, SANDYBRIDGE_FALLBACK); + return &gotoblas_SANDYBRIDGE; + } else { + openblas_warning(FALLBACK_VERBOSE, NEHALEM_FALLBACK); + return &gotoblas_NEHALEM; + } + } + //Intel Skylake + if (model == 14) { + if(support_avx2()) + return &gotoblas_HASWELL; + if(support_avx()) { + openblas_warning(FALLBACK_VERBOSE, SANDYBRIDGE_FALLBACK); + return &gotoblas_SANDYBRIDGE; + } else { openblas_warning(FALLBACK_VERBOSE, NEHALEM_FALLBACK); return &gotoblas_NEHALEM; //OS doesn't support AVX. Use old kernels. } } //Intel Phi Knights Landing if (model == 7) { - if(support_avx()) + if(support_avx2()){ + openblas_warning(FALLBACK_VERBOSE, HASWELL_FALLBACK); return &gotoblas_HASWELL; - else{ + } + if(support_avx()) { + openblas_warning(FALLBACK_VERBOSE, SANDYBRIDGE_FALLBACK); + return &gotoblas_SANDYBRIDGE; + } else { openblas_warning(FALLBACK_VERBOSE, NEHALEM_FALLBACK); return &gotoblas_NEHALEM; //OS doesn't support AVX. Use old kernels. 
} @@ -307,12 +571,29 @@ static gotoblas_t *get_coretype(void){ return &gotoblas_NEHALEM; } return NULL; + case 6: + if (model == 6) { + // Cannon Lake + if(support_avx2()) + return &gotoblas_HASWELL; + if(support_avx()) { + openblas_warning(FALLBACK_VERBOSE, SANDYBRIDGE_FALLBACK); + return &gotoblas_SANDYBRIDGE; + } else { + openblas_warning(FALLBACK_VERBOSE, NEHALEM_FALLBACK); + return &gotoblas_NEHALEM; + } + } + return NULL; case 9: case 8: if (model == 14 ) { // Kaby Lake - if(support_avx()) + if(support_avx2()) return &gotoblas_HASWELL; - else{ + if(support_avx()) { + openblas_warning(FALLBACK_VERBOSE, SANDYBRIDGE_FALLBACK); + return &gotoblas_SANDYBRIDGE; + } else { openblas_warning(FALLBACK_VERBOSE, NEHALEM_FALLBACK); return &gotoblas_NEHALEM; //OS doesn't support AVX. Use old kernels. } @@ -325,7 +606,7 @@ static gotoblas_t *get_coretype(void){ } } - if (vendor == VENDOR_AMD){ + if (vendor == VENDOR_AMD || vendor == VENDOR_HYGON){ if (family <= 0xe) { // Verify that CPU has 3dnow and 3dnowext before claiming it is Athlon cpuid(0x80000000, &eax, &ebx, &ecx, &edx); @@ -397,7 +678,7 @@ static gotoblas_t *get_coretype(void){ } } } else if (exfamily == 8) { - if (model == 1) { + if (model == 1 || model == 8) { if(support_avx()) return &gotoblas_ZEN; else{ @@ -405,6 +686,13 @@ static gotoblas_t *get_coretype(void){ return &gotoblas_BARCELONA; //OS doesn't support AVX. Use old kernels. } } + } else if (exfamily == 9) { + if(support_avx()) + return &gotoblas_ZEN; + else{ + openblas_warning(FALLBACK_VERBOSE, BARCELONA_FALLBACK); + return &gotoblas_BARCELONA; //OS doesn't support AVX. Use old kernels. + } }else { return &gotoblas_BARCELONA; } @@ -445,7 +733,8 @@ static char *corename[] = { "Haswell", "Steamroller", "Excavator", - "Zen" + "Zen", + "SkylakeX" }; char *gotoblas_corename(void) { @@ -473,7 +762,7 @@ char *gotoblas_corename(void) { if (gotoblas == &gotoblas_STEAMROLLER) return corename[21]; if (gotoblas == &gotoblas_EXCAVATOR) return corename[22]; if (gotoblas == &gotoblas_ZEN) return corename[23]; - + if (gotoblas == &gotoblas_SKYLAKEX) return corename[24]; return corename[0]; } @@ -485,7 +774,7 @@ static gotoblas_t *force_coretype(char *coretype){ char message[128]; //char mname[20]; - for ( i=1 ; i <= 23; i++) + for ( i=1 ; i <= 24; i++) { if (!strncasecmp(coretype,corename[i],20)) { @@ -503,6 +792,7 @@ static gotoblas_t *force_coretype(char *coretype){ switch (found) { + case 24: return (&gotoblas_SKYLAKEX); case 23: return (&gotoblas_ZEN); case 22: return (&gotoblas_EXCAVATOR); case 21: return (&gotoblas_STEAMROLLER); diff --git a/driver/others/dynamic_arm64.c b/driver/others/dynamic_arm64.c new file mode 100644 index 000000000..b4ce6b67d --- /dev/null +++ b/driver/others/dynamic_arm64.c @@ -0,0 +1,198 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. 
 */
+/*                                                                   */
+/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT           */
+/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,           */
+/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF          */
+/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE          */
+/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT          */
+/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,        */
+/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES          */
+/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE         */
+/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR              */
+/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF        */
+/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT         */
+/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT        */
+/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE               */
+/* POSSIBILITY OF SUCH DAMAGE.                                       */
+/*                                                                   */
+/* The views and conclusions contained in the software and           */
+/* documentation are those of the authors and should not be          */
+/* interpreted as representing official policies, either expressed   */
+/* or implied, of The University of Texas at Austin.                 */
+/*********************************************************************/
+
+#include "common.h"
+#include <asm/hwcap.h>
+#include <sys/auxv.h>
+
+extern gotoblas_t gotoblas_ARMV8;
+extern gotoblas_t gotoblas_CORTEXA57;
+extern gotoblas_t gotoblas_THUNDERX;
+extern gotoblas_t gotoblas_THUNDERX2T99;
+
+extern void openblas_warning(int verbose, const char * msg);
+
+#define NUM_CORETYPES 4
+
+/*
+ * In case asm/hwcap.h is outdated on the build system, make sure
+ * that HWCAP_CPUID is defined
+ */
+#ifndef HWCAP_CPUID
+#define HWCAP_CPUID (1 << 11)
+#endif
+
+#define get_cpu_ftr(id, var) ({ \
+    asm("mrs %0, "#id : "=r" (var)); \
+  })
+
+static char *corename[] = {
+  "armv8",
+  "cortexa57",
+  "thunderx",
+  "thunderx2t99",
+  "unknown"
+};
+
+char *gotoblas_corename(void) {
+  if (gotoblas == &gotoblas_ARMV8) return corename[ 0];
+  if (gotoblas == &gotoblas_CORTEXA57) return corename[ 1];
+  if (gotoblas == &gotoblas_THUNDERX) return corename[ 2];
+  if (gotoblas == &gotoblas_THUNDERX2T99) return corename[ 3];
+  return corename[NUM_CORETYPES];
+}
+
+static gotoblas_t *force_coretype(char *coretype) {
+  int i ;
+  int found = -1;
+  char message[128];
+
+  for ( i=0 ; i < NUM_CORETYPES; i++)
+  {
+    if (!strncasecmp(coretype, corename[i], 20))
+    {
+      found = i;
+      break;
+    }
+  }
+
+  switch (found)
+  {
+    case 0: return (&gotoblas_ARMV8);
+    case 1: return (&gotoblas_CORTEXA57);
+    case 2: return (&gotoblas_THUNDERX);
+    case 3: return (&gotoblas_THUNDERX2T99);
+  }
+  snprintf(message, 128, "Core not found: %s\n", coretype);
+  openblas_warning(1, message);
+  return NULL;
+}
+
+static gotoblas_t *get_coretype(void) {
+  int implementer, variant, part, arch, revision, midr_el1;
+
+  if (!(getauxval(AT_HWCAP) & HWCAP_CPUID)) {
+    char coremsg[128];
+    snprintf(coremsg, 128, "Kernel lacks cpuid feature support. 
Auto detection of core type failed !!!\n"); + openblas_warning(1, coremsg); + return NULL; + } + + get_cpu_ftr(MIDR_EL1, midr_el1); + /* + * MIDR_EL1 + * + * 31 24 23 20 19 16 15 4 3 0 + * ----------------------------------------------------------------- + * | Implementer | Variant | Architecture | Part Number | Revision | + * ----------------------------------------------------------------- + */ + implementer = (midr_el1 >> 24) & 0xFF; + part = (midr_el1 >> 4) & 0xFFF; + + switch(implementer) + { + case 0x41: // ARM + switch (part) + { + case 0xd07: // Cortex A57 + case 0xd08: // Cortex A72 + case 0xd03: // Cortex A53 + return &gotoblas_CORTEXA57; + } + break; + case 0x42: // Broadcom + switch (part) + { + case 0x516: // Vulcan + return &gotoblas_THUNDERX2T99; + } + break; + case 0x43: // Cavium + switch (part) + { + case 0x0a1: // ThunderX + return &gotoblas_THUNDERX; + case 0x0af: // ThunderX2 + return &gotoblas_THUNDERX2T99; + } + break; + } + return NULL; +} + +void gotoblas_dynamic_init(void) { + + char coremsg[128]; + char coren[22]; + char *p; + + if (gotoblas) return; + + p = getenv("OPENBLAS_CORETYPE"); + if ( p ) + { + gotoblas = force_coretype(p); + } + else + { + gotoblas = get_coretype(); + } + + if (gotoblas == NULL) + { + snprintf(coremsg, 128, "Falling back to generic ARMV8 core\n"); + openblas_warning(1, coremsg); + gotoblas = &gotoblas_ARMV8; + } + + if (gotoblas && gotoblas->init) { + strncpy(coren, gotoblas_corename(), 20); + sprintf(coremsg, "Core: %s\n", coren); + openblas_warning(2, coremsg); + gotoblas -> init(); + } else { + openblas_warning(0, "OpenBLAS : Architecture Initialization failed. No initialization function found.\n"); + exit(1); + } + +} + +void gotoblas_dynamic_quit(void) { + gotoblas = NULL; +} diff --git a/driver/others/memory.c b/driver/others/memory.c index 1d5b70003..72d3e173c 100644 --- a/driver/others/memory.c +++ b/driver/others/memory.c @@ -13,9 +13,9 @@ met: notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - 3. Neither the name of the OpenBLAS project nor the names of - its contributors may be used to endorse or promote products - derived from this software without specific prior written + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" @@ -72,6 +72,1537 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 //#undef DEBUG

 #include "common.h"
+
+#if defined(USE_TLS) && defined(SMP)
+#define COMPILE_TLS
+
+#if USE_TLS != 1
+#undef COMPILE_TLS
+#endif
+
+#if defined(__GLIBC_PREREQ)
+#if !__GLIBC_PREREQ(2,20)
+#undef COMPILE_TLS
+#endif
+#endif
+#endif
+
+#if defined(COMPILE_TLS)
+
+#include <errno.h>
+
+#if defined(OS_WINDOWS) && !defined(OS_CYGWIN_NT)
+#define ALLOC_WINDOWS
+#ifndef MEM_LARGE_PAGES
+#define MEM_LARGE_PAGES 0x20000000
+#endif
+#else
+#define ALLOC_MMAP
+#define ALLOC_MALLOC
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <fcntl.h>
+
+#if !defined(OS_WINDOWS) || defined(OS_CYGWIN_NT)
+#include <sys/mman.h>
+#ifndef NO_SYSV_IPC
+#include <sys/shm.h>
+#endif
+#include <sys/ipc.h>
+#endif
+
+#include <sys/types.h>
+
+#ifdef OS_LINUX
+#include <sys/mman.h>
+#include <sys/shm.h>
+#include <sys/ipc.h>
+#include <sched.h>
+#include <dlfcn.h>
+#include <unistd.h>
+#include <linux/unistd.h>
+#endif
+
+#ifdef OS_HAIKU
+#include <unistd.h>
+#endif
+
+#if defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN)
+#include <sys/sysctl.h>
+#include <sys/resource.h>
+#endif
+
+#if defined(OS_WINDOWS) && (defined(__MINGW32__) || defined(__MINGW64__))
+#include <conio.h>
+#undef printf
+#define printf _cprintf
+#endif
+
+#ifdef OS_LINUX
+
+#ifndef MPOL_PREFERRED
+#define MPOL_PREFERRED 1
+#endif
+
+#endif
+
+#if (defined(PPC440) || !defined(OS_LINUX) || defined(HPL)) && !defined(NO_WARMUP)
+#define NO_WARMUP
+#endif
+
+#ifndef SHM_HUGETLB
+#define SHM_HUGETLB 04000
+#endif
+
+#ifndef FIXED_PAGESIZE
+#define FIXED_PAGESIZE 4096
+#endif
+
+#define BITMASK(a, b, c) ((((a) >> (b)) & (c)))
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define CONSTRUCTOR __cdecl
+#define DESTRUCTOR __cdecl
+#elif (defined(OS_DARWIN) || defined(OS_SUNOS)) && defined(C_GCC)
+#define CONSTRUCTOR __attribute__ ((constructor))
+#define DESTRUCTOR __attribute__ ((destructor))
+#elif __GNUC__ && INIT_PRIORITY && ((GCC_VERSION >= 40300) || (CLANG_VERSION >= 20900))
+#define CONSTRUCTOR __attribute__ ((constructor(101)))
+#define DESTRUCTOR __attribute__ ((destructor(101)))
+#else
+#define CONSTRUCTOR __attribute__ ((constructor))
+#define DESTRUCTOR __attribute__ ((destructor))
+#endif
+
+#ifdef DYNAMIC_ARCH
+gotoblas_t *gotoblas = NULL;
+#endif
+extern void openblas_warning(int verbose, const char * msg);
+
+#ifndef SMP
+
+#define blas_cpu_number 1
+#define blas_num_threads 1
+
+/* Dummy Function */
+int goto_get_num_procs (void) { return 1;};
+void goto_set_num_threads(int num_threads) {};
+
+#else
+
+#if defined(OS_LINUX) || defined(OS_SUNOS) || defined(OS_NETBSD)
+#ifndef NO_AFFINITY
+int get_num_procs(void);
+#else
+int get_num_procs(void) {
+	static int nums = 0;
+cpu_set_t *cpusetp;
+size_t size;
+int ret;
+int i,n;
+
+	if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF);
+#if !defined(OS_LINUX)
+	return nums;
+#endif
+
+#if !defined(__GLIBC_PREREQ)
+	return nums;
+#else
+ #if !__GLIBC_PREREQ(2, 3)
+	return nums;
+ #endif
+
+ #if !__GLIBC_PREREQ(2, 7)
+	ret = sched_getaffinity(0,sizeof(cpu_set_t), cpusetp);
+	if (ret!=0) return nums;
+	n=0;
+ #if !__GLIBC_PREREQ(2, 6)
+	for (i=0;i<nums;i++)
+		if (CPU_ISSET(i,cpusetp)) n++;
+	nums=n;
+ #else
+	nums = CPU_COUNT(sizeof(cpu_set_t),cpusetp);
+ #endif
+	return nums;
+ #else
+	cpusetp = CPU_ALLOC(nums);
+	if (cpusetp == NULL) return nums;
+	size = CPU_ALLOC_SIZE(nums);
+	ret = sched_getaffinity(0,size,cpusetp);
+	if (ret!=0) return nums;
+	ret = CPU_COUNT_S(size,cpusetp);
+	if (ret > 0 && ret < nums) nums = ret;
+	CPU_FREE(cpusetp);
+	return nums;
+ #endif
+#endif
+}
+#endif
+#endif
+
+#ifdef OS_ANDROID
+int get_num_procs(void) {
+	static int nums = 0;
+	if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF);
+	return nums;
+}
+#endif
+
+#ifdef OS_HAIKU
+int get_num_procs(void) {
+	static int nums = 0;
+	if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF);
+	return nums;
+}
+#endif
+
+#ifdef OS_AIX
+int get_num_procs(void) {
+	static int nums = 0;
+	if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF);
+	return nums;
+}
+#endif
+
+
+
+#ifdef OS_WINDOWS
+
+int get_num_procs(void) {
+
+	static int nums = 0;
+
+	if (nums == 0) {
+
+		SYSTEM_INFO sysinfo;
+
+		GetSystemInfo(&sysinfo);
+
+		nums = sysinfo.dwNumberOfProcessors;
+	}
+
+	return nums;
+}
+
+#endif
+
+#if defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY)
+
+int get_num_procs(void) {
+
+	static int nums = 0;
+
+	int m[2];
+	size_t len;
+
+	if (nums == 0) {
+		m[0] = CTL_HW;
+		m[1] = HW_NCPU;
+		len = sizeof(int);
+		sysctl(m, 2, &nums, &len, NULL, 0);
+	}
+
+	return nums;
+}
+
+#endif
+
+#if defined(OS_DARWIN)
+int get_num_procs(void) {
+	static int nums = 0;
+	size_t len;
+	if (nums == 0){
+		len = sizeof(int);
+		sysctlbyname("hw.physicalcpu", &nums, &len, NULL, 0);
+	}
+	return nums;
+}
+/*
+void set_stack_limit(int limitMB){
+	int result=0;
+	struct rlimit rl;
+	rlim_t StackSize;
+
+	StackSize=limitMB*1024*1024;
+	result=getrlimit(RLIMIT_STACK, &rl);
+	if(result==0){
+		if(rl.rlim_cur < StackSize){
+			rl.rlim_cur=StackSize;
+			result=setrlimit(RLIMIT_STACK, &rl);
+			if(result !=0){
+				fprintf(stderr, "OpenBLAS: set stack limit error =%d\n", result);
+			}
+		}
+	}
+}
+*/
+#endif
+
+
+/*
+OpenBLAS uses the number of CPU cores in multithreading.
+It can be set by openblas_set_num_threads(int num_threads);
+*/
+int blas_cpu_number = 0;
+/*
+The number of threads in the thread pool.
+This value is equal to or larger than blas_cpu_number, which means some threads are asleep.
+*/
+int blas_num_threads = 0;
+
+int goto_get_num_procs (void) {
+	return blas_cpu_number;
+}
+
+static void blas_memory_init();
+
+void openblas_fork_handler()
+{
+	// This handler shuts down the OpenBLAS-managed PTHREAD pool when OpenBLAS is
+	// built with "make USE_OPENMP=0".
+	// Hanging can still happen when OpenBLAS is built against the libgomp
+	// implementation of OpenMP. The problem is tracked at:
+	//   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=60035
+	// In the meantime, build with USE_OPENMP=0 or link against another
+	// implementation of OpenMP.
+#if !((defined(OS_WINDOWS) && !defined(OS_CYGWIN_NT)) || defined(OS_ANDROID)) && defined(SMP_SERVER)
+	int err;
+	err = pthread_atfork ((void (*)(void)) BLASFUNC(blas_thread_shutdown), NULL, blas_memory_init);
+	if(err != 0)
+		openblas_warning(0, "OpenBLAS Warning ... cannot install fork handler. 
You may meet hang after fork.\n"); +#endif +} + +extern int openblas_num_threads_env(); +extern int openblas_goto_num_threads_env(); +extern int openblas_omp_num_threads_env(); + +int blas_get_cpu_number(void){ +#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) + int max_num; +#endif + int blas_goto_num = 0; + int blas_omp_num = 0; + + if (blas_num_threads) return blas_num_threads; + +#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) + max_num = get_num_procs(); +#endif + + // blas_goto_num = 0; +#ifndef USE_OPENMP_UNUSED + blas_goto_num=openblas_num_threads_env(); + if (blas_goto_num < 0) blas_goto_num = 0; + + if (blas_goto_num == 0) { + blas_goto_num=openblas_goto_num_threads_env(); + if (blas_goto_num < 0) blas_goto_num = 0; + } + +#endif + + // blas_omp_num = 0; + blas_omp_num=openblas_omp_num_threads_env(); + if (blas_omp_num < 0) blas_omp_num = 0; + + if (blas_goto_num > 0) blas_num_threads = blas_goto_num; + else if (blas_omp_num > 0) blas_num_threads = blas_omp_num; + else blas_num_threads = MAX_CPU_NUMBER; + +#if defined(OS_LINUX) || defined(OS_WINDOWS) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLY) || defined(OS_DARWIN) || defined(OS_ANDROID) + if (blas_num_threads > max_num) blas_num_threads = max_num; +#endif + + if (blas_num_threads > MAX_CPU_NUMBER) blas_num_threads = MAX_CPU_NUMBER; + +#ifdef DEBUG + printf( "Adjusted number of threads : %3d\n", blas_num_threads); +#endif + + blas_cpu_number = blas_num_threads; + + return blas_num_threads; +} +#endif + + +int openblas_get_num_procs(void) { +#ifndef SMP + return 1; +#else + return get_num_procs(); +#endif +} + +int openblas_get_num_threads(void) { +#ifndef SMP + return 1; +#else + // init blas_cpu_number if needed + blas_get_cpu_number(); + return blas_cpu_number; +#endif +} + +int hugetlb_allocated = 0; + +#if defined(OS_WINDOWS) +#define LIKELY_ONE(x) (x) +#else +#define LIKELY_ONE(x) (__builtin_expect(x, 1)) +#endif + +/* Stores information about the allocation and how to release it */ +struct alloc_t { + /* Whether this allocation is being used */ + int used; + /* Any special attributes needed when releasing this allocation */ + int attr; + /* Function that can properly release this memory */ + void (*release_func)(struct alloc_t *); + /* Pad to 64-byte alignment */ + char pad[64 - 2 * sizeof(int) - sizeof(void(*))]; +}; + +/* Convenience macros for storing release funcs */ +#define STORE_RELEASE_FUNC(address, func) \ + if (address != (void *)-1) { \ + struct alloc_t *alloc_info = (struct alloc_t *)address; \ + alloc_info->release_func = func; \ + } + +#define STORE_RELEASE_FUNC_WITH_ATTR(address, func, attr) \ + if (address != (void *)-1) { \ + struct alloc_t *alloc_info = (struct alloc_t *)address; \ + alloc_info->release_func = func; \ + alloc_info->attr = attr; \ + } + +/* The number of bytes that will be allocated for each buffer. When allocating + memory, we store an alloc_t followed by the actual buffer memory. This means + that each allocation always has its associated alloc_t, without the need + for an auxiliary tracking structure. 
*/ +static const int allocation_block_size = BUFFER_SIZE + sizeof(struct alloc_t); + +#if defined(SMP) +# if defined(OS_WINDOWS) +static DWORD local_storage_key = 0; +DWORD lsk; + +# else +static pthread_key_t local_storage_key = 0; +pthread_key_t lsk; +# endif /* defined(OS_WINDOWS) */ +#endif /* defined(SMP) */ + +#if defined(OS_LINUX) && !defined(NO_WARMUP) +static int hot_alloc = 0; +#endif + +/* Global lock for memory allocation */ + +#if defined(USE_PTHREAD_LOCK) +static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER; +#elif defined(USE_PTHREAD_SPINLOCK) +static pthread_spinlock_t alloc_lock = 0; +#else +static BLASULONG alloc_lock = 0UL; +#endif + +#if defined(USE_PTHREAD_LOCK) +static pthread_mutex_t key_lock = PTHREAD_MUTEX_INITIALIZER; +#elif defined(USE_PTHREAD_SPINLOCK) +static pthread_spinlock_t key_lock = 0; +#else +static BLASULONG key_lock = 0UL; +#endif + +/* Returns a pointer to the start of the per-thread memory allocation data */ +static __inline struct alloc_t ** get_memory_table() { +#if defined(SMP) +LOCK_COMMAND(&key_lock); +lsk=local_storage_key; +UNLOCK_COMMAND(&key_lock); + if (!lsk) { + blas_memory_init(); + } +# if defined(OS_WINDOWS) + struct alloc_t ** local_memory_table = (struct alloc_t **)TlsGetValue(local_storage_key); +# else + struct alloc_t ** local_memory_table = (struct alloc_t **)pthread_getspecific(local_storage_key); +# endif /* defined(OS_WINDOWS) */ +#else + static struct alloc_t ** local_memory_table = NULL; +#endif /* defined(SMP) */ +#if defined (SMP) +LOCK_COMMAND(&key_lock); +lsk=local_storage_key; +UNLOCK_COMMAND(&key_lock); + if (lsk && !local_memory_table) { +#else + if (!local_memory_table) { +#endif /* defined(SMP) */ + local_memory_table = (struct alloc_t **)malloc(sizeof(struct alloc_t *) * NUM_BUFFERS); + memset(local_memory_table, 0, sizeof(struct alloc_t *) * NUM_BUFFERS); +#if defined(SMP) +# if defined(OS_WINDOWS) +LOCK_COMMAND(&key_lock); + TlsSetValue(local_storage_key, (void*)local_memory_table); +UNLOCK_COMMAND(&key_lock); +# else +LOCK_COMMAND(&key_lock); + pthread_setspecific(local_storage_key, (void*)local_memory_table); +UNLOCK_COMMAND(&key_lock); +# endif /* defined(OS_WINDOWS) */ +#endif /* defined(SMP) */ + } + return local_memory_table; +} + +#ifdef ALLOC_MMAP + +static void alloc_mmap_free(struct alloc_t *alloc_info){ + + if (munmap(alloc_info, allocation_block_size)) { + printf("OpenBLAS : munmap failed\n"); + } +} + + + +#ifdef NO_WARMUP + +static void *alloc_mmap(void *address){ + void *map_address; + + if (address){ + map_address = mmap(address, + allocation_block_size, + MMAP_ACCESS, MMAP_POLICY | MAP_FIXED, -1, 0); + } else { + map_address = mmap(address, + allocation_block_size, + MMAP_ACCESS, MMAP_POLICY, -1, 0); + } + + STORE_RELEASE_FUNC(map_address, alloc_mmap_free); + +#ifdef OS_LINUX + my_mbind(map_address, allocation_block_size, MPOL_PREFERRED, NULL, 0, 0); +#endif + + return map_address; +} + +#else + +#define BENCH_ITERATION 4 +#define SCALING 2 + +static inline BLASULONG run_bench(BLASULONG address, BLASULONG size) { + + BLASULONG original, *p; + BLASULONG start, stop, min; + int iter, i, count; + + min = (BLASULONG)-1; + + original = *(BLASULONG *)(address + size - PAGESIZE); + + *(BLASULONG *)(address + size - PAGESIZE) = (BLASULONG)address; + + for (iter = 0; iter < BENCH_ITERATION; iter ++ ) { + + p = (BLASULONG *)address; + + count = size / PAGESIZE; + + start = rpcc(); + + for (i = 0; i < count; i ++) { + p = (BLASULONG *)(*p); + } + + stop = rpcc(); + + if (min > stop - start) min = 
+static void *alloc_mmap(void *address){ + void *map_address, *best_address; + BLASULONG best, start, current, original; + BLASULONG allocsize; + + if (address){ + /* An explicit address was requested; just map it directly and skip the benchmark */ + map_address = mmap(address, allocation_block_size, MMAP_ACCESS, MMAP_POLICY | MAP_FIXED, -1, 0); + +#ifdef OS_LINUX + my_mbind(map_address, allocation_block_size, MPOL_PREFERRED, NULL, 0, 0); +#endif + + } else { +#if defined(OS_LINUX) && !defined(NO_WARMUP) + if (hot_alloc == 0) { + map_address = mmap(NULL, allocation_block_size, MMAP_ACCESS, MMAP_POLICY, -1, 0); + +#ifdef OS_LINUX + my_mbind(map_address, allocation_block_size, MPOL_PREFERRED, NULL, 0, 0); +#endif + + } else { +#endif + + map_address = mmap(NULL, allocation_block_size * SCALING, + MMAP_ACCESS, MMAP_POLICY, -1, 0); + + if (map_address != (void *)-1) { + +#ifdef OS_LINUX +#ifdef DEBUG + int ret=0; + ret=my_mbind(map_address, allocation_block_size * SCALING, MPOL_PREFERRED, NULL, 0, 0); + if(ret==-1){ + int errsv=errno; + perror("OpenBLAS alloc_mmap:"); + printf("error code=%d,\tmap_address=%lx\n",errsv,(unsigned long)map_address); + } + +#else + my_mbind(map_address, allocation_block_size * SCALING, MPOL_PREFERRED, NULL, 0, 0); +#endif +#endif + + + allocsize = DGEMM_P * DGEMM_Q * sizeof(double); + + start = (BLASULONG)map_address; + current = (SCALING - 1) * allocation_block_size; + original = current; + + while(current > 0 && current <= original) { + *(BLASLONG *)start = (BLASLONG)start + PAGESIZE; + start += PAGESIZE; + current -= PAGESIZE; + } + + *(BLASLONG *)(start - PAGESIZE) = (BLASULONG)map_address; + + start = (BLASULONG)map_address; + + best = (BLASULONG)-1; + best_address = map_address; + + while ((start + allocsize < (BLASULONG)map_address + (SCALING - 1) * allocation_block_size)) { + + current = run_bench(start, allocsize); + + if (best > current) { + best = current; + best_address = (void *)start; + } + + start += PAGESIZE; + + } + + if ((BLASULONG)best_address > (BLASULONG)map_address) + munmap(map_address, (BLASULONG)best_address - (BLASULONG)map_address); + + munmap((void *)((BLASULONG)best_address + allocation_block_size), (SCALING - 1) * allocation_block_size + (BLASULONG)map_address - (BLASULONG)best_address); + + map_address = best_address; + +#if defined(OS_LINUX) && !defined(NO_WARMUP) + hot_alloc = 2; +#endif + } + } +#if defined(OS_LINUX) && !defined(NO_WARMUP) + } +#endif + + STORE_RELEASE_FUNC(map_address, alloc_mmap_free); + + return map_address; +} + +#endif + +#endif + +
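To pick a well-placed buffer, the warm-up variant of alloc_mmap above over-allocates by a factor of SCALING, scores every page-aligned window with run_bench, keeps the fastest one, and unmaps the rest. A compact sketch of that "allocate big, keep the best slice" idea (illustrative only; score() stands in for run_bench and sizes are assumed page-aligned):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    #define PAGE 4096

    /* score() is any timing probe, e.g. the pointer-chase sketch above */
    void *pick_best_slice(size_t size, size_t scaling,
                          uint64_t (*score)(char *, size_t)) {
      size_t span = size * scaling;
      char *base = mmap(NULL, span, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (base == MAP_FAILED) return NULL;

      char *best = base;
      uint64_t best_cost = UINT64_MAX;
      for (char *p = base; p + size <= base + span; p += PAGE) {
        uint64_t cost = score(p, size);
        if (cost < best_cost) { best_cost = cost; best = p; }
      }
      /* unmap everything before and after the winning window */
      if (best > base)
        munmap(base, (size_t)(best - base));
      if (best + size < base + span)
        munmap(best + size, (size_t)(base + span - (best + size)));
      return best;
    }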
+#ifdef ALLOC_MALLOC + +static void alloc_malloc_free(struct alloc_t *alloc_info){ + + free(alloc_info); + +} + +static void *alloc_malloc(void *address){ + + void *map_address; + + map_address = (void *)malloc(allocation_block_size + FIXED_PAGESIZE); + + if (map_address == (void *)NULL) map_address = (void *)-1; + + STORE_RELEASE_FUNC(map_address, alloc_malloc_free); + + return map_address; + +} + +#endif + +#ifdef ALLOC_QALLOC + +void *qalloc(int flags, size_t bytes); +void *qfree (void *address); + +#define QNONCACHE 0x1 +#define QCOMMS 0x2 +#define QFAST 0x4 + +static void alloc_qalloc_free(struct alloc_t *alloc_info){ + + qfree(alloc_info); + +} + +static void *alloc_qalloc(void *address){ + void *map_address; + + map_address = (void *)qalloc(QCOMMS | QFAST, allocation_block_size + FIXED_PAGESIZE); + + if (map_address == (void *)NULL) map_address = (void *)-1; + + STORE_RELEASE_FUNC(map_address, alloc_qalloc_free); + + return (void *)(((BLASULONG)map_address + FIXED_PAGESIZE - 1) & ~(FIXED_PAGESIZE - 1)); +} + +#endif + +#ifdef ALLOC_WINDOWS + +static void alloc_windows_free(struct alloc_t *alloc_info){ + + VirtualFree(alloc_info, allocation_block_size, MEM_DECOMMIT); + +} + +static void *alloc_windows(void *address){ + void *map_address; + + map_address = VirtualAlloc(address, + allocation_block_size, + MEM_RESERVE | MEM_COMMIT, + PAGE_READWRITE); + + if (map_address == (void *)NULL) map_address = (void *)-1; + + STORE_RELEASE_FUNC(map_address, alloc_windows_free); + + return map_address; +} + +#endif + +#ifdef ALLOC_DEVICEDRIVER +#ifndef DEVICEDRIVER_NAME +#define DEVICEDRIVER_NAME "/dev/mapper" +#endif + +static void alloc_devicedirver_free(struct alloc_t *alloc_info){ + + int attr = alloc_info -> attr; + if (munmap(alloc_info, allocation_block_size)) { + printf("OpenBLAS : Bugphysarea unmap failed.\n"); + } + + if (close(attr)) { + printf("OpenBLAS : Bugphysarea close failed.\n"); + } + +} + +static void *alloc_devicedirver(void *address){ + + int fd; + void *map_address; + + if ((fd = open(DEVICEDRIVER_NAME, O_RDWR | O_SYNC)) < 0) { + + return (void *)-1; + + } + + map_address = mmap(address, allocation_block_size, + PROT_READ | PROT_WRITE, + MAP_FILE | MAP_SHARED, + fd, 0); + + STORE_RELEASE_FUNC_WITH_ATTR(map_address, alloc_devicedirver_free, fd); + + return map_address; +} + +#endif + +#ifdef ALLOC_SHM + +static void alloc_shm_free(struct alloc_t *alloc_info){ + + if (shmdt(alloc_info)) { + printf("OpenBLAS : Shared memory unmap failed.\n"); + } +} + +static void *alloc_shm(void *address){ + void *map_address; + int shmid; + + shmid = shmget(IPC_PRIVATE, allocation_block_size,IPC_CREAT | 0600); + + map_address = (void *)shmat(shmid, address, 0); + + if (map_address != (void *)-1){ + +#ifdef OS_LINUX + my_mbind(map_address, allocation_block_size, MPOL_PREFERRED, NULL, 0, 0); +#endif + + shmctl(shmid, IPC_RMID, 0); + + struct alloc_t *alloc_info = (struct alloc_t *)map_address; + alloc_info->release_func = alloc_shm_free; + alloc_info->attr = shmid; + } + + return map_address; +} + +#if defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS + +static void alloc_hugetlb_free(struct alloc_t *alloc_info){ + +#if defined(OS_LINUX) || defined(OS_AIX) + if (shmdt(alloc_info)) { + printf("OpenBLAS : Hugepage unmap failed.\n"); + } +#endif + +#ifdef __sun__ + + munmap(alloc_info, allocation_block_size); + +#endif + +#ifdef OS_WINDOWS + + VirtualFree(alloc_info, allocation_block_size, MEM_LARGE_PAGES | MEM_DECOMMIT); + +#endif + +} + +static void *alloc_hugetlb(void *address){ + + void *map_address = (void *)-1; + +#if defined(OS_LINUX) || defined(OS_AIX) + int shmid; + + shmid = shmget(IPC_PRIVATE, allocation_block_size, +#ifdef OS_LINUX + SHM_HUGETLB | +#endif +#ifdef OS_AIX + SHM_LGPAGE | SHM_PIN | +#endif + IPC_CREAT | SHM_R | SHM_W); + + if (shmid != -1) { + map_address = (void *)shmat(shmid, address, SHM_RND); + +#ifdef OS_LINUX + my_mbind(map_address, allocation_block_size, MPOL_PREFERRED, NULL, 0, 0); +#endif + + if (map_address != (void *)-1){ + shmctl(shmid, IPC_RMID, 0); + } + } +#endif + +#ifdef __sun__ + struct memcntl_mha mha; + + mha.mha_cmd = MHA_MAPSIZE_BSSBRK; + mha.mha_flags = 0; + mha.mha_pagesize = HUGE_PAGESIZE; + memcntl(NULL, 0, MC_HAT_ADVISE, (char *)&mha, 0, 0); + + map_address = (BLASULONG)memalign(HUGE_PAGESIZE, allocation_block_size); +#endif + +#ifdef OS_WINDOWS + + HANDLE hToken; + TOKEN_PRIVILEGES tp; + 
+ if (OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &hToken) != TRUE) return (void *) -1; + + tp.PrivilegeCount = 1; + tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; + + if (LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &tp.Privileges[0].Luid) != TRUE) { + CloseHandle(hToken); + return (void*)-1; + } + + if (AdjustTokenPrivileges(hToken, FALSE, &tp, 0, NULL, NULL) != TRUE) { + CloseHandle(hToken); + return (void*)-1; + } + + map_address = (void *)VirtualAlloc(address, + allocation_block_size, + MEM_LARGE_PAGES | MEM_RESERVE | MEM_COMMIT, + PAGE_READWRITE); + + tp.Privileges[0].Attributes = 0; + AdjustTokenPrivileges(hToken, FALSE, &tp, 0, NULL, NULL); + + if (map_address == (void *)NULL) map_address = (void *)-1; + +#endif + + STORE_RELEASE_FUNC(map_address, alloc_hugetlb_free); + + return map_address; +} +#endif + +#endif + +#ifdef ALLOC_HUGETLBFILE + +static int hugetlb_pid = 0; + +static void alloc_hugetlbfile_free(struct alloc_t *alloc_info){ + + int attr = alloc_info -> attr; + if (munmap(alloc_info, allocation_block_size)) { + printf("OpenBLAS : HugeTLBfs unmap failed.\n"); + } + + if (close(attr)) { + printf("OpenBLAS : HugeTLBfs close failed.\n"); + } +} + +static void *alloc_hugetlbfile(void *address){ + + void *map_address = (void *)-1; + int fd; + char filename[64]; + + if (!hugetlb_pid) hugetlb_pid = getpid(); + + sprintf(filename, "%s/gotoblas.%d", HUGETLB_FILE_NAME, hugetlb_pid); + + if ((fd = open(filename, O_RDWR | O_CREAT, 0700)) < 0) { + return (void *)-1; + } + + unlink(filename); + + map_address = mmap(address, allocation_block_size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + fd, 0); + + STORE_RELEASE_FUNC_WITH_ATTR(map_address, alloc_hugetlbfile_free, fd); + + return map_address; +} +#endif + + +#ifdef SEEK_ADDRESS +static BLASULONG base_address = 0UL; +#else +static BLASULONG base_address = BASE_ADDRESS; +#endif + +#if __STDC_VERSION__ >= 201112L +static _Atomic int memory_initialized = 0; +#else +static volatile int memory_initialized = 0; +#endif + +/* Memory allocation routine */ +/* procpos ... 
indicates where it comes from */ +/* 0 : Level 3 functions */ +/* 1 : Level 2 functions */ +/* 2 : Thread */ + + static void blas_memory_cleanup(void* ptr){ + if (ptr) { + struct alloc_t ** table = (struct alloc_t **)ptr; + int pos; + for (pos = 0; pos < NUM_BUFFERS; pos ++){ + struct alloc_t *alloc_info = table[pos]; + if (alloc_info) { + alloc_info->release_func(alloc_info); + table[pos] = (void *)0; + } + } + free(table); + } +} + +static void blas_memory_init(){ +#if defined(SMP) +# if defined(OS_WINDOWS) + local_storage_key = TlsAlloc(); +# else + pthread_key_create(&local_storage_key, blas_memory_cleanup); +# endif /* defined(OS_WINDOWS) */ +#endif /* defined(SMP) */ +} + +void *blas_memory_alloc(int procpos){ + + int position; + + void *map_address; + + void *(*memoryalloc[])(void *address) = { +#ifdef ALLOC_DEVICEDRIVER + alloc_devicedirver, +#endif +/* Hugetlb implicitly assumes ALLOC_SHM */ +#ifdef ALLOC_SHM + alloc_shm, +#endif +#if ((defined ALLOC_SHM) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS)) + alloc_hugetlb, +#endif +#ifdef ALLOC_MMAP + alloc_mmap, +#endif +#ifdef ALLOC_QALLOC + alloc_qalloc, +#endif +#ifdef ALLOC_WINDOWS + alloc_windows, +#endif +#ifdef ALLOC_MALLOC + alloc_malloc, +#endif + NULL, + }; + void *(**func)(void *address); + struct alloc_t * alloc_info; + struct alloc_t ** alloc_table; + + +#if defined(SMP) && !defined(USE_OPENMP) +int mi; +LOCK_COMMAND(&alloc_lock); +mi=memory_initialized; +UNLOCK_COMMAND(&alloc_lock); + if (!LIKELY_ONE(mi)) { +#else + if (!LIKELY_ONE(memory_initialized)) { +#endif +#if defined(SMP) && !defined(USE_OPENMP) + /* Only allow a single thread to initialize memory system */ + LOCK_COMMAND(&alloc_lock); + + if (!memory_initialized) { +#endif + blas_memory_init(); +#ifdef DYNAMIC_ARCH + gotoblas_dynamic_init(); +#endif + +#if defined(SMP) && defined(OS_LINUX) && !defined(NO_AFFINITY) + gotoblas_affinity_init(); +#endif + +#ifdef SMP + if (!blas_num_threads) blas_cpu_number = blas_get_cpu_number(); +#endif + +#if defined(ARCH_X86) || defined(ARCH_X86_64) || defined(ARCH_IA64) || defined(ARCH_MIPS64) || defined(ARCH_ARM64) +#ifndef DYNAMIC_ARCH + blas_set_parameter(); +#endif +#endif + + memory_initialized = 1; + +#if defined(SMP) && !defined(USE_OPENMP) + } + UNLOCK_COMMAND(&alloc_lock); +#endif + } + +#ifdef DEBUG + printf("Alloc Start ...\n"); +#endif + + position = 0; + alloc_table = get_memory_table(); + do { + if (!alloc_table[position] || !alloc_table[position]->used) goto allocation; + position ++; + + } while (position < NUM_BUFFERS); + + goto error; + + allocation : + +#ifdef DEBUG + printf(" Position -> %d\n", position); +#endif + + alloc_info = alloc_table[position]; + if (!alloc_info) { + do { +#ifdef DEBUG + printf("Allocation Start : %lx\n", base_address); +#endif + + map_address = (void *)-1; + + func = &memoryalloc[0]; + + while ((func != NULL) && (map_address == (void *) -1)) { + + map_address = (*func)((void *)base_address); + +#ifdef ALLOC_DEVICEDRIVER + if ((*func == alloc_devicedirver) && (map_address == (void *)-1)) { + fprintf(stderr, "OpenBLAS Warning ... Physically contiguous allocation failed.\n"); + } +#endif + +#ifdef ALLOC_HUGETLBFILE + if ((*func == alloc_hugetlbfile) && (map_address == (void *)-1)) { +#ifndef OS_WINDOWS + fprintf(stderr, "OpenBLAS Warning ... 
HugeTLB(File) allocation failed.\n"); +#endif + } +#endif + +#if (defined ALLOC_SHM) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS) + if ((*func == alloc_hugetlb) && (map_address != (void *)-1)) hugetlb_allocated = 1; +#endif + + func ++; + } + +#ifdef DEBUG + printf(" Success -> %08lx\n", (unsigned long)map_address); +#endif + if (((BLASLONG) map_address) == -1) base_address = 0UL; + + if (base_address) base_address += allocation_block_size + FIXED_PAGESIZE; + + } while ((BLASLONG)map_address == -1); + + alloc_table[position] = alloc_info = map_address; + +#ifdef DEBUG + printf(" Mapping Succeeded. %p(%d)\n", (void *)alloc_info, position); +#endif + } + +#ifdef DEBUG + printf("Mapped : %p %3d\n\n", (void *)alloc_info, position); +#endif + + alloc_info->used = 1; + + return (void *)(((char *)alloc_info) + sizeof(struct alloc_t)); + + error: + printf("OpenBLAS : Program will terminate because you tried to allocate too many memory regions.\n"); + + return NULL; +} + +void blas_memory_free(void *buffer){ +#ifdef DEBUG + int position; + struct alloc_t ** alloc_table; +#endif + /* Since we passed an offset pointer to the caller, get back to the actual allocation */ + struct alloc_t *alloc_info = (void *)(((char *)buffer) - sizeof(struct alloc_t)); + +#ifdef DEBUG + printf("Unmapped Start : %p ...\n", alloc_info); +#endif + + alloc_info->used = 0; + +#ifdef DEBUG + printf("Unmap Succeeded.\n\n"); +#endif + +#ifdef DEBUG + alloc_table = get_memory_table(); + for (position = 0; position < NUM_BUFFERS; position++){ + if (alloc_table[position]) { + printf("%4d %p : %d\n", position, alloc_table[position], alloc_table[position]->used); + } + } +#endif + return; +} + +void *blas_memory_alloc_nolock(int unused) { + void *map_address; + map_address = (void *)malloc(BUFFER_SIZE + FIXED_PAGESIZE); + return map_address; +} + +void blas_memory_free_nolock(void * map_address) { + free(map_address); +} + +void blas_shutdown(void){ +#ifdef SMP + BLASFUNC(blas_thread_shutdown)(); +#endif + +#ifdef SMP + /* Only clean up if we were built for threading and TLS was initialized */ + if (local_storage_key) +#endif + blas_memory_cleanup((void*)get_memory_table()); + +#ifdef SEEK_ADDRESS + base_address = 0UL; +#else + base_address = BASE_ADDRESS; +#endif + + return; +} + +#if defined(OS_LINUX) && !defined(NO_WARMUP) + +#ifdef SMP +#if defined(USE_PTHREAD_LOCK) +static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER; +#elif defined(USE_PTHREAD_SPINLOCK) +static pthread_spinlock_t init_lock = 0; +#else +static BLASULONG init_lock = 0UL; +#endif +#endif + +static void _touch_memory(blas_arg_t *arg, BLASLONG *range_m, BLASLONG *range_n, + void *sa, void *sb, BLASLONG pos) { + +#if !defined(ARCH_POWER) && !defined(ARCH_SPARC) + + size_t size; + BLASULONG buffer; + + size = allocation_block_size - PAGESIZE; + buffer = (BLASULONG)sa + GEMM_OFFSET_A; + +#if defined(OS_LINUX) && !defined(NO_WARMUP) + if (hot_alloc != 2) { +#endif + +#ifdef SMP + LOCK_COMMAND(&init_lock); +#endif + + while (size > 0) { + *(int *)buffer = size; + buffer += PAGESIZE; + size -= PAGESIZE; + } + +#ifdef SMP + UNLOCK_COMMAND(&init_lock); +#endif + + size = MIN((allocation_block_size - PAGESIZE), L2_SIZE); + buffer = (BLASULONG)sa + GEMM_OFFSET_A; + + while (size > 0) { + *(int *)buffer = size; + buffer += 64; + size -= 64; + } + +#if defined(OS_LINUX) && !defined(NO_WARMUP) + } +#endif + +#endif +}
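_touch_memory above writes one word into every page of a freshly mapped buffer. A brief sketch of why: the write forces the kernel to actually back each page (and, under a first-touch NUMA policy, to place it near the touching thread) before the buffer is used for GEMM packing. Hypothetical helper, not the patch's code:

    #include <stddef.h>

    /* touch one word per page so every page is faulted in up front */
    static void touch_pages(char *buf, size_t size, size_t pagesize) {
      for (size_t off = 0; off < size; off += pagesize)
        buf[off] = 0;
    }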
+#ifdef SMP + +static void _init_thread_memory(void *buffer) { + + blas_queue_t queue[MAX_CPU_NUMBER]; + int num_cpu; + + for (num_cpu = 0; num_cpu < blas_num_threads; num_cpu++) { + + blas_queue_init(&queue[num_cpu]); + queue[num_cpu].mode = BLAS_DOUBLE | BLAS_REAL; + queue[num_cpu].routine = &_touch_memory; + queue[num_cpu].args = NULL; + queue[num_cpu].next = &queue[num_cpu + 1]; + } + + queue[num_cpu - 1].next = NULL; + queue[0].sa = buffer; + + exec_blas(num_cpu, queue); + +} +#endif + +static void gotoblas_memory_init(void) { + + void *buffer; + + hot_alloc = 1; + + buffer = (void *)blas_memory_alloc(0); + +#ifdef SMP + if (blas_cpu_number == 0) blas_get_cpu_number(); +#ifdef SMP_SERVER + if (blas_server_avail == 0) blas_thread_init(); +#endif + + _init_thread_memory((void *)((BLASULONG)buffer + GEMM_OFFSET_A)); + +#else + + _touch_memory(NULL, NULL, NULL, (void *)((BLASULONG)buffer + GEMM_OFFSET_A), NULL, 0); + +#endif + + blas_memory_free(buffer); +} +#endif + +/* Initialization for all functions; this function should be called before main */ + +static int gotoblas_initialized = 0; +extern void openblas_read_env(); + +void CONSTRUCTOR gotoblas_init(void) { + + if (gotoblas_initialized) return; + +#ifdef SMP + openblas_fork_handler(); +#endif + + openblas_read_env(); + +#ifdef PROFILE + moncontrol (0); +#endif + +#ifdef DYNAMIC_ARCH + gotoblas_dynamic_init(); +#endif + +#if defined(SMP) && defined(OS_LINUX) && !defined(NO_AFFINITY) + gotoblas_affinity_init(); +#endif + +#if defined(OS_LINUX) && !defined(NO_WARMUP) + gotoblas_memory_init(); +#endif + +//#if defined(OS_LINUX) +#if 0 + struct rlimit curlimit; + if ( getrlimit(RLIMIT_STACK, &curlimit ) == 0 ) + { + if ( curlimit.rlim_cur != curlimit.rlim_max ) + { + curlimit.rlim_cur = curlimit.rlim_max; + setrlimit(RLIMIT_STACK, &curlimit); + } + } +#endif + +#ifdef SMP + if (blas_cpu_number == 0) blas_get_cpu_number(); +#ifdef SMP_SERVER + if (blas_server_avail == 0) blas_thread_init(); +#endif +#endif + +#ifdef FUNCTION_PROFILE + gotoblas_profile_init(); +#endif + + gotoblas_initialized = 1; + +#ifdef PROFILE + moncontrol (1); +#endif + +} + +void DESTRUCTOR gotoblas_quit(void) { + + if (gotoblas_initialized == 0) return; + + blas_shutdown(); + +#if defined(SMP) +#if defined(OS_WINDOWS) + TlsFree(local_storage_key); +#else + pthread_key_delete(local_storage_key); +#endif +#endif + +#ifdef PROFILE + moncontrol (0); +#endif + +#ifdef FUNCTION_PROFILE + gotoblas_profile_quit(); +#endif + +#if defined(SMP) && defined(OS_LINUX) && !defined(NO_AFFINITY) + gotoblas_affinity_quit(); +#endif + +#ifdef DYNAMIC_ARCH + gotoblas_dynamic_quit(); +#endif + + gotoblas_initialized = 0; + +#ifdef PROFILE + moncontrol (1); +#endif +} + +#if defined(_MSC_VER) && !defined(__clang__) +BOOL APIENTRY DllMain(HMODULE hModule, DWORD ul_reason_for_call, LPVOID lpReserved) +{ + switch (ul_reason_for_call) + { + case DLL_PROCESS_ATTACH: + gotoblas_init(); + break; + case DLL_THREAD_ATTACH: + break; + case DLL_THREAD_DETACH: +#if defined(SMP) + blas_memory_cleanup((void*)get_memory_table()); +#endif + break; + case DLL_PROCESS_DETACH: + gotoblas_quit(); + break; + default: + break; + } + return TRUE; +}
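gotoblas_init/gotoblas_quit rely on the CONSTRUCTOR/DESTRUCTOR macros so the library initializes before main() and tears down afterwards; MSVC has no such attribute, hence the DllMain above and the .CRT section trick that follows. A tiny sketch of the GCC/Clang mechanism (illustrative only, not the patch's code):

    #include <stdio.h>

    /* run automatically before and after main() when built with GCC/Clang */
    __attribute__((constructor)) static void lib_init(void) { puts("init"); }
    __attribute__((destructor))  static void lib_fini(void) { puts("fini"); }

    int main(void) { puts("main"); return 0; }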
+ +/* + This is to allow static linking. + Code adapted from Google performance tools: + https://gperftools.googlecode.com/git-history/perftools-1.0/src/windows/port.cc + Reference: + https://sourceware.org/ml/pthreads-win32/2008/msg00028.html + http://ci.boost.org/svn-trac/browser/trunk/libs/thread/src/win32/tss_pe.cpp +*/ +static int on_process_term(void) +{ + gotoblas_quit(); + return 0; +} +#ifdef _WIN64 +#pragma comment(linker, "/INCLUDE:_tls_used") +#else +#pragma comment(linker, "/INCLUDE:__tls_used") +#endif + +#ifdef _WIN64 +#pragma const_seg(".CRT$XLB") +#else +#pragma data_seg(".CRT$XLB") +#endif +static void (APIENTRY *dll_callback)(HINSTANCE h, DWORD ul_reason_for_call, PVOID pv) = DllMain; +#ifdef _WIN64 +#pragma const_seg() +#else +#pragma data_seg() +#endif + +#ifdef _WIN64 +#pragma const_seg(".CRT$XTU") +#else +#pragma data_seg(".CRT$XTU") +#endif +static int(*p_process_term)(void) = on_process_term; +#ifdef _WIN64 +#pragma const_seg() +#else +#pragma data_seg() +#endif +#endif + +#if (defined(C_PGI) || (!defined(C_SUN) && defined(F_INTERFACE_SUN))) && (defined(ARCH_X86) || defined(ARCH_X86_64)) +/* Don't call me; this is just a workaround for a PGI / Sun bug */ +void gotoblas_dummy_for_PGI(void) { + + gotoblas_init(); + gotoblas_quit(); + +#if 0 + asm ("\t.section\t.ctors,\"aw\",@progbits; .align 8; .quad gotoblas_init; .section .text"); + asm ("\t.section\t.dtors,\"aw\",@progbits; .align 8; .quad gotoblas_quit; .section .text"); +#else + asm (".section .init,\"ax\"; call gotoblas_init@PLT; .section .text"); + asm (".section .fini,\"ax\"; call gotoblas_quit@PLT; .section .text"); +#endif +} +#endif + +#else #include #ifdef OS_WINDOWS @@ -177,7 +1708,7 @@ int get_num_procs(void) { cpu_set_t *cpusetp; size_t size; int ret; -// int i,n; +int i,n; if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF); #if !defined(OS_LINUX) @@ -225,6 +1756,22 @@ int get_num_procs(void) { return nums; } #endif + +#ifdef OS_HAIKU +int get_num_procs(void) { + static int nums = 0; + if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF); + return nums; +} +#endif + +#ifdef OS_AIX +int get_num_procs(void) { + static int nums = 0; + if (!nums) nums = sysconf(_SC_NPROCESSORS_CONF); + return nums; +} +#endif #ifdef OS_WINDOWS @@ -348,7 +1895,7 @@ int blas_get_cpu_number(void){ max_num = get_num_procs(); #endif - // blas_goto_num = 0; + blas_goto_num = 0; #ifndef USE_OPENMP blas_goto_num=openblas_num_threads_env(); if (blas_goto_num < 0) blas_goto_num = 0; @@ -360,7 +1907,7 @@ int blas_get_cpu_number(void){ #endif - // blas_omp_num = 0; + blas_omp_num = 0; blas_omp_num=openblas_omp_num_threads_env(); if (blas_omp_num < 0) blas_omp_num = 0; @@ -1047,7 +2594,7 @@ void *blas_memory_alloc(int procpos){ printf("Alloc Start ...\n"); #endif -#if defined(WHEREAMI) && !defined(USE_OPENMP) +/* #if defined(WHEREAMI) && !defined(USE_OPENMP) mypos = WhereAmI(); @@ -1057,12 +2604,12 @@ void *blas_memory_alloc(int procpos){ do { if (!memory[position].used && (memory[position].pos == mypos)) { LOCK_COMMAND(&alloc_lock); -/* blas_lock(&memory[position].lock);*/ +// blas_lock(&memory[position].lock); if (!memory[position].used) goto allocation; UNLOCK_COMMAND(&alloc_lock); -/* blas_unlock(&memory[position].lock);*/ +// blas_unlock(&memory[position].lock); } position ++; @@ -1070,24 +2617,24 @@ void *blas_memory_alloc(int procpos){ } while (position < NUM_BUFFERS); -#endif +#endif */ position = 0; + LOCK_COMMAND(&alloc_lock); do { /* if (!memory[position].used) { */ - LOCK_COMMAND(&alloc_lock); /* blas_lock(&memory[position].lock);*/ if (!memory[position].used) goto 
allocation; - UNLOCK_COMMAND(&alloc_lock); /* blas_unlock(&memory[position].lock);*/ /* } */ position ++; } while (position < NUM_BUFFERS); + UNLOCK_COMMAND(&alloc_lock); goto error; @@ -1561,3 +3108,5 @@ void gotoblas_dummy_for_PGI(void) { #endif } #endif + +#endif diff --git a/driver/others/openblas_get_config.c b/driver/others/openblas_get_config.c index 7d041b907..eca494dca 100644 --- a/driver/others/openblas_get_config.c +++ b/driver/others/openblas_get_config.c @@ -35,9 +35,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include +#if defined(_WIN32) && defined(_MSC_VER) +#if _MSC_VER < 1900 +#define snprintf _snprintf +#endif +#endif + static char* openblas_config_str="" +"OpenBLAS " + VERSION +" " #ifdef USE64BITINT - "USE64BITINT " + " USE64BITINT " #endif #ifdef NO_CBLAS "NO_CBLAS " @@ -54,6 +63,9 @@ static char* openblas_config_str="" #ifdef NO_AFFINITY "NO_AFFINITY " #endif +#ifdef USE_OPENMP + "USE_OPENMP " +#endif #ifndef DYNAMIC_ARCH CHAR_CORENAME #endif @@ -61,18 +73,23 @@ static char* openblas_config_str="" #ifdef DYNAMIC_ARCH char *gotoblas_corename(); -static char tmp_config_str[256]; #endif +static char tmp_config_str[256]; +int openblas_get_parallel(); char* CNAME() { -#ifndef DYNAMIC_ARCH - return openblas_config_str; -#else +char tmpstr[20]; strcpy(tmp_config_str, openblas_config_str); +#ifdef DYNAMIC_ARCH strcat(tmp_config_str, gotoblas_corename()); - return tmp_config_str; #endif +if (openblas_get_parallel() == 0) + sprintf(tmpstr, " SINGLE_THREADED"); +else + snprintf(tmpstr,19," MAX_THREADS=%d",MAX_CPU_NUMBER); + strcat(tmp_config_str, tmpstr); + return tmp_config_str; } @@ -83,3 +100,4 @@ char* openblas_get_corename() { return gotoblas_corename(); #endif } + diff --git a/driver/others/parameter.c b/driver/others/parameter.c index 31a48644f..8bf7da78b 100644 --- a/driver/others/parameter.c +++ b/driver/others/parameter.c @@ -167,7 +167,7 @@ int get_L2_size(void){ #if defined(ATHLON) || defined(OPTERON) || defined(BARCELONA) || defined(BOBCAT) || defined(BULLDOZER) || \ defined(CORE_PRESCOTT) || defined(CORE_CORE2) || defined(PENRYN) || defined(DUNNINGTON) || \ defined(CORE_NEHALEM) || defined(CORE_SANDYBRIDGE) || defined(ATOM) || defined(GENERIC) || \ - defined(PILEDRIVER) || defined(HASWELL) || defined(STEAMROLLER) || defined(EXCAVATOR) || defined(ZEN) + defined(PILEDRIVER) || defined(HASWELL) || defined(STEAMROLLER) || defined(EXCAVATOR) || defined(ZEN) || defined(SKYLAKEX) cpuid(0x80000006, &eax, &ebx, &ecx, &edx); @@ -251,7 +251,7 @@ int get_L2_size(void){ void blas_set_parameter(void){ int factor; -#if defined(BULLDOZER) || defined(PILEDRIVER) || defined(SANDYBRIDGE) || defined(NEHALEM) || defined(HASWELL) || defined(STEAMROLLER) || defined(EXCAVATOR) || defined(ZEN) +#if defined(BULLDOZER) || defined(PILEDRIVER) || defined(SANDYBRIDGE) || defined(NEHALEM) || defined(HASWELL) || defined(STEAMROLLER) || defined(EXCAVATOR) || defined(ZEN) || defined(SKYLAKEX) int size = 16; #else int size = get_L2_size(); @@ -730,35 +730,8 @@ void blas_set_parameter(void){ #if defined(ARCH_ARM64) -#if defined(VULCAN) || defined(THUNDERX2T99) -unsigned long dgemm_prefetch_size_a; -unsigned long dgemm_prefetch_size_b; -unsigned long dgemm_prefetch_size_c; -#endif - void blas_set_parameter(void) { -#if defined(VULCAN) || defined(THUNDERX2T99) - dgemm_p = 160; - dgemm_q = 128; - dgemm_r = 4096; - - sgemm_p = 128; - sgemm_q = 352; - sgemm_r = 4096; - - cgemm_p = 128; - cgemm_q = 224; - cgemm_r = 4096; - - zgemm_p = 128; - zgemm_q = 112; - zgemm_r = 
4096; - - dgemm_prefetch_size_a = 3584; - dgemm_prefetch_size_b = 512; - dgemm_prefetch_size_c = 128; -#endif } #endif diff --git a/exports/Makefile b/exports/Makefile index 79c251d62..3a5f77db3 100644 --- a/exports/Makefile +++ b/exports/Makefile @@ -114,20 +114,22 @@ $(LIBDYNNAME) : ../$(LIBNAME).osx.renamed osx.def endif ifneq (,$(filter 1 2,$(NOFORTRAN))) #only build without Fortran - $(CC) $(CFLAGS) -all_load -headerpad_max_install_names -install_name "$(CURDIR)/../$(LIBDYNNAME)" -dynamiclib -o ../$(LIBDYNNAME) $< -Wl,-exported_symbols_list,osx.def $(FEXTRALIB) + $(CC) $(CFLAGS) $(LDFLAGS) -all_load -headerpad_max_install_names -install_name "$(CURDIR)/../$(LIBDYNNAME)" -dynamiclib -o ../$(LIBDYNNAME) $< -Wl,-exported_symbols_list,osx.def $(FEXTRALIB) else - $(FC) $(FFLAGS) -all_load -headerpad_max_install_names -install_name "$(CURDIR)/../$(LIBDYNNAME)" -dynamiclib -o ../$(LIBDYNNAME) $< -Wl,-exported_symbols_list,osx.def $(FEXTRALIB) + $(FC) $(FFLAGS) $(LDFLAGS) -all_load -headerpad_max_install_names -install_name "$(CURDIR)/../$(LIBDYNNAME)" -dynamiclib -o ../$(LIBDYNNAME) $< -Wl,-exported_symbols_list,osx.def $(FEXTRALIB) endif dllinit.$(SUFFIX) : dllinit.c $(CC) $(CFLAGS) -c -o $(@F) -s $< -ifeq ($(OSNAME), $(filter $(OSNAME),Linux SunOS Android)) +ifeq ($(OSNAME), $(filter $(OSNAME),Linux SunOS Android Haiku)) so : ../$(LIBSONAME) ifeq ($(OSNAME), Android) INTERNALNAME = $(LIBPREFIX).so +FEXTRALIB += -lm +EXTRALIB += -lm else INTERNALNAME = $(LIBPREFIX).so.$(MAJOR_VERSION) endif @@ -156,7 +158,7 @@ endif endif #http://stackoverflow.com/questions/7656425/makefile-ifeq-logical-or -ifeq ($(OSNAME), $(filter $(OSNAME),FreeBSD NetBSD)) +ifeq ($(OSNAME), $(filter $(OSNAME),FreeBSD OpenBSD NetBSD DragonFly)) so : ../$(LIBSONAME) diff --git a/f_check b/f_check index 941a9a5c4..34caa00be 100644 --- a/f_check +++ b/f_check @@ -97,7 +97,7 @@ if ($compiler eq "") { if ($data =~ /Intel/) { $vendor = INTEL; - $openmp = "-openmp"; + $openmp = "-fopenmp"; } if ($data =~ /Sun Fortran/) { @@ -127,7 +127,7 @@ if ($compiler eq "") { # for embeded underscore name, e.g. zho_ge, it may append 2 underscores. $data = `$compiler -O2 -S ftest3.f > /dev/null 2>&1 && cat ftest3.s && rm -f ftest3.s`; - if ($data =~ /zho_ge__/) { + if ($data =~ / zho_ge__/) { $need2bu = 1; } } @@ -155,7 +155,7 @@ if ($compiler eq "") { if ($compiler =~ /ifort/) { $vendor = INTEL; $bu = "_"; - $openmp = "-openmp"; + $openmp = "-fopenmp"; } if ($compiler =~ /pathf/) { @@ -292,9 +292,6 @@ if ($link ne "") { && ($flags !~ /^-LIST:/) && ($flags !~ /^-LANG:/) ) { - if ($vendor eq "PGI") { - $flags =~ s/lib$/libso/; - } $linker_L .= $flags . " "; } @@ -311,17 +308,11 @@ if ($link ne "") { if ($flags =~ /^\-rpath\@/) { $flags =~ s/\@/\,/g; - if ($vendor eq "PGI") { - $flags =~ s/lib$/libso/; - } $linker_L .= "-Wl,". $flags . " " ; } if ($flags =~ /^\-rpath-link\@/) { $flags =~ s/\@/\,/g; - if ($vendor eq "PGI") { - $flags =~ s/lib$/libso/; - } $linker_L .= "-Wl,". $flags . " " ; } @@ -330,7 +321,6 @@ if ($link ne "") { && ($flags !~ /gfortranbegin/) && ($flags !~ /frtbegin/) && ($flags !~ /pathfstart/) - && ($flags !~ /numa/) && ($flags !~ /crt[0-9]/) && ($flags !~ /gcc/) && ($flags !~ /user32/) diff --git a/getarch.c b/getarch.c index c0fff0436..78ba0fefd 100644 --- a/getarch.c +++ b/getarch.c @@ -82,7 +82,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#ifdef OS_WINDOWS #include #endif -#if defined(__FreeBSD__) || defined(__APPLE__) +#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__APPLE__) #include #include #endif @@ -326,6 +326,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define CORENAME "HASWELL" #endif +#ifdef FORCE_SKYLAKEX +#define FORCE +#define FORCE_INTEL +#define ARCHITECTURE "X86" +#define SUBARCHITECTURE "SKYLAKEX" +#define ARCHCONFIG "-DSKYLAKEX " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 " \ + "-DL2_SIZE=262144 -DL2_LINESIZE=64 " \ + "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ + "-DHAVE_CMOV -DHAVE_MMX -DHAVE_SSE -DHAVE_SSE2 -DHAVE_SSE3 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 -DHAVE_AVX " \ + "-DFMA3 -DHAVE_AVX512VL -march=skylake-avx512" +#define LIBNAME "skylakex" +#define CORENAME "SKYLAKEX" +#endif + #ifdef FORCE_ATOM #define FORCE #define FORCE_INTEL @@ -912,11 +927,28 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define ARCHCONFIG "-DARMV8 " \ "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 " \ "-DL2_SIZE=262144 -DL2_LINESIZE=64 " \ - "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 -DL2_ASSOCIATIVE=32 " + "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 -DL2_ASSOCIATIVE=32 " \ + "-DHAVE_VFPV4 -DHAVE_VFPV3 -DHAVE_VFP -DHAVE_NEON -DARMV8" #define LIBNAME "armv8" #define CORENAME "ARMV8" #endif +#ifdef FORCE_CORTEXA53 +#define FORCE +#define ARCHITECTURE "ARM64" +#define SUBARCHITECTURE "CORTEXA53" +#define SUBDIRNAME "arm64" +#define ARCHCONFIG "-DCORTEXA53 " \ + "-DL1_CODE_SIZE=32768 -DL1_CODE_LINESIZE=64 -DL1_CODE_ASSOCIATIVE=3 " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 -DL1_DATA_ASSOCIATIVE=2 " \ + "-DL2_SIZE=262144 -DL2_LINESIZE=64 -DL2_ASSOCIATIVE=16 " \ + "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ + "-DHAVE_VFPV4 -DHAVE_VFPV3 -DHAVE_VFP -DHAVE_NEON -DARMV8" +#define LIBNAME "cortexa53" +#define CORENAME "CORTEXA53" +#else +#endif + #ifdef FORCE_CORTEXA57 #define FORCE #define ARCHITECTURE "ARM64" @@ -927,26 +959,57 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 -DL1_DATA_ASSOCIATIVE=2 " \ "-DL2_SIZE=2097152 -DL2_LINESIZE=64 -DL2_ASSOCIATIVE=16 " \ "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ - "-DHAVE_VFPV4 -DHAVE_VFPV3 -DHAVE_VFP -DHAVE_NEON" + "-DHAVE_VFPV4 -DHAVE_VFPV3 -DHAVE_VFP -DHAVE_NEON -DARMV8" #define LIBNAME "cortexa57" #define CORENAME "CORTEXA57" #else #endif -#ifdef FORCE_VULCAN +#ifdef FORCE_CORTEXA72 #define FORCE #define ARCHITECTURE "ARM64" -#define SUBARCHITECTURE "VULCAN" +#define SUBARCHITECTURE "CORTEXA72" #define SUBDIRNAME "arm64" -#define ARCHCONFIG "-DVULCAN " \ - "-DL1_CODE_SIZE=32768 -DL1_CODE_LINESIZE=64 -DL1_CODE_ASSOCIATIVE=8 " \ - "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 -DL1_DATA_ASSOCIATIVE=8 " \ - "-DL2_SIZE=262144 -DL2_LINESIZE=64 -DL2_ASSOCIATIVE=8 " \ - "-DL3_SIZE=33554432 -DL3_LINESIZE=64 -DL3_ASSOCIATIVE=32 " \ +#define ARCHCONFIG "-DCORTEXA72 " \ + "-DL1_CODE_SIZE=49152 -DL1_CODE_LINESIZE=64 -DL1_CODE_ASSOCIATIVE=3 " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 -DL1_DATA_ASSOCIATIVE=2 " \ + "-DL2_SIZE=2097152 -DL2_LINESIZE=64 -DL2_ASSOCIATIVE=16 " \ "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ - "-DHAVE_VFPV4 -DHAVE_VFPV3 -DHAVE_VFP -DHAVE_NEON" -#define LIBNAME "vulcan" -#define CORENAME "VULCAN" + "-DHAVE_VFPV4 -DHAVE_VFPV3 -DHAVE_VFP -DHAVE_NEON -DARMV8" +#define LIBNAME "cortexa72" +#define CORENAME "CORTEXA72" +#else +#endif + +#ifdef FORCE_CORTEXA73 +#define FORCE +#define ARCHITECTURE "ARM64" +#define SUBARCHITECTURE "CORTEXA73" +#define SUBDIRNAME "arm64" +#define ARCHCONFIG "-DCORTEXA73 " \ + "-DL1_CODE_SIZE=49152 -DL1_CODE_LINESIZE=64 -DL1_CODE_ASSOCIATIVE=3 " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 -DL1_DATA_ASSOCIATIVE=2 " \ + "-DL2_SIZE=2097152 -DL2_LINESIZE=64 -DL2_ASSOCIATIVE=16 " \ + "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ + "-DHAVE_VFPV4 -DHAVE_VFPV3 -DHAVE_VFP -DHAVE_NEON -DARMV8" +#define LIBNAME "cortexa73" +#define CORENAME "CORTEXA73" +#else +#endif + +#ifdef FORCE_FALKOR +#define FORCE +#define ARCHITECTURE "ARM64" +#define SUBARCHITECTURE "FALKOR" +#define SUBDIRNAME "arm64" +#define ARCHCONFIG "-DFALKOR " \ + "-DL1_CODE_SIZE=49152 -DL1_CODE_LINESIZE=64 -DL1_CODE_ASSOCIATIVE=3 " \ + "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=64 -DL1_DATA_ASSOCIATIVE=2 " \ + "-DL2_SIZE=2097152 -DL2_LINESIZE=64 -DL2_ASSOCIATIVE=16 " \ + "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ + "-DHAVE_VFPV4 -DHAVE_VFPV3 -DHAVE_VFP -DHAVE_NEON -DARMV8" +#define LIBNAME "falkor" +#define CORENAME "FALKOR" #else #endif @@ -958,13 +1021,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define ARCHCONFIG "-DTHUNDERX " \ "-DL1_DATA_SIZE=32768 -DL1_DATA_LINESIZE=128 " \ "-DL2_SIZE=16777216 -DL2_LINESIZE=128 -DL2_ASSOCIATIVE=16 " \ - "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " + "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ + "-DHAVE_VFPV4 -DHAVE_VFPV3 -DHAVE_VFP -DHAVE_NEON -DARMV8" #define LIBNAME "thunderx" #define CORENAME "THUNDERX" #else #endif #ifdef FORCE_THUNDERX2T99 +#define ARMV8 #define FORCE #define ARCHITECTURE "ARM64" #define SUBARCHITECTURE "THUNDERX2T99" @@ -975,7 +1040,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"-DL2_SIZE=262144 -DL2_LINESIZE=64 -DL2_ASSOCIATIVE=8 " \ "-DL3_SIZE=33554432 -DL3_LINESIZE=64 -DL3_ASSOCIATIVE=32 " \ "-DDTB_DEFAULT_ENTRIES=64 -DDTB_SIZE=4096 " \ - "-DHAVE_VFPV4 -DHAVE_VFPV3 -DHAVE_VFP -DHAVE_NEON" + "-DHAVE_VFPV4 -DHAVE_VFPV3 -DHAVE_VFP -DHAVE_NEON -DARMV8" #define LIBNAME "thunderx2t99" #define CORENAME "THUNDERX2T99" #else @@ -1003,6 +1068,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #ifndef FORCE +#ifdef USER_TARGET +#error "The TARGET specified on the command line or in Makefile.rule is not supported. Please choose a target from TargetList.txt" +#endif + #if defined(__powerpc__) || defined(__powerpc) || defined(powerpc) || \ defined(__PPC__) || defined(PPC) || defined(_POWER) || defined(__POWERPC__) #ifndef POWER @@ -1074,7 +1143,7 @@ static int get_num_cores(void) { #ifdef OS_WINDOWS SYSTEM_INFO sysinfo; -#elif defined(__FreeBSD__) || defined(__APPLE__) +#elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__APPLE__) int m[2], count; size_t len; #endif @@ -1088,7 +1157,7 @@ static int get_num_cores(void) { GetSystemInfo(&sysinfo); return sysinfo.dwNumberOfProcessors; -#elif defined(__FreeBSD__) || defined(__APPLE__) +#elif defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) || defined(__APPLE__) m[0] = CTL_HW; m[1] = HW_NCPU; len = sizeof(int); @@ -1116,7 +1185,7 @@ int main(int argc, char *argv[]){ #ifdef FORCE printf("CORE=%s\n", CORENAME); #else -#if defined(INTEL_AMD) || defined(POWER) || defined(__mips__) || defined(__arm__) || defined(__aarch64__) || defined(ZARCH) +#if defined(INTEL_AMD) || defined(POWER) || defined(__mips__) || defined(__arm__) || defined(__aarch64__) || defined(ZARCH) || defined(sparc) printf("CORE=%s\n", get_corename()); #endif #endif @@ -1181,9 +1250,7 @@ int main(int argc, char *argv[]){ #elif NO_PARALLEL_MAKE==1 printf("MAKE += -j 1\n"); #else -#ifndef OS_WINDOWS printf("MAKE += -j %d\n", get_num_cores()); -#endif #endif break; @@ -1224,7 +1291,7 @@ int main(int argc, char *argv[]){ #ifdef FORCE printf("#define CHAR_CORENAME \"%s\"\n", CORENAME); #else -#if defined(INTEL_AMD) || defined(POWER) || defined(__mips__) || defined(__arm__) || defined(__aarch64__) || defined(ZARCH) +#if defined(INTEL_AMD) || defined(POWER) || defined(__mips__) || defined(__arm__) || defined(__aarch64__) || defined(ZARCH) || defined(sparc) printf("#define CHAR_CORENAME \"%s\"\n", get_corename()); #endif #endif diff --git a/interface/Makefile b/interface/Makefile index 9b2b93b83..20ec74e9e 100644 --- a/interface/Makefile +++ b/interface/Makefile @@ -260,7 +260,7 @@ HPLOBJS = dgemm.$(SUFFIX) dtrsm.$(SUFFIX) \ idamax.$(SUFFIX) daxpy.$(SUFFIX) dcopy.$(SUFFIX) dscal.$(SUFFIX) CSBLAS1OBJS = \ - cblas_isamax.$(SUFFIX) cblas_sasum.$(SUFFIX) cblas_saxpy.$(SUFFIX) \ + cblas_isamax.$(SUFFIX) cblas_isamin.$(SUFFIX) cblas_sasum.$(SUFFIX) cblas_saxpy.$(SUFFIX) \ cblas_scopy.$(SUFFIX) cblas_sdot.$(SUFFIX) cblas_sdsdot.$(SUFFIX) cblas_dsdot.$(SUFFIX) \ cblas_srot.$(SUFFIX) cblas_srotg.$(SUFFIX) cblas_srotm.$(SUFFIX) cblas_srotmg.$(SUFFIX) \ cblas_sscal.$(SUFFIX) cblas_sswap.$(SUFFIX) cblas_snrm2.$(SUFFIX) cblas_saxpby.$(SUFFIX) @@ -277,7 +277,7 @@ CSBLAS3OBJS = \ cblas_sgeadd.$(SUFFIX) CDBLAS1OBJS = \ - cblas_idamax.$(SUFFIX) cblas_dasum.$(SUFFIX) cblas_daxpy.$(SUFFIX) \ + cblas_idamax.$(SUFFIX) cblas_idamin.$(SUFFIX) cblas_dasum.$(SUFFIX) cblas_daxpy.$(SUFFIX) \ cblas_dcopy.$(SUFFIX) cblas_ddot.$(SUFFIX) \ cblas_drot.$(SUFFIX) cblas_drotg.$(SUFFIX) cblas_drotm.$(SUFFIX) 
cblas_drotmg.$(SUFFIX) \ cblas_dscal.$(SUFFIX) cblas_dswap.$(SUFFIX) cblas_dnrm2.$(SUFFIX) cblas_daxpby.$(SUFFIX) @@ -294,7 +294,7 @@ CDBLAS3OBJS += \ cblas_dgeadd.$(SUFFIX) CCBLAS1OBJS = \ - cblas_icamax.$(SUFFIX) cblas_scasum.$(SUFFIX) cblas_caxpy.$(SUFFIX) \ + cblas_icamax.$(SUFFIX) cblas_icamin.$(SUFFIX) cblas_scasum.$(SUFFIX) cblas_caxpy.$(SUFFIX) \ cblas_ccopy.$(SUFFIX) \ cblas_cdotc.$(SUFFIX) cblas_cdotu.$(SUFFIX) \ cblas_cdotc_sub.$(SUFFIX) cblas_cdotu_sub.$(SUFFIX) \ @@ -320,7 +320,7 @@ CCBLAS3OBJS = \ CZBLAS1OBJS = \ - cblas_izamax.$(SUFFIX) cblas_dzasum.$(SUFFIX) cblas_zaxpy.$(SUFFIX) \ + cblas_izamax.$(SUFFIX) cblas_izamin.$(SUFFIX) cblas_dzasum.$(SUFFIX) cblas_zaxpy.$(SUFFIX) \ cblas_zcopy.$(SUFFIX) \ cblas_zdotc.$(SUFFIX) cblas_zdotu.$(SUFFIX) \ cblas_zdotc_sub.$(SUFFIX) cblas_zdotu_sub.$(SUFFIX) \ @@ -1359,6 +1359,18 @@ cblas_icamax.$(SUFFIX) cblas_icamax.$(PSUFFIX) : imax.c cblas_izamax.$(SUFFIX) cblas_izamax.$(PSUFFIX) : imax.c $(CC) $(CFLAGS) -DCBLAS -c -DUSE_ABS -UUSE_MIN $< -o $(@F) +cblas_isamin.$(SUFFIX) cblas_isamin.$(PSUFFIX) : imax.c + $(CC) $(CFLAGS) -DCBLAS -c -DUSE_ABS -DUSE_MIN $< -o $(@F) + +cblas_idamin.$(SUFFIX) cblas_idamin.$(PSUFFIX) : imax.c + $(CC) $(CFLAGS) -DCBLAS -c -DUSE_ABS -DUSE_MIN $< -o $(@F) + +cblas_icamin.$(SUFFIX) cblas_icamin.$(PSUFFIX) : imax.c + $(CC) $(CFLAGS) -DCBLAS -c -DUSE_ABS -DUSE_MIN $< -o $(@F) + +cblas_izamin.$(SUFFIX) cblas_izamin.$(PSUFFIX) : imax.c + $(CC) $(CFLAGS) -DCBLAS -c -DUSE_ABS -DUSE_MIN $< -o $(@F) + cblas_ismax.$(SUFFIX) cblas_ismax.$(PSUFFIX) : imax.c $(CC) $(CFLAGS) -DCBLAS -c -UUSE_ABS -UUSE_MIN $< -o $(@F) diff --git a/interface/axpy.c b/interface/axpy.c index f0d95b395..9032946d2 100644 --- a/interface/axpy.c +++ b/interface/axpy.c @@ -40,11 +40,11 @@ #include "common.h" #ifdef FUNCTION_PROFILE #include "functable.h" -#endif +#endif #if defined(Z13) #define MULTI_THREAD_MINIMAL 200000 #else -#define MULTI_THREAD_MINIMAL 10000 +#define MULTI_THREAD_MINIMAL 10000 #endif #ifndef CBLAS @@ -75,6 +75,11 @@ void CNAME(blasint n, FLOAT alpha, FLOAT *x, blasint incx, FLOAT *y, blasint inc if (alpha == ZERO) return; + if (incx == 0 && incy == 0) { + *y += n * alpha *(*x); + return; + } + IDEBUG_START; FUNCTION_PROFILE_START(); @@ -83,17 +88,15 @@ void CNAME(blasint n, FLOAT alpha, FLOAT *x, blasint incx, FLOAT *y, blasint inc if (incy < 0) y -= (n - 1) * incy; #ifdef SMP - nthreads = num_cpu_avail(1); - //disable multi-thread when incx==0 or incy==0 //In that case, the threads would be dependent. - if (incx == 0 || incy == 0) - nthreads = 1; - + // //Temporarily work-around the low performance issue with small imput size & //multithreads. 
- if (n <= MULTI_THREAD_MINIMAL) + if (incx == 0 || incy == 0 || n <= MULTI_THREAD_MINIMAL) nthreads = 1; + else + nthreads = num_cpu_avail(1); if (nthreads == 1) { #endif diff --git a/interface/gbmv.c b/interface/gbmv.c index 096c9f6f2..1d58ba807 100644 --- a/interface/gbmv.c +++ b/interface/gbmv.c @@ -213,7 +213,7 @@ void CNAME(enum CBLAS_ORDER order, if (trans) lenx = m; if (trans) leny = n; - if (beta != ONE) SCAL_K(leny, 0, 0, beta, y, abs(incy), NULL, 0, NULL, 0); + if (beta != ONE) SCAL_K(leny, 0, 0, beta, y, blasabs(incy), NULL, 0, NULL, 0); if (alpha == ZERO) return; diff --git a/interface/gemm.c b/interface/gemm.c index 8baf3fbec..97e71bc85 100644 --- a/interface/gemm.c +++ b/interface/gemm.c @@ -44,6 +44,7 @@ #endif #ifndef COMPLEX +#define SMP_THRESHOLD_MIN 65536.0 #ifdef XDOUBLE #define ERROR_NAME "QGEMM " #elif defined(DOUBLE) @@ -52,6 +53,7 @@ #define ERROR_NAME "SGEMM " #endif #else +#define SMP_THRESHOLD_MIN 8192.0 #ifndef GEMM3M #ifdef XDOUBLE #define ERROR_NAME "XGEMM " @@ -121,8 +123,6 @@ void NAME(char *TRANSA, char *TRANSB, FLOAT *sa, *sb; #ifdef SMP - int nthreads_max; - int nthreads_avail; double MNK; #ifndef COMPLEX #ifdef XDOUBLE @@ -245,8 +245,6 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS XFLOAT *sa, *sb; #ifdef SMP - int nthreads_max; - int nthreads_avail; double MNK; #ifndef COMPLEX #ifdef XDOUBLE @@ -273,6 +271,14 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS PRINT_DEBUG_CNAME; +#if !defined(COMPLEX) && !defined(DOUBLE) && defined(USE_SGEMM_KERNEL_DIRECT) + if (beta == 0 && alpha == 1.0 && order == CblasRowMajor && TransA == CblasNoTrans && TransB == CblasNoTrans && sgemm_kernel_direct_performant(m,n,k)) { + sgemm_kernel_direct(m, n, k, a, lda, b, ldb, c, ldc); + return; + } + +#endif + #ifndef COMPLEX args.alpha = (void *)α args.beta = (void *)β @@ -411,25 +417,12 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS mode |= (transa << BLAS_TRANSA_SHIFT); mode |= (transb << BLAS_TRANSB_SHIFT); - nthreads_max = num_cpu_avail(3); - nthreads_avail = nthreads_max; - -#ifndef COMPLEX MNK = (double) args.m * (double) args.n * (double) args.k; - if ( MNK <= (65536.0 * (double) GEMM_MULTITHREAD_THRESHOLD) ) - nthreads_max = 1; -#else - MNK = (double) args.m * (double) args.n * (double) args.k; - if ( MNK <= (8192.0 * (double) GEMM_MULTITHREAD_THRESHOLD) ) - nthreads_max = 1; -#endif - args.common = NULL; - - if ( nthreads_max > nthreads_avail ) - args.nthreads = nthreads_avail; + if ( MNK <= (SMP_THRESHOLD_MIN * (double) GEMM_MULTITHREAD_THRESHOLD) ) + args.nthreads = 1; else - args.nthreads = nthreads_max; - + args.nthreads = num_cpu_avail(3); + args.common = NULL; if (args.nthreads == 1) { #endif diff --git a/interface/gemv.c b/interface/gemv.c index 30709e361..c9d52cd69 100644 --- a/interface/gemv.c +++ b/interface/gemv.c @@ -199,7 +199,7 @@ void CNAME(enum CBLAS_ORDER order, if (trans) lenx = m; if (trans) leny = n; - if (beta != ONE) SCAL_K(leny, 0, 0, beta, y, abs(incy), NULL, 0, NULL, 0); + if (beta != ONE) SCAL_K(leny, 0, 0, beta, y, blasabs(incy), NULL, 0, NULL, 0); if (alpha == ZERO) return; diff --git a/interface/lapack/laswp.c b/interface/lapack/laswp.c index ebeb103e7..0dde33ae3 100644 --- a/interface/lapack/laswp.c +++ b/interface/lapack/laswp.c @@ -97,7 +97,7 @@ int NAME(blasint *N, FLOAT *a, blasint *LDA, blasint *K1, blasint *K2, blasint * blas_level1_thread(mode, n, k1, k2, dummyalpha, a, lda, NULL, 0, ipiv, incx, - laswp[flag], nthreads); + 
(int(*)())laswp[flag], nthreads); } #endif diff --git a/interface/lapack/zlaswp.c b/interface/lapack/zlaswp.c index 31e08451d..b77a40985 100644 --- a/interface/lapack/zlaswp.c +++ b/interface/lapack/zlaswp.c @@ -96,7 +96,7 @@ int NAME(blasint *N, FLOAT *a, blasint *LDA, blasint *K1, blasint *K2, blasint * mode = BLAS_SINGLE | BLAS_COMPLEX; #endif - blas_level1_thread(mode, n, k1, k2, dummyalpha, a, lda, NULL, 0, ipiv, incx, laswp[flag], nthreads); + blas_level1_thread(mode, n, k1, k2, dummyalpha, a, lda, NULL, 0, ipiv, incx, (int(*)())laswp[flag], nthreads); } #endif diff --git a/interface/rotg.c b/interface/rotg.c index 092554299..69443a5a0 100644 --- a/interface/rotg.c +++ b/interface/rotg.c @@ -22,8 +22,8 @@ void CNAME(FLOAT *DA, FLOAT *DB, FLOAT *C, FLOAT *S){ long double s; long double r, roe, z; - long double ada = fabs(da); - long double adb = fabs(db); + long double ada = fabsl(da); + long double adb = fabsl(db); long double scale = ada + adb; #ifndef CBLAS diff --git a/interface/rotmg.c b/interface/rotmg.c index 1c41e14ef..ce3b146c1 100644 --- a/interface/rotmg.c +++ b/interface/rotmg.c @@ -64,6 +64,13 @@ void CNAME(FLOAT *dd1, FLOAT *dd2, FLOAT *dx1, FLOAT dy1, FLOAT *dparam){ FLOAT du, dp1, dp2, dq2, dq1, dh11=ZERO, dh21=ZERO, dh12=ZERO, dh22=ZERO, dflag=-ONE, dtemp; + if (*dd2 == ZERO || dy1 == ZERO) + { + dflag = -TWO; + dparam[0] = dflag; + return; + } + if(*dd1 < ZERO) { dflag = -ONE; @@ -76,6 +83,16 @@ void CNAME(FLOAT *dd1, FLOAT *dd2, FLOAT *dx1, FLOAT dy1, FLOAT *dparam){ *dd2 = ZERO; *dx1 = ZERO; } + else if ((*dd1 == ZERO || *dx1 == ZERO) && *dd2 > ZERO) + { + dflag = ONE; + dh12 = 1; + dh21 = -1; + *dx1 = dy1; + dtemp = *dd1; + *dd1 = *dd2; + *dd2 = dtemp; + } else { dp2 = *dd2 * dy1; @@ -90,6 +107,9 @@ void CNAME(FLOAT *dd1, FLOAT *dd2, FLOAT *dx1, FLOAT dy1, FLOAT *dparam){ dq1 = dp1 * *dx1; if(ABS(dq1) > ABS(dq2)) { + dflag = ZERO; + dh11 = ONE; + dh22 = ONE; dh21 = - dy1 / *dx1; dh12 = dp2 / dp1; @@ -100,8 +120,19 @@ void CNAME(FLOAT *dd1, FLOAT *dd2, FLOAT *dx1, FLOAT dy1, FLOAT *dparam){ *dd1 = *dd1 / du; *dd2 = *dd2 / du; *dx1 = *dx1 * du; + } else { + dflag = -ONE; + dh11 = ZERO; + dh12 = ZERO; + dh21 = ZERO; + dh22 = ZERO; + + *dd1 = ZERO; + *dd2 = ZERO; + *dx1 = ZERO; } + } else { @@ -120,7 +151,9 @@ void CNAME(FLOAT *dd1, FLOAT *dd2, FLOAT *dx1, FLOAT dy1, FLOAT *dparam){ } else { - dflag = ONE; + dflag = ONE; + dh21 = -ONE; + dh12 = ONE; dh11 = dp1 / dp2; dh22 = *dx1 / dy1; @@ -134,74 +167,33 @@ void CNAME(FLOAT *dd1, FLOAT *dd2, FLOAT *dx1, FLOAT dy1, FLOAT *dparam){ } - if(*dd1 != ZERO) + while ( *dd1 <= RGAMSQ && *dd1 != ZERO) { - while( (*dd1 <= RGAMSQ) || (*dd1 >= GAMSQ) ) - { - if(dflag == ZERO) - { - dh11 = ONE; - dh22 = ONE; - dflag = -ONE; - } - else - { - if(dflag == ONE) - { - dh21 = -ONE; - dh12 = ONE; - dflag = -ONE; - } - } - if( *dd1 <= RGAMSQ ) - { - *dd1 = *dd1 * (GAM * GAM); - *dx1 = *dx1 / GAM; - dh11 = dh11 / GAM; - dh12 = dh12 / GAM; - } - else - { - *dd1 = *dd1 / (GAM * GAM); - *dx1 = *dx1 * GAM; - dh11 = dh11 * GAM; - dh12 = dh12 * GAM; - } - } + dflag = -ONE; + *dd1 = *dd1 * (GAM * GAM); + *dx1 = *dx1 / GAM; + dh11 = dh11 / GAM; + dh12 = dh12 / GAM; + } + while (ABS(*dd1) > GAMSQ) { + dflag = -ONE; + *dd1 = *dd1 / (GAM * GAM); + *dx1 = *dx1 * GAM; + dh11 = dh11 * GAM; + dh12 = dh12 * GAM; } - if(*dd2 != ZERO) - { - while( (ABS(*dd2) <= RGAMSQ) || (ABS(*dd2) >= GAMSQ) ) - { - if(dflag == ZERO) - { - dh11 = ONE; - dh22 = ONE; - dflag = -ONE; - } - else - { - if(dflag == ONE) - { - dh21 = -ONE; - dh12 = ONE; - dflag = -ONE; - } - } - 
if( ABS(*dd2) <= RGAMSQ ) - { - *dd2 = *dd2 * (GAM * GAM); - dh21 = dh21 / GAM; - dh22 = dh22 / GAM; - } - else - { - *dd2 = *dd2 / (GAM * GAM); - dh21 = dh21 * GAM; - dh22 = dh22 * GAM; - } - } + while (ABS(*dd2) <= RGAMSQ && *dd2 != ZERO) { + dflag = -ONE; + *dd2 = *dd2 * (GAM * GAM); + dh21 = dh21 / GAM; + dh22 = dh22 / GAM; + } + while (ABS(*dd2) > GAMSQ) { + dflag = -ONE; + *dd2 = *dd2 / (GAM * GAM); + dh21 = dh21 * GAM; + dh22 = dh22 * GAM; } } diff --git a/interface/sbmv.c b/interface/sbmv.c index 761a9a0d0..25e99ca34 100644 --- a/interface/sbmv.c +++ b/interface/sbmv.c @@ -184,7 +184,7 @@ void CNAME(enum CBLAS_ORDER order, if (n == 0) return; - if (beta != ONE) SCAL_K(n, 0, 0, beta, y, abs(incy), NULL, 0, NULL, 0); + if (beta != ONE) SCAL_K(n, 0, 0, beta, y, blasabs(incy), NULL, 0, NULL, 0); if (alpha == ZERO) return; diff --git a/interface/scal.c b/interface/scal.c index 3f468a2a3..6d07b1650 100644 --- a/interface/scal.c +++ b/interface/scal.c @@ -76,10 +76,11 @@ void CNAME(blasint n, FLOAT alpha, FLOAT *x, blasint incx){ #ifdef SMP - nthreads = num_cpu_avail(1); - if (n <= 1048576 ) nthreads = 1; + else + nthreads = num_cpu_avail(1); + if (nthreads == 1) { #endif diff --git a/interface/spmv.c b/interface/spmv.c index 403458b06..e08ae3f6e 100644 --- a/interface/spmv.c +++ b/interface/spmv.c @@ -168,7 +168,7 @@ void CNAME(enum CBLAS_ORDER order, if (n == 0) return; - if (beta != ONE) SCAL_K(n, 0, 0, beta, y, abs(incy), NULL, 0, NULL, 0); + if (beta != ONE) SCAL_K(n, 0, 0, beta, y, blasabs(incy), NULL, 0, NULL, 0); if (alpha == ZERO) return; diff --git a/interface/swap.c b/interface/swap.c index f7642edf1..17a9868a9 100644 --- a/interface/swap.c +++ b/interface/swap.c @@ -42,7 +42,7 @@ #include "functable.h" #endif -#if defined(THUNDERX2T99) || defined(VULCAN) +#if defined(THUNDERX2T99) || defined(VULCAN) || defined(ARMV8) // Multithreaded swap gives performance benefits in ThunderX2T99 #else // Disable multi-threading as it does not show any performance diff --git a/interface/symv.c b/interface/symv.c index e4e300e20..07bd20022 100644 --- a/interface/symv.c +++ b/interface/symv.c @@ -166,7 +166,7 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_UPLO Uplo, blasint n, FLOAT alpha, if (n == 0) return; - if (beta != ONE) SCAL_K(n, 0, 0, beta, y, abs(incy), NULL, 0, NULL, 0); + if (beta != ONE) SCAL_K(n, 0, 0, beta, y, blasabs(incy), NULL, 0, NULL, 0); if (alpha == ZERO) return; diff --git a/interface/trsm.c b/interface/trsm.c index 60c49795d..5c2750e79 100644 --- a/interface/trsm.c +++ b/interface/trsm.c @@ -366,12 +366,13 @@ void CNAME(enum CBLAS_ORDER order, mode |= (trans << BLAS_TRANSA_SHIFT); mode |= (side << BLAS_RSIDE_SHIFT); - args.nthreads = num_cpu_avail(3); if ( args.m < 2*GEMM_MULTITHREAD_THRESHOLD ) args.nthreads = 1; else if ( args.n < 2*GEMM_MULTITHREAD_THRESHOLD ) args.nthreads = 1; + else + args.nthreads = num_cpu_avail(3); if (args.nthreads == 1) { diff --git a/interface/zaxpy.c b/interface/zaxpy.c index fbb830ffb..dbd559628 100644 --- a/interface/zaxpy.c +++ b/interface/zaxpy.c @@ -41,7 +41,11 @@ #ifdef FUNCTION_PROFILE #include "functable.h" #endif - +#if defined(Z13) +#define MULTI_THREAD_MINIMAL 200000 +#else +#define MULTI_THREAD_MINIMAL 10000 +#endif #ifndef CBLAS void NAME(blasint *N, FLOAT *ALPHA, FLOAT *x, blasint *INCX, FLOAT *y, blasint *INCY){ @@ -69,7 +73,7 @@ void CNAME(blasint n, FLOAT *ALPHA, FLOAT *x, blasint incx, FLOAT *y, blasint in #endif #ifndef CBLAS - PRINT_DEBUG_CNAME; + PRINT_DEBUG_NAME; #else PRINT_DEBUG_CNAME; #endif @@ -78,6 +82,12 @@ void 
CNAME(blasint n, FLOAT *ALPHA, FLOAT *x, blasint incx, FLOAT *y, blasint in if ((alpha_r == ZERO) && (alpha_i == ZERO)) return; + if (incx == 0 && incy == 0) { + *y += n * (alpha_r * (*x) - alpha_i* (*(x+1)) ); + *(y+1) += n * (alpha_i * (*x) + alpha_r * (*(x +1)) ); + return; + } + IDEBUG_START; FUNCTION_PROFILE_START(); @@ -86,12 +96,15 @@ void CNAME(blasint n, FLOAT *ALPHA, FLOAT *x, blasint incx, FLOAT *y, blasint in if (incy < 0) y -= (n - 1) * incy * 2; #ifdef SMP - nthreads = num_cpu_avail(1); - //disable multi-thread when incx==0 or incy==0 //In that case, the threads would be dependent. - if (incx == 0 || incy == 0) + // + //Temporarily work-around the low performance issue with small input size & + //multithreads. + if (incx == 0 || incy == 0 || n <= MULTI_THREAD_MINIMAL) nthreads = 1; + else + nthreads = num_cpu_avail(1); if (nthreads == 1) { #endif diff --git a/interface/zgbmv.c b/interface/zgbmv.c index a04be2fbf..5e275a8ed 100644 --- a/interface/zgbmv.c +++ b/interface/zgbmv.c @@ -237,7 +237,7 @@ void CNAME(enum CBLAS_ORDER order, if (trans & 1) lenx = m; if (trans & 1) leny = n; - if (beta_r != ONE || beta_i != ZERO) SCAL_K(leny, 0, 0, beta_r, beta_i, y, abs(incy), NULL, 0, NULL, 0); + if (beta_r != ONE || beta_i != ZERO) SCAL_K(leny, 0, 0, beta_r, beta_i, y, blasabs(incy), NULL, 0, NULL, 0); if (alpha_r == ZERO && alpha_i == ZERO) return; diff --git a/interface/zgemv.c b/interface/zgemv.c index 0c75564f0..3e98dba7f 100644 --- a/interface/zgemv.c +++ b/interface/zgemv.c @@ -225,7 +225,7 @@ void CNAME(enum CBLAS_ORDER order, if (trans & 1) lenx = m; if (trans & 1) leny = n; - if (beta_r != ONE || beta_i != ZERO) SCAL_K(leny, 0, 0, beta_r, beta_i, y, abs(incy), NULL, 0, NULL, 0); + if (beta_r != ONE || beta_i != ZERO) SCAL_K(leny, 0, 0, beta_r, beta_i, y, blasabs(incy), NULL, 0, NULL, 0); if (alpha_r == ZERO && alpha_i == ZERO) return; diff --git a/interface/zhbmv.c b/interface/zhbmv.c index 9ad1b53a1..656f137c6 100644 --- a/interface/zhbmv.c +++ b/interface/zhbmv.c @@ -190,7 +190,7 @@ void CNAME(enum CBLAS_ORDER order, if (n == 0) return; - if ((beta_r != ONE) || (beta_i != ZERO)) SCAL_K(n, 0, 0, beta_r, beta_i, y, abs(incy), NULL, 0, NULL, 0); + if ((beta_r != ONE) || (beta_i != ZERO)) SCAL_K(n, 0, 0, beta_r, beta_i, y, blasabs(incy), NULL, 0, NULL, 0); if ((alpha_r == ZERO) && (alpha_i == ZERO)) return; diff --git a/interface/zhemv.c b/interface/zhemv.c index 2aee880dc..9c31f31d9 100644 --- a/interface/zhemv.c +++ b/interface/zhemv.c @@ -43,6 +43,10 @@ #include "functable.h" #endif + +// this is the smallest dimension N of square input a that permits threading +// see graph in issue #1820 for explanation +#define MULTI_THREAD_MINIMAL 362 + #ifdef XDOUBLE #define ERROR_NAME "XHEMV " #elif defined(DOUBLE) @@ -181,7 +185,7 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_UPLO Uplo, blasint n, void *VALPHA if (n == 0) return; - if ((beta_r != ONE) || (beta_i != ZERO)) SCAL_K(n, 0, 0, beta_r, beta_i, y, abs(incy), NULL, 0, NULL, 0); + if ((beta_r != ONE) || (beta_i != ZERO)) SCAL_K(n, 0, 0, beta_r, beta_i, y, blasabs(incy), NULL, 0, NULL, 0); if ((alpha_r == ZERO) && (alpha_i == ZERO)) return; @@ -195,7 +199,11 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_UPLO Uplo, blasint n, void *VALPHA buffer = (FLOAT *)blas_memory_alloc(1); #ifdef SMP - nthreads = num_cpu_avail(2); + if (n < MULTI_THREAD_MINIMAL) + nthreads = 1; + else + nthreads = num_cpu_avail(2); +
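The hunks above all apply the same dispatch rule: fall back to a single thread when zero strides make iterations interdependent, or when the problem is too small to amortize thread startup. A condensed sketch of that decision logic, with the thresholds taken from this patch (10000 elements for (z)axpy, 200000 on Z13; n = 362 for zhemv per issue #1820); names and signatures here are illustrative only, not the library's API:

    /* serialize small or stride-0 axpy calls */
    static int axpy_nthreads(int n, int incx, int incy, int ncpu) {
      const int MULTI_THREAD_MINIMAL = 10000;  /* 200000 on Z13 */
      if (incx == 0 || incy == 0) return 1;    /* iterations would be dependent */
      return (n <= MULTI_THREAD_MINIMAL) ? 1 : ncpu;
    }

    /* serialize zhemv below the measured break-even matrix size */
    static int zhemv_nthreads(int n, int ncpu) {
      const int MULTI_THREAD_MINIMAL = 362;    /* smallest n worth threading */
      return (n < MULTI_THREAD_MINIMAL) ? 1 : ncpu;
    }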
diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt index c06d1eae8..2a330df4e 100644 --- a/kernel/CMakeLists.txt +++ b/kernel/CMakeLists.txt @@ -121,14 +121,17 @@ function (build_core TARGET_CORE KDIR TSUFFIX KERNEL_DEFINITIONS) # Makefile.L3 set(USE_TRMM false) - if (ARM OR ARM64 OR "${TARGET_CORE}" STREQUAL "LONGSOON3B" OR "${TARGET_CORE}" STREQUAL "GENERIC" OR "${CORE}" STREQUAL "generic" OR "${TARGET_CORE}" STREQUAL "HASWELL" OR "${CORE}" STREQUAL "haswell" OR "${CORE}" STREQUAL "zen") + if (ARM OR ARM64 OR "${TARGET_CORE}" STREQUAL "LONGSOON3B" OR "${TARGET_CORE}" STREQUAL "GENERIC" OR "${CORE}" STREQUAL "generic" OR "${TARGET_CORE}" STREQUAL "HASWELL" OR "${CORE}" STREQUAL "haswell" OR "${CORE}" STREQUAL "zen" OR "${TARGET_CORE}" STREQUAL "SKYLAKEX" OR "${CORE}" STREQUAL "skylakex") set(USE_TRMM true) endif () - foreach (float_type ${FLOAT_TYPES}) + foreach (float_type SINGLE DOUBLE) string(SUBSTRING ${float_type} 0 1 float_char) GenerateNamedObjects("${KERNELDIR}/${${float_char}GEMMKERNEL}" "" "gemm_kernel" false "" "" false ${float_type}) + endforeach() + foreach (float_type ${FLOAT_TYPES}) + string(SUBSTRING ${float_type} 0 1 float_char) if (${float_char}GEMMINCOPY) GenerateNamedObjects("${KERNELDIR}/${${float_char}GEMMINCOPY}" "${float_type}" "${${float_char}GEMMINCOPYOBJ}" false "" "" true ${float_type}) endif () diff --git a/kernel/Makefile b/kernel/Makefile index a0a8fcd21..e81225075 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -5,8 +5,43 @@ endif TOPDIR = .. include $(TOPDIR)/Makefile.system +AVX2OPT = +ifeq ($(C_COMPILER), GCC) +# AVX2 support was added in 4.7.0 + GCCVERSIONGTEQ4 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \>= 4) + GCCMINORVERSIONGTEQ7 := $(shell expr `$(CC) -dumpversion | cut -f2 -d.` \>= 7) + ifeq ($(GCCVERSIONGTEQ4)$(GCCMINORVERSIONGTEQ7), 11) + AVX2OPT = -mavx2 + endif +endif +ifeq ($(C_COMPILER), CLANG) +# Any clang posing as gcc 4.2 should be new enough (3.4 or later) + GCCVERSIONGTEQ4 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \>= 4) + GCCMINORVERSIONGTEQ2 := $(shell expr `$(CC) -dumpversion | cut -f2 -d.` \>= 2) + ifeq ($(GCCVERSIONGTEQ4)$(GCCMINORVERSIONGTEQ2), 11) + AVX2OPT = -mavx2 + endif +endif +ifdef NO_AVX2 + AVX2OPT= +endif + ifdef TARGET_CORE -override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE) +ifeq ($(TARGET_CORE), SKYLAKEX) + override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE) -march=skylake-avx512 + ifeq ($(OSNAME), CYGWIN_NT) + override CFLAGS += -fno-asynchronous-unwind-tables + endif + ifeq ($(OSNAME), WINNT) + ifeq ($(C_COMPILER), GCC) + override CFLAGS += -fno-asynchronous-unwind-tables + endif + endif +else ifeq ($(TARGET_CORE), HASWELL) + override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE) $(AVX2OPT) +else + override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE) +endif BUILD_KERNEL = 1 KDIR = TSUFFIX = _$(TARGET_CORE) @@ -88,7 +123,11 @@ lsame.$(SUFFIX): $(KERNELDIR)/$(LSAME_KERNEL) $(CC) -c $(CFLAGS) -DF_INTERFACE $< -o $(@F) setparam$(TSUFFIX).$(SUFFIX): setparam$(TSUFFIX).c kernel$(TSUFFIX).h +ifeq ($(USE_GEMM3M), 1) + $(CC) -c $(CFLAGS) -DUSE_GEMM3M $< -o $@ +else $(CC) -c $(CFLAGS) $< -o $@ +endif setparam$(TSUFFIX).c : setparam-ref.c sed 's/TS/$(TSUFFIX)/g' $< > $(@F) diff --git a/kernel/Makefile.L3 b/kernel/Makefile.L3 index 4284fbfa0..9258f216d 100644 --- a/kernel/Makefile.L3 +++ b/kernel/Makefile.L3 @@ -29,9 +29,11 @@ USE_TRMM = 1 endif ifeq ($(CORE), HASWELL) -ifeq ($(ARCH), x86_64) USE_TRMM = 1 
endif + +ifeq ($(CORE), SKYLAKEX) +USE_TRMM = 1 endif ifeq ($(CORE), ZEN) @@ -42,7 +44,7 @@ ifeq ($(CORE), POWER8) USE_TRMM = 1 endif -ifeq ($(CORE), Z13) +ifeq ($(ARCH), zarch) USE_TRMM = 1 endif diff --git a/kernel/arm/KERNEL.ARMV5 b/kernel/arm/KERNEL.ARMV5 index 27157dad1..10808e2d9 100644 --- a/kernel/arm/KERNEL.ARMV5 +++ b/kernel/arm/KERNEL.ARMV5 @@ -49,6 +49,7 @@ SDOTKERNEL = ../arm/dot.c DDOTKERNEL = ../arm/dot.c CDOTKERNEL = ../arm/zdot.c ZDOTKERNEL = ../arm/zdot.c +DSDOTKERNEL = ../generic/dot.c SNRM2KERNEL = ../arm/nrm2.c DNRM2KERNEL = ../arm/nrm2.c diff --git a/kernel/arm/asum_vfp.S b/kernel/arm/asum_vfp.S index 5b08e5028..9a75885a2 100644 --- a/kernel/arm/asum_vfp.S +++ b/kernel/arm/asum_vfp.S @@ -58,11 +58,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F4 pld [ X, #X_PRE ] - fldmiad X!, { d4 - d5 } + vldmia.f64 X!, { d4 - d5 } vabs.f64 d4, d4 vadd.f64 d0 , d0, d4 vabs.f64 d5, d5 - fldmiad X!, { d6 - d7 } + vldmia.f64 X!, { d6 - d7 } vabs.f64 d6, d6 vadd.f64 d1 , d1, d5 vabs.f64 d7, d7 @@ -73,7 +73,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1 - fldmiad X!, { d4 } + vldmia.f64 X!, { d4 } vabs.f64 d4, d4 vadd.f64 d0 , d0, d4 @@ -82,22 +82,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S4 - fldmiad X, { d4 } + vldmia.f64 X, { d4 } vabs.f64 d4, d4 vadd.f64 d0 , d0, d4 add X, X, INC_X - fldmiad X, { d4 } + vldmia.f64 X, { d4 } vabs.f64 d4, d4 vadd.f64 d0 , d0, d4 add X, X, INC_X - fldmiad X, { d4 } + vldmia.f64 X, { d4 } vabs.f64 d4, d4 vadd.f64 d0 , d0, d4 add X, X, INC_X - fldmiad X, { d4 } + vldmia.f64 X, { d4 } vabs.f64 d4, d4 vadd.f64 d0 , d0, d4 add X, X, INC_X @@ -107,7 +107,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1 - fldmiad X, { d4 } + vldmia.f64 X, { d4 } vabs.f64 d4, d4 vadd.f64 d0 , d0, d4 add X, X, INC_X @@ -118,11 +118,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F4 - fldmias X!, { s4 - s5 } + vldmia.f32 X!, { s4 - s5 } vabs.f32 s4, s4 vadd.f32 s0 , s0, s4 vabs.f32 s5, s5 - fldmias X!, { s6 - s7 } + vldmia.f32 X!, { s6 - s7 } vabs.f32 s6, s6 vadd.f32 s1 , s1, s5 vabs.f32 s7, s7 @@ -133,7 +133,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1 - fldmias X!, { s4 } + vldmia.f32 X!, { s4 } vabs.f32 s4, s4 vadd.f32 s0 , s0, s4 @@ -142,22 +142,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S4 - fldmias X, { s4 } + vldmia.f32 X, { s4 } vabs.f32 s4, s4 vadd.f32 s0 , s0, s4 add X, X, INC_X - fldmias X, { s4 } + vldmia.f32 X, { s4 } vabs.f32 s4, s4 vadd.f32 s0 , s0, s4 add X, X, INC_X - fldmias X, { s4 } + vldmia.f32 X, { s4 } vabs.f32 s4, s4 vadd.f32 s0 , s0, s4 add X, X, INC_X - fldmias X, { s4 } + vldmia.f32 X, { s4 } vabs.f32 s4, s4 vadd.f32 s0 , s0, s4 add X, X, INC_X @@ -167,7 +167,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1 - fldmias X, { s4 } + vldmia.f32 X, { s4 } vabs.f32 s4, s4 vadd.f32 s0 , s0, s4 add X, X, INC_X @@ -184,11 +184,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.macro KERNEL_F4 pld [ X, #X_PRE ] - fldmiad X!, { d4 - d5 } + vldmia.f64 X!, { d4 - d5 } vabs.f64 d4, d4 vadd.f64 d0 , d0, d4 vabs.f64 d5, d5 - fldmiad X!, { d6 - d7 } + vldmia.f64 X!, { d6 - d7 } vabs.f64 d6, d6 vadd.f64 d1 , d1, d5 vabs.f64 d7, d7 @@ -196,11 +196,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vadd.f64 d1 , d1, d7 pld [ X, #X_PRE ] - fldmiad X!, { d4 - d5 } + vldmia.f64 X!, { d4 - d5 } vabs.f64 d4, d4 vadd.f64 d0 , d0, d4 vabs.f64 d5, d5 - fldmiad X!, { d6 - d7 } + vldmia.f64 X!, { d6 - d7 } vabs.f64 d6, d6 vadd.f64 d1 , d1, d5 vabs.f64 d7, d7 @@ -212,11 +212,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1 - fldmiad X!, { d4 } + vldmia.f64 X!, { d4 } vabs.f64 d4, d4 vadd.f64 d0 , d0, d4 - fldmiad X!, { d4 } + vldmia.f64 X!, { d4 } vabs.f64 d4, d4 vadd.f64 d0 , d0, d4 @@ -226,28 +226,28 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S4 - fldmiad X, { d4 -d5 } + vldmia.f64 X, { d4 -d5 } vabs.f64 d4, d4 vadd.f64 d0 , d0, d4 vabs.f64 d5, d5 vadd.f64 d0 , d0, d5 add X, X, INC_X - fldmiad X, { d4 -d5 } + vldmia.f64 X, { d4 -d5 } vabs.f64 d4, d4 vadd.f64 d0 , d0, d4 vabs.f64 d5, d5 vadd.f64 d0 , d0, d5 add X, X, INC_X - fldmiad X, { d4 -d5 } + vldmia.f64 X, { d4 -d5 } vabs.f64 d4, d4 vadd.f64 d0 , d0, d4 vabs.f64 d5, d5 vadd.f64 d0 , d0, d5 add X, X, INC_X - fldmiad X, { d4 -d5 } + vldmia.f64 X, { d4 -d5 } vabs.f64 d4, d4 vadd.f64 d0 , d0, d4 vabs.f64 d5, d5 @@ -259,7 +259,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1 - fldmiad X, { d4 -d5 } + vldmia.f64 X, { d4 -d5 } vabs.f64 d4, d4 vadd.f64 d0 , d0, d4 vabs.f64 d5, d5 @@ -273,22 +273,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F4 pld [ X, #X_PRE ] - fldmias X!, { s4 - s5 } + vldmia.f32 X!, { s4 - s5 } vabs.f32 s4, s4 vadd.f32 s0 , s0, s4 vabs.f32 s5, s5 - fldmias X!, { s6 - s7 } + vldmia.f32 X!, { s6 - s7 } vabs.f32 s6, s6 vadd.f32 s1 , s1, s5 vabs.f32 s7, s7 vadd.f32 s0 , s0, s6 vadd.f32 s1 , s1, s7 - fldmias X!, { s4 - s5 } + vldmia.f32 X!, { s4 - s5 } vabs.f32 s4, s4 vadd.f32 s0 , s0, s4 vabs.f32 s5, s5 - fldmias X!, { s6 - s7 } + vldmia.f32 X!, { s6 - s7 } vabs.f32 s6, s6 vadd.f32 s1 , s1, s5 vabs.f32 s7, s7 @@ -300,11 +300,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1 - fldmias X!, { s4 } + vldmia.f32 X!, { s4 } vabs.f32 s4, s4 vadd.f32 s0 , s0, s4 - fldmias X!, { s4 } + vldmia.f32 X!, { s4 } vabs.f32 s4, s4 vadd.f32 s0 , s0, s4 @@ -313,28 +313,28 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S4 - fldmias X, { s4 -s5 } + vldmia.f32 X, { s4 -s5 } vabs.f32 s4, s4 vadd.f32 s0 , s0, s4 vabs.f32 s5, s5 vadd.f32 s0 , s0, s5 add X, X, INC_X - fldmias X, { s4 -s5 } + vldmia.f32 X, { s4 -s5 } vabs.f32 s4, s4 vadd.f32 s0 , s0, s4 vabs.f32 s5, s5 vadd.f32 s0 , s0, s5 add X, X, INC_X - fldmias X, { s4 -s5 } + vldmia.f32 X, { s4 -s5 } vabs.f32 s4, s4 vadd.f32 s0 , s0, s4 vabs.f32 s5, s5 vadd.f32 s0 , s0, s5 add X, X, INC_X - fldmias X, { s4 -s5 } + vldmia.f32 X, { s4 -s5 } vabs.f32 s4, s4 vadd.f32 s0 , s0, s4 vabs.f32 s5, s5 @@ -346,7 +346,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.macro KERNEL_S1 - fldmias X, { s4 -s5 } + vldmia.f32 X, { s4 -s5 } vabs.f32 s4, s4 vadd.f32 s0 , s0, s4 vabs.f32 s5, s5 diff --git a/kernel/arm/axpy_vfp.S b/kernel/arm/axpy_vfp.S index 37515f399..39c9ac233 100644 --- a/kernel/arm/axpy_vfp.S +++ b/kernel/arm/axpy_vfp.S @@ -146,17 +146,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F4 pld [ X, #X_PRE ] - fldmiad X!, { d4 - d7 } + vldmia.f64 X!, { d4 - d7 } pld [ Y, #X_PRE ] - fldmiad Y , { d8 - d11 } + vldmia.f64 Y , { d8 - d11 } fmacd d8 , d0, d4 - fstmiad Y!, { d8 } + vstmia.f64 Y!, { d8 } fmacd d9 , d0, d5 - fstmiad Y!, { d9 } + vstmia.f64 Y!, { d9 } fmacd d10, d0, d6 - fstmiad Y!, { d10 } + vstmia.f64 Y!, { d10 } fmacd d11, d0, d7 - fstmiad Y!, { d11 } + vstmia.f64 Y!, { d11 } .endm @@ -164,19 +164,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1 - fldmiad X!, { d4 } - fldmiad Y , { d8 } + vldmia.f64 X!, { d4 } + vldmia.f64 Y , { d8 } fmacd d8 , d0, d4 - fstmiad Y!, { d8 } + vstmia.f64 Y!, { d8 } .endm .macro KERNEL_S1 - fldmiad X , { d4 } - fldmiad Y , { d8 } + vldmia.f64 X , { d4 } + vldmia.f64 Y , { d8 } fmacd d8 , d0, d4 - fstmiad Y , { d8 } + vstmia.f64 Y , { d8 } add X, X, INC_X add Y, Y, INC_Y @@ -186,16 +186,16 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F4 - fldmias X!, { s4 - s7 } - fldmias Y , { s8 - s11 } + vldmia.f32 X!, { s4 - s7 } + vldmia.f32 Y , { s8 - s11 } fmacs s8 , s0, s4 - fstmias Y!, { s8 } + vstmia.f32 Y!, { s8 } fmacs s9 , s0, s5 - fstmias Y!, { s9 } + vstmia.f32 Y!, { s9 } fmacs s10, s0, s6 - fstmias Y!, { s10 } + vstmia.f32 Y!, { s10 } fmacs s11, s0, s7 - fstmias Y!, { s11 } + vstmia.f32 Y!, { s11 } .endm @@ -203,19 +203,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1 - fldmias X!, { s4 } - fldmias Y , { s8 } + vldmia.f32 X!, { s4 } + vldmia.f32 Y , { s8 } fmacs s8 , s0, s4 - fstmias Y!, { s8 } + vstmia.f32 Y!, { s8 } .endm .macro KERNEL_S1 - fldmias X , { s4 } - fldmias Y , { s8 } + vldmia.f32 X , { s4 } + vldmia.f32 Y , { s8 } fmacs s8 , s0, s4 - fstmias Y , { s8 } + vstmia.f32 Y , { s8 } add X, X, INC_X add Y, Y, INC_Y @@ -231,42 +231,42 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F4 pld [ X, #X_PRE ] - fldmiad X!, { d4 - d7 } + vldmia.f64 X!, { d4 - d7 } pld [ Y, #X_PRE ] - fldmiad Y , { d8 - d11 } + vldmia.f64 Y , { d8 - d11 } FMAC_R1 d8 , d0, d4 FMAC_R2 d8 , d1, d5 FMAC_I1 d9 , d0, d5 FMAC_I2 d9 , d1, d4 - fstmiad Y!, { d8 } - fstmiad Y!, { d9 } + vstmia.f64 Y!, { d8 } + vstmia.f64 Y!, { d9 } FMAC_R1 d10, d0, d6 FMAC_R2 d10, d1, d7 FMAC_I1 d11, d0, d7 FMAC_I2 d11, d1, d6 - fstmiad Y!, { d10 } - fstmiad Y!, { d11 } + vstmia.f64 Y!, { d10 } + vstmia.f64 Y!, { d11 } pld [ X, #X_PRE ] - fldmiad X!, { d4 - d7 } + vldmia.f64 X!, { d4 - d7 } pld [ Y, #X_PRE ] - fldmiad Y , { d8 - d11 } + vldmia.f64 Y , { d8 - d11 } FMAC_R1 d8 , d0, d4 FMAC_R2 d8 , d1, d5 FMAC_I1 d9 , d0, d5 FMAC_I2 d9 , d1, d4 - fstmiad Y!, { d8 } - fstmiad Y!, { d9 } + vstmia.f64 Y!, { d8 } + vstmia.f64 Y!, { d9 } FMAC_R1 d10, d0, d6 FMAC_R2 d10, d1, d7 FMAC_I1 d11, d0, d7 FMAC_I2 d11, d1, d6 - fstmiad Y!, { d10 } - fstmiad Y!, { d11 } + vstmia.f64 Y!, { d10 } + vstmia.f64 Y!, { d11 } @@ -277,15 +277,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.macro KERNEL_F1 - fldmiad X!, { d4 - d5 } - fldmiad Y , { d8 - d9 } + vldmia.f64 X!, { d4 - d5 } + vldmia.f64 Y , { d8 - d9 } FMAC_R1 d8 , d0, d4 FMAC_R2 d8 , d1, d5 FMAC_I1 d9 , d0, d5 FMAC_I2 d9 , d1, d4 - fstmiad Y!, { d8 } - fstmiad Y!, { d9 } + vstmia.f64 Y!, { d8 } + vstmia.f64 Y!, { d9 } @@ -293,14 +293,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1 - fldmiad X , { d4 - d5 } - fldmiad Y , { d8 - d9 } + vldmia.f64 X , { d4 - d5 } + vldmia.f64 Y , { d8 - d9 } FMAC_R1 d8 , d0, d4 FMAC_R2 d8 , d1, d5 FMAC_I1 d9 , d0, d5 FMAC_I2 d9 , d1, d4 - fstmiad Y , { d8 - d9 } + vstmia.f64 Y , { d8 - d9 } add X, X, INC_X add Y, Y, INC_Y @@ -314,40 +314,40 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F4 pld [ X, #X_PRE ] - fldmias X!, { s4 - s7 } + vldmia.f32 X!, { s4 - s7 } pld [ Y, #X_PRE ] - fldmias Y , { s8 - s11 } + vldmia.f32 Y , { s8 - s11 } FMAC_R1 s8 , s0, s4 FMAC_R2 s8 , s1, s5 FMAC_I1 s9 , s0, s5 FMAC_I2 s9 , s1, s4 - fstmias Y!, { s8 } - fstmias Y!, { s9 } + vstmia.f32 Y!, { s8 } + vstmia.f32 Y!, { s9 } FMAC_R1 s10, s0, s6 FMAC_R2 s10, s1, s7 FMAC_I1 s11, s0, s7 FMAC_I2 s11, s1, s6 - fstmias Y!, { s10 } - fstmias Y!, { s11 } + vstmia.f32 Y!, { s10 } + vstmia.f32 Y!, { s11 } - fldmias X!, { s4 - s7 } - fldmias Y , { s8 - s11 } + vldmia.f32 X!, { s4 - s7 } + vldmia.f32 Y , { s8 - s11 } FMAC_R1 s8 , s0, s4 FMAC_R2 s8 , s1, s5 FMAC_I1 s9 , s0, s5 FMAC_I2 s9 , s1, s4 - fstmias Y!, { s8 } - fstmias Y!, { s9 } + vstmia.f32 Y!, { s8 } + vstmia.f32 Y!, { s9 } FMAC_R1 s10, s0, s6 FMAC_R2 s10, s1, s7 FMAC_I1 s11, s0, s7 FMAC_I2 s11, s1, s6 - fstmias Y!, { s10 } - fstmias Y!, { s11 } + vstmia.f32 Y!, { s10 } + vstmia.f32 Y!, { s11 } @@ -358,15 +358,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1 - fldmias X!, { s4 - s5 } - fldmias Y , { s8 - s9 } + vldmia.f32 X!, { s4 - s5 } + vldmia.f32 Y , { s8 - s9 } FMAC_R1 s8 , s0, s4 FMAC_R2 s8 , s1, s5 FMAC_I1 s9 , s0, s5 FMAC_I2 s9 , s1, s4 - fstmias Y!, { s8 } - fstmias Y!, { s9 } + vstmia.f32 Y!, { s8 } + vstmia.f32 Y!, { s9 } @@ -374,14 +374,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1 - fldmias X , { s4 - s5 } - fldmias Y , { s8 - s9 } + vldmia.f32 X , { s4 - s5 } + vldmia.f32 Y , { s8 - s9 } FMAC_R1 s8 , s0, s4 FMAC_R2 s8 , s1, s5 FMAC_I1 s9 , s0, s5 FMAC_I2 s9 , s1, s4 - fstmias Y , { s8 - s9 } + vstmia.f32 Y , { s8 - s9 } add X, X, INC_X add Y, Y, INC_Y @@ -440,13 +440,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. cmp N, #0 ble axpy_kernel_L999 - +/* cmp INC_X, #0 beq axpy_kernel_L999 cmp INC_Y, #0 beq axpy_kernel_L999 - +*/ cmp INC_X, #1 bne axpy_kernel_S_BEGIN diff --git a/kernel/arm/ccopy_vfp.S b/kernel/arm/ccopy_vfp.S index 874fcab9c..fbb32b43c 100644 --- a/kernel/arm/ccopy_vfp.S +++ b/kernel/arm/ccopy_vfp.S @@ -65,15 +65,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY_F4 pld [ X, #X_PRE ] - fldmias X!, { s0 - s7 } - fstmias Y!, { s0 - s7 } + vldmia.f32 X!, { s0 - s7 } + vstmia.f32 Y!, { s0 - s7 } .endm .macro COPY_F1 - fldmias X!, { s0 - s1 } - fstmias Y!, { s0 - s1 } + vldmia.f32 X!, { s0 - s1 } + vstmia.f32 Y!, { s0 - s1 } .endm @@ -83,23 +83,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.macro COPY_S4 nop - fldmias X, { s0 - s1 } - fstmias Y, { s0 - s1 } + vldmia.f32 X, { s0 - s1 } + vstmia.f32 Y, { s0 - s1 } add X, X, INC_X add Y, Y, INC_Y - fldmias X, { s2 - s3 } - fstmias Y, { s2 - s3 } + vldmia.f32 X, { s2 - s3 } + vstmia.f32 Y, { s2 - s3 } add X, X, INC_X add Y, Y, INC_Y - fldmias X, { s0 - s1 } - fstmias Y, { s0 - s1 } + vldmia.f32 X, { s0 - s1 } + vstmia.f32 Y, { s0 - s1 } add X, X, INC_X add Y, Y, INC_Y - fldmias X, { s2 - s3 } - fstmias Y, { s2 - s3 } + vldmia.f32 X, { s2 - s3 } + vstmia.f32 Y, { s2 - s3 } add X, X, INC_X add Y, Y, INC_Y @@ -108,8 +108,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY_S1 - fldmias X, { s0 - s1 } - fstmias Y, { s0 - s1 } + vldmia.f32 X, { s0 - s1 } + vstmia.f32 Y, { s0 - s1 } add X, X, INC_X add Y, Y, INC_Y diff --git a/kernel/arm/cdot_vfp.S b/kernel/arm/cdot_vfp.S index e5a6e4d35..85246d734 100644 --- a/kernel/arm/cdot_vfp.S +++ b/kernel/arm/cdot_vfp.S @@ -76,30 +76,30 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pld [ X, #X_PRE ] pld [ Y, #X_PRE ] - fldmias X!, { s4 - s5 } - fldmias Y!, { s8 - s9 } + vldmia.f32 X!, { s4 - s5 } + vldmia.f32 Y!, { s8 - s9 } fmacs s0 , s4, s8 fmacs s1 , s4, s9 - fldmias X!, { s6 - s7 } + vldmia.f32 X!, { s6 - s7 } fmacs s2 , s5, s9 fmacs s3 , s5, s8 - fldmias Y!, { s10 - s11 } + vldmia.f32 Y!, { s10 - s11 } fmacs s0 , s6, s10 fmacs s1 , s6, s11 fmacs s2 , s7, s11 fmacs s3 , s7, s10 - fldmias X!, { s4 - s5 } - fldmias Y!, { s8 - s9 } + vldmia.f32 X!, { s4 - s5 } + vldmia.f32 Y!, { s8 - s9 } fmacs s0 , s4, s8 fmacs s1 , s4, s9 - fldmias X!, { s6 - s7 } + vldmia.f32 X!, { s6 - s7 } fmacs s2 , s5, s9 fmacs s3 , s5, s8 - fldmias Y!, { s10 - s11 } + vldmia.f32 Y!, { s10 - s11 } fmacs s0 , s6, s10 fmacs s1 , s6, s11 fmacs s2 , s7, s11 @@ -109,8 +109,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1 - fldmias X!, { s4 - s5 } - fldmias Y!, { s8 - s9 } + vldmia.f32 X!, { s4 - s5 } + vldmia.f32 Y!, { s8 - s9 } fmacs s0 , s4, s8 fmacs s1 , s4, s9 fmacs s2 , s5, s9 @@ -125,8 +125,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. nop - fldmias X, { s4 - s5 } - fldmias Y, { s8 - s9 } + vldmia.f32 X, { s4 - s5 } + vldmia.f32 Y, { s8 - s9 } fmacs s0 , s4, s8 fmacs s1 , s4, s9 fmacs s2 , s5, s9 @@ -134,8 +134,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add X, X, INC_X add Y, Y, INC_Y - fldmias X, { s4 - s5 } - fldmias Y, { s8 - s9 } + vldmia.f32 X, { s4 - s5 } + vldmia.f32 Y, { s8 - s9 } fmacs s0 , s4, s8 fmacs s1 , s4, s9 fmacs s2 , s5, s9 @@ -143,8 +143,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add X, X, INC_X add Y, Y, INC_Y - fldmias X, { s4 - s5 } - fldmias Y, { s8 - s9 } + vldmia.f32 X, { s4 - s5 } + vldmia.f32 Y, { s8 - s9 } fmacs s0 , s4, s8 fmacs s1 , s4, s9 fmacs s2 , s5, s9 @@ -152,8 +152,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add X, X, INC_X add Y, Y, INC_Y - fldmias X, { s4 - s5 } - fldmias Y, { s8 - s9 } + vldmia.f32 X, { s4 - s5 } + vldmia.f32 Y, { s8 - s9 } fmacs s0 , s4, s8 fmacs s1 , s4, s9 fmacs s2 , s5, s9 @@ -166,8 +166,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.macro KERNEL_S1 - fldmias X, { s4 - s5 } - fldmias Y, { s8 - s9 } + vldmia.f32 X, { s4 - s5 } + vldmia.f32 Y, { s8 - s9 } fmacs s0 , s4, s8 fmacs s1 , s4, s9 fmacs s2 , s5, s9 @@ -215,11 +215,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. cmp N, #0 ble cdot_kernel_L999 - cmp INC_X, #0 - beq cdot_kernel_L999 +# cmp INC_X, #0 +# beq cdot_kernel_L999 - cmp INC_Y, #0 - beq cdot_kernel_L999 +# cmp INC_Y, #0 +# beq cdot_kernel_L999 cmp INC_X, #1 bne cdot_kernel_S_BEGIN diff --git a/kernel/arm/cgemm_kernel_2x2_vfp.S b/kernel/arm/cgemm_kernel_2x2_vfp.S index 71bc50efd..d2591919e 100644 --- a/kernel/arm/cgemm_kernel_2x2_vfp.S +++ b/kernel/arm/cgemm_kernel_2x2_vfp.S @@ -165,9 +165,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL2x2_I pld [ AO, #A_PRE ] - fldmias AO!, { s0 - s3 } + vldmia.f32 AO!, { s0 - s3 } pld [ BO, #B_PRE ] - fldmias BO!, { s4 - s7 } + vldmia.f32 BO!, { s4 - s7 } fmuls s8 , s0, s4 @@ -197,9 +197,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL2x2_M1 pld [ AO, #A_PRE ] - fldmias AO!, { s0 - s3 } + vldmia.f32 AO!, { s0 - s3 } pld [ BO, #B_PRE ] - fldmias BO!, { s4 - s7 } + vldmia.f32 BO!, { s4 - s7 } fmacs s8 , s0, s4 fmacs s9 , s0, s5 @@ -225,8 +225,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL2x2_M2 - fldmias AO!, { s0 - s3 } - fldmias BO!, { s4 - s7 } + vldmia.f32 AO!, { s0 - s3 } + vldmia.f32 BO!, { s4 - s7 } fmacs s8 , s0, s4 fmacs s9 , s0, s5 @@ -254,8 +254,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL2x2_E - fldmias AO!, { s0 - s3 } - fldmias BO!, { s4 - s7 } + vldmia.f32 AO!, { s0 - s3 } + vldmia.f32 BO!, { s4 - s7 } fmacs s8 , s0, s4 fmacs s9 , s0, s5 @@ -282,8 +282,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL2x2_SUB - fldmias AO!, { s0 - s3 } - fldmias BO!, { s4 - s7 } + vldmia.f32 AO!, { s0 - s3 } + vldmia.f32 BO!, { s4 - s7 } fmacs s8 , s0, s4 fmacs s9 , s0, s5 @@ -317,7 +317,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s0, ALPHA_R flds s1, ALPHA_I - fldmias CO1, { s4 - s7 } + vldmia.f32 CO1, { s4 - s7 } FMAC_R1 s4 , s0 , s8 FMAC_I1 s5 , s0 , s9 @@ -329,9 +329,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s6 , s1 , s11 FMAC_I2 s7 , s1 , s10 - fstmias CO1, { s4 - s7 } + vstmia.f32 CO1, { s4 - s7 } - fldmias CO2, { s4 - s7 } + vldmia.f32 CO2, { s4 - s7 } FMAC_R1 s4 , s0 , s12 FMAC_I1 s5 , s0 , s13 @@ -343,7 +343,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s6 , s1 , s15 FMAC_I2 s7 , s1 , s14 - fstmias CO2, { s4 - s7 } + vstmia.f32 CO2, { s4 - s7 } add CO1, CO1, #16 @@ -500,23 +500,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s0, ALPHA_R flds s1, ALPHA_I - fldmias CO1, { s4 - s5 } + vldmia.f32 CO1, { s4 - s5 } FMAC_R1 s4 , s0 , s8 FMAC_I1 s5 , s0 , s9 FMAC_R2 s4 , s1 , s9 FMAC_I2 s5 , s1 , s8 - fstmias CO1, { s4 - s5 } + vstmia.f32 CO1, { s4 - s5 } - fldmias CO2, { s4 - s5 } + vldmia.f32 CO2, { s4 - s5 } FMAC_R1 s4 , s0 , s12 FMAC_I1 s5 , s0 , s13 FMAC_R2 s4 , s1 , s13 FMAC_I2 s5 , s1 , s12 - fstmias CO2, { s4 - s5 } + vstmia.f32 CO2, { s4 - s5 } add CO1, CO1, #8 @@ -671,7 +671,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
flds s0, ALPHA_R flds s1, ALPHA_I - fldmias CO1, { s4 - s7 } + vldmia.f32 CO1, { s4 - s7 } FMAC_R1 s4 , s0 , s8 FMAC_I1 s5 , s0 , s9 @@ -683,7 +683,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s6 , s1 , s11 FMAC_I2 s7 , s1 , s10 - fstmias CO1, { s4 - s7 } + vstmia.f32 CO1, { s4 - s7 } add CO1, CO1, #16 @@ -800,14 +800,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s0, ALPHA_R flds s1, ALPHA_I - fldmias CO1, { s4 - s5 } + vldmia.f32 CO1, { s4 - s5 } FMAC_R1 s4 , s0 , s8 FMAC_I1 s5 , s0 , s9 FMAC_R2 s4 , s1 , s9 FMAC_I2 s5 , s1 , s8 - fstmias CO1, { s4 - s5 } + vstmia.f32 CO1, { s4 - s5 } add CO1, CO1, #8 diff --git a/kernel/arm/cgemm_kernel_2x2_vfpv3.S b/kernel/arm/cgemm_kernel_2x2_vfpv3.S index 9d473ad78..5ebc904ac 100644 --- a/kernel/arm/cgemm_kernel_2x2_vfpv3.S +++ b/kernel/arm/cgemm_kernel_2x2_vfpv3.S @@ -182,30 +182,30 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL2x2_I pld [ AO , #A_PRE ] pld [ BO , #B_PRE ] - fldmias AO!, { s0 - s1 } - fldmias BO!, { s8 - s9 } + vldmia.f32 AO!, { s0 - s1 } + vldmia.f32 BO!, { s8 - s9 } fmuls s16 , s0, s8 fmuls s24 , s1, s9 - fldmias AO!, { s2 - s3 } + vldmia.f32 AO!, { s2 - s3 } fmuls s17 , s0, s9 fmuls s25 , s1, s8 - fldmias BO!, { s10 - s11 } + vldmia.f32 BO!, { s10 - s11 } fmuls s18 , s2, s8 fmuls s26 , s3, s9 - fldmias AO!, { s4 - s5 } + vldmia.f32 AO!, { s4 - s5 } fmuls s19 , s2, s9 fmuls s27 , s3, s8 - fldmias BO!, { s12 - s13 } + vldmia.f32 BO!, { s12 - s13 } fmuls s20 , s0, s10 fmuls s28 , s1, s11 - fldmias AO!, { s6 - s7 } + vldmia.f32 AO!, { s6 - s7 } fmuls s21 , s0, s11 fmuls s29 , s1, s10 - fldmias BO!, { s14 - s15 } + vldmia.f32 BO!, { s14 - s15 } fmuls s22 , s2, s10 fmuls s30 , s3, s11 fmuls s23 , s2, s11 @@ -218,17 +218,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL2x2_M1 fmacs s16 , s0, s8 - fldmias AO!, { s4 - s5 } + vldmia.f32 AO!, { s4 - s5 } fmacs s24 , s1, s9 fmacs s17 , s0, s9 - fldmias BO!, { s12 - s13 } + vldmia.f32 BO!, { s12 - s13 } fmacs s25 , s1, s8 fmacs s18 , s2, s8 - fldmias AO!, { s6 - s7 } + vldmia.f32 AO!, { s6 - s7 } fmacs s26 , s3, s9 fmacs s19 , s2, s9 - fldmias BO!, { s14 - s15 } + vldmia.f32 BO!, { s14 - s15 } fmacs s27 , s3, s8 fmacs s20 , s0, s10 @@ -250,19 +250,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pld [ BO , #B_PRE ] fmacs s24 , s5, s13 fmacs s17 , s4, s13 - fldmias AO!, { s0 - s1 } + vldmia.f32 AO!, { s0 - s1 } fmacs s25 , s5, s12 fmacs s18 , s6, s12 fmacs s26 , s7, s13 - fldmias BO!, { s8 - s9 } + vldmia.f32 BO!, { s8 - s9 } fmacs s19 , s6, s13 fmacs s27 , s7, s12 - fldmias AO!, { s2 - s3 } + vldmia.f32 AO!, { s2 - s3 } fmacs s20 , s4, s14 fmacs s28 , s5, s15 - fldmias BO!, { s10 - s11 } + vldmia.f32 BO!, { s10 - s11 } fmacs s21 , s4, s15 fmacs s29 , s5, s14 @@ -300,16 +300,16 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL2x2_SUB - fldmias AO!, { s0 - s1 } - fldmias BO!, { s8 - s9 } + vldmia.f32 AO!, { s0 - s1 } + vldmia.f32 BO!, { s8 - s9 } fmacs s16 , s0, s8 fmacs s24 , s1, s9 - fldmias AO!, { s2 - s3 } + vldmia.f32 AO!, { s2 - s3 } fmacs s17 , s0, s9 fmacs s25 , s1, s8 - fldmias BO!, { s10 - s11 } + vldmia.f32 BO!, { s10 - s11 } fmacs s18 , s2, s8 fmacs s26 , s3, s9 fmacs s19 , s2, s9 @@ -338,8 +338,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
flds s0, ALPHA_R flds s1, ALPHA_I - fldmias CO1, { s4 - s7 } - fldmias CO2, { s8 - s11 } + vldmia.f32 CO1, { s4 - s7 } + vldmia.f32 CO2, { s8 - s11 } FADD_R s16, s24 , s16 FADD_I s17, s25 , s17 @@ -370,8 +370,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s10, s1 , s23 FMAC_I2 s11, s1 , s22 - fstmias CO1, { s4 - s7 } - fstmias CO2, { s8 - s11 } + vstmia.f32 CO1, { s4 - s7 } + vstmia.f32 CO2, { s8 - s11 } add CO1, CO1, #16 @@ -534,8 +534,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s0, ALPHA_R flds s1, ALPHA_I - fldmias CO1, { s4 - s5 } - fldmias CO2, { s8 - s9 } + vldmia.f32 CO1, { s4 - s5 } + vldmia.f32 CO2, { s8 - s9 } FADD_R s16, s24 , s16 FADD_I s17, s25 , s17 @@ -552,8 +552,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s8 , s1 , s21 FMAC_I2 s9 , s1 , s20 - fstmias CO1, { s4 - s5 } - fstmias CO2, { s8 - s9 } + vstmia.f32 CO1, { s4 - s5 } + vstmia.f32 CO2, { s8 - s9 } add CO1, CO1, #8 @@ -716,7 +716,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s0, ALPHA_R flds s1, ALPHA_I - fldmias CO1, { s4 - s7 } + vldmia.f32 CO1, { s4 - s7 } FADD_R s16, s24 , s16 FADD_I s17, s25 , s17 @@ -733,7 +733,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s6 , s1 , s19 FMAC_I2 s7 , s1 , s18 - fstmias CO1, { s4 - s7 } + vstmia.f32 CO1, { s4 - s7 } add CO1, CO1, #16 @@ -851,7 +851,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s0, ALPHA_R flds s1, ALPHA_I - fldmias CO1, { s4 - s5 } + vldmia.f32 CO1, { s4 - s5 } FADD_R s16, s24 , s16 FADD_I s17, s25 , s17 @@ -861,7 +861,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s4 , s1 , s17 FMAC_I2 s5 , s1 , s16 - fstmias CO1, { s4 - s5 } + vstmia.f32 CO1, { s4 - s5 } add CO1, CO1, #8 diff --git a/kernel/arm/cgemm_ncopy_2_vfp.S b/kernel/arm/cgemm_ncopy_2_vfp.S index 29eeab492..fe4959988 100644 --- a/kernel/arm/cgemm_ncopy_2_vfp.S +++ b/kernel/arm/cgemm_ncopy_2_vfp.S @@ -85,7 +85,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s6 , [ AO2, #8 ] flds s7 , [ AO2, #12 ] - fstmias BO!, { s0 - s7 } + vstmia.f32 BO!, { s0 - s7 } add AO2, AO2, #16 .endm @@ -99,7 +99,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s3 , [ AO2, #4 ] add AO1, AO1, #8 - fstmias BO!, { s0 - s3 } + vstmia.f32 BO!, { s0 - s3 } add AO2, AO2, #8 .endm @@ -111,7 +111,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s2 , [ AO1, #8 ] flds s3 , [ AO1, #12 ] - fstmias BO!, { s0 - s3 } + vstmia.f32 BO!, { s0 - s3 } add AO1, AO1, #16 .endm @@ -122,7 +122,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s0 , [ AO1, #0 ] flds s1 , [ AO1, #4 ] - fstmias BO!, { s0 - s1 } + vstmia.f32 BO!, { s0 - s1 } add AO1, AO1, #8 .endm diff --git a/kernel/arm/cgemm_tcopy_2_vfp.S b/kernel/arm/cgemm_tcopy_2_vfp.S index 9036b994d..7b3ae18d4 100644 --- a/kernel/arm/cgemm_tcopy_2_vfp.S +++ b/kernel/arm/cgemm_tcopy_2_vfp.S @@ -73,12 +73,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
**************************************************************************************/ .macro COPY2x2 - fldmias AO1, { s0 - s3 } + vldmia.f32 AO1, { s0 - s3 } add r3, AO1, LDA - fldmias r3, { s4 - s7 } + vldmia.f32 r3, { s4 - s7 } - fstmias BO1, { s0 - s7 } + vstmia.f32 BO1, { s0 - s7 } add AO1, AO1, #16 add BO1, BO1, M4 @@ -86,12 +86,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY1x2 - fldmias AO1, { s0 -s1 } + vldmia.f32 AO1, { s0 -s1 } add r3, AO1, LDA - fldmias r3, { s2 - s3 } + vldmia.f32 r3, { s2 - s3 } - fstmias BO2, { s0 - s3 } + vstmia.f32 BO2, { s0 - s3 } add AO1, AO1, #8 add BO2, BO2, #16 @@ -100,9 +100,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /*************************************************************************************************************************/ .macro COPY2x1 - fldmias AO1, { s0 - s3 } + vldmia.f32 AO1, { s0 - s3 } - fstmias BO1, { s0 - s3 } + vstmia.f32 BO1, { s0 - s3 } add AO1, AO1, #16 add BO1, BO1, M4 @@ -110,9 +110,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY1x1 - fldmias AO1, { s0 - s1 } + vldmia.f32 AO1, { s0 - s1 } - fstmias BO2, { s0 - s1 } + vstmia.f32 BO2, { s0 - s1 } add AO1, AO1, #8 add BO2, BO2, #8 diff --git a/kernel/arm/cgemv_n_vfp.S b/kernel/arm/cgemv_n_vfp.S index 62ee33bb9..d6b18c796 100644 --- a/kernel/arm/cgemv_n_vfp.S +++ b/kernel/arm/cgemv_n_vfp.S @@ -201,7 +201,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s0, ALPHA_R flds s1, ALPHA_I - fldmias YO, { s4 - s7 } + vldmia.f32 YO, { s4 - s7 } FMAC_R1 s4 , s0 , s8 FMAC_I1 s5 , s0 , s9 @@ -213,9 +213,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s6 , s1 , s11 FMAC_I2 s7 , s1 , s10 - fstmias YO!, { s4 - s7 } + vstmia.f32 YO!, { s4 - s7 } - fldmias YO, { s4 - s7 } + vldmia.f32 YO, { s4 - s7 } FMAC_R1 s4 , s0 , s12 FMAC_I1 s5 , s0 , s13 @@ -227,7 +227,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s6 , s1 , s15 FMAC_I2 s7 , s1 , s14 - fstmias YO!, { s4 - s7 } + vstmia.f32 YO!, { s4 - s7 } .endm @@ -266,14 +266,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s0, ALPHA_R flds s1, ALPHA_I - fldmias YO, { s4 - s5 } + vldmia.f32 YO, { s4 - s5 } FMAC_R1 s4 , s0 , s8 FMAC_I1 s5 , s0 , s9 FMAC_R2 s4 , s1 , s9 FMAC_I2 s5 , s1 , s8 - fstmias YO, { s4 - s5 } + vstmia.f32 YO, { s4 - s5 } add YO, YO, #8 @@ -349,47 +349,47 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
flds s0, ALPHA_R flds s1, ALPHA_I - fldmias YO, { s4 - s5 } + vldmia.f32 YO, { s4 - s5 } FMAC_R1 s4 , s0 , s8 FMAC_I1 s5 , s0 , s9 FMAC_R2 s4 , s1 , s9 FMAC_I2 s5 , s1 , s8 - fstmias YO, { s4 - s5 } + vstmia.f32 YO, { s4 - s5 } add YO, YO, INC_Y - fldmias YO, { s6 - s7 } + vldmia.f32 YO, { s6 - s7 } FMAC_R1 s6 , s0 , s10 FMAC_I1 s7 , s0 , s11 FMAC_R2 s6 , s1 , s11 FMAC_I2 s7 , s1 , s10 - fstmias YO, { s6 - s7 } + vstmia.f32 YO, { s6 - s7 } add YO, YO, INC_Y - fldmias YO, { s4 - s5 } + vldmia.f32 YO, { s4 - s5 } FMAC_R1 s4 , s0 , s12 FMAC_I1 s5 , s0 , s13 FMAC_R2 s4 , s1 , s13 FMAC_I2 s5 , s1 , s12 - fstmias YO, { s4 - s5 } + vstmia.f32 YO, { s4 - s5 } add YO, YO, INC_Y - fldmias YO, { s6 - s7 } + vldmia.f32 YO, { s6 - s7 } FMAC_R1 s6 , s0 , s14 FMAC_I1 s7 , s0 , s15 FMAC_R2 s6 , s1 , s15 FMAC_I2 s7 , s1 , s14 - fstmias YO, { s6 - s7 } + vstmia.f32 YO, { s6 - s7 } add YO, YO, INC_Y @@ -430,14 +430,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s0, ALPHA_R flds s1, ALPHA_I - fldmias YO, { s4 - s5 } + vldmia.f32 YO, { s4 - s5 } FMAC_R1 s4 , s0 , s8 FMAC_I1 s5 , s0 , s9 FMAC_R2 s4 , s1 , s9 FMAC_I2 s5 , s1 , s8 - fstmias YO, { s4 - s5 } + vstmia.f32 YO, { s4 - s5 } add YO, YO, INC_Y diff --git a/kernel/arm/cgemv_t_vfp.S b/kernel/arm/cgemv_t_vfp.S index c07b6d6f8..6833df7d1 100644 --- a/kernel/arm/cgemv_t_vfp.S +++ b/kernel/arm/cgemv_t_vfp.S @@ -150,9 +150,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F2X1 - fldmias XO! , { s2 - s3 } - fldmias AO1!, { s4 - s5 } - fldmias AO2!, { s8 - s9 } + vldmia.f32 XO! , { s2 - s3 } + vldmia.f32 AO1!, { s4 - s5 } + vldmia.f32 AO2!, { s8 - s9 } fmacs s12 , s4 , s2 fmacs s13 , s4 , s3 @@ -168,7 +168,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_F2 - fldmias YO, { s4 - s7 } + vldmia.f32 YO, { s4 - s7 } FMAC_R1 s4 , s0 , s12 FMAC_I1 s5 , s0 , s13 @@ -180,7 +180,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s6 , s1 , s15 FMAC_I2 s7 , s1 , s14 - fstmias YO!, { s4 - s7 } + vstmia.f32 YO!, { s4 - s7 } .endm @@ -204,8 +204,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1X1 - fldmias XO! , { s2 - s3 } - fldmias AO1!, { s4 - s5 } + vldmia.f32 XO! , { s2 - s3 } + vldmia.f32 AO1!, { s4 - s5 } fmacs s12 , s4 , s2 fmacs s13 , s4 , s3 @@ -216,14 +216,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_F1 - fldmias YO, { s4 - s5 } + vldmia.f32 YO, { s4 - s5 } FMAC_R1 s4 , s0 , s12 FMAC_I1 s5 , s0 , s13 FMAC_R2 s4 , s1 , s13 FMAC_I2 s5 , s1 , s12 - fstmias YO!, { s4 - s5 } + vstmia.f32 YO!, { s4 - s5 } .endm @@ -249,9 +249,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S2X1 - fldmias XO , { s2 - s3 } - fldmias AO1!, { s4 - s5 } - fldmias AO2!, { s8 - s9 } + vldmia.f32 XO , { s2 - s3 } + vldmia.f32 AO1!, { s4 - s5 } + vldmia.f32 AO2!, { s8 - s9 } fmacs s12 , s4 , s2 fmacs s13 , s4 , s3 @@ -269,25 +269,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.macro SAVE_S2 - fldmias YO, { s4 - s5 } + vldmia.f32 YO, { s4 - s5 } FMAC_R1 s4 , s0 , s12 FMAC_I1 s5 , s0 , s13 FMAC_R2 s4 , s1 , s13 FMAC_I2 s5 , s1 , s12 - fstmias YO, { s4 - s5 } + vstmia.f32 YO, { s4 - s5 } add YO, YO, INC_Y - fldmias YO, { s6 - s7 } + vldmia.f32 YO, { s6 - s7 } FMAC_R1 s6 , s0 , s14 FMAC_I1 s7 , s0 , s15 FMAC_R2 s6 , s1 , s15 FMAC_I2 s7 , s1 , s14 - fstmias YO, { s6 - s7 } + vstmia.f32 YO, { s6 - s7 } add YO, YO, INC_Y @@ -313,8 +313,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1X1 - fldmias XO , { s2 - s3 } - fldmias AO1!, { s4 - s5 } + vldmia.f32 XO , { s2 - s3 } + vldmia.f32 AO1!, { s4 - s5 } fmacs s12 , s4 , s2 fmacs s13 , s4 , s3 @@ -327,14 +327,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S1 - fldmias YO, { s4 - s5 } + vldmia.f32 YO, { s4 - s5 } FMAC_R1 s4 , s0 , s12 FMAC_I1 s5 , s0 , s13 FMAC_R2 s4 , s1 , s13 FMAC_I2 s5 , s1 , s12 - fstmias YO, { s4 - s5 } + vstmia.f32 YO, { s4 - s5 } add YO, YO, INC_Y diff --git a/kernel/arm/ctrmm_kernel_2x2_vfp.S b/kernel/arm/ctrmm_kernel_2x2_vfp.S index aae890ea9..ca1a512fb 100644 --- a/kernel/arm/ctrmm_kernel_2x2_vfp.S +++ b/kernel/arm/ctrmm_kernel_2x2_vfp.S @@ -165,9 +165,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL2x2_I pld [ AO, #A_PRE ] - fldmias AO!, { s0 - s3 } + vldmia.f32 AO!, { s0 - s3 } pld [ BO, #B_PRE ] - fldmias BO!, { s4 - s7 } + vldmia.f32 BO!, { s4 - s7 } fmuls s8 , s0, s4 @@ -197,9 +197,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL2x2_M1 pld [ AO, #A_PRE ] - fldmias AO!, { s0 - s3 } + vldmia.f32 AO!, { s0 - s3 } pld [ BO, #B_PRE ] - fldmias BO!, { s4 - s7 } + vldmia.f32 BO!, { s4 - s7 } fmacs s8 , s0, s4 fmacs s9 , s0, s5 @@ -225,8 +225,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL2x2_M2 - fldmias AO!, { s0 - s3 } - fldmias BO!, { s4 - s7 } + vldmia.f32 AO!, { s0 - s3 } + vldmia.f32 BO!, { s4 - s7 } fmacs s8 , s0, s4 fmacs s9 , s0, s5 @@ -254,8 +254,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL2x2_E - fldmias AO!, { s0 - s3 } - fldmias BO!, { s4 - s7 } + vldmia.f32 AO!, { s0 - s3 } + vldmia.f32 BO!, { s4 - s7 } fmacs s8 , s0, s4 fmacs s9 , s0, s5 @@ -282,8 +282,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL2x2_SUB - fldmias AO!, { s0 - s3 } - fldmias BO!, { s4 - s7 } + vldmia.f32 AO!, { s0 - s3 } + vldmia.f32 BO!, { s4 - s7 } fmacs s8 , s0, s4 fmacs s9 , s0, s5 @@ -331,7 +331,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s6 , s1 , s11 FMAC_I2 s7 , s1 , s10 - fstmias CO1, { s4 - s7 } + vstmia.f32 CO1, { s4 - s7 } flds s4, FP_ZERO vmov.f32 s5, s4 @@ -348,7 +348,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s6 , s1 , s15 FMAC_I2 s7 , s1 , s14 - fstmias CO2, { s4 - s7 } + vstmia.f32 CO2, { s4 - s7 } add CO1, CO1, #16 @@ -513,7 +513,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s4 , s1 , s9 FMAC_I2 s5 , s1 , s8 - fstmias CO1, { s4 - s5 } + vstmia.f32 CO1, { s4 - s5 } flds s4, FP_ZERO vmov.f32 s5, s4 @@ -523,7 +523,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
FMAC_R2 s4 , s1 , s13 FMAC_I2 s5 , s1 , s12 - fstmias CO2, { s4 - s5 } + vstmia.f32 CO2, { s4 - s5 } add CO1, CO1, #8 @@ -693,7 +693,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s6 , s1 , s11 FMAC_I2 s7 , s1 , s10 - fstmias CO1, { s4 - s7 } + vstmia.f32 CO1, { s4 - s7 } add CO1, CO1, #16 @@ -818,7 +818,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s4 , s1 , s9 FMAC_I2 s5 , s1 , s8 - fstmias CO1, { s4 - s5 } + vstmia.f32 CO1, { s4 - s5 } add CO1, CO1, #8 diff --git a/kernel/arm/ctrmm_kernel_2x2_vfpv3.S b/kernel/arm/ctrmm_kernel_2x2_vfpv3.S index 79e7ed07f..d75fb7735 100644 --- a/kernel/arm/ctrmm_kernel_2x2_vfpv3.S +++ b/kernel/arm/ctrmm_kernel_2x2_vfpv3.S @@ -170,30 +170,30 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL2x2_I pld [ AO , #A_PRE ] pld [ BO , #B_PRE ] - fldmias AO!, { s0 - s1 } - fldmias BO!, { s8 - s9 } + vldmia.f32 AO!, { s0 - s1 } + vldmia.f32 BO!, { s8 - s9 } fmuls s16 , s0, s8 fmuls s24 , s1, s9 - fldmias AO!, { s2 - s3 } + vldmia.f32 AO!, { s2 - s3 } fmuls s17 , s0, s9 fmuls s25 , s1, s8 - fldmias BO!, { s10 - s11 } + vldmia.f32 BO!, { s10 - s11 } fmuls s18 , s2, s8 fmuls s26 , s3, s9 - fldmias AO!, { s4 - s5 } + vldmia.f32 AO!, { s4 - s5 } fmuls s19 , s2, s9 fmuls s27 , s3, s8 - fldmias BO!, { s12 - s13 } + vldmia.f32 BO!, { s12 - s13 } fmuls s20 , s0, s10 fmuls s28 , s1, s11 - fldmias AO!, { s6 - s7 } + vldmia.f32 AO!, { s6 - s7 } fmuls s21 , s0, s11 fmuls s29 , s1, s10 - fldmias BO!, { s14 - s15 } + vldmia.f32 BO!, { s14 - s15 } fmuls s22 , s2, s10 fmuls s30 , s3, s11 fmuls s23 , s2, s11 @@ -206,17 +206,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL2x2_M1 fmacs s16 , s0, s8 - fldmias AO!, { s4 - s5 } + vldmia.f32 AO!, { s4 - s5 } fmacs s24 , s1, s9 fmacs s17 , s0, s9 - fldmias BO!, { s12 - s13 } + vldmia.f32 BO!, { s12 - s13 } fmacs s25 , s1, s8 fmacs s18 , s2, s8 - fldmias AO!, { s6 - s7 } + vldmia.f32 AO!, { s6 - s7 } fmacs s26 , s3, s9 fmacs s19 , s2, s9 - fldmias BO!, { s14 - s15 } + vldmia.f32 BO!, { s14 - s15 } fmacs s27 , s3, s8 fmacs s20 , s0, s10 @@ -238,19 +238,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pld [ BO , #B_PRE ] fmacs s24 , s5, s13 fmacs s17 , s4, s13 - fldmias AO!, { s0 - s1 } + vldmia.f32 AO!, { s0 - s1 } fmacs s25 , s5, s12 fmacs s18 , s6, s12 fmacs s26 , s7, s13 - fldmias BO!, { s8 - s9 } + vldmia.f32 BO!, { s8 - s9 } fmacs s19 , s6, s13 fmacs s27 , s7, s12 - fldmias AO!, { s2 - s3 } + vldmia.f32 AO!, { s2 - s3 } fmacs s20 , s4, s14 fmacs s28 , s5, s15 - fldmias BO!, { s10 - s11 } + vldmia.f32 BO!, { s10 - s11 } fmacs s21 , s4, s15 fmacs s29 , s5, s14 @@ -288,16 +288,16 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL2x2_SUB - fldmias AO!, { s0 - s1 } - fldmias BO!, { s8 - s9 } + vldmia.f32 AO!, { s0 - s1 } + vldmia.f32 BO!, { s8 - s9 } fmacs s16 , s0, s8 fmacs s24 , s1, s9 - fldmias AO!, { s2 - s3 } + vldmia.f32 AO!, { s2 - s3 } fmacs s17 , s0, s9 fmacs s25 , s1, s8 - fldmias BO!, { s10 - s11 } + vldmia.f32 BO!, { s10 - s11 } fmacs s18 , s2, s8 fmacs s26 , s3, s9 fmacs s19 , s2, s9 @@ -354,8 +354,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
FMAC_R2 s10, s1 , s23 FMAC_I2 s11, s1 , s22 - fstmias CO1, { s4 - s7 } - fstmias CO2, { s8 - s11 } + vstmia.f32 CO1, { s4 - s7 } + vstmia.f32 CO2, { s8 - s11 } add CO1, CO1, #16 @@ -532,8 +532,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s8 , s1 , s21 FMAC_I2 s9 , s1 , s20 - fstmias CO1, { s4 - s5 } - fstmias CO2, { s8 - s9 } + vstmia.f32 CO1, { s4 - s5 } + vstmia.f32 CO2, { s8 - s9 } add CO1, CO1, #8 @@ -710,7 +710,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s6 , s1 , s19 FMAC_I2 s7 , s1 , s18 - fstmias CO1, { s4 - s7 } + vstmia.f32 CO1, { s4 - s7 } add CO1, CO1, #16 @@ -835,7 +835,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 s4 , s1 , s17 FMAC_I2 s5 , s1 , s16 - fstmias CO1, { s4 - s5 } + vstmia.f32 CO1, { s4 - s5 } add CO1, CO1, #8 diff --git a/kernel/arm/dcopy_vfp.S b/kernel/arm/dcopy_vfp.S index da239924a..7ee52af88 100644 --- a/kernel/arm/dcopy_vfp.S +++ b/kernel/arm/dcopy_vfp.S @@ -65,15 +65,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY_F4 pld [ X, #X_PRE ] - fldmiad X!, { d0 - d3 } - fstmiad Y!, { d0 - d3 } + vldmia.f64 X!, { d0 - d3 } + vstmia.f64 Y!, { d0 - d3 } .endm .macro COPY_F1 - fldmiad X!, { d0 } - fstmiad Y!, { d0 } + vldmia.f64 X!, { d0 } + vstmia.f64 Y!, { d0 } .endm @@ -83,23 +83,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY_S4 nop - fldmiad X, { d0 } - fstmiad Y, { d0 } + vldmia.f64 X, { d0 } + vstmia.f64 Y, { d0 } add X, X, INC_X add Y, Y, INC_Y - fldmiad X, { d1 } - fstmiad Y, { d1 } + vldmia.f64 X, { d1 } + vstmia.f64 Y, { d1 } add X, X, INC_X add Y, Y, INC_Y - fldmiad X, { d0 } - fstmiad Y, { d0 } + vldmia.f64 X, { d0 } + vstmia.f64 Y, { d0 } add X, X, INC_X add Y, Y, INC_Y - fldmiad X, { d1 } - fstmiad Y, { d1 } + vldmia.f64 X, { d1 } + vstmia.f64 Y, { d1 } add X, X, INC_X add Y, Y, INC_Y @@ -108,8 +108,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY_S1 - fldmiad X, { d0 } - fstmiad Y, { d0 } + vldmia.f64 X, { d0 } + vstmia.f64 Y, { d0 } add X, X, INC_X add Y, Y, INC_Y diff --git a/kernel/arm/ddot_vfp.S b/kernel/arm/ddot_vfp.S index fb294d8b4..4dff5a3e1 100644 --- a/kernel/arm/ddot_vfp.S +++ b/kernel/arm/ddot_vfp.S @@ -67,26 +67,26 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F4 pld [ X, #X_PRE ] - fldmiad X!, { d8 } + vldmia.f64 X!, { d8 } pld [ Y, #X_PRE ] - fldmiad Y!, { d4 } - fldmiad Y!, { d5 } + vldmia.f64 Y!, { d4 } + vldmia.f64 Y!, { d5 } fmacd d0 , d4, d8 - fldmiad X!, { d9 } - fldmiad Y!, { d6 } + vldmia.f64 X!, { d9 } + vldmia.f64 Y!, { d6 } fmacd d1 , d5, d9 - fldmiad X!, { d10 } - fldmiad X!, { d11 } + vldmia.f64 X!, { d10 } + vldmia.f64 X!, { d11 } fmacd d0 , d6, d10 - fldmiad Y!, { d7 } + vldmia.f64 Y!, { d7 } fmacd d1 , d7, d11 .endm .macro KERNEL_F1 - fldmiad X!, { d4 } - fldmiad Y!, { d8 } + vldmia.f64 X!, { d4 } + vldmia.f64 Y!, { d8 } fmacd d0 , d4, d8 .endm @@ -97,26 +97,26 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.macro KERNEL_S4 nop - fldmiad X, { d4 } - fldmiad Y, { d8 } + vldmia.f64 X, { d4 } + vldmia.f64 Y, { d8 } add X, X, INC_X add Y, Y, INC_Y fmacd d0 , d4, d8 - fldmiad X, { d5 } - fldmiad Y, { d9 } + vldmia.f64 X, { d5 } + vldmia.f64 Y, { d9 } add X, X, INC_X add Y, Y, INC_Y fmacd d1 , d5, d9 - fldmiad X, { d6 } - fldmiad Y, { d10 } + vldmia.f64 X, { d6 } + vldmia.f64 Y, { d10 } add X, X, INC_X add Y, Y, INC_Y fmacd d0 , d6, d10 - fldmiad X, { d7 } - fldmiad Y, { d11 } + vldmia.f64 X, { d7 } + vldmia.f64 Y, { d11 } add X, X, INC_X add Y, Y, INC_Y fmacd d1 , d7, d11 @@ -126,8 +126,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1 - fldmiad X, { d4 } - fldmiad Y, { d8 } + vldmia.f64 X, { d4 } + vldmia.f64 Y, { d8 } add X, X, INC_X fmacd d0 , d4, d8 add Y, Y, INC_Y @@ -164,11 +164,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. cmp N, #0 ble ddot_kernel_L999 - cmp INC_X, #0 - beq ddot_kernel_L999 +# cmp INC_X, #0 +# beq ddot_kernel_L999 - cmp INC_Y, #0 - beq ddot_kernel_L999 +# cmp INC_Y, #0 +# beq ddot_kernel_L999 cmp INC_X, #1 bne ddot_kernel_S_BEGIN diff --git a/kernel/arm/dgemm_kernel_4x4_vfpv3.S b/kernel/arm/dgemm_kernel_4x4_vfpv3.S index 1744b54d8..d852c2dad 100644 --- a/kernel/arm/dgemm_kernel_4x4_vfpv3.S +++ b/kernel/arm/dgemm_kernel_4x4_vfpv3.S @@ -331,7 +331,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add r4 , CO2, r3 pld [ CO2 , #C_PRE ] - fldmiad CO1, { d8 - d11 } + vldmia.f64 CO1, { d8 - d11 } pld [ r4 , #C_PRE ] fmacd d8 , d0 , d16 @@ -352,7 +352,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fmacd d15, d0 , d23 fstd d11, [CO1, #24 ] - fldmiad r4, { d8 - d11 } + vldmia.f64 r4, { d8 - d11 } fmacd d8 , d0 , d24 fstd d12, [CO2] @@ -367,7 +367,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pld [ CO2 , #C_PRE ] - fldmiad CO2, { d12 - d15 } + vldmia.f64 CO2, { d12 - d15 } fstd d8 , [r4 ] fmacd d12, d0 , d28 @@ -378,7 +378,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fstd d11, [r4 , #24 ] fmacd d15, d0 , d31 - fstmiad CO2, { d12 - d15 } + vstmia.f64 CO2, { d12 - d15 } add CO1, CO1, #32 diff --git a/kernel/arm/dgemm_ncopy_2_vfp.S b/kernel/arm/dgemm_ncopy_2_vfp.S index 6266c61d2..9642b6478 100644 --- a/kernel/arm/dgemm_ncopy_2_vfp.S +++ b/kernel/arm/dgemm_ncopy_2_vfp.S @@ -73,7 +73,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d3 , [ AO2, #8 ] add AO1, AO1, #16 - fstmiad BO!, { d0 - d3 } + vstmia.f64 BO!, { d0 - d3 } add AO2, AO2, #16 .endm @@ -85,7 +85,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d1 , [ AO2, #0 ] add AO1, AO1, #8 - fstmiad BO!, { d0 - d1 } + vstmia.f64 BO!, { d0 - d1 } add AO2, AO2, #8 .endm @@ -95,7 +95,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d0 , [ AO1, #0 ] fldd d1 , [ AO1, #8 ] - fstmiad BO!, { d0 - d1 } + vstmia.f64 BO!, { d0 - d1 } add AO1, AO1, #16 .endm @@ -105,7 +105,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d0 , [ AO1, #0 ] - fstmiad BO!, { d0 } + vstmia.f64 BO!, { d0 } add AO1, AO1, #8 .endm diff --git a/kernel/arm/dgemm_ncopy_4_vfp.S b/kernel/arm/dgemm_ncopy_4_vfp.S index ffc19a9cc..5760cbd8a 100644 --- a/kernel/arm/dgemm_ncopy_4_vfp.S +++ b/kernel/arm/dgemm_ncopy_4_vfp.S @@ -105,10 +105,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
fldd d11, [ AO4, #16 ] fldd d15, [ AO4, #24 ] - fstmiad BO!, { d0 - d3 } + vstmia.f64 BO!, { d0 - d3 } add AO4, AO4, #32 - fstmiad BO!, { d4 - d7 } - fstmiad BO!, { d8 - d15 } + vstmia.f64 BO!, { d4 - d7 } + vstmia.f64 BO!, { d8 - d15 } .endm @@ -122,7 +122,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d3 , [ AO4, #0 ] add AO3, AO3, #8 - fstmiad BO!, { d0 - d3 } + vstmia.f64 BO!, { d0 - d3 } add AO4, AO4, #8 .endm @@ -140,7 +140,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d5 , [ AO2, #16 ] fldd d7 , [ AO2, #24 ] - fstmiad BO!, { d0 - d7 } + vstmia.f64 BO!, { d0 - d7 } add AO2, AO2, #32 .endm @@ -152,7 +152,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d1 , [ AO2, #0 ] add AO1, AO1, #8 - fstmiad BO!, { d0 - d1 } + vstmia.f64 BO!, { d0 - d1 } add AO2, AO2, #8 .endm @@ -164,7 +164,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d2 , [ AO1, #16 ] fldd d3 , [ AO1, #24 ] - fstmiad BO!, { d0 - d3 } + vstmia.f64 BO!, { d0 - d3 } add AO1, AO1, #32 .endm @@ -174,7 +174,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d0 , [ AO1, #0 ] - fstmiad BO!, { d0 } + vstmia.f64 BO!, { d0 } add AO1, AO1, #8 .endm diff --git a/kernel/arm/dgemm_tcopy_4_vfp.S b/kernel/arm/dgemm_tcopy_4_vfp.S index 937f43957..8335de27c 100644 --- a/kernel/arm/dgemm_tcopy_4_vfp.S +++ b/kernel/arm/dgemm_tcopy_4_vfp.S @@ -76,21 +76,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY4x4 pld [ AO1, #A_PRE ] - fldmiad AO1, { d0 - d3 } + vldmia.f64 AO1, { d0 - d3 } add r3, AO1, LDA pld [ r3, #A_PRE ] - fldmiad r3, { d4 - d7 } + vldmia.f64 r3, { d4 - d7 } add r3, r3, LDA pld [ r3, #A_PRE ] - fldmiad r3, { d8 - d11 } + vldmia.f64 r3, { d8 - d11 } add r3, r3, LDA pld [ r3, #A_PRE ] - fldmiad r3, { d12 - d15 } + vldmia.f64 r3, { d12 - d15 } - fstmiad BO1, { d0 - d15 } + vstmia.f64 BO1, { d0 - d15 } add AO1, AO1, #32 add BO1, BO1, M4 @@ -98,18 +98,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY2x4 - fldmiad AO1, { d0 - d1 } + vldmia.f64 AO1, { d0 - d1 } add r3, AO1, LDA - fldmiad r3, { d2 - d3 } + vldmia.f64 r3, { d2 - d3 } add r3, r3, LDA - fldmiad r3, { d4 - d5 } + vldmia.f64 r3, { d4 - d5 } add r3, r3, LDA - fldmiad r3, { d6 - d7 } + vldmia.f64 r3, { d6 - d7 } - fstmiad BO2, { d0 - d7 } + vstmia.f64 BO2, { d0 - d7 } add AO1, AO1, #16 add BO2, BO2, #64 @@ -117,18 +117,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY1x4 - fldmiad AO1, { d0 } + vldmia.f64 AO1, { d0 } add r3, AO1, LDA - fldmiad r3, { d1 } + vldmia.f64 r3, { d1 } add r3, r3, LDA - fldmiad r3, { d2 } + vldmia.f64 r3, { d2 } add r3, r3, LDA - fldmiad r3, { d3 } + vldmia.f64 r3, { d3 } - fstmiad BO3, { d0 - d3 } + vstmia.f64 BO3, { d0 - d3 } add AO1, AO1, #8 add BO3, BO3, #32 @@ -139,13 +139,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY4x2 pld [ AO1, #A_PRE ] - fldmiad AO1, { d0 - d3 } + vldmia.f64 AO1, { d0 - d3 } add r3, AO1, LDA pld [ r3, #A_PRE ] - fldmiad r3, { d4 - d7 } + vldmia.f64 r3, { d4 - d7 } - fstmiad BO1, { d0 - d7 } + vstmia.f64 BO1, { d0 - d7 } add AO1, AO1, #32 add BO1, BO1, M4 @@ -153,12 +153,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.macro COPY2x2 - fldmiad AO1, { d0 - d1 } + vldmia.f64 AO1, { d0 - d1 } add r3, AO1, LDA - fldmiad r3, { d2 - d3 } + vldmia.f64 r3, { d2 - d3 } - fstmiad BO2, { d0 - d3 } + vstmia.f64 BO2, { d0 - d3 } add AO1, AO1, #16 add BO2, BO2, #32 @@ -166,12 +166,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY1x2 - fldmiad AO1, { d0 } + vldmia.f64 AO1, { d0 } add r3, AO1, LDA - fldmiad r3, { d1 } + vldmia.f64 r3, { d1 } - fstmiad BO3, { d0 - d1 } + vstmia.f64 BO3, { d0 - d1 } add AO1, AO1, #8 add BO3, BO3, #16 @@ -182,9 +182,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY4x1 pld [ AO1, #A_PRE ] - fldmiad AO1, { d0 - d3 } + vldmia.f64 AO1, { d0 - d3 } - fstmiad BO1, { d0 - d3 } + vstmia.f64 BO1, { d0 - d3 } add AO1, AO1, #32 add BO1, BO1, M4 @@ -192,9 +192,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY2x1 - fldmiad AO1, { d0 - d1 } + vldmia.f64 AO1, { d0 - d1 } - fstmiad BO2, { d0 - d1 } + vstmia.f64 BO2, { d0 - d1 } add AO1, AO1, #16 add BO2, BO2, #16 @@ -202,9 +202,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY1x1 - fldmiad AO1, { d0 } + vldmia.f64 AO1, { d0 } - fstmiad BO3, { d0 } + vstmia.f64 BO3, { d0 } add AO1, AO1, #8 add BO3, BO3, #8 diff --git a/kernel/arm/dtrmm_kernel_4x4_vfpv3.S b/kernel/arm/dtrmm_kernel_4x4_vfpv3.S index c0c6a1677..e73936cdd 100644 --- a/kernel/arm/dtrmm_kernel_4x4_vfpv3.S +++ b/kernel/arm/dtrmm_kernel_4x4_vfpv3.S @@ -128,10 +128,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d8 , [ BO ] pld [ AO , #A_PRE ] - fldmiad AO!, { d0 - d1} + vldmia.f64 AO!, { d0 - d1} fmuld d16 , d0, d8 - fldmiad AO!, { d2 - d3} + vldmia.f64 AO!, { d2 - d3} fmuld d17 , d1, d8 fldd d9 , [ BO, #8 ] fmuld d18 , d2, d8 @@ -148,10 +148,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fmuld d23 , d3, d9 fmuld d24 , d0, d10 - fldmiad AO!, { d4 - d5 } + vldmia.f64 AO!, { d4 - d5 } fmuld d25 , d1, d10 fmuld d26 , d2, d10 - fldmiad AO!, { d6 - d7 } + vldmia.f64 AO!, { d6 - d7 } fmuld d27 , d3, d10 fldd d13, [ BO, #8 ] @@ -173,10 +173,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d8 , [ BO ] pld [ AO , #A_PRE ] - fldmiad AO!, { d0 - d1} + vldmia.f64 AO!, { d0 - d1} fmacd d16 , d0, d8 - fldmiad AO!, { d2 - d3} + vldmia.f64 AO!, { d2 - d3} fmacd d17 , d1, d8 fldd d9 , [ BO, #8 ] fmacd d18 , d2, d8 @@ -193,10 +193,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fmacd d23 , d3, d9 fmacd d24 , d0, d10 - fldmiad AO!, { d4 - d5 } + vldmia.f64 AO!, { d4 - d5 } fmacd d25 , d1, d10 fmacd d26 , d2, d10 - fldmiad AO!, { d6 - d7 } + vldmia.f64 AO!, { d6 - d7 } fmacd d27 , d3, d10 fldd d13, [ BO, #8 ] @@ -225,11 +225,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d8 , [ BO ] fmacd d21 , d5, d13 fmacd d22 , d6, d13 - fldmiad AO!, { d0 - d1 } + vldmia.f64 AO!, { d0 - d1 } fmacd d23 , d7, d13 fmacd d24 , d4, d14 - fldmiad AO!, { d2 - d3 } + vldmia.f64 AO!, { d2 - d3 } fmacd d25 , d5, d14 fldd d9 , [ BO, #8 ] fmacd d26 , d6, d14 @@ -257,10 +257,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
fmacd d19 , d3, d8 fmacd d20 , d0, d9 - fldmiad AO!, { d4 - d5 } + vldmia.f64 AO!, { d4 - d5 } fmacd d21 , d1, d9 fmacd d22 , d2, d9 - fldmiad AO!, { d6 - d7 } + vldmia.f64 AO!, { d6 - d7 } fmacd d23 , d3, d9 fmacd d24 , d0, d10 @@ -390,7 +390,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fstd d11, [r4 , #24 ] fmuld d15, d0 , d31 - fstmiad CO2, { d12 - d15 } + vstmia.f64 CO2, { d12 - d15 } add CO1, CO1, #32 diff --git a/kernel/arm/gemv_n_vfp.S b/kernel/arm/gemv_n_vfp.S index 7c154d741..753ac27c6 100644 --- a/kernel/arm/gemv_n_vfp.S +++ b/kernel/arm/gemv_n_vfp.S @@ -139,8 +139,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F8X1 pld [ AO2 , #A_PRE ] - fldmiad XO! , { d2 } - fldmiad AO1 , { d4 - d7 } + vldmia.f64 XO! , { d2 } + vldmia.f64 AO1 , { d4 - d7 } vmla.f64 d8 , d2 , d4 pld [ AO2 , #4*SIZE ] @@ -150,7 +150,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vmla.f64 d11 , d2 , d7 - fldmiad r3 , { d4 - d7 } + vldmia.f64 r3 , { d4 - d7 } vmla.f64 d12 , d2 , d4 vmla.f64 d13 , d2 , d5 @@ -164,23 +164,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_F8 - fldmiad YO, { d4 - d7 } + vldmia.f64 YO, { d4 - d7 } vmla.f64 d4 , d0, d8 vmla.f64 d5 , d0, d9 vmla.f64 d6 , d0, d10 vmla.f64 d7 , d0, d11 - fstmiad YO!, { d4 - d7 } + vstmia.f64 YO!, { d4 - d7 } - fldmiad YO, { d4 - d7 } + vldmia.f64 YO, { d4 - d7 } vmla.f64 d4 , d0, d12 vmla.f64 d5 , d0, d13 vmla.f64 d6 , d0, d14 vmla.f64 d7 , d0, d15 - fstmiad YO!, { d4 - d7 } + vstmia.f64 YO!, { d4 - d7 } .endm @@ -195,8 +195,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1X1 - fldmiad XO! , { d2 } - fldmiad AO1 , { d8 } + vldmia.f64 XO! , { d2 } + vldmia.f64 AO1 , { d8 } vmla.f64 d12 , d2 , d8 add AO1, AO1, LDA @@ -204,9 +204,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_F1 - fldmiad YO, { d4 } + vldmia.f64 YO, { d4 } vmla.f64 d4, d0, d12 - fstmiad YO!, { d4 } + vstmia.f64 YO!, { d4 } .endm @@ -234,8 +234,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S4X1 pld [ AO2 , #A_PRE ] - fldmiad XO , { d2 } - fldmiad AO1 , { d8 - d11 } + vldmia.f64 XO , { d2 } + vldmia.f64 AO1 , { d8 - d11 } vmla.f64 d12 , d2 , d8 add AO1, AO1, LDA @@ -249,24 +249,24 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S4 - fldmiad YO, { d4 } + vldmia.f64 YO, { d4 } vmla.f64 d4 , d0, d12 - fstmiad YO, { d4 } + vstmia.f64 YO, { d4 } add YO, YO, INC_Y - fldmiad YO, { d5 } + vldmia.f64 YO, { d5 } vmla.f64 d5 , d0, d13 - fstmiad YO, { d5 } + vstmia.f64 YO, { d5 } add YO, YO, INC_Y - fldmiad YO, { d4 } + vldmia.f64 YO, { d4 } vmla.f64 d4 , d0, d14 - fstmiad YO, { d4 } + vstmia.f64 YO, { d4 } add YO, YO, INC_Y - fldmiad YO, { d5 } + vldmia.f64 YO, { d5 } vmla.f64 d5 , d0, d15 - fstmiad YO, { d5 } + vstmia.f64 YO, { d5 } add YO, YO, INC_Y .endm @@ -282,8 +282,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1X1 - fldmiad XO , { d2 } - fldmiad AO1 , { d8 } + vldmia.f64 XO , { d2 } + vldmia.f64 AO1 , { d8 } vmla.f64 d12 , d2 , d8 add AO1, AO1, LDA add XO, XO , INC_X @@ -292,9 +292,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.macro SAVE_S1 - fldmiad YO, { d4 } + vldmia.f64 YO, { d4 } vmla.f64 d4, d0, d12 - fstmiad YO , { d4 } + vstmia.f64 YO , { d4 } add YO, YO, INC_Y .endm @@ -338,8 +338,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F8X1 pld [ AO2, #A_PRE ] - fldmias XO! , { s2 } - fldmias AO1 , { s4 - s7 } + vldmia.f32 XO! , { s2 } + vldmia.f32 AO1 , { s4 - s7 } vmla.f32 s8 , s2 , s4 vmla.f32 s9 , s2 , s5 @@ -348,7 +348,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add r3, AO1, #4*SIZE - fldmias r3 , { s4 - s7 } + vldmia.f32 r3 , { s4 - s7 } vmla.f32 s12 , s2 , s4 vmla.f32 s13 , s2 , s5 @@ -362,24 +362,24 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_F8 - fldmias YO, { s4 - s7 } + vldmia.f32 YO, { s4 - s7 } vmla.f32 s4 , s0, s8 vmla.f32 s5 , s0, s9 vmla.f32 s6 , s0, s10 vmla.f32 s7 , s0, s11 - fstmias YO!, { s4 - s7 } + vstmia.f32 YO!, { s4 - s7 } - fldmias YO, { s4 - s7 } + vldmia.f32 YO, { s4 - s7 } vmla.f32 s4 , s0, s12 vmla.f32 s5 , s0, s13 vmla.f32 s6 , s0, s14 vmla.f32 s7 , s0, s15 - fstmias YO!, { s4 - s7 } + vstmia.f32 YO!, { s4 - s7 } .endm @@ -394,8 +394,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1X1 - fldmias XO! , { s2 } - fldmias AO1 , { s8 } + vldmia.f32 XO! , { s2 } + vldmia.f32 AO1 , { s8 } vmla.f32 s12 , s2 , s8 add AO1, AO1, LDA @@ -403,9 +403,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_F1 - fldmias YO, { s4 } + vldmia.f32 YO, { s4 } vmla.f32 s4, s0, s12 - fstmias YO!, { s4 } + vstmia.f32 YO!, { s4 } .endm @@ -434,8 +434,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S4X1 - fldmias XO , { s2 } - fldmias AO1 , { s8 - s11 } + vldmia.f32 XO , { s2 } + vldmia.f32 AO1 , { s8 - s11 } vmla.f32 s12 , s2 , s8 vmla.f32 s13 , s2 , s9 @@ -449,24 +449,24 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S4 - fldmias YO, { s4 } + vldmia.f32 YO, { s4 } vmla.f32 s4 , s0, s12 - fstmias YO, { s4 } + vstmia.f32 YO, { s4 } add YO, YO, INC_Y - fldmias YO, { s5 } + vldmia.f32 YO, { s5 } vmla.f32 s5 , s0, s13 - fstmias YO, { s5 } + vstmia.f32 YO, { s5 } add YO, YO, INC_Y - fldmias YO, { s4 } + vldmia.f32 YO, { s4 } vmla.f32 s4 , s0, s14 - fstmias YO, { s4 } + vstmia.f32 YO, { s4 } add YO, YO, INC_Y - fldmias YO, { s5 } + vldmia.f32 YO, { s5 } vmla.f32 s5 , s0, s15 - fstmias YO, { s5 } + vstmia.f32 YO, { s5 } add YO, YO, INC_Y .endm @@ -482,8 +482,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1X1 - fldmias XO , { s2 } - fldmias AO1 , { s8 } + vldmia.f32 XO , { s2 } + vldmia.f32 AO1 , { s8 } vmla.f32 s12 , s2 , s8 add AO1, AO1, LDA add XO, XO , INC_X @@ -492,9 +492,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S1 - fldmias YO, { s4 } + vldmia.f32 YO, { s4 } vmla.f32 s4, s0, s12 - fstmias YO , { s4 } + vstmia.f32 YO , { s4 } add YO, YO, INC_Y .endm diff --git a/kernel/arm/gemv_n_vfpv3.S b/kernel/arm/gemv_n_vfpv3.S index 54f958b7b..e80dc1458 100644 --- a/kernel/arm/gemv_n_vfpv3.S +++ b/kernel/arm/gemv_n_vfpv3.S @@ -138,8 +138,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F8X1 - fldmiad XO! , { d4 } - fldmiad AO1 , { d8 - d15 } + vldmia.f64 XO! 
, { d4 } + vldmia.f64 AO1 , { d8 - d15 } vmla.f64 d24 , d4 , d8 pld [ AO2 , #A_PRE ] @@ -158,7 +158,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_F8 - fldmiad YO, { d16 - d23 } + vldmia.f64 YO, { d16 - d23 } vmla.f64 d16, d0, d24 vmla.f64 d17, d0, d25 @@ -169,7 +169,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vmla.f64 d22, d0, d30 vmla.f64 d23, d0, d31 - fstmiad YO!, { d16 - d23 } + vstmia.f64 YO!, { d16 - d23 } .endm @@ -184,8 +184,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1X1 - fldmiad XO! , { d4 } - fldmiad AO1 , { d8 } + vldmia.f64 XO! , { d4 } + vldmia.f64 AO1 , { d8 } vmla.f64 d24 , d4 , d8 add AO1, AO1, LDA @@ -193,9 +193,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_F1 - fldmiad YO, { d16 } + vldmia.f64 YO, { d16 } vmla.f64 d16, d0, d24 - fstmiad YO!, { d16 } + vstmia.f64 YO!, { d16 } .endm @@ -234,8 +234,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pld [ AO2 , #A_PRE ] pld [ AO2 , #A_PRE+32 ] - fldmiad XO , { d4 } - fldmiad AO1 , { d8 - d15 } + vldmia.f64 XO , { d4 } + vldmia.f64 AO1 , { d8 - d15 } vmla.f64 d24 , d4 , d8 vmla.f64 d25 , d4 , d9 @@ -253,44 +253,44 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S8 - fldmiad YO, { d16 } + vldmia.f64 YO, { d16 } vmla.f64 d16, d0, d24 - fstmiad YO, { d16 } + vstmia.f64 YO, { d16 } add YO, YO, INC_Y - fldmiad YO, { d17 } + vldmia.f64 YO, { d17 } vmla.f64 d17, d0, d25 - fstmiad YO, { d17 } + vstmia.f64 YO, { d17 } add YO, YO, INC_Y - fldmiad YO, { d18 } + vldmia.f64 YO, { d18 } vmla.f64 d18, d0, d26 - fstmiad YO, { d18 } + vstmia.f64 YO, { d18 } add YO, YO, INC_Y - fldmiad YO, { d19 } + vldmia.f64 YO, { d19 } vmla.f64 d19, d0, d27 - fstmiad YO, { d19 } + vstmia.f64 YO, { d19 } add YO, YO, INC_Y - fldmiad YO, { d20 } + vldmia.f64 YO, { d20 } vmla.f64 d20, d0, d28 - fstmiad YO, { d20 } + vstmia.f64 YO, { d20 } add YO, YO, INC_Y - fldmiad YO, { d21 } + vldmia.f64 YO, { d21 } vmla.f64 d21, d0, d29 - fstmiad YO, { d21 } + vstmia.f64 YO, { d21 } add YO, YO, INC_Y - fldmiad YO, { d22 } + vldmia.f64 YO, { d22 } vmla.f64 d22, d0, d30 - fstmiad YO, { d22 } + vstmia.f64 YO, { d22 } add YO, YO, INC_Y - fldmiad YO, { d23 } + vldmia.f64 YO, { d23 } vmla.f64 d23, d0, d31 - fstmiad YO, { d23 } + vstmia.f64 YO, { d23 } add YO, YO, INC_Y .endm @@ -306,8 +306,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1X1 - fldmiad XO , { d4 } - fldmiad AO1 , { d8 } + vldmia.f64 XO , { d4 } + vldmia.f64 AO1 , { d8 } vmla.f64 d24 , d4 , d8 add AO1, AO1, LDA add XO, XO, INC_X @@ -316,9 +316,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S1 - fldmiad YO, { d16 } + vldmia.f64 YO, { d16 } vmla.f64 d16, d0, d24 - fstmiad YO, { d16 } + vstmia.f64 YO, { d16 } add YO, YO, INC_Y .endm @@ -361,8 +361,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F8X1 pld [ AO2 , #A_PRE ] - fldmias XO! , { s4 } - fldmias AO1 , { s8 - s15 } + vldmia.f32 XO! , { s4 } + vldmia.f32 AO1 , { s8 - s15 } vmla.f32 s24 , s4 , s8 vmla.f32 s25 , s4 , s9 @@ -379,7 +379,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
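The _vfpv3 kernels in this file keep their working set in the upper register bank (d16 through d31), which exists only on VFPv3/VFPv4 implementations with the 32-double register file; the plain _vfp kernels stay within d0-d15 so they remain usable on older VFP units. The respelling itself is identical in both. A condensed view of the idiom:

        vldmia.f64 AO1, { d8 - d15 }    // a full column panel in the low bank
        vmla.f64   d24, d4, d8          // partial sums d24..d31 live in the VFPv3-only high bank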
.macro SAVE_F8 - fldmias YO, { s16 - s23 } + vldmia.f32 YO, { s16 - s23 } vmla.f32 s16, s0, s24 vmla.f32 s17, s0, s25 @@ -390,7 +390,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. vmla.f32 s22, s0, s30 vmla.f32 s23, s0, s31 - fstmias YO!, { s16 - s23 } + vstmia.f32 YO!, { s16 - s23 } .endm @@ -405,8 +405,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1X1 - fldmias XO! , { s4 } - fldmias AO1 , { s8 } + vldmia.f32 XO! , { s4 } + vldmia.f32 AO1 , { s8 } vmla.f32 s24 , s4 , s8 add AO1, AO1, LDA @@ -414,9 +414,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_F1 - fldmias YO, { s16 } + vldmia.f32 YO, { s16 } vmla.f32 s16, s0, s24 - fstmias YO!, { s16 } + vstmia.f32 YO!, { s16 } .endm @@ -454,8 +454,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S8X1 pld [ AO2 , #A_PRE ] - fldmias XO , { s4 } - fldmias AO1 , { s8 - s15 } + vldmia.f32 XO , { s4 } + vldmia.f32 AO1 , { s8 - s15 } vmla.f32 s24 , s4 , s8 vmla.f32 s25 , s4 , s9 @@ -473,44 +473,44 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S8 - fldmias YO, { s16 } + vldmia.f32 YO, { s16 } vmla.f32 s16, s0, s24 - fstmias YO, { s16 } + vstmia.f32 YO, { s16 } add YO, YO, INC_Y - fldmias YO, { s17 } + vldmia.f32 YO, { s17 } vmla.f32 s17, s0, s25 - fstmias YO, { s17 } + vstmia.f32 YO, { s17 } add YO, YO, INC_Y - fldmias YO, { s18 } + vldmia.f32 YO, { s18 } vmla.f32 s18, s0, s26 - fstmias YO, { s18 } + vstmia.f32 YO, { s18 } add YO, YO, INC_Y - fldmias YO, { s19 } + vldmia.f32 YO, { s19 } vmla.f32 s19, s0, s27 - fstmias YO, { s19 } + vstmia.f32 YO, { s19 } add YO, YO, INC_Y - fldmias YO, { s20 } + vldmia.f32 YO, { s20 } vmla.f32 s20, s0, s28 - fstmias YO, { s20 } + vstmia.f32 YO, { s20 } add YO, YO, INC_Y - fldmias YO, { s21 } + vldmia.f32 YO, { s21 } vmla.f32 s21, s0, s29 - fstmias YO, { s21 } + vstmia.f32 YO, { s21 } add YO, YO, INC_Y - fldmias YO, { s22 } + vldmia.f32 YO, { s22 } vmla.f32 s22, s0, s30 - fstmias YO, { s22 } + vstmia.f32 YO, { s22 } add YO, YO, INC_Y - fldmias YO, { s23 } + vldmia.f32 YO, { s23 } vmla.f32 s23, s0, s31 - fstmias YO, { s23 } + vstmia.f32 YO, { s23 } add YO, YO, INC_Y .endm @@ -526,8 +526,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1X1 - fldmias XO , { s4 } - fldmias AO1 , { s8 } + vldmia.f32 XO , { s4 } + vldmia.f32 AO1 , { s8 } vmla.f32 s24 , s4 , s8 add AO1, AO1, LDA add XO, XO, INC_X @@ -536,9 +536,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S1 - fldmias YO, { s16 } + vldmia.f32 YO, { s16 } vmla.f32 s16, s0, s24 - fstmias YO, { s16 } + vstmia.f32 YO, { s16 } add YO, YO, INC_Y .endm diff --git a/kernel/arm/gemv_t_vfp.S b/kernel/arm/gemv_t_vfp.S index 9559d1829..fbe51cc8c 100644 --- a/kernel/arm/gemv_t_vfp.S +++ b/kernel/arm/gemv_t_vfp.S @@ -112,13 +112,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F2X4 pld [ XO , #X_PRE ] - fldmiad XO! , { d12 - d15 } + vldmia.f64 XO! 
, { d12 - d15 } pld [ AO1 , #A_PRE ] - fldmiad AO1!, { d8 - d9 } + vldmia.f64 AO1!, { d8 - d9 } pld [ AO2 , #A_PRE ] - fldmiad AO2!, { d4 - d5 } - fldmiad AO1!, { d10 - d11 } - fldmiad AO2!, { d6 - d7 } + vldmia.f64 AO2!, { d4 - d5 } + vldmia.f64 AO1!, { d10 - d11 } + vldmia.f64 AO2!, { d6 - d7 } vmla.f64 d2 , d12 , d8 vmla.f64 d3 , d12 , d4 @@ -133,9 +133,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F2X1 - fldmiad XO! , { d1 } - fldmiad AO1!, { d8 } - fldmiad AO2!, { d4 } + vldmia.f64 XO! , { d1 } + vldmia.f64 AO1!, { d8 } + vldmia.f64 AO2!, { d4 } vmla.f64 d2 , d1 , d8 vmla.f64 d3 , d1 , d4 @@ -143,10 +143,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_F2 - fldmiad YO, { d4 - d5 } + vldmia.f64 YO, { d4 - d5 } vmla.f64 d4, d0, d2 vmla.f64 d5, d0, d3 - fstmiad YO!, { d4 - d5 } + vstmia.f64 YO!, { d4 - d5 } .endm @@ -160,10 +160,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1X4 pld [ XO , #X_PRE ] - fldmiad XO! , { d12 - d15 } + vldmia.f64 XO! , { d12 - d15 } pld [ AO1 , #A_PRE ] - fldmiad AO1!, { d8 - d9 } - fldmiad AO1!, { d10 - d11 } + vldmia.f64 AO1!, { d8 - d9 } + vldmia.f64 AO1!, { d10 - d11 } vmla.f64 d2 , d12 , d8 vmla.f64 d2 , d13 , d9 vmla.f64 d2 , d14, d10 @@ -173,17 +173,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1X1 - fldmiad XO! , { d1 } - fldmiad AO1!, { d8 } + vldmia.f64 XO! , { d1 } + vldmia.f64 AO1!, { d8 } vmla.f64 d2 , d1 , d8 .endm .macro SAVE_F1 - fldmiad YO, { d4 } + vldmia.f64 YO, { d4 } vmla.f64 d4, d0, d2 - fstmiad YO!, { d4 } + vstmia.f64 YO!, { d4 } .endm @@ -197,23 +197,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S2X4 - fldmiad XO , { d12 } + vldmia.f64 XO , { d12 } add XO, XO, INC_X pld [ AO1 , #A_PRE ] - fldmiad AO1!, { d8 - d9 } + vldmia.f64 AO1!, { d8 - d9 } pld [ AO2 , #A_PRE ] - fldmiad AO2!, { d4 - d5 } + vldmia.f64 AO2!, { d4 - d5 } - fldmiad XO , { d13 } + vldmia.f64 XO , { d13 } add XO, XO, INC_X - fldmiad AO1!, { d10 - d11 } - fldmiad AO2!, { d6 - d7 } + vldmia.f64 AO1!, { d10 - d11 } + vldmia.f64 AO2!, { d6 - d7 } - fldmiad XO , { d14 } + vldmia.f64 XO , { d14 } add XO, XO, INC_X - fldmiad XO , { d15 } + vldmia.f64 XO , { d15 } add XO, XO, INC_X vmla.f64 d2 , d12 , d8 @@ -229,9 +229,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S2X1 - fldmiad XO , { d1 } - fldmiad AO1!, { d8 } - fldmiad AO2!, { d4 } + vldmia.f64 XO , { d1 } + vldmia.f64 AO1!, { d8 } + vldmia.f64 AO2!, { d4 } vmla.f64 d2 , d1 , d8 add XO, XO, INC_X vmla.f64 d3 , d1 , d4 @@ -240,14 +240,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S2 - fldmiad YO, { d4 } + vldmia.f64 YO, { d4 } vmla.f64 d4, d0, d2 - fstmiad YO, { d4 } + vstmia.f64 YO, { d4 } add YO, YO, INC_Y - fldmiad YO, { d5 } + vldmia.f64 YO, { d5 } vmla.f64 d5, d0, d3 - fstmiad YO, { d5 } + vstmia.f64 YO, { d5 } add YO, YO, INC_Y .endm @@ -261,20 +261,20 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
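Each routine carries two kernel families: the F (unit-stride) kernels advance their pointers through the ! writeback on the load itself, while the S (strided) kernels load in place and then step by INC_X or INC_Y, which are presumably pre-scaled to byte offsets before the loop. The two idioms side by side, as they appear in these gemv kernels:

        // unit stride: writeback advances XO by four doubles (32 bytes)
        vldmia.f64 XO!, { d12 - d15 }

        // general stride: load one element, then step explicitly
        vldmia.f64 XO , { d12 }
        add        XO , XO , INC_X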
.macro KERNEL_S1X4 - fldmiad XO , { d12 } + vldmia.f64 XO , { d12 } add XO, XO, INC_X pld [ AO1 , #A_PRE ] - fldmiad AO1!, { d8 - d9 } + vldmia.f64 AO1!, { d8 - d9 } - fldmiad XO , { d13 } + vldmia.f64 XO , { d13 } add XO, XO, INC_X - fldmiad AO1!, { d10 - d11 } + vldmia.f64 AO1!, { d10 - d11 } - fldmiad XO , { d14 } + vldmia.f64 XO , { d14 } add XO, XO, INC_X - fldmiad XO , { d15 } + vldmia.f64 XO , { d15 } add XO, XO, INC_X vmla.f64 d2 , d12 , d8 @@ -286,8 +286,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1X1 - fldmiad XO , { d1 } - fldmiad AO1!, { d8 } + vldmia.f64 XO , { d1 } + vldmia.f64 AO1!, { d8 } vmla.f64 d2 , d1 , d8 add XO, XO, INC_X @@ -295,9 +295,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S1 - fldmiad YO, { d4 } + vldmia.f64 YO, { d4 } vmla.f64 d4, d0, d2 - fstmiad YO, { d4 } + vstmia.f64 YO, { d4 } add YO, YO, INC_Y .endm @@ -315,11 +315,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F2X4 - fldmias XO! , { s12 - s15 } - fldmias AO1!, { s8 - s9 } - fldmias AO2!, { s4 - s5 } - fldmias AO1!, { s10 - s11 } - fldmias AO2!, { s6 - s7 } + vldmia.f32 XO! , { s12 - s15 } + vldmia.f32 AO1!, { s8 - s9 } + vldmia.f32 AO2!, { s4 - s5 } + vldmia.f32 AO1!, { s10 - s11 } + vldmia.f32 AO2!, { s6 - s7 } vmla.f32 s2 , s12 , s8 vmla.f32 s3 , s12 , s4 @@ -334,9 +334,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F2X1 - fldmias XO! , { s1 } - fldmias AO1!, { s8 } - fldmias AO2!, { s4 } + vldmia.f32 XO! , { s1 } + vldmia.f32 AO1!, { s8 } + vldmia.f32 AO2!, { s4 } vmla.f32 s2 , s1 , s8 vmla.f32 s3 , s1 , s4 @@ -344,10 +344,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_F2 - fldmias YO, { s4 - s5 } + vldmia.f32 YO, { s4 - s5 } vmla.f32 s4, s0, s2 vmla.f32 s5, s0, s3 - fstmias YO!, { s4 - s5 } + vstmia.f32 YO!, { s4 - s5 } .endm @@ -359,9 +359,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1X4 - fldmias XO! , { s12 - s15 } - fldmias AO1!, { s8 - s9 } - fldmias AO1!, { s10 - s11 } + vldmia.f32 XO! , { s12 - s15 } + vldmia.f32 AO1!, { s8 - s9 } + vldmia.f32 AO1!, { s10 - s11 } vmla.f32 s2 , s12 , s8 vmla.f32 s2 , s13 , s9 vmla.f32 s2 , s14, s10 @@ -371,17 +371,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1X1 - fldmias XO! , { s1 } - fldmias AO1!, { s8 } + vldmia.f32 XO! , { s1 } + vldmia.f32 AO1!, { s8 } vmla.f32 s2 , s1 , s8 .endm .macro SAVE_F1 - fldmias YO, { s4 } + vldmia.f32 YO, { s4 } vmla.f32 s4, s0, s2 - fstmias YO!, { s4 } + vstmia.f32 YO!, { s4 } .endm @@ -395,21 +395,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S2X4 - fldmias XO , { s12 } + vldmia.f32 XO , { s12 } add XO, XO, INC_X - fldmias AO1!, { s8 - s9 } - fldmias AO2!, { s4 - s5 } + vldmia.f32 AO1!, { s8 - s9 } + vldmia.f32 AO2!, { s4 - s5 } - fldmias XO , { s13 } + vldmia.f32 XO , { s13 } add XO, XO, INC_X - fldmias AO1!, { s10 - s11 } - fldmias AO2!, { s6 - s7 } + vldmia.f32 AO1!, { s10 - s11 } + vldmia.f32 AO2!, { s6 - s7 } - fldmias XO , { s14 } + vldmia.f32 XO , { s14 } add XO, XO, INC_X - fldmias XO , { s15 } + vldmia.f32 XO , { s15 } add XO, XO, INC_X vmla.f32 s2 , s12 , s8 @@ -425,9 +425,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
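For the transposed gemv each output element is a dot product, accumulated in d2/d3 (s2/s3 in the single-precision half), so Y is only touched in the SAVE macros, which fold the sum in as y = y + alpha*sum with alpha held in d0:

        vldmia.f64 YO, { d4 }       // load y[i]
        vmla.f64   d4, d0, d2       // y[i] += alpha * dot
        vstmia.f64 YO, { d4 }       // store back
        add        YO, YO, INC_Y    // step to the next output element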
.macro KERNEL_S2X1 - fldmias XO , { s1 } - fldmias AO1!, { s8 } - fldmias AO2!, { s4 } + vldmia.f32 XO , { s1 } + vldmia.f32 AO1!, { s8 } + vldmia.f32 AO2!, { s4 } vmla.f32 s2 , s1 , s8 add XO, XO, INC_X vmla.f32 s3 , s1 , s4 @@ -436,14 +436,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S2 - fldmias YO, { s4 } + vldmia.f32 YO, { s4 } vmla.f32 s4, s0, s2 - fstmias YO, { s4 } + vstmia.f32 YO, { s4 } add YO, YO, INC_Y - fldmias YO, { s5 } + vldmia.f32 YO, { s5 } vmla.f32 s5, s0, s3 - fstmias YO, { s5 } + vstmia.f32 YO, { s5 } add YO, YO, INC_Y .endm @@ -456,20 +456,20 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1X4 - fldmias XO , { s12 } + vldmia.f32 XO , { s12 } add XO, XO, INC_X pld [ AO1 , #A_PRE ] - fldmias AO1!, { s8 - s9 } + vldmia.f32 AO1!, { s8 - s9 } - fldmias XO , { s13 } + vldmia.f32 XO , { s13 } add XO, XO, INC_X - fldmias AO1!, { s10 - s11 } + vldmia.f32 AO1!, { s10 - s11 } - fldmias XO , { s14 } + vldmia.f32 XO , { s14 } add XO, XO, INC_X - fldmias XO , { s15 } + vldmia.f32 XO , { s15 } add XO, XO, INC_X vmla.f32 s2 , s12 , s8 @@ -481,8 +481,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1X1 - fldmias XO , { s1 } - fldmias AO1!, { s8 } + vldmia.f32 XO , { s1 } + vldmia.f32 AO1!, { s8 } vmla.f32 s2 , s1 , s8 add XO, XO, INC_X @@ -490,9 +490,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S1 - fldmias YO, { s4 } + vldmia.f32 YO, { s4 } vmla.f32 s4, s0, s2 - fstmias YO, { s4 } + vstmia.f32 YO, { s4 } add YO, YO, INC_Y .endm diff --git a/kernel/arm/gemv_t_vfpv3.S b/kernel/arm/gemv_t_vfpv3.S index b1d3dadf1..a88d70016 100644 --- a/kernel/arm/gemv_t_vfpv3.S +++ b/kernel/arm/gemv_t_vfpv3.S @@ -108,17 +108,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F2X4 pld [ XO , #X_PRE ] - fldmiad XO! , { d28 - d31 } + vldmia.f64 XO! , { d28 - d31 } pld [ AO1 , #A_PRE ] - fldmiad AO1!, { d8 - d9 } + vldmia.f64 AO1!, { d8 - d9 } pld [ AO2 , #A_PRE ] - fldmiad AO2!, { d16 - d17 } + vldmia.f64 AO2!, { d16 - d17 } vmla.f64 d4 , d28 , d8 vmla.f64 d5 , d28 , d16 - fldmiad AO1!, { d10 - d11 } + vldmia.f64 AO1!, { d10 - d11 } vmla.f64 d4 , d29 , d9 vmla.f64 d5 , d29 , d17 - fldmiad AO2!, { d18 - d19 } + vldmia.f64 AO2!, { d18 - d19 } vmla.f64 d4 , d30, d10 vmla.f64 d5 , d30, d18 vmla.f64 d4 , d31, d11 @@ -129,9 +129,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F2X1 - fldmiad XO! , { d2 } - fldmiad AO1!, { d8 } - fldmiad AO2!, { d16 } + vldmia.f64 XO! , { d2 } + vldmia.f64 AO1!, { d8 } + vldmia.f64 AO2!, { d16 } vmla.f64 d4 , d2 , d8 vmla.f64 d5 , d2 , d16 @@ -139,10 +139,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_F2 - fldmiad YO, { d24 - d25 } + vldmia.f64 YO, { d24 - d25 } vmla.f64 d24, d0, d4 vmla.f64 d25, d0, d5 - fstmiad YO!, { d24 - d25 } + vstmia.f64 YO!, { d24 - d25 } .endm @@ -156,23 +156,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
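The vfpv3 gemv_t kernels interleave pld prefetch hints with the streaming loads; A_PRE and X_PRE are byte distances defined near the top of each file, presumably tuned per target. The pattern is:

        pld        [ AO1 , #A_PRE ]     // hint: pull the next panel of A toward the cache
        vldmia.f64 AO1!, { d8 - d9 }    // stream the current panel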
.macro KERNEL_S2X4 pld [ AO1 , #A_PRE ] - fldmiad XO , { d28 } + vldmia.f64 XO , { d28 } add XO, XO, INC_X - fldmiad AO1!, { d8 - d9 } + vldmia.f64 AO1!, { d8 - d9 } pld [ AO2 , #A_PRE ] - fldmiad AO2!, { d16 - d17 } + vldmia.f64 AO2!, { d16 - d17 } vmla.f64 d4 , d28 , d8 - fldmiad XO , { d29 } + vldmia.f64 XO , { d29 } add XO, XO, INC_X vmla.f64 d5 , d28 , d16 - fldmiad AO1!, { d10 - d11 } + vldmia.f64 AO1!, { d10 - d11 } vmla.f64 d4 , d29 , d9 - fldmiad XO , { d30 } + vldmia.f64 XO , { d30 } add XO, XO, INC_X vmla.f64 d5 , d29 , d17 - fldmiad AO2!, { d18 - d19 } + vldmia.f64 AO2!, { d18 - d19 } vmla.f64 d4 , d30, d10 - fldmiad XO , { d31 } + vldmia.f64 XO , { d31 } add XO, XO, INC_X vmla.f64 d5 , d30, d18 vmla.f64 d4 , d31, d11 @@ -183,10 +183,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S2X1 - fldmiad XO , { d2 } - fldmiad AO1!, { d8 } + vldmia.f64 XO , { d2 } + vldmia.f64 AO1!, { d8 } add XO, XO, INC_X - fldmiad AO2!, { d16 } + vldmia.f64 AO2!, { d16 } vmla.f64 d4 , d2 , d8 vmla.f64 d5 , d2 , d16 @@ -194,14 +194,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S2 - fldmiad YO, { d24 } + vldmia.f64 YO, { d24 } vmla.f64 d24, d0, d4 - fstmiad YO, { d24 } + vstmia.f64 YO, { d24 } add YO, YO, INC_Y - fldmiad YO, { d24 } + vldmia.f64 YO, { d24 } vmla.f64 d24, d0, d5 - fstmiad YO, { d24 } + vstmia.f64 YO, { d24 } add YO, YO, INC_Y .endm @@ -215,11 +215,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1X4 pld [ XO , #X_PRE ] - fldmiad XO! , { d28 - d31 } + vldmia.f64 XO! , { d28 - d31 } pld [ AO1 , #A_PRE ] - fldmiad AO1!, { d8 - d9 } + vldmia.f64 AO1!, { d8 - d9 } vmla.f64 d4 , d28 , d8 - fldmiad AO1!, { d10 - d11 } + vldmia.f64 AO1!, { d10 - d11 } vmla.f64 d4 , d29 , d9 vmla.f64 d4 , d30, d10 vmla.f64 d4 , d31, d11 @@ -229,17 +229,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1X1 - fldmiad XO! , { d2 } - fldmiad AO1!, { d8 } + vldmia.f64 XO! , { d2 } + vldmia.f64 AO1!, { d8 } vmla.f64 d4 , d2 , d8 .endm .macro SAVE_F1 - fldmiad YO, { d24 } + vldmia.f64 YO, { d24 } vmla.f64 d24, d0, d4 - fstmiad YO!, { d24 } + vstmia.f64 YO!, { d24 } .endm @@ -252,18 +252,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1X4 pld [ AO1 , #A_PRE ] - fldmiad XO , { d28 } + vldmia.f64 XO , { d28 } add XO, XO, INC_X - fldmiad AO1!, { d8 - d9 } + vldmia.f64 AO1!, { d8 - d9 } vmla.f64 d4 , d28 , d8 - fldmiad XO , { d29 } + vldmia.f64 XO , { d29 } add XO, XO, INC_X - fldmiad AO1!, { d10 - d11 } + vldmia.f64 AO1!, { d10 - d11 } vmla.f64 d4 , d29 , d9 - fldmiad XO , { d30 } + vldmia.f64 XO , { d30 } add XO, XO, INC_X vmla.f64 d4 , d30, d10 - fldmiad XO , { d31 } + vldmia.f64 XO , { d31 } add XO, XO, INC_X vmla.f64 d4 , d31, d11 @@ -272,8 +272,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1X1 - fldmiad XO , { d2 } - fldmiad AO1!, { d8 } + vldmia.f64 XO , { d2 } + vldmia.f64 AO1!, { d8 } add XO, XO, INC_X vmla.f64 d4 , d2 , d8 @@ -281,9 +281,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S1 - fldmiad YO, { d24 } + vldmia.f64 YO, { d24 } vmla.f64 d24, d0, d4 - fstmiad YO, { d24 } + vstmia.f64 YO, { d24 } add YO, YO, INC_Y .endm @@ -300,15 +300,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F2X4 - fldmias XO! 
, { s28 - s31 } - fldmias AO1!, { s8 - s9 } - fldmias AO2!, { s16 - s17 } + vldmia.f32 XO! , { s28 - s31 } + vldmia.f32 AO1!, { s8 - s9 } + vldmia.f32 AO2!, { s16 - s17 } vmla.f32 s4 , s28 , s8 vmla.f32 s5 , s28 , s16 - fldmias AO1!, { s10 - s11 } + vldmia.f32 AO1!, { s10 - s11 } vmla.f32 s4 , s29 , s9 vmla.f32 s5 , s29 , s17 - fldmias AO2!, { s18 - s19 } + vldmia.f32 AO2!, { s18 - s19 } vmla.f32 s4 , s30, s10 vmla.f32 s5 , s30, s18 vmla.f32 s4 , s31, s11 @@ -319,9 +319,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F2X1 - fldmias XO! , { s2 } - fldmias AO1!, { s8 } - fldmias AO2!, { s16 } + vldmia.f32 XO! , { s2 } + vldmia.f32 AO1!, { s8 } + vldmia.f32 AO2!, { s16 } vmla.f32 s4 , s2 , s8 vmla.f32 s5 , s2 , s16 @@ -329,10 +329,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_F2 - fldmias YO, { s24 - s25 } + vldmia.f32 YO, { s24 - s25 } vmla.f32 s24, s0, s4 vmla.f32 s25, s0, s5 - fstmias YO!, { s24 - s25 } + vstmia.f32 YO!, { s24 - s25 } .endm @@ -345,22 +345,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S2X4 - fldmias XO , { s28 } + vldmia.f32 XO , { s28 } add XO, XO, INC_X - fldmias AO1!, { s8 - s9 } - fldmias AO2!, { s16 - s17 } + vldmia.f32 AO1!, { s8 - s9 } + vldmia.f32 AO2!, { s16 - s17 } vmla.f32 s4 , s28 , s8 - fldmias XO , { s29 } + vldmia.f32 XO , { s29 } add XO, XO, INC_X vmla.f32 s5 , s28 , s16 - fldmias AO1!, { s10 - s11 } + vldmia.f32 AO1!, { s10 - s11 } vmla.f32 s4 , s29 , s9 - fldmias XO , { s30 } + vldmia.f32 XO , { s30 } add XO, XO, INC_X vmla.f32 s5 , s29 , s17 - fldmias AO2!, { s18 - s19 } + vldmia.f32 AO2!, { s18 - s19 } vmla.f32 s4 , s30, s10 - fldmias XO , { s31 } + vldmia.f32 XO , { s31 } add XO, XO, INC_X vmla.f32 s5 , s30, s18 vmla.f32 s4 , s31, s11 @@ -371,10 +371,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S2X1 - fldmias XO , { s2 } - fldmias AO1!, { s8 } + vldmia.f32 XO , { s2 } + vldmia.f32 AO1!, { s8 } add XO, XO, INC_X - fldmias AO2!, { s16 } + vldmia.f32 AO2!, { s16 } vmla.f32 s4 , s2 , s8 vmla.f32 s5 , s2 , s16 @@ -382,14 +382,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S2 - fldmias YO, { s24 } + vldmia.f32 YO, { s24 } vmla.f32 s24, s0, s4 - fstmias YO, { s24 } + vstmia.f32 YO, { s24 } add YO, YO, INC_Y - fldmias YO, { s24 } + vldmia.f32 YO, { s24 } vmla.f32 s24, s0, s5 - fstmias YO, { s24 } + vstmia.f32 YO, { s24 } add YO, YO, INC_Y .endm @@ -402,10 +402,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1X4 - fldmias XO! , { s28 - s31 } - fldmias AO1!, { s8 - s9 } + vldmia.f32 XO! , { s28 - s31 } + vldmia.f32 AO1!, { s8 - s9 } vmla.f32 s4 , s28 , s8 - fldmias AO1!, { s10 - s11 } + vldmia.f32 AO1!, { s10 - s11 } vmla.f32 s4 , s29 , s9 vmla.f32 s4 , s30, s10 vmla.f32 s4 , s31, s11 @@ -415,17 +415,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1X1 - fldmias XO! , { s2 } - fldmias AO1!, { s8 } + vldmia.f32 XO! , { s2 } + vldmia.f32 AO1!, { s8 } vmla.f32 s4 , s2 , s8 .endm .macro SAVE_F1 - fldmias YO, { s24 } + vldmia.f32 YO, { s24 } vmla.f32 s24, s0, s4 - fstmias YO!, { s24 } + vstmia.f32 YO!, { s24 } .endm @@ -437,18 +437,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.macro KERNEL_S1X4 - fldmias XO , { s28 } + vldmia.f32 XO , { s28 } add XO, XO, INC_X - fldmias AO1!, { s8 - s9 } + vldmia.f32 AO1!, { s8 - s9 } vmla.f32 s4 , s28 , s8 - fldmias XO , { s29 } + vldmia.f32 XO , { s29 } add XO, XO, INC_X - fldmias AO1!, { s10 - s11 } + vldmia.f32 AO1!, { s10 - s11 } vmla.f32 s4 , s29 , s9 - fldmias XO , { s30 } + vldmia.f32 XO , { s30 } add XO, XO, INC_X vmla.f32 s4 , s30, s10 - fldmias XO , { s31 } + vldmia.f32 XO , { s31 } add XO, XO, INC_X vmla.f32 s4 , s31, s11 @@ -457,8 +457,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1X1 - fldmias XO , { s2 } - fldmias AO1!, { s8 } + vldmia.f32 XO , { s2 } + vldmia.f32 AO1!, { s8 } add XO, XO, INC_X vmla.f32 s4 , s2 , s8 @@ -466,9 +466,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S1 - fldmias YO, { s24 } + vldmia.f32 YO, { s24 } vmla.f32 s24, s0, s4 - fstmias YO, { s24 } + vstmia.f32 YO, { s24 } add YO, YO, INC_Y .endm diff --git a/kernel/arm/iamax_vfp.S b/kernel/arm/iamax_vfp.S index fab05c9c8..fd43b15b1 100644 --- a/kernel/arm/iamax_vfp.S +++ b/kernel/arm/iamax_vfp.S @@ -114,7 +114,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro INIT_F - fldmiad X!, { d0 } + vldmia.f64 X!, { d0 } VABS( d0, d0 ) mov Z, #1 mov INDEX, Z @@ -123,7 +123,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1 - fldmiad X!, { d4 } + vldmia.f64 X!, { d4 } add Z, Z, #1 VABS( d4, d4 ) vcmpe.f64 d4, d0 @@ -135,7 +135,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro INIT_S - fldmiad X, { d0 } + vldmia.f64 X, { d0 } VABS( d0, d0 ) mov Z, #1 mov INDEX, Z @@ -146,7 +146,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1 - fldmiad X, { d4 } + vldmia.f64 X, { d4 } add Z, Z, #1 VABS( d4, d4 ) vcmpe.f64 d4, d0 @@ -161,7 +161,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro INIT_F - fldmias X!, { s0 } + vldmia.f32 X!, { s0 } VABS( s0, s0 ) mov Z, #1 mov INDEX, Z @@ -170,7 +170,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1 - fldmias X!, { s4 } + vldmia.f32 X!, { s4 } add Z, Z, #1 VABS( s4, s4 ) vcmpe.f32 s4, s0 @@ -182,7 +182,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro INIT_S - fldmias X, { s0 } + vldmia.f32 X, { s0 } VABS( s0, s0 ) mov Z, #1 mov INDEX, Z @@ -193,7 +193,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1 - fldmias X, { s4 } + vldmia.f32 X, { s4 } add Z, Z, #1 VABS( s4, s4 ) vcmpe.f32 s4, s0 @@ -215,7 +215,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro INIT_F - fldmiad X!, { d0 -d1 } + vldmia.f64 X!, { d0 -d1 } vabs.f64 d0, d0 vabs.f64 d1, d1 vadd.f64 d0 , d0, d1 @@ -227,7 +227,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1 - fldmiad X!, { d4 - d5 } + vldmia.f64 X!, { d4 - d5 } add Z, Z, #1 vabs.f64 d4, d4 vabs.f64 d5, d5 @@ -241,7 +241,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro INIT_S - fldmiad X, { d0 -d1 } + vldmia.f64 X, { d0 -d1 } vabs.f64 d0, d0 vabs.f64 d1, d1 vadd.f64 d0 , d0, d1 @@ -255,7 +255,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
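The iamax family keeps the running extreme of |x_i| in d0/s0 and its 1-based position in INDEX. VABS() is a file-local macro (presumably an absolute value or a plain move, depending on which max/min variant is being assembled), and the complex kernels below compare on |re| + |im|, as the BLAS i?amax convention requires:

        vldmia.f64 X!, { d4 - d5 }      // re, im
        vabs.f64   d4, d4
        vabs.f64   d5, d5
        vadd.f64   d4, d4, d5           // |re| + |im|, the icamax measure
        vcmpe.f64  d4, d0               // compare with the current extreme
        vmrs       APSR_nzcv, fpscr     // move the FP flags to the core flags for the branch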
.macro KERNEL_S1 - fldmiad X, { d4 - d5 } + vldmia.f64 X, { d4 - d5 } add Z, Z, #1 vabs.f64 d4, d4 vabs.f64 d5, d5 @@ -272,7 +272,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro INIT_F - fldmias X!, { s0 -s1 } + vldmia.f32 X!, { s0 -s1 } vabs.f32 s0, s0 vabs.f32 s1, s1 vadd.f32 s0 , s0, s1 @@ -284,7 +284,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1 - fldmias X!, { s4 - s5 } + vldmia.f32 X!, { s4 - s5 } add Z, Z, #1 vabs.f32 s4, s4 vabs.f32 s5, s5 @@ -298,7 +298,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro INIT_S - fldmias X, { s0 -s1 } + vldmia.f32 X, { s0 -s1 } vabs.f32 s0, s0 vabs.f32 s1, s1 vadd.f32 s0 , s0, s1 @@ -312,7 +312,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1 - fldmias X, { s4 - s5 } + vldmia.f32 X, { s4 - s5 } add Z, Z, #1 vabs.f32 s4, s4 vabs.f32 s5, s5 diff --git a/kernel/arm/nrm2_vfp.S b/kernel/arm/nrm2_vfp.S index 16ac5a632..8e0937851 100644 --- a/kernel/arm/nrm2_vfp.S +++ b/kernel/arm/nrm2_vfp.S @@ -58,7 +58,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1 - fldmiad X!, { d4 } + vldmia.f64 X!, { d4 } vcmpe.f64 d4, d6 // compare with 0.0 vmrs APSR_nzcv, fpscr beq KERNEL_F1_NEXT_\@ @@ -95,7 +95,7 @@ KERNEL_F1_NEXT_\@: .macro KERNEL_S1 - fldmiad X, { d4 } + vldmia.f64 X, { d4 } vcmpe.f64 d4, d6 // compare with 0.0 vmrs APSR_nzcv, fpscr beq KERNEL_S1_NEXT @@ -121,7 +121,7 @@ KERNEL_S1_NEXT: .macro KERNEL_F1 - fldmias X!, { s4 } + vldmia.f32 X!, { s4 } vcmpe.f32 s4, s6 // compare with 0.0 vmrs APSR_nzcv, fpscr beq KERNEL_F1_NEXT_\@ @@ -158,7 +158,7 @@ KERNEL_F1_NEXT_\@: .macro KERNEL_S1 - fldmias X, { s4 } + vldmia.f32 X, { s4 } vcmpe.f32 s4, s6 // compare with 0.0 vmrs APSR_nzcv, fpscr beq KERNEL_S1_NEXT @@ -191,7 +191,7 @@ KERNEL_S1_NEXT: .macro KERNEL_F1 - fldmiad X!, { d4 - d5 } + vldmia.f64 X!, { d4 - d5 } vcmpe.f64 d4, d6 // compare with 0.0 vmrs APSR_nzcv, fpscr @@ -249,7 +249,7 @@ KERNEL_F1_END_\@: .macro KERNEL_S1 - fldmiad X, { d4 - d5 } + vldmia.f64 X, { d4 - d5 } vcmpe.f64 d4, d6 // compare with 0.0 vmrs APSR_nzcv, fpscr @@ -294,7 +294,7 @@ KERNEL_S1_END_\@: .macro KERNEL_F1 - fldmias X!, { s4 - s5 } + vldmia.f32 X!, { s4 - s5 } vcmpe.f32 s4, s6 // compare with 0.0 vmrs APSR_nzcv, fpscr @@ -350,7 +350,7 @@ KERNEL_F1_END_\@: .macro KERNEL_S1 - fldmias X, { s4 - s5 } + vldmia.f32 X, { s4 - s5 } vcmpe.f32 s4, s6 // compare with 0.0 vmrs APSR_nzcv, fpscr diff --git a/kernel/arm/nrm2_vfpv3.S b/kernel/arm/nrm2_vfpv3.S index 84977901d..7be1e977e 100644 --- a/kernel/arm/nrm2_vfpv3.S +++ b/kernel/arm/nrm2_vfpv3.S @@ -58,7 +58,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
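Both nrm2 variants branch around zero elements (d6/s6 holds 0.0), presumably because the scaled sum-of-squares update that follows divides by the element; the skip labels use \@, the GAS pseudo-symbol that expands to a per-expansion counter, so the macro can be instantiated repeatedly without label clashes. Condensed, with the ssq update elided:

        .macro KERNEL_F1
            vldmia.f64 X!, { d4 }
            vcmpe.f64  d4, d6               // compare with 0.0
            vmrs       APSR_nzcv, fpscr
            beq        KERNEL_F1_NEXT_\@    // zeros contribute nothing, skip the update
            // ... scale / ssq update ...
        KERNEL_F1_NEXT_\@:
        .endm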
.macro KERNEL_F1 - fldmiad X!, { d4 } + vldmia.f64 X!, { d4 } vcmpe.f64 d4, d6 // compare with 0.0 vmrs APSR_nzcv, fpscr beq KERNEL_F1_NEXT_\@ @@ -95,7 +95,7 @@ KERNEL_F1_NEXT_\@: .macro KERNEL_S1 - fldmiad X, { d4 } + vldmia.f64 X, { d4 } vcmpe.f64 d4, d6 // compare with 0.0 vmrs APSR_nzcv, fpscr beq KERNEL_S1_NEXT @@ -121,7 +121,7 @@ KERNEL_S1_NEXT: .macro KERNEL_F1 - fldmias X!, { s4 } + vldmia.f32 X!, { s4 } vcmpe.f32 s4, s6 // compare with 0.0 vmrs APSR_nzcv, fpscr beq KERNEL_F1_NEXT_\@ @@ -158,7 +158,7 @@ KERNEL_F1_NEXT_\@: .macro KERNEL_S1 - fldmias X, { s4 } + vldmia.f32 X, { s4 } vcmpe.f32 s4, s6 // compare with 0.0 vmrs APSR_nzcv, fpscr beq KERNEL_S1_NEXT @@ -191,7 +191,7 @@ KERNEL_S1_NEXT: .macro KERNEL_F1 - fldmiad X!, { d4 - d5 } + vldmia.f64 X!, { d4 - d5 } vcmpe.f64 d4, d6 // compare with 0.0 vmrs APSR_nzcv, fpscr @@ -249,7 +249,7 @@ KERNEL_F1_END_\@: .macro KERNEL_S1 - fldmiad X, { d4 - d5 } + vldmia.f64 X, { d4 - d5 } vcmpe.f64 d4, d6 // compare with 0.0 vmrs APSR_nzcv, fpscr @@ -294,7 +294,7 @@ KERNEL_S1_END_\@: .macro KERNEL_F1 - fldmias X!, { s4 - s5 } + vldmia.f32 X!, { s4 - s5 } vcmpe.f32 s4, s6 // compare with 0.0 vmrs APSR_nzcv, fpscr @@ -350,7 +350,7 @@ KERNEL_F1_END_\@: .macro KERNEL_S1 - fldmias X, { s4 - s5 } + vldmia.f32 X, { s4 - s5 } vcmpe.f32 s4, s6 // compare with 0.0 vmrs APSR_nzcv, fpscr diff --git a/kernel/arm/rot_vfp.S b/kernel/arm/rot_vfp.S index 25f563690..6aec06205 100644 --- a/kernel/arm/rot_vfp.S +++ b/kernel/arm/rot_vfp.S @@ -77,68 +77,68 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pld [ X, #X_PRE ] pld [ Y, #X_PRE ] - fldmiad X, { d4 } - fldmiad Y, { d5 } + vldmia.f64 X, { d4 } + vldmia.f64 Y, { d5 } vmul.f64 d2 , d0, d4 fmacd d2 , d1, d5 vmul.f64 d3 , d0, d5 vmls.f64 d3 , d1, d4 - fstmiad X!, { d2 } - fstmiad Y!, { d3 } + vstmia.f64 X!, { d2 } + vstmia.f64 Y!, { d3 } - fldmiad X, { d4 } - fldmiad Y, { d5 } + vldmia.f64 X, { d4 } + vldmia.f64 Y, { d5 } vmul.f64 d2 , d0, d4 fmacd d2 , d1, d5 vmul.f64 d3 , d0, d5 vmls.f64 d3 , d1, d4 - fstmiad X!, { d2 } - fstmiad Y!, { d3 } + vstmia.f64 X!, { d2 } + vstmia.f64 Y!, { d3 } - fldmiad X, { d4 } - fldmiad Y, { d5 } + vldmia.f64 X, { d4 } + vldmia.f64 Y, { d5 } vmul.f64 d2 , d0, d4 fmacd d2 , d1, d5 vmul.f64 d3 , d0, d5 vmls.f64 d3 , d1, d4 - fstmiad X!, { d2 } - fstmiad Y!, { d3 } + vstmia.f64 X!, { d2 } + vstmia.f64 Y!, { d3 } - fldmiad X, { d4 } - fldmiad Y, { d5 } + vldmia.f64 X, { d4 } + vldmia.f64 Y, { d5 } vmul.f64 d2 , d0, d4 fmacd d2 , d1, d5 vmul.f64 d3 , d0, d5 vmls.f64 d3 , d1, d4 - fstmiad X!, { d2 } - fstmiad Y!, { d3 } + vstmia.f64 X!, { d2 } + vstmia.f64 Y!, { d3 } .endm .macro KERNEL_F1 - fldmiad X, { d4 } - fldmiad Y, { d5 } + vldmia.f64 X, { d4 } + vldmia.f64 Y, { d5 } vmul.f64 d2 , d0, d4 fmacd d2 , d1, d5 vmul.f64 d3 , d0, d5 vmls.f64 d3 , d1, d4 - fstmiad X!, { d2 } - fstmiad Y!, { d3 } + vstmia.f64 X!, { d2 } + vstmia.f64 Y!, { d3 } .endm .macro KERNEL_S1 - fldmiad X, { d4 } - fldmiad Y, { d5 } + vldmia.f64 X, { d4 } + vldmia.f64 Y, { d5 } vmul.f64 d2 , d0, d4 fmacd d2 , d1, d5 vmul.f64 d3 , d0, d5 vmls.f64 d3 , d1, d4 - fstmiad X, { d2 } - fstmiad Y, { d3 } + vstmia.f64 X, { d2 } + vstmia.f64 Y, { d3 } add X, X, INC_X add Y, Y, INC_Y @@ -149,68 +149,68 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
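rot applies the Givens rotation (x, y) <- (c*x + s*y, c*y - s*x), with c in d0/s0 and s in d1/s1; each element pair costs two multiplies, one multiply-accumulate and one multiply-subtract. The single-precision kernels below repeat the same pattern on s-registers:

        vldmia.f64 X, { d4 }        // x[i]
        vldmia.f64 Y, { d5 }        // y[i]
        vmul.f64   d2, d0, d4       // c*x
        fmacd      d2, d1, d5       // c*x + s*y
        vmul.f64   d3, d0, d5       // c*y
        vmls.f64   d3, d1, d4       // c*y - s*x
        vstmia.f64 X!, { d2 }
        vstmia.f64 Y!, { d3 }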
.macro KERNEL_F4 - fldmias X, { s4 } - fldmias Y, { s5 } + vldmia.f32 X, { s4 } + vldmia.f32 Y, { s5 } vmul.f32 s2 , s0, s4 fmacs s2 , s1, s5 vmul.f32 s3 , s0, s5 vmls.f32 s3 , s1, s4 - fstmias X!, { s2 } - fstmias Y!, { s3 } + vstmia.f32 X!, { s2 } + vstmia.f32 Y!, { s3 } - fldmias X, { s4 } - fldmias Y, { s5 } + vldmia.f32 X, { s4 } + vldmia.f32 Y, { s5 } vmul.f32 s2 , s0, s4 fmacs s2 , s1, s5 vmul.f32 s3 , s0, s5 vmls.f32 s3 , s1, s4 - fstmias X!, { s2 } - fstmias Y!, { s3 } + vstmia.f32 X!, { s2 } + vstmia.f32 Y!, { s3 } - fldmias X, { s4 } - fldmias Y, { s5 } + vldmia.f32 X, { s4 } + vldmia.f32 Y, { s5 } vmul.f32 s2 , s0, s4 fmacs s2 , s1, s5 vmul.f32 s3 , s0, s5 vmls.f32 s3 , s1, s4 - fstmias X!, { s2 } - fstmias Y!, { s3 } + vstmia.f32 X!, { s2 } + vstmia.f32 Y!, { s3 } - fldmias X, { s4 } - fldmias Y, { s5 } + vldmia.f32 X, { s4 } + vldmia.f32 Y, { s5 } vmul.f32 s2 , s0, s4 fmacs s2 , s1, s5 vmul.f32 s3 , s0, s5 vmls.f32 s3 , s1, s4 - fstmias X!, { s2 } - fstmias Y!, { s3 } + vstmia.f32 X!, { s2 } + vstmia.f32 Y!, { s3 } .endm .macro KERNEL_F1 - fldmias X, { s4 } - fldmias Y, { s5 } + vldmia.f32 X, { s4 } + vldmia.f32 Y, { s5 } vmul.f32 s2 , s0, s4 fmacs s2 , s1, s5 vmul.f32 s3 , s0, s5 vmls.f32 s3 , s1, s4 - fstmias X!, { s2 } - fstmias Y!, { s3 } + vstmia.f32 X!, { s2 } + vstmia.f32 Y!, { s3 } .endm .macro KERNEL_S1 - fldmias X, { s4 } - fldmias Y, { s5 } + vldmia.f32 X, { s4 } + vldmia.f32 Y, { s5 } vmul.f32 s2 , s0, s4 fmacs s2 , s1, s5 vmul.f32 s3 , s0, s5 vmls.f32 s3 , s1, s4 - fstmias X, { s2 } - fstmias Y, { s3 } + vstmia.f32 X, { s2 } + vstmia.f32 Y, { s3 } add X, X, INC_X add Y, Y, INC_Y @@ -230,96 +230,96 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pld [ X, #X_PRE ] pld [ Y, #X_PRE ] - fldmiad X, { d4 - d5 } - fldmiad Y, { d6 - d7 } + vldmia.f64 X, { d4 - d5 } + vldmia.f64 Y, { d6 - d7 } vmul.f64 d2 , d0, d4 fmacd d2 , d1, d6 vmul.f64 d3 , d0, d6 vmls.f64 d3 , d1, d4 - fstmiad X!, { d2 } - fstmiad Y!, { d3 } + vstmia.f64 X!, { d2 } + vstmia.f64 Y!, { d3 } vmul.f64 d2 , d0, d5 fmacd d2 , d1, d7 vmul.f64 d3 , d0, d7 vmls.f64 d3 , d1, d5 - fstmiad X!, { d2 } - fstmiad Y!, { d3 } + vstmia.f64 X!, { d2 } + vstmia.f64 Y!, { d3 } - fldmiad X, { d4 - d5 } - fldmiad Y, { d6 - d7 } + vldmia.f64 X, { d4 - d5 } + vldmia.f64 Y, { d6 - d7 } vmul.f64 d2 , d0, d4 fmacd d2 , d1, d6 vmul.f64 d3 , d0, d6 vmls.f64 d3 , d1, d4 - fstmiad X!, { d2 } - fstmiad Y!, { d3 } + vstmia.f64 X!, { d2 } + vstmia.f64 Y!, { d3 } vmul.f64 d2 , d0, d5 fmacd d2 , d1, d7 vmul.f64 d3 , d0, d7 vmls.f64 d3 , d1, d5 - fstmiad X!, { d2 } - fstmiad Y!, { d3 } + vstmia.f64 X!, { d2 } + vstmia.f64 Y!, { d3 } pld [ X, #X_PRE ] pld [ Y, #X_PRE ] - fldmiad X, { d4 - d5 } - fldmiad Y, { d6 - d7 } + vldmia.f64 X, { d4 - d5 } + vldmia.f64 Y, { d6 - d7 } vmul.f64 d2 , d0, d4 fmacd d2 , d1, d6 vmul.f64 d3 , d0, d6 vmls.f64 d3 , d1, d4 - fstmiad X!, { d2 } - fstmiad Y!, { d3 } + vstmia.f64 X!, { d2 } + vstmia.f64 Y!, { d3 } vmul.f64 d2 , d0, d5 fmacd d2 , d1, d7 vmul.f64 d3 , d0, d7 vmls.f64 d3 , d1, d5 - fstmiad X!, { d2 } - fstmiad Y!, { d3 } + vstmia.f64 X!, { d2 } + vstmia.f64 Y!, { d3 } - fldmiad X, { d4 - d5 } - fldmiad Y, { d6 - d7 } + vldmia.f64 X, { d4 - d5 } + vldmia.f64 Y, { d6 - d7 } vmul.f64 d2 , d0, d4 fmacd d2 , d1, d6 vmul.f64 d3 , d0, d6 vmls.f64 d3 , d1, d4 - fstmiad X!, { d2 } - fstmiad Y!, { d3 } + vstmia.f64 X!, { d2 } + vstmia.f64 Y!, { d3 } vmul.f64 d2 , d0, d5 fmacd d2 , d1, d7 vmul.f64 d3 , d0, d7 vmls.f64 d3 , d1, d5 - fstmiad X!, { d2 } - fstmiad Y!, { d3 } + 
vstmia.f64 X!, { d2 } + vstmia.f64 Y!, { d3 } .endm .macro KERNEL_F1 - fldmiad X, { d4 - d5 } - fldmiad Y, { d6 - d7 } + vldmia.f64 X, { d4 - d5 } + vldmia.f64 Y, { d6 - d7 } vmul.f64 d2 , d0, d4 fmacd d2 , d1, d6 vmul.f64 d3 , d0, d6 vmls.f64 d3 , d1, d4 - fstmiad X!, { d2 } - fstmiad Y!, { d3 } + vstmia.f64 X!, { d2 } + vstmia.f64 Y!, { d3 } vmul.f64 d2 , d0, d5 fmacd d2 , d1, d7 vmul.f64 d3 , d0, d7 vmls.f64 d3 , d1, d5 - fstmiad X!, { d2 } - fstmiad Y!, { d3 } + vstmia.f64 X!, { d2 } + vstmia.f64 Y!, { d3 } .endm .macro KERNEL_S1 - fldmiad X, { d4 - d5 } - fldmiad Y, { d6 - d7 } + vldmia.f64 X, { d4 - d5 } + vldmia.f64 Y, { d6 - d7 } vmul.f64 d2 , d0, d4 fmacd d2 , d1, d6 vmul.f64 d3 , d0, d6 @@ -347,96 +347,96 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pld [ X, #X_PRE ] pld [ Y, #X_PRE ] - fldmias X, { s4 - s5 } - fldmias Y, { s6 - s7 } + vldmia.f32 X, { s4 - s5 } + vldmia.f32 Y, { s6 - s7 } vmul.f32 s2 , s0, s4 fmacs s2 , s1, s6 vmul.f32 s3 , s0, s6 vmls.f32 s3 , s1, s4 - fstmias X!, { s2 } - fstmias Y!, { s3 } + vstmia.f32 X!, { s2 } + vstmia.f32 Y!, { s3 } vmul.f32 s2 , s0, s5 fmacs s2 , s1, s7 vmul.f32 s3 , s0, s7 vmls.f32 s3 , s1, s5 - fstmias X!, { s2 } - fstmias Y!, { s3 } + vstmia.f32 X!, { s2 } + vstmia.f32 Y!, { s3 } - fldmias X, { s4 - s5 } - fldmias Y, { s6 - s7 } + vldmia.f32 X, { s4 - s5 } + vldmia.f32 Y, { s6 - s7 } vmul.f32 s2 , s0, s4 fmacs s2 , s1, s6 vmul.f32 s3 , s0, s6 vmls.f32 s3 , s1, s4 - fstmias X!, { s2 } - fstmias Y!, { s3 } + vstmia.f32 X!, { s2 } + vstmia.f32 Y!, { s3 } vmul.f32 s2 , s0, s5 fmacs s2 , s1, s7 vmul.f32 s3 , s0, s7 vmls.f32 s3 , s1, s5 - fstmias X!, { s2 } - fstmias Y!, { s3 } + vstmia.f32 X!, { s2 } + vstmia.f32 Y!, { s3 } pld [ X, #X_PRE ] pld [ Y, #X_PRE ] - fldmias X, { s4 - s5 } - fldmias Y, { s6 - s7 } + vldmia.f32 X, { s4 - s5 } + vldmia.f32 Y, { s6 - s7 } vmul.f32 s2 , s0, s4 fmacs s2 , s1, s6 vmul.f32 s3 , s0, s6 vmls.f32 s3 , s1, s4 - fstmias X!, { s2 } - fstmias Y!, { s3 } + vstmia.f32 X!, { s2 } + vstmia.f32 Y!, { s3 } vmul.f32 s2 , s0, s5 fmacs s2 , s1, s7 vmul.f32 s3 , s0, s7 vmls.f32 s3 , s1, s5 - fstmias X!, { s2 } - fstmias Y!, { s3 } + vstmia.f32 X!, { s2 } + vstmia.f32 Y!, { s3 } - fldmias X, { s4 - s5 } - fldmias Y, { s6 - s7 } + vldmia.f32 X, { s4 - s5 } + vldmia.f32 Y, { s6 - s7 } vmul.f32 s2 , s0, s4 fmacs s2 , s1, s6 vmul.f32 s3 , s0, s6 vmls.f32 s3 , s1, s4 - fstmias X!, { s2 } - fstmias Y!, { s3 } + vstmia.f32 X!, { s2 } + vstmia.f32 Y!, { s3 } vmul.f32 s2 , s0, s5 fmacs s2 , s1, s7 vmul.f32 s3 , s0, s7 vmls.f32 s3 , s1, s5 - fstmias X!, { s2 } - fstmias Y!, { s3 } + vstmia.f32 X!, { s2 } + vstmia.f32 Y!, { s3 } .endm .macro KERNEL_F1 - fldmias X, { s4 - s5 } - fldmias Y, { s6 - s7 } + vldmia.f32 X, { s4 - s5 } + vldmia.f32 Y, { s6 - s7 } vmul.f32 s2 , s0, s4 fmacs s2 , s1, s6 vmul.f32 s3 , s0, s6 vmls.f32 s3 , s1, s4 - fstmias X!, { s2 } - fstmias Y!, { s3 } + vstmia.f32 X!, { s2 } + vstmia.f32 Y!, { s3 } vmul.f32 s2 , s0, s5 fmacs s2 , s1, s7 vmul.f32 s3 , s0, s7 vmls.f32 s3 , s1, s5 - fstmias X!, { s2 } - fstmias Y!, { s3 } + vstmia.f32 X!, { s2 } + vstmia.f32 Y!, { s3 } .endm .macro KERNEL_S1 - fldmias X, { s4 - s5 } - fldmias Y, { s6 - s7 } + vldmia.f32 X, { s4 - s5 } + vldmia.f32 Y, { s6 - s7 } vmul.f32 s2 , s0, s4 fmacs s2 , s1, s6 vmul.f32 s3 , s0, s6 @@ -483,13 +483,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
cmp N, #0 ble rot_kernel_L999 - +/* cmp INC_X, #0 beq rot_kernel_L999 cmp INC_Y, #0 beq rot_kernel_L999 - +*/ cmp INC_X, #1 bne rot_kernel_S_BEGIN @@ -584,6 +584,12 @@ rot_kernel_S1: rot_kernel_S10: KERNEL_S1 + + cmp INC_X, #0 + beq rot_kernel_L999 + + cmp INC_Y, #0 + beq rot_kernel_L999 subs I, I, #1 bne rot_kernel_S10 diff --git a/kernel/arm/scal_vfp.S b/kernel/arm/scal_vfp.S index cc3e3b98d..8992c35a8 100644 --- a/kernel/arm/scal_vfp.S +++ b/kernel/arm/scal_vfp.S @@ -64,30 +64,30 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F4 pld [ X, #X_PRE ] - fldmiad X, { d4 - d7 } + vldmia.f64 X, { d4 - d7 } vmul.f64 d4, d4, d0 vmul.f64 d5, d5, d0 vmul.f64 d6, d6, d0 - fstmiad X!, { d4 - d5 } + vstmia.f64 X!, { d4 - d5 } vmul.f64 d7, d7, d0 - fstmiad X!, { d6 - d7 } + vstmia.f64 X!, { d6 - d7 } .endm .macro KERNEL_F1 - fldmiad X, { d4 } + vldmia.f64 X, { d4 } vmul.f64 d4, d4, d0 - fstmiad X!, { d4 } + vstmia.f64 X!, { d4 } .endm .macro KERNEL_S1 - fldmiad X, { d4 } + vldmia.f64 X, { d4 } vmul.f64 d4, d4, d0 - fstmiad X, { d4 } + vstmia.f64 X, { d4 } add X, X, INC_X .endm @@ -96,30 +96,30 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F4 - fldmias X, { s4 - s7 } + vldmia.f32 X, { s4 - s7 } vmul.f32 s4, s4, s0 vmul.f32 s5, s5, s0 vmul.f32 s6, s6, s0 - fstmias X!, { s4 - s5 } + vstmia.f32 X!, { s4 - s5 } vmul.f32 s7, s7, s0 - fstmias X!, { s6 - s7 } + vstmia.f32 X!, { s6 - s7 } .endm .macro KERNEL_F1 - fldmias X, { s4 } + vldmia.f32 X, { s4 } vmul.f32 s4, s4, s0 - fstmias X!, { s4 } + vstmia.f32 X!, { s4 } .endm .macro KERNEL_S1 - fldmias X, { s4 } + vldmia.f32 X, { s4 } vmul.f32 s4, s4, s0 - fstmias X, { s4 } + vstmia.f32 X, { s4 } add X, X, INC_X .endm @@ -136,58 +136,58 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pld [ X, #X_PRE ] - fldmiad X, { d4 - d5 } + vldmia.f64 X, { d4 - d5 } vmul.f64 d2, d0, d4 vmls.f64 d2, d1, d5 vmul.f64 d3, d0, d5 fmacd d3, d1, d4 - fstmiad X!, { d2 - d3 } + vstmia.f64 X!, { d2 - d3 } - fldmiad X, { d4 - d5 } + vldmia.f64 X, { d4 - d5 } vmul.f64 d2, d0, d4 vmls.f64 d2, d1, d5 vmul.f64 d3, d0, d5 fmacd d3, d1, d4 - fstmiad X!, { d2 - d3 } + vstmia.f64 X!, { d2 - d3 } pld [ X, #X_PRE ] - fldmiad X, { d4 - d5 } + vldmia.f64 X, { d4 - d5 } vmul.f64 d2, d0, d4 vmls.f64 d2, d1, d5 vmul.f64 d3, d0, d5 fmacd d3, d1, d4 - fstmiad X!, { d2 - d3 } + vstmia.f64 X!, { d2 - d3 } - fldmiad X, { d4 - d5 } + vldmia.f64 X, { d4 - d5 } vmul.f64 d2, d0, d4 vmls.f64 d2, d1, d5 vmul.f64 d3, d0, d5 fmacd d3, d1, d4 - fstmiad X!, { d2 - d3 } + vstmia.f64 X!, { d2 - d3 } .endm .macro KERNEL_F1 - fldmiad X, { d4 - d5 } + vldmia.f64 X, { d4 - d5 } vmul.f64 d2, d0, d4 vmls.f64 d2, d1, d5 vmul.f64 d3, d0, d5 fmacd d3, d1, d4 - fstmiad X!, { d2 - d3 } + vstmia.f64 X!, { d2 - d3 } .endm .macro KERNEL_S1 - fldmiad X, { d4 - d5 } + vldmia.f64 X, { d4 - d5 } vmul.f64 d2, d0, d4 vmls.f64 d2, d1, d5 vmul.f64 d3, d0, d5 fmacd d3, d1, d4 - fstmiad X, { d2 - d3 } + vstmia.f64 X, { d2 - d3 } add X, X, INC_X .endm @@ -199,56 +199,56 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
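The complex scal kernels (double precision above, single below) spell out the four-operation complex multiply x <- (ar + i*ai) * x, with ar in d0/s0 and ai in d1/s1:

        vldmia.f64 X, { d4 - d5 }   // re, im
        vmul.f64   d2, d0, d4       // ar*re
        vmls.f64   d2, d1, d5       // ar*re - ai*im, the new re
        vmul.f64   d3, d0, d5       // ar*im
        fmacd      d3, d1, d4       // ar*im + ai*re, the new im
        vstmia.f64 X!, { d2 - d3 }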
pld [ X, #X_PRE ] - fldmias X, { s4 - s5 } + vldmia.f32 X, { s4 - s5 } vmul.f32 s2, s0, s4 vmls.f32 s2, s1, s5 vmul.f32 s3, s0, s5 fmacs s3, s1, s4 - fstmias X!, { s2 - s3 } + vstmia.f32 X!, { s2 - s3 } - fldmias X, { s4 - s5 } + vldmia.f32 X, { s4 - s5 } vmul.f32 s2, s0, s4 vmls.f32 s2, s1, s5 vmul.f32 s3, s0, s5 fmacs s3, s1, s4 - fstmias X!, { s2 - s3 } + vstmia.f32 X!, { s2 - s3 } - fldmias X, { s4 - s5 } + vldmia.f32 X, { s4 - s5 } vmul.f32 s2, s0, s4 vmls.f32 s2, s1, s5 vmul.f32 s3, s0, s5 fmacs s3, s1, s4 - fstmias X!, { s2 - s3 } + vstmia.f32 X!, { s2 - s3 } - fldmias X, { s4 - s5 } + vldmia.f32 X, { s4 - s5 } vmul.f32 s2, s0, s4 vmls.f32 s2, s1, s5 vmul.f32 s3, s0, s5 fmacs s3, s1, s4 - fstmias X!, { s2 - s3 } + vstmia.f32 X!, { s2 - s3 } .endm .macro KERNEL_F1 - fldmias X, { s4 - s5 } + vldmia.f32 X, { s4 - s5 } vmul.f32 s2, s0, s4 vmls.f32 s2, s1, s5 vmul.f32 s3, s0, s5 fmacs s3, s1, s4 - fstmias X!, { s2 - s3 } + vstmia.f32 X!, { s2 - s3 } .endm .macro KERNEL_S1 - fldmias X, { s4 - s5 } + vldmia.f32 X, { s4 - s5 } vmul.f32 s2, s0, s4 vmls.f32 s2, s1, s5 vmul.f32 s3, s0, s5 fmacs s3, s1, s4 - fstmias X, { s2 - s3 } + vstmia.f32 X, { s2 - s3 } add X, X, INC_X .endm diff --git a/kernel/arm/scopy_vfp.S b/kernel/arm/scopy_vfp.S index 0fd815db8..1ccd29c95 100644 --- a/kernel/arm/scopy_vfp.S +++ b/kernel/arm/scopy_vfp.S @@ -65,17 +65,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY_F8 pld [ X, #X_PRE ] - fldmias X!, { s0 - s3 } - fldmias X!, { s4 - s7 } - fstmias Y!, { s0 - s3 } - fstmias Y!, { s4 - s7 } + vldmia.f32 X!, { s0 - s3 } + vldmia.f32 X!, { s4 - s7 } + vstmia.f32 Y!, { s0 - s3 } + vstmia.f32 Y!, { s4 - s7 } .endm .macro COPY_F1 - fldmias X!, { s0 } - fstmias Y!, { s0 } + vldmia.f32 X!, { s0 } + vstmia.f32 Y!, { s0 } .endm @@ -85,23 +85,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY_S4 nop - fldmias X, { s0 } - fstmias Y, { s0 } + vldmia.f32 X, { s0 } + vstmia.f32 Y, { s0 } add X, X, INC_X add Y, Y, INC_Y - fldmias X, { s1 } - fstmias Y, { s1 } + vldmia.f32 X, { s1 } + vstmia.f32 Y, { s1 } add X, X, INC_X add Y, Y, INC_Y - fldmias X, { s0 } - fstmias Y, { s0 } + vldmia.f32 X, { s0 } + vstmia.f32 Y, { s0 } add X, X, INC_X add Y, Y, INC_Y - fldmias X, { s1 } - fstmias Y, { s1 } + vldmia.f32 X, { s1 } + vstmia.f32 Y, { s1 } add X, X, INC_X add Y, Y, INC_Y @@ -110,8 +110,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY_S1 - fldmias X, { s0 } - fstmias Y, { s0 } + vldmia.f32 X, { s0 } + vstmia.f32 Y, { s0 } add X, X, INC_X add Y, Y, INC_Y diff --git a/kernel/arm/sdot_vfp.S b/kernel/arm/sdot_vfp.S index 5f4f424bf..bb374b5ee 100644 --- a/kernel/arm/sdot_vfp.S +++ b/kernel/arm/sdot_vfp.S @@ -68,26 +68,26 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
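sdot_vfp.S holds two kernel sets. The one beginning here widens every single-precision product with vcvt.f64.f32 and accumulates in d0, presumably the path built for dsdot/sdsdot, which accumulate in double; the plain-float set further down accumulates with fmacs into s0/s1:

        vldmia.f32   X!, { s14 }
        vldmia.f32   Y!, { s15 }
        vmul.f32     s15, s14, s15  // x[i] * y[i] in single
        vcvt.f64.f32 d4, s15        // widen the product
        vadd.f64     d0, d0, d4     // accumulate the dot product in double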
.macro KERNEL_F4 - fldmias X!, { s14 } - fldmias Y!, { s15 } + vldmia.f32 X!, { s14 } + vldmia.f32 Y!, { s15 } vmul.f32 s15, s14, s15 vcvt.f64.f32 d4, s15 vadd.f64 d0 , d0, d4 - fldmias X!, { s14 } - fldmias Y!, { s15 } + vldmia.f32 X!, { s14 } + vldmia.f32 Y!, { s15 } vmul.f32 s15, s14, s15 vcvt.f64.f32 d4, s15 vadd.f64 d0 , d0, d4 - fldmias X!, { s14 } - fldmias Y!, { s15 } + vldmia.f32 X!, { s14 } + vldmia.f32 Y!, { s15 } vmul.f32 s15, s14, s15 vcvt.f64.f32 d4, s15 vadd.f64 d0 , d0, d4 - fldmias X!, { s14 } - fldmias Y!, { s15 } + vldmia.f32 X!, { s14 } + vldmia.f32 Y!, { s15 } vmul.f32 s15, s14, s15 vcvt.f64.f32 d4, s15 vadd.f64 d0 , d0, d4 @@ -96,8 +96,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1 - fldmias X!, { s14 } - fldmias Y!, { s15 } + vldmia.f32 X!, { s14 } + vldmia.f32 Y!, { s15 } vmul.f32 s15, s14, s15 vcvt.f64.f32 d4, s15 vadd.f64 d0 , d0, d4 @@ -109,32 +109,32 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. nop - fldmias X, { s14 } - fldmias Y, { s15 } + vldmia.f32 X, { s14 } + vldmia.f32 Y, { s15 } vmul.f32 s15, s14, s15 vcvt.f64.f32 d4, s15 vadd.f64 d0 , d0, d4 add X, X, INC_X add Y, Y, INC_Y - fldmias X, { s14 } - fldmias Y, { s15 } + vldmia.f32 X, { s14 } + vldmia.f32 Y, { s15 } vmul.f32 s15, s14, s15 vcvt.f64.f32 d4, s15 vadd.f64 d0 , d0, d4 add X, X, INC_X add Y, Y, INC_Y - fldmias X, { s14 } - fldmias Y, { s15 } + vldmia.f32 X, { s14 } + vldmia.f32 Y, { s15 } vmul.f32 s15, s14, s15 vcvt.f64.f32 d4, s15 vadd.f64 d0 , d0, d4 add X, X, INC_X add Y, Y, INC_Y - fldmias X, { s14 } - fldmias Y, { s15 } + vldmia.f32 X, { s14 } + vldmia.f32 Y, { s15 } vmul.f32 s15, s14, s15 vcvt.f64.f32 d4, s15 vadd.f64 d0 , d0, d4 @@ -146,8 +146,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1 - fldmias X, { s14 } - fldmias Y, { s15 } + vldmia.f32 X, { s14 } + vldmia.f32 Y, { s15 } vmul.f32 s15, s14, s15 vcvt.f64.f32 d4, s15 vadd.f64 d0 , d0, d4 @@ -162,12 +162,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F4 - fldmias X!, { s8 - s9 } - fldmias Y!, { s4 - s5} + vldmia.f32 X!, { s8 - s9 } + vldmia.f32 Y!, { s4 - s5} fmacs s0 , s4, s8 - fldmias X!, { s10 - s11 } + vldmia.f32 X!, { s10 - s11 } fmacs s1 , s5, s9 - fldmias Y!, { s6 - s7 } + vldmia.f32 Y!, { s6 - s7 } fmacs s0 , s6, s10 fmacs s1 , s7, s11 @@ -175,8 +175,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1 - fldmias X!, { s4 } - fldmias Y!, { s8 } + vldmia.f32 X!, { s4 } + vldmia.f32 Y!, { s8 } fmacs s0 , s4, s8 .endm @@ -185,26 +185,26 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S4 nop - fldmias X, { s4 } - fldmias Y, { s8 } + vldmia.f32 X, { s4 } + vldmia.f32 Y, { s8 } add X, X, INC_X add Y, Y, INC_Y fmacs s0 , s4, s8 - fldmias X, { s5 } - fldmias Y, { s9 } + vldmia.f32 X, { s5 } + vldmia.f32 Y, { s9 } add X, X, INC_X add Y, Y, INC_Y fmacs s1 , s5, s9 - fldmias X, { s6 } - fldmias Y, { s10 } + vldmia.f32 X, { s6 } + vldmia.f32 Y, { s10 } add X, X, INC_X add Y, Y, INC_Y fmacs s0 , s6, s10 - fldmias X, { s7 } - fldmias Y, { s11 } + vldmia.f32 X, { s7 } + vldmia.f32 Y, { s11 } add X, X, INC_X add Y, Y, INC_Y fmacs s1 , s7, s11 @@ -214,8 +214,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
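The plain-float dot kernel splits the accumulation across s0 and s1, even-index products into one and odd-index into the other, presumably to break the serial dependency on a single fmacs chain; the two partial sums would be combined once after the loop. Note also that this file's INC_X/INC_Y == 0 quick returns are disabled below with leading-# lines (GAS treats # at the start of a line as a comment), mirroring the /* */ change in rot_vfp.S above, so a zero increment now falls through to the strided loop instead of returning early:

        vldmia.f32 X!, { s8 - s9 }
        vldmia.f32 Y!, { s4 - s5 }
        fmacs      s0, s4, s8       // even-index products
        fmacs      s1, s5, s9       // odd-index products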
.macro KERNEL_S1 - fldmias X, { s4 } - fldmias Y, { s8 } + vldmia.f32 X, { s4 } + vldmia.f32 Y, { s8 } add X, X, INC_X fmacs s0 , s4, s8 add Y, Y, INC_Y @@ -253,11 +253,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. cmp N, #0 ble sdot_kernel_L999 - cmp INC_X, #0 - beq sdot_kernel_L999 +# cmp INC_X, #0 +# beq sdot_kernel_L999 - cmp INC_Y, #0 - beq sdot_kernel_L999 +# cmp INC_Y, #0 +# beq sdot_kernel_L999 cmp INC_X, #1 bne sdot_kernel_S_BEGIN diff --git a/kernel/arm/sgemm_kernel_4x2_vfp.S b/kernel/arm/sgemm_kernel_4x2_vfp.S index 1f21e5a1f..c072f4126 100644 --- a/kernel/arm/sgemm_kernel_4x2_vfp.S +++ b/kernel/arm/sgemm_kernel_4x2_vfp.S @@ -112,8 +112,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL4x2_SUB - fldmias AO! , { s0 - s3 } - fldmias BO! , { s4 - s5 } + vldmia.f32 AO! , { s0 - s3 } + vldmia.f32 BO! , { s4 - s5 } fmacs s8 , s0, s4 fmacs s9 , s1, s4 diff --git a/kernel/arm/sgemm_kernel_4x4_vfpv3.S b/kernel/arm/sgemm_kernel_4x4_vfpv3.S index 6491d3571..789643f56 100644 --- a/kernel/arm/sgemm_kernel_4x4_vfpv3.S +++ b/kernel/arm/sgemm_kernel_4x4_vfpv3.S @@ -136,29 +136,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL4x4_I pld [ AO , #A_PRE ] - fldmias AO!, { s0 - s1 } + vldmia.f32 AO!, { s0 - s1 } pld [ BO , #B_PRE ] - fldmias BO!, { s8 - s9 } + vldmia.f32 BO!, { s8 - s9 } fmuls s16 , s0, s8 - fldmias AO!, { s2 - s3 } + vldmia.f32 AO!, { s2 - s3 } fmuls s17 , s1, s8 fmuls s18 , s2, s8 - fldmias BO!, { s10 - s11 } + vldmia.f32 BO!, { s10 - s11 } fmuls s19 , s3, s8 fmuls s20 , s0, s9 - fldmias AO!, { s4 - s5 } + vldmia.f32 AO!, { s4 - s5 } fmuls s21 , s1, s9 fmuls s22 , s2, s9 - fldmias AO!, { s6 - s7 } + vldmia.f32 AO!, { s6 - s7 } fmuls s23 , s3, s9 fmuls s24 , s0, s10 - fldmias BO!, { s12 - s13 } + vldmia.f32 BO!, { s12 - s13 } fmuls s25 , s1, s10 fmuls s26 , s2, s10 - fldmias BO!, { s14 - s15 } + vldmia.f32 BO!, { s14 - s15 } fmuls s27 , s3, s10 fmuls s28 , s0, s11 @@ -174,20 +174,20 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pld [ AO , #A_PRE ] fmacs s16 , s4, s12 fmacs s17 , s5, s12 - fldmias AO!, { s0 - s3 } + vldmia.f32 AO!, { s0 - s3 } fmacs s18 , s6, s12 pld [ BO , #B_PRE ] fmacs s19 , s7, s12 fmacs s20 , s4, s13 - fldmias BO!, { s8 - s11 } + vldmia.f32 BO!, { s8 - s11 } fmacs s21 , s5, s13 fmacs s22 , s6, s13 - //fldmias AO!, { s2 - s3 } + //vldmia.f32 AO!, { s2 - s3 } fmacs s23 , s7, s13 fmacs s24 , s4, s14 - //fldmias BO!, { s10 - s11 } + //vldmia.f32 BO!, { s10 - s11 } fmacs s25 , s5, s14 fmacs s26 , s6, s14 fmacs s27 , s7, s14 @@ -203,17 +203,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL4x4_M1 fmacs s16 , s0, s8 - fldmias AO!, { s4 - s7 } + vldmia.f32 AO!, { s4 - s7 } fmacs s17 , s1, s8 fmacs s18 , s2, s8 - fldmias BO!, { s12 - s15 } - //fldmias AO!, { s6 - s7 } + vldmia.f32 BO!, { s12 - s15 } + //vldmia.f32 AO!, { s6 - s7 } fmacs s19 , s3, s8 fmacs s20 , s0, s9 fmacs s21 , s1, s9 fmacs s22 , s2, s9 - //fldmias BO!, { s14 - s15 } + //vldmia.f32 BO!, { s14 - s15 } fmacs s23 , s3, s9 fmacs s24 , s0, s10 @@ -300,7 +300,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s0, ALPHA add r4 , CO2, r3 - fldmias CO1, { s8 - s11 } + vldmia.f32 CO1, { s8 - s11 } fmacs s8 , s0 , s16 flds s12, [CO2] @@ -322,7 +322,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
pld [ CO1 , #C_PRE ] - fldmias r4, { s8 - s11 } + vldmia.f32 r4, { s8 - s11 } fmacs s8 , s0 , s24 fsts s12, [CO2] @@ -338,7 +338,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add CO2, r4 , r3 - fldmias CO2, { s12 - s15 } + vldmia.f32 CO2, { s12 - s15 } fsts s8 , [r4 ] fmacs s12, s0 , s28 @@ -350,7 +350,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fmacs s15, s0 , s31 pld [ r4 , #C_PRE ] - fstmias CO2, { s12 - s15 } + vstmia.f32 CO2, { s12 - s15 } pld [ CO2 , #C_PRE ] add CO1, CO1, #16 diff --git a/kernel/arm/sgemm_ncopy_2_vfp.S b/kernel/arm/sgemm_ncopy_2_vfp.S index ff4ff0845..dd4596602 100644 --- a/kernel/arm/sgemm_ncopy_2_vfp.S +++ b/kernel/arm/sgemm_ncopy_2_vfp.S @@ -73,7 +73,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s3 , [ AO2, #4 ] add AO1, AO1, #8 - fstmias BO!, { s0 - s3 } + vstmia.f32 BO!, { s0 - s3 } add AO2, AO2, #8 .endm @@ -85,7 +85,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s1 , [ AO2, #0 ] add AO1, AO1, #4 - fstmias BO!, { s0 - s1 } + vstmia.f32 BO!, { s0 - s1 } add AO2, AO2, #4 .endm @@ -95,7 +95,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s0 , [ AO1, #0 ] flds s1 , [ AO1, #4 ] - fstmias BO!, { s0 - s1 } + vstmia.f32 BO!, { s0 - s1 } add AO1, AO1, #8 .endm @@ -105,7 +105,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s0 , [ AO1, #0 ] - fstmias BO!, { s0 } + vstmia.f32 BO!, { s0 } add AO1, AO1, #4 .endm diff --git a/kernel/arm/sgemm_ncopy_4_vfp.S b/kernel/arm/sgemm_ncopy_4_vfp.S index ab013134e..dbcea5961 100644 --- a/kernel/arm/sgemm_ncopy_4_vfp.S +++ b/kernel/arm/sgemm_ncopy_4_vfp.S @@ -100,10 +100,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s11, [ AO4, #8 ] flds s15, [ AO4, #12 ] - fstmias BO!, { s0 - s3 } + vstmia.f32 BO!, { s0 - s3 } add AO4, AO4, #16 - fstmias BO!, { s4 - s7 } - fstmias BO!, { s8 - s15 } + vstmia.f32 BO!, { s4 - s7 } + vstmia.f32 BO!, { s8 - s15 } .endm @@ -117,7 +117,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s3 , [ AO4, #0 ] add AO3, AO3, #4 - fstmias BO!, { s0 - s3 } + vstmia.f32 BO!, { s0 - s3 } add AO4, AO4, #4 .endm @@ -135,7 +135,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s5 , [ AO2, #8 ] flds s7 , [ AO2, #12 ] - fstmias BO!, { s0 - s7 } + vstmia.f32 BO!, { s0 - s7 } add AO2, AO2, #16 .endm @@ -147,7 +147,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s1 , [ AO2, #0 ] add AO1, AO1, #4 - fstmias BO!, { s0 - s1 } + vstmia.f32 BO!, { s0 - s1 } add AO2, AO2, #4 .endm @@ -159,7 +159,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s2 , [ AO1, #8 ] flds s3 , [ AO1, #12 ] - fstmias BO!, { s0 - s3 } + vstmia.f32 BO!, { s0 - s3 } add AO1, AO1, #16 .endm @@ -169,7 +169,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. flds s0 , [ AO1, #0 ] - fstmias BO!, { s0 } + vstmia.f32 BO!, { s0 } add AO1, AO1, #4 .endm diff --git a/kernel/arm/sgemm_tcopy_4_vfp.S b/kernel/arm/sgemm_tcopy_4_vfp.S index 9bb0e46b1..e61613c5c 100644 --- a/kernel/arm/sgemm_tcopy_4_vfp.S +++ b/kernel/arm/sgemm_tcopy_4_vfp.S @@ -76,21 +76,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
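The 4x4 sgemm kernel ending above is software-pipelined: KERNEL4x4_I primes the first block with fmuls and the initial loads, then the M1/M2 macros alternate two register sets (s0-s3 with s8-s11, and s4-s7 with s12-s15) so the vldmia for the next block issues underneath the fmacs of the current one. The ncopy/tcopy helpers below feed that kernel by packing panels of A into a contiguous buffer; the 4x4 copy step looks like this, with M4 presumably the byte stride between packed blocks:

        vldmia.f32 AO1, { s0 - s3 }     // row 0 of a 4x4 block
        add        r3, AO1, LDA
        vldmia.f32 r3, { s4 - s7 }      // row 1
        add        r3, r3, LDA
        vldmia.f32 r3, { s8 - s11 }     // row 2
        add        r3, r3, LDA
        vldmia.f32 r3, { s12 - s15 }    // row 3
        vstmia.f32 BO1, { s0 - s15 }    // one block, contiguous in the packed buffer
        add        BO1, BO1, M4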
.macro COPY4x4_1 pld [ AO1, #A_PRE ] - fldmias AO1, { s0 - s3 } + vldmia.f32 AO1, { s0 - s3 } add r3, AO1, LDA pld [ r3, #A_PRE ] - fldmias r3, { s4 - s7 } + vldmia.f32 r3, { s4 - s7 } add r3, r3, LDA pld [ r3, #A_PRE ] - fldmias r3, { s8 - s11 } + vldmia.f32 r3, { s8 - s11 } add r3, r3, LDA pld [ r3, #A_PRE ] - fldmias r3, { s12 - s15 } + vldmia.f32 r3, { s12 - s15 } - fstmias BO1, { s0 - s15 } + vstmia.f32 BO1, { s0 - s15 } add AO1, AO1, #16 add BO1, BO1, M4 @@ -98,18 +98,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY4x4_2 - fldmias AO1, { s0 - s3 } + vldmia.f32 AO1, { s0 - s3 } add r3, AO1, LDA - fldmias r3, { s4 - s7 } + vldmia.f32 r3, { s4 - s7 } add r3, r3, LDA - fldmias r3, { s8 - s11 } + vldmia.f32 r3, { s8 - s11 } add r3, r3, LDA - fldmias r3, { s12 - s15 } + vldmia.f32 r3, { s12 - s15 } - fstmias BO1, { s0 - s15 } + vstmia.f32 BO1, { s0 - s15 } add AO1, AO1, #16 add BO1, BO1, M4 @@ -118,18 +118,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY2x4 - fldmias AO1, { s0 - s1 } + vldmia.f32 AO1, { s0 - s1 } add r3, AO1, LDA - fldmias r3, { s2 - s3 } + vldmia.f32 r3, { s2 - s3 } add r3, r3, LDA - fldmias r3, { s4 - s5 } + vldmia.f32 r3, { s4 - s5 } add r3, r3, LDA - fldmias r3, { s6 - s7 } + vldmia.f32 r3, { s6 - s7 } - fstmias BO2, { s0 - s7 } + vstmia.f32 BO2, { s0 - s7 } add AO1, AO1, #8 add BO2, BO2, #32 @@ -137,18 +137,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY1x4 - fldmias AO1, { s0 } + vldmia.f32 AO1, { s0 } add r3, AO1, LDA - fldmias r3, { s1 } + vldmia.f32 r3, { s1 } add r3, r3, LDA - fldmias r3, { s2 } + vldmia.f32 r3, { s2 } add r3, r3, LDA - fldmias r3, { s3 } + vldmia.f32 r3, { s3 } - fstmias BO3, { s0 - s3 } + vstmia.f32 BO3, { s0 - s3 } add AO1, AO1, #4 add BO3, BO3, #16 @@ -158,12 +158,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY4x2 - fldmias AO1, { s0 - s3 } + vldmia.f32 AO1, { s0 - s3 } add r3, AO1, LDA - fldmias r3, { s4 - s7 } + vldmia.f32 r3, { s4 - s7 } - fstmias BO1, { s0 - s7 } + vstmia.f32 BO1, { s0 - s7 } add AO1, AO1, #16 add BO1, BO1, M4 @@ -171,12 +171,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY2x2 - fldmias AO1, { s0 - s1 } + vldmia.f32 AO1, { s0 - s1 } add r3, AO1, LDA - fldmias r3, { s2 - s3 } + vldmia.f32 r3, { s2 - s3 } - fstmias BO2, { s0 - s3 } + vstmia.f32 BO2, { s0 - s3 } add AO1, AO1, #8 add BO2, BO2, #16 @@ -184,12 +184,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY1x2 - fldmias AO1, { s0 } + vldmia.f32 AO1, { s0 } add r3, AO1, LDA - fldmias r3, { s1 } + vldmia.f32 r3, { s1 } - fstmias BO3, { s0 - s1 } + vstmia.f32 BO3, { s0 - s1 } add AO1, AO1, #4 add BO3, BO3, #8 @@ -199,9 +199,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY4x1 - fldmias AO1, { s0 - s3 } + vldmia.f32 AO1, { s0 - s3 } - fstmias BO1, { s0 - s3 } + vstmia.f32 BO1, { s0 - s3 } add AO1, AO1, #16 add BO1, BO1, M4 @@ -209,9 +209,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY2x1 - fldmias AO1, { s0 - s1 } + vldmia.f32 AO1, { s0 - s1 } - fstmias BO2, { s0 - s1 } + vstmia.f32 BO2, { s0 - s1 } add AO1, AO1, #8 add BO2, BO2, #8 @@ -219,9 +219,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.macro COPY1x1 - fldmias AO1, { s0 } + vldmia.f32 AO1, { s0 } - fstmias BO3, { s0 } + vstmia.f32 BO3, { s0 } add AO1, AO1, #4 add BO3, BO3, #4 diff --git a/kernel/arm/strmm_kernel_4x2_vfp.S b/kernel/arm/strmm_kernel_4x2_vfp.S index 635b1dd13..34fa0ee39 100644 --- a/kernel/arm/strmm_kernel_4x2_vfp.S +++ b/kernel/arm/strmm_kernel_4x2_vfp.S @@ -118,8 +118,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL4x2_SUB - fldmias AO!, { s0 - s3 } - fldmias BO!, { s4 - s5 } + vldmia.f32 AO!, { s0 - s3 } + vldmia.f32 BO!, { s4 - s5 } fmacs s8 , s0, s4 fmacs s9 , s1, s4 diff --git a/kernel/arm/strmm_kernel_4x4_vfpv3.S b/kernel/arm/strmm_kernel_4x4_vfpv3.S index e24d24eba..0f601d5b8 100644 --- a/kernel/arm/strmm_kernel_4x4_vfpv3.S +++ b/kernel/arm/strmm_kernel_4x4_vfpv3.S @@ -122,30 +122,30 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL4x4_I - fldmias AO!, { s0 - s1 } + vldmia.f32 AO!, { s0 - s1 } pld [ AO , #A_PRE-8 ] - fldmias BO!, { s8 - s9 } + vldmia.f32 BO!, { s8 - s9 } pld [ BO , #B_PRE-8 ] fmuls s16 , s0, s8 - fldmias AO!, { s2 - s3 } + vldmia.f32 AO!, { s2 - s3 } fmuls s17 , s1, s8 fmuls s18 , s2, s8 - fldmias BO!, { s10 - s11 } + vldmia.f32 BO!, { s10 - s11 } fmuls s19 , s3, s8 fmuls s20 , s0, s9 - fldmias AO!, { s4 - s5 } + vldmia.f32 AO!, { s4 - s5 } fmuls s21 , s1, s9 fmuls s22 , s2, s9 - fldmias AO!, { s6 - s7 } + vldmia.f32 AO!, { s6 - s7 } fmuls s23 , s3, s9 fmuls s24 , s0, s10 - fldmias BO!, { s12 - s13 } + vldmia.f32 BO!, { s12 - s13 } fmuls s25 , s1, s10 fmuls s26 , s2, s10 - fldmias BO!, { s14 - s15 } + vldmia.f32 BO!, { s14 - s15 } fmuls s27 , s3, s10 fmuls s28 , s0, s11 @@ -161,20 +161,20 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pld [ AO , #A_PRE ] fmacs s16 , s4, s12 fmacs s17 , s5, s12 - fldmias AO!, { s0 - s1 } + vldmia.f32 AO!, { s0 - s1 } fmacs s18 , s6, s12 pld [ BO , #B_PRE ] fmacs s19 , s7, s12 fmacs s20 , s4, s13 - fldmias AO!, { s2 - s3 } + vldmia.f32 AO!, { s2 - s3 } fmacs s21 , s5, s13 fmacs s22 , s6, s13 - fldmias BO!, { s8 - s9 } + vldmia.f32 BO!, { s8 - s9 } fmacs s23 , s7, s13 fmacs s24 , s4, s14 - fldmias BO!, { s10 - s11 } + vldmia.f32 BO!, { s10 - s11 } fmacs s25 , s5, s14 fmacs s26 , s6, s14 fmacs s27 , s7, s14 @@ -190,17 +190,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL4x4_M1 fmacs s16 , s0, s8 - fldmias AO!, { s4 - s5 } + vldmia.f32 AO!, { s4 - s5 } fmacs s17 , s1, s8 fmacs s18 , s2, s8 - fldmias AO!, { s6 - s7 } + vldmia.f32 AO!, { s6 - s7 } fmacs s19 , s3, s8 fmacs s20 , s0, s9 - fldmias BO!, { s12 - s13 } + vldmia.f32 BO!, { s12 - s13 } fmacs s21 , s1, s9 fmacs s22 , s2, s9 - fldmias BO!, { s14 - s15 } + vldmia.f32 BO!, { s14 - s15 } fmacs s23 , s3, s9 fmacs s24 , s0, s10 @@ -325,7 +325,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fsts s11, [r4 , #12 ] fmuls s15, s0 , s31 - fstmias CO2, { s12 - s15 } + vstmia.f32 CO2, { s12 - s15 } add CO1, CO1, #16 diff --git a/kernel/arm/swap_vfp.S b/kernel/arm/swap_vfp.S index 76661da79..0b3d98912 100644 --- a/kernel/arm/swap_vfp.S +++ b/kernel/arm/swap_vfp.S @@ -103,29 +103,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
pld [ X, #X_PRE ] pld [ Y, #X_PRE ] - fldmiad X, { d0 - d3 } - fldmiad Y, { d4 - d7 } - fstmiad Y!, { d0 - d3 } - fstmiad X!, { d4 - d7} + vldmia.f64 X, { d0 - d3 } + vldmia.f64 Y, { d4 - d7 } + vstmia.f64 Y!, { d0 - d3 } + vstmia.f64 X!, { d4 - d7} .endm .macro KERNEL_F1 - fldmiad X, { d0 } - fldmiad Y, { d4 } - fstmiad Y!, { d0 } - fstmiad X!, { d4 } + vldmia.f64 X, { d0 } + vldmia.f64 Y, { d4 } + vstmia.f64 Y!, { d0 } + vstmia.f64 X!, { d4 } .endm .macro KERNEL_S1 - fldmiad X, { d0 } - fldmiad Y, { d4 } - fstmiad Y, { d0 } - fstmiad X, { d4 } + vldmia.f64 X, { d0 } + vldmia.f64 Y, { d4 } + vstmia.f64 Y, { d0 } + vstmia.f64 X, { d4 } add X, X, INC_X add Y, Y, INC_Y @@ -135,29 +135,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F4 - fldmias X, { s0 - s3 } - fldmias Y, { s4 - s7 } - fstmias Y!, { s0 - s3 } - fstmias X!, { s4 - s7} + vldmia.f32 X, { s0 - s3 } + vldmia.f32 Y, { s4 - s7 } + vstmia.f32 Y!, { s0 - s3 } + vstmia.f32 X!, { s4 - s7} .endm .macro KERNEL_F1 - fldmias X, { s0 } - fldmias Y, { s4 } - fstmias Y!, { s0 } - fstmias X!, { s4 } + vldmia.f32 X, { s0 } + vldmia.f32 Y, { s4 } + vstmia.f32 Y!, { s0 } + vstmia.f32 X!, { s4 } .endm .macro KERNEL_S1 - fldmias X, { s0 } - fldmias Y, { s4 } - fstmias Y, { s0 } - fstmias X, { s4 } + vldmia.f32 X, { s0 } + vldmia.f32 Y, { s4 } + vstmia.f32 Y, { s0 } + vstmia.f32 X, { s4 } add X, X, INC_X add Y, Y, INC_Y @@ -174,35 +174,35 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pld [ X, #X_PRE ] pld [ Y, #X_PRE ] - fldmiad X, { d0 - d3 } - fldmiad Y, { d4 - d7 } - fstmiad Y!, { d0 - d3 } - fstmiad X!, { d4 - d7} + vldmia.f64 X, { d0 - d3 } + vldmia.f64 Y, { d4 - d7 } + vstmia.f64 Y!, { d0 - d3 } + vstmia.f64 X!, { d4 - d7} pld [ X, #X_PRE ] pld [ Y, #X_PRE ] - fldmiad X, { d0 - d3 } - fldmiad Y, { d4 - d7 } - fstmiad Y!, { d0 - d3 } - fstmiad X!, { d4 - d7} + vldmia.f64 X, { d0 - d3 } + vldmia.f64 Y, { d4 - d7 } + vstmia.f64 Y!, { d0 - d3 } + vstmia.f64 X!, { d4 - d7} .endm .macro KERNEL_F1 - fldmiad X, { d0 - d1 } - fldmiad Y, { d4 - d5 } - fstmiad Y!, { d0 - d1 } - fstmiad X!, { d4 - d5 } + vldmia.f64 X, { d0 - d1 } + vldmia.f64 Y, { d4 - d5 } + vstmia.f64 Y!, { d0 - d1 } + vstmia.f64 X!, { d4 - d5 } .endm .macro KERNEL_S1 - fldmiad X, { d0 - d1 } - fldmiad Y, { d4 - d5 } - fstmiad Y, { d0 - d1 } - fstmiad X, { d4 - d5 } + vldmia.f64 X, { d0 - d1 } + vldmia.f64 Y, { d4 - d5 } + vstmia.f64 Y, { d0 - d1 } + vstmia.f64 X, { d4 - d5 } add X, X, INC_X add Y, Y, INC_Y @@ -215,33 +215,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
pld [ X, #X_PRE ] pld [ Y, #X_PRE ] - fldmias X, { s0 - s3 } - fldmias Y, { s4 - s7 } - fstmias Y!, { s0 - s3 } - fstmias X!, { s4 - s7} + vldmia.f32 X, { s0 - s3 } + vldmia.f32 Y, { s4 - s7 } + vstmia.f32 Y!, { s0 - s3 } + vstmia.f32 X!, { s4 - s7} - fldmias X, { s0 - s3 } - fldmias Y, { s4 - s7 } - fstmias Y!, { s0 - s3 } - fstmias X!, { s4 - s7} + vldmia.f32 X, { s0 - s3 } + vldmia.f32 Y, { s4 - s7 } + vstmia.f32 Y!, { s0 - s3 } + vstmia.f32 X!, { s4 - s7} .endm .macro KERNEL_F1 - fldmias X, { s0 - s1 } - fldmias Y, { s4 - s5 } - fstmias Y!, { s0 - s1 } - fstmias X!, { s4 - s5 } + vldmia.f32 X, { s0 - s1 } + vldmia.f32 Y, { s4 - s5 } + vstmia.f32 Y!, { s0 - s1 } + vstmia.f32 X!, { s4 - s5 } .endm .macro KERNEL_S1 - fldmias X, { s0 - s1 } - fldmias Y, { s4 - s5 } - fstmias Y, { s0 - s1 } - fstmias X, { s4 - s5 } + vldmia.f32 X, { s0 - s1 } + vldmia.f32 Y, { s4 - s5 } + vstmia.f32 Y, { s0 - s1 } + vstmia.f32 X, { s4 - s5 } add X, X, INC_X add Y, Y, INC_Y diff --git a/kernel/arm/zcopy_vfp.S b/kernel/arm/zcopy_vfp.S index 48aee4ce0..899dd1e36 100644 --- a/kernel/arm/zcopy_vfp.S +++ b/kernel/arm/zcopy_vfp.S @@ -66,15 +66,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pld [ X, #X_PRE ] pld [ X, #X_PRE+32 ] - fldmiad X!, { d0 - d7 } - fstmiad Y!, { d0 - d7 } + vldmia.f64 X!, { d0 - d7 } + vstmia.f64 Y!, { d0 - d7 } .endm .macro COPY_F1 - fldmiad X!, { d0 - d1 } - fstmiad Y!, { d0 - d1 } + vldmia.f64 X!, { d0 - d1 } + vstmia.f64 Y!, { d0 - d1 } .endm @@ -84,23 +84,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY_S4 nop - fldmiad X, { d0 - d1 } - fstmiad Y, { d0 - d1 } + vldmia.f64 X, { d0 - d1 } + vstmia.f64 Y, { d0 - d1 } add X, X, INC_X add Y, Y, INC_Y - fldmiad X, { d2 - d3 } - fstmiad Y, { d2 - d3 } + vldmia.f64 X, { d2 - d3 } + vstmia.f64 Y, { d2 - d3 } add X, X, INC_X add Y, Y, INC_Y - fldmiad X, { d0 - d1 } - fstmiad Y, { d0 - d1 } + vldmia.f64 X, { d0 - d1 } + vstmia.f64 Y, { d0 - d1 } add X, X, INC_X add Y, Y, INC_Y - fldmiad X, { d2 - d3 } - fstmiad Y, { d2 - d3 } + vldmia.f64 X, { d2 - d3 } + vstmia.f64 Y, { d2 - d3 } add X, X, INC_X add Y, Y, INC_Y @@ -109,8 +109,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY_S1 - fldmiad X, { d0 - d1 } - fstmiad Y, { d0 - d1 } + vldmia.f64 X, { d0 - d1 } + vstmia.f64 Y, { d0 - d1 } add X, X, INC_X add Y, Y, INC_Y diff --git a/kernel/arm/zdot_vfp.S b/kernel/arm/zdot_vfp.S index 43f2c0c0b..5ef9f16a9 100644 --- a/kernel/arm/zdot_vfp.S +++ b/kernel/arm/zdot_vfp.S @@ -76,15 +76,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. pld [ X, #X_PRE ] pld [ Y, #X_PRE ] - fldmiad X!, { d4 - d5 } - fldmiad Y!, { d8 - d9 } + vldmia.f64 X!, { d4 - d5 } + vldmia.f64 Y!, { d8 - d9 } fmacd d0 , d4, d8 fmacd d1 , d4, d9 - fldmiad X!, { d6 - d7 } + vldmia.f64 X!, { d6 - d7 } fmacd d2 , d5, d9 fmacd d3 , d5, d8 - fldmiad Y!, { d10 - d11 } + vldmia.f64 Y!, { d10 - d11 } fmacd d0 , d6, d10 fmacd d1 , d6, d11 pld [ X, #X_PRE ] @@ -93,15 +93,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
pld [ Y, #X_PRE ] - fldmiad X!, { d4 - d5 } - fldmiad Y!, { d8 - d9 } + vldmia.f64 X!, { d4 - d5 } + vldmia.f64 Y!, { d8 - d9 } fmacd d0 , d4, d8 fmacd d1 , d4, d9 - fldmiad X!, { d6 - d7 } + vldmia.f64 X!, { d6 - d7 } fmacd d2 , d5, d9 fmacd d3 , d5, d8 - fldmiad Y!, { d10 - d11 } + vldmia.f64 Y!, { d10 - d11 } fmacd d0 , d6, d10 fmacd d1 , d6, d11 fmacd d2 , d7, d11 @@ -111,8 +111,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1 - fldmiad X!, { d4 - d5 } - fldmiad Y!, { d8 - d9 } + vldmia.f64 X!, { d4 - d5 } + vldmia.f64 Y!, { d8 - d9 } fmacd d0 , d4, d8 fmacd d1 , d4, d9 fmacd d2 , d5, d9 @@ -127,8 +127,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. nop - fldmiad X, { d4 - d5 } - fldmiad Y, { d8 - d9 } + vldmia.f64 X, { d4 - d5 } + vldmia.f64 Y, { d8 - d9 } fmacd d0 , d4, d8 fmacd d1 , d4, d9 fmacd d2 , d5, d9 @@ -136,8 +136,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add X, X, INC_X add Y, Y, INC_Y - fldmiad X, { d4 - d5 } - fldmiad Y, { d8 - d9 } + vldmia.f64 X, { d4 - d5 } + vldmia.f64 Y, { d8 - d9 } fmacd d0 , d4, d8 fmacd d1 , d4, d9 fmacd d2 , d5, d9 @@ -145,8 +145,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add X, X, INC_X add Y, Y, INC_Y - fldmiad X, { d4 - d5 } - fldmiad Y, { d8 - d9 } + vldmia.f64 X, { d4 - d5 } + vldmia.f64 Y, { d8 - d9 } fmacd d0 , d4, d8 fmacd d1 , d4, d9 fmacd d2 , d5, d9 @@ -154,8 +154,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. add X, X, INC_X add Y, Y, INC_Y - fldmiad X, { d4 - d5 } - fldmiad Y, { d8 - d9 } + vldmia.f64 X, { d4 - d5 } + vldmia.f64 Y, { d8 - d9 } fmacd d0 , d4, d8 fmacd d1 , d4, d9 fmacd d2 , d5, d9 @@ -168,8 +168,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1 - fldmiad X, { d4 - d5 } - fldmiad Y, { d8 - d9 } + vldmia.f64 X, { d4 - d5 } + vldmia.f64 Y, { d8 - d9 } fmacd d0 , d4, d8 fmacd d1 , d4, d9 fmacd d2 , d5, d9 @@ -218,11 +218,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. cmp N, #0 ble zdot_kernel_L999 - cmp INC_X, #0 - beq zdot_kernel_L999 +# cmp INC_X, #0 +# beq zdot_kernel_L999 - cmp INC_Y, #0 - beq zdot_kernel_L999 +# cmp INC_Y, #0 +# beq zdot_kernel_L999 cmp INC_X, #1 bne zdot_kernel_S_BEGIN diff --git a/kernel/arm/zgemm_kernel_2x2_vfp.S b/kernel/arm/zgemm_kernel_2x2_vfp.S index 53d18b07b..7934a500e 100644 --- a/kernel/arm/zgemm_kernel_2x2_vfp.S +++ b/kernel/arm/zgemm_kernel_2x2_vfp.S @@ -360,7 +360,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d0, ALPHA_R fldd d1, ALPHA_I - fldmiad CO1, { d4 - d7 } + vldmia.f64 CO1, { d4 - d7 } FMAC_R1 d4 , d0 , d8 FMAC_I1 d5 , d0 , d9 @@ -372,9 +372,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d6 , d1 , d11 FMAC_I2 d7 , d1 , d10 - fstmiad CO1, { d4 - d7 } + vstmia.f64 CO1, { d4 - d7 } - fldmiad CO2, { d4 - d7 } + vldmia.f64 CO2, { d4 - d7 } FMAC_R1 d4 , d0 , d12 FMAC_I1 d5 , d0 , d13 @@ -386,7 +386,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d6 , d1 , d15 FMAC_I2 d7 , d1 , d14 - fstmiad CO2, { d4 - d7 } + vstmia.f64 CO2, { d4 - d7 } add CO1, CO1, #32 @@ -543,23 +543,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
fldd d0, ALPHA_R fldd d1, ALPHA_I - fldmiad CO1, { d4 - d5 } + vldmia.f64 CO1, { d4 - d5 } FMAC_R1 d4 , d0 , d8 FMAC_I1 d5 , d0 , d9 FMAC_R2 d4 , d1 , d9 FMAC_I2 d5 , d1 , d8 - fstmiad CO1, { d4 - d5 } + vstmia.f64 CO1, { d4 - d5 } - fldmiad CO2, { d4 - d5 } + vldmia.f64 CO2, { d4 - d5 } FMAC_R1 d4 , d0 , d12 FMAC_I1 d5 , d0 , d13 FMAC_R2 d4 , d1 , d13 FMAC_I2 d5 , d1 , d12 - fstmiad CO2, { d4 - d5 } + vstmia.f64 CO2, { d4 - d5 } add CO1, CO1, #16 @@ -714,7 +714,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d0, ALPHA_R fldd d1, ALPHA_I - fldmiad CO1, { d4 - d7 } + vldmia.f64 CO1, { d4 - d7 } FMAC_R1 d4 , d0 , d8 FMAC_I1 d5 , d0 , d9 @@ -726,7 +726,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d6 , d1 , d11 FMAC_I2 d7 , d1 , d10 - fstmiad CO1, { d4 - d7 } + vstmia.f64 CO1, { d4 - d7 } add CO1, CO1, #32 @@ -843,14 +843,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d0, ALPHA_R fldd d1, ALPHA_I - fldmiad CO1, { d4 - d5 } + vldmia.f64 CO1, { d4 - d5 } FMAC_R1 d4 , d0 , d8 FMAC_I1 d5 , d0 , d9 FMAC_R2 d4 , d1 , d9 FMAC_I2 d5 , d1 , d8 - fstmiad CO1, { d4 - d5 } + vstmia.f64 CO1, { d4 - d5 } add CO1, CO1, #16 diff --git a/kernel/arm/zgemm_kernel_2x2_vfpv3.S b/kernel/arm/zgemm_kernel_2x2_vfpv3.S index a9d4eddeb..cbb10f342 100644 --- a/kernel/arm/zgemm_kernel_2x2_vfpv3.S +++ b/kernel/arm/zgemm_kernel_2x2_vfpv3.S @@ -374,8 +374,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d0, ALPHA_R fldd d1, ALPHA_I - fldmiad CO1, { d4 - d7 } - fldmiad CO2, { d8 - d11 } + vldmia.f64 CO1, { d4 - d7 } + vldmia.f64 CO2, { d8 - d11 } FADD_R d16, d24 , d16 FADD_I d17, d25 , d17 @@ -406,8 +406,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d10, d1 , d23 FMAC_I2 d11, d1 , d22 - fstmiad CO1, { d4 - d7 } - fstmiad CO2, { d8 - d11 } + vstmia.f64 CO1, { d4 - d7 } + vstmia.f64 CO2, { d8 - d11 } add CO1, CO1, #32 @@ -570,8 +570,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d0, ALPHA_R fldd d1, ALPHA_I - fldmiad CO1, { d4 - d5 } - fldmiad CO2, { d8 - d9 } + vldmia.f64 CO1, { d4 - d5 } + vldmia.f64 CO2, { d8 - d9 } FADD_R d16, d24 , d16 FADD_I d17, d25 , d17 @@ -588,8 +588,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d8 , d1 , d21 FMAC_I2 d9 , d1 , d20 - fstmiad CO1, { d4 - d5 } - fstmiad CO2, { d8 - d9 } + vstmia.f64 CO1, { d4 - d5 } + vstmia.f64 CO2, { d8 - d9 } add CO1, CO1, #16 @@ -752,7 +752,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d0, ALPHA_R fldd d1, ALPHA_I - fldmiad CO1, { d4 - d7 } + vldmia.f64 CO1, { d4 - d7 } FADD_R d16, d24 , d16 FADD_I d17, d25 , d17 @@ -769,7 +769,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d6 , d1 , d19 FMAC_I2 d7 , d1 , d18 - fstmiad CO1, { d4 - d7 } + vstmia.f64 CO1, { d4 - d7 } add CO1, CO1, #32 @@ -887,7 +887,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d0, ALPHA_R fldd d1, ALPHA_I - fldmiad CO1, { d4 - d5 } + vldmia.f64 CO1, { d4 - d5 } FADD_R d16, d24 , d16 FADD_I d17, d25 , d17 @@ -897,7 +897,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
FMAC_R2 d4 , d1 , d17 FMAC_I2 d5 , d1 , d16 - fstmiad CO1, { d4 - d5 } + vstmia.f64 CO1, { d4 - d5 } add CO1, CO1, #16 diff --git a/kernel/arm/zgemm_ncopy_2_vfp.S b/kernel/arm/zgemm_ncopy_2_vfp.S index b3fa225bb..d0661da2a 100644 --- a/kernel/arm/zgemm_ncopy_2_vfp.S +++ b/kernel/arm/zgemm_ncopy_2_vfp.S @@ -87,7 +87,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d6 , [ AO2, #16 ] fldd d7 , [ AO2, #24 ] - fstmiad BO!, { d0 - d7 } + vstmia.f64 BO!, { d0 - d7 } add AO2, AO2, #32 .endm @@ -101,7 +101,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d3 , [ AO2, #8 ] add AO1, AO1, #16 - fstmiad BO!, { d0 - d3 } + vstmia.f64 BO!, { d0 - d3 } add AO2, AO2, #16 .endm @@ -113,7 +113,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d2 , [ AO1, #16 ] fldd d3 , [ AO1, #24 ] - fstmiad BO!, { d0 - d3 } + vstmia.f64 BO!, { d0 - d3 } add AO1, AO1, #32 .endm @@ -124,7 +124,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d0 , [ AO1, #0 ] fldd d1 , [ AO1, #8 ] - fstmiad BO!, { d0 - d1 } + vstmia.f64 BO!, { d0 - d1 } add AO1, AO1, #16 .endm diff --git a/kernel/arm/zgemm_tcopy_2_vfp.S b/kernel/arm/zgemm_tcopy_2_vfp.S index 7e27ca6a6..5e1a384b1 100644 --- a/kernel/arm/zgemm_tcopy_2_vfp.S +++ b/kernel/arm/zgemm_tcopy_2_vfp.S @@ -74,13 +74,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY2x2 pld [ AO1, #A_PRE ] - fldmiad AO1, { d0 - d3 } + vldmia.f64 AO1, { d0 - d3 } add r3, AO1, LDA pld [ r3, #A_PRE ] - fldmiad r3, { d4 - d7 } + vldmia.f64 r3, { d4 - d7 } - fstmiad BO1, { d0 - d7 } + vstmia.f64 BO1, { d0 - d7 } add AO1, AO1, #32 add BO1, BO1, M4 @@ -88,12 +88,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY1x2 - fldmiad AO1, { d0 -d1 } + vldmia.f64 AO1, { d0 -d1 } add r3, AO1, LDA - fldmiad r3, { d2 - d3 } + vldmia.f64 r3, { d2 - d3 } - fstmiad BO2, { d0 - d3 } + vstmia.f64 BO2, { d0 - d3 } add AO1, AO1, #16 add BO2, BO2, #32 @@ -102,9 +102,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /*************************************************************************************************************************/ .macro COPY2x1 - fldmiad AO1, { d0 - d3 } + vldmia.f64 AO1, { d0 - d3 } - fstmiad BO1, { d0 - d3 } + vstmia.f64 BO1, { d0 - d3 } add AO1, AO1, #32 add BO1, BO1, M4 @@ -112,9 +112,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro COPY1x1 - fldmiad AO1, { d0 - d1 } + vldmia.f64 AO1, { d0 - d1 } - fstmiad BO2, { d0 - d1 } + vstmia.f64 BO2, { d0 - d1 } add AO1, AO1, #16 add BO2, BO2, #16 diff --git a/kernel/arm/zgemv_n_vfp.S b/kernel/arm/zgemv_n_vfp.S index 3e3a1bc07..4e64d8785 100644 --- a/kernel/arm/zgemv_n_vfp.S +++ b/kernel/arm/zgemv_n_vfp.S @@ -204,7 +204,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d0, ALPHA_R fldd d1, ALPHA_I - fldmiad YO, { d4 - d7 } + vldmia.f64 YO, { d4 - d7 } FMAC_R1 d4 , d0 , d8 FMAC_I1 d5 , d0 , d9 @@ -216,9 +216,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d6 , d1 , d11 FMAC_I2 d7 , d1 , d10 - fstmiad YO!, { d4 - d7 } + vstmia.f64 YO!, { d4 - d7 } - fldmiad YO, { d4 - d7 } + vldmia.f64 YO, { d4 - d7 } FMAC_R1 d4 , d0 , d12 FMAC_I1 d5 , d0 , d13 @@ -230,7 +230,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
FMAC_R2 d6 , d1 , d15 FMAC_I2 d7 , d1 , d14 - fstmiad YO!, { d4 - d7 } + vstmia.f64 YO!, { d4 - d7 } .endm @@ -269,14 +269,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d0, ALPHA_R fldd d1, ALPHA_I - fldmiad YO, { d4 - d5 } + vldmia.f64 YO, { d4 - d5 } FMAC_R1 d4 , d0 , d8 FMAC_I1 d5 , d0 , d9 FMAC_R2 d4 , d1 , d9 FMAC_I2 d5 , d1 , d8 - fstmiad YO, { d4 - d5 } + vstmia.f64 YO, { d4 - d5 } add YO, YO, #16 @@ -352,47 +352,47 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d0, ALPHA_R fldd d1, ALPHA_I - fldmiad YO, { d4 - d5 } + vldmia.f64 YO, { d4 - d5 } FMAC_R1 d4 , d0 , d8 FMAC_I1 d5 , d0 , d9 FMAC_R2 d4 , d1 , d9 FMAC_I2 d5 , d1 , d8 - fstmiad YO, { d4 - d5 } + vstmia.f64 YO, { d4 - d5 } add YO, YO, INC_Y - fldmiad YO, { d6 - d7 } + vldmia.f64 YO, { d6 - d7 } FMAC_R1 d6 , d0 , d10 FMAC_I1 d7 , d0 , d11 FMAC_R2 d6 , d1 , d11 FMAC_I2 d7 , d1 , d10 - fstmiad YO, { d6 - d7 } + vstmia.f64 YO, { d6 - d7 } add YO, YO, INC_Y - fldmiad YO, { d4 - d5 } + vldmia.f64 YO, { d4 - d5 } FMAC_R1 d4 , d0 , d12 FMAC_I1 d5 , d0 , d13 FMAC_R2 d4 , d1 , d13 FMAC_I2 d5 , d1 , d12 - fstmiad YO, { d4 - d5 } + vstmia.f64 YO, { d4 - d5 } add YO, YO, INC_Y - fldmiad YO, { d6 - d7 } + vldmia.f64 YO, { d6 - d7 } FMAC_R1 d6 , d0 , d14 FMAC_I1 d7 , d0 , d15 FMAC_R2 d6 , d1 , d15 FMAC_I2 d7 , d1 , d14 - fstmiad YO, { d6 - d7 } + vstmia.f64 YO, { d6 - d7 } add YO, YO, INC_Y @@ -433,14 +433,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fldd d0, ALPHA_R fldd d1, ALPHA_I - fldmiad YO, { d4 - d5 } + vldmia.f64 YO, { d4 - d5 } FMAC_R1 d4 , d0 , d8 FMAC_I1 d5 , d0 , d9 FMAC_R2 d4 , d1 , d9 FMAC_I2 d5 , d1 , d8 - fstmiad YO, { d4 - d5 } + vstmia.f64 YO, { d4 - d5 } add YO, YO, INC_Y diff --git a/kernel/arm/zgemv_t_vfp.S b/kernel/arm/zgemv_t_vfp.S index 2193083af..c66fa4fb8 100644 --- a/kernel/arm/zgemv_t_vfp.S +++ b/kernel/arm/zgemv_t_vfp.S @@ -151,12 +151,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F2X1 - fldmiad XO! , { d2 - d3 } - fldmiad AO1!, { d4 - d5 } + vldmia.f64 XO! , { d2 - d3 } + vldmia.f64 AO1!, { d4 - d5 } fmacd d12 , d4 , d2 fmacd d13 , d4 , d3 - fldmiad AO2!, { d8 - d9 } + vldmia.f64 AO2!, { d8 - d9 } KMAC_R d12 , d5 , d3 KMAC_I d13 , d5 , d2 @@ -169,7 +169,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_F2 - fldmiad YO, { d4 - d7 } + vldmia.f64 YO, { d4 - d7 } FMAC_R1 d4 , d0 , d12 FMAC_I1 d5 , d0 , d13 @@ -181,7 +181,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d6 , d1 , d15 FMAC_I2 d7 , d1 , d14 - fstmiad YO!, { d4 - d7 } + vstmia.f64 YO!, { d4 - d7 } .endm @@ -205,8 +205,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_F1X1 - fldmiad XO! , { d2 - d3 } - fldmiad AO1!, { d4 - d5 } + vldmia.f64 XO! , { d2 - d3 } + vldmia.f64 AO1!, { d4 - d5 } fmacd d12 , d4 , d2 fmacd d13 , d4 , d3 @@ -217,14 +217,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_F1 - fldmiad YO, { d4 - d5 } + vldmia.f64 YO, { d4 - d5 } FMAC_R1 d4 , d0 , d12 FMAC_I1 d5 , d0 , d13 FMAC_R2 d4 , d1 , d13 FMAC_I2 d5 , d1 , d12 - fstmiad YO!, { d4 - d5 } + vstmia.f64 YO!, { d4 - d5 } .endm @@ -250,9 +250,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
.macro KERNEL_S2X1 - fldmiad XO , { d2 - d3 } - fldmiad AO1!, { d4 - d5 } - fldmiad AO2!, { d8 - d9 } + vldmia.f64 XO , { d2 - d3 } + vldmia.f64 AO1!, { d4 - d5 } + vldmia.f64 AO2!, { d8 - d9 } fmacd d12 , d4 , d2 fmacd d13 , d4 , d3 @@ -270,25 +270,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S2 - fldmiad YO, { d4 - d5 } + vldmia.f64 YO, { d4 - d5 } FMAC_R1 d4 , d0 , d12 FMAC_I1 d5 , d0 , d13 FMAC_R2 d4 , d1 , d13 FMAC_I2 d5 , d1 , d12 - fstmiad YO, { d4 - d5 } + vstmia.f64 YO, { d4 - d5 } add YO, YO, INC_Y - fldmiad YO, { d6 - d7 } + vldmia.f64 YO, { d6 - d7 } FMAC_R1 d6 , d0 , d14 FMAC_I1 d7 , d0 , d15 FMAC_R2 d6 , d1 , d15 FMAC_I2 d7 , d1 , d14 - fstmiad YO, { d6 - d7 } + vstmia.f64 YO, { d6 - d7 } add YO, YO, INC_Y @@ -314,8 +314,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro KERNEL_S1X1 - fldmiad XO , { d2 - d3 } - fldmiad AO1!, { d4 - d5 } + vldmia.f64 XO , { d2 - d3 } + vldmia.f64 AO1!, { d4 - d5 } fmacd d12 , d4 , d2 fmacd d13 , d4 , d3 @@ -328,14 +328,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .macro SAVE_S1 - fldmiad YO, { d4 - d5 } + vldmia.f64 YO, { d4 - d5 } FMAC_R1 d4 , d0 , d12 FMAC_I1 d5 , d0 , d13 FMAC_R2 d4 , d1 , d13 FMAC_I2 d5 , d1 , d12 - fstmiad YO, { d4 - d5 } + vstmia.f64 YO, { d4 - d5 } add YO, YO, INC_Y diff --git a/kernel/arm/ztrmm_kernel_2x2_vfp.S b/kernel/arm/ztrmm_kernel_2x2_vfp.S index cb6bc050e..4393bc9f6 100644 --- a/kernel/arm/ztrmm_kernel_2x2_vfp.S +++ b/kernel/arm/ztrmm_kernel_2x2_vfp.S @@ -385,7 +385,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d6 , d1 , d11 FMAC_I2 d7 , d1 , d10 - fstmiad CO1, { d4 - d7 } + vstmia.f64 CO1, { d4 - d7 } fldd d4 , FP_ZERO vmov.f64 d5 , d4 @@ -402,7 +402,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d6 , d1 , d15 FMAC_I2 d7 , d1 , d14 - fstmiad CO2, { d4 - d7 } + vstmia.f64 CO2, { d4 - d7 } add CO1, CO1, #32 @@ -567,7 +567,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d4 , d1 , d9 FMAC_I2 d5 , d1 , d8 - fstmiad CO1, { d4 - d5 } + vstmia.f64 CO1, { d4 - d5 } fldd d4 , FP_ZERO vmov.f64 d5 , d4 @@ -577,7 +577,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d4 , d1 , d13 FMAC_I2 d5 , d1 , d12 - fstmiad CO2, { d4 - d5 } + vstmia.f64 CO2, { d4 - d5 } add CO1, CO1, #16 @@ -747,7 +747,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d6 , d1 , d11 FMAC_I2 d7 , d1 , d10 - fstmiad CO1, { d4 - d7 } + vstmia.f64 CO1, { d4 - d7 } add CO1, CO1, #32 @@ -872,7 +872,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d4 , d1 , d9 FMAC_I2 d5 , d1 , d8 - fstmiad CO1, { d4 - d5 } + vstmia.f64 CO1, { d4 - d5 } add CO1, CO1, #16 diff --git a/kernel/arm/ztrmm_kernel_2x2_vfpv3.S b/kernel/arm/ztrmm_kernel_2x2_vfpv3.S index 3e6962f06..39b12caa0 100644 --- a/kernel/arm/ztrmm_kernel_2x2_vfpv3.S +++ b/kernel/arm/ztrmm_kernel_2x2_vfpv3.S @@ -391,8 +391,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d10, d1 , d23 FMAC_I2 d11, d1 , d22 - fstmiad CO1, { d4 - d7 } - fstmiad CO2, { d8 - d11 } + vstmia.f64 CO1, { d4 - d7 } + vstmia.f64 CO2, { d8 - d11 } add CO1, CO1, #32 @@ -569,8 +569,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
FMAC_R2 d8 , d1 , d21 FMAC_I2 d9 , d1 , d20 - fstmiad CO1, { d4 - d5 } - fstmiad CO2, { d8 - d9 } + vstmia.f64 CO1, { d4 - d5 } + vstmia.f64 CO2, { d8 - d9 } add CO1, CO1, #16 @@ -747,7 +747,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d6 , d1 , d19 FMAC_I2 d7 , d1 , d18 - fstmiad CO1, { d4 - d7 } + vstmia.f64 CO1, { d4 - d7 } add CO1, CO1, #32 @@ -872,7 +872,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FMAC_R2 d4 , d1 , d17 FMAC_I2 d5 , d1 , d16 - fstmiad CO1, { d4 - d5 } + vstmia.f64 CO1, { d4 - d5 } add CO1, CO1, #16 diff --git a/kernel/arm64/KERNEL b/kernel/arm64/KERNEL index aeccfbf4c..f936cdf47 100644 --- a/kernel/arm64/KERNEL +++ b/kernel/arm64/KERNEL @@ -1,17 +1,17 @@ ifndef SNRM2KERNEL -SNRM2KERNEL = nrm2.c +SNRM2KERNEL = ../arm/nrm2.c endif ifndef DNRM2KERNEL -DNRM2KERNEL = nrm2.c +DNRM2KERNEL = ../arm/nrm2.c endif ifndef CNRM2KERNEL -CNRM2KERNEL = znrm2.c +CNRM2KERNEL = ../arm/znrm2.c endif ifndef ZNRM2KERNEL -ZNRM2KERNEL = znrm2.c +ZNRM2KERNEL = ../arm/znrm2.c endif ifndef SCABS_KERNEL diff --git a/kernel/arm64/KERNEL.ARMV8 b/kernel/arm64/KERNEL.ARMV8 index 4fc0968cd..a2a435738 100644 --- a/kernel/arm64/KERNEL.ARMV8 +++ b/kernel/arm64/KERNEL.ARMV8 @@ -1,8 +1,3 @@ -SAMAXKERNEL = ../arm/amax.c -DAMAXKERNEL = ../arm/amax.c -CAMAXKERNEL = ../arm/zamax.c -ZAMAXKERNEL = ../arm/zamax.c - SAMINKERNEL = ../arm/amin.c DAMINKERNEL = ../arm/amin.c CAMINKERNEL = ../arm/zamin.c @@ -14,11 +9,6 @@ DMAXKERNEL = ../arm/max.c SMINKERNEL = ../arm/min.c DMINKERNEL = ../arm/min.c -ISAMAXKERNEL = ../arm/iamax.c -IDAMAXKERNEL = ../arm/iamax.c -ICAMAXKERNEL = ../arm/izamax.c -IZAMAXKERNEL = ../arm/izamax.c - ISAMINKERNEL = ../arm/iamin.c IDAMINKERNEL = ../arm/iamin.c ICAMINKERNEL = ../arm/izamin.c @@ -30,85 +20,6 @@ IDMAXKERNEL = ../arm/imax.c ISMINKERNEL = ../arm/imin.c IDMINKERNEL = ../arm/imin.c -SASUMKERNEL = ../arm/asum.c -DASUMKERNEL = ../arm/asum.c -CASUMKERNEL = ../arm/zasum.c -ZASUMKERNEL = ../arm/zasum.c - -SAXPYKERNEL = ../arm/axpy.c -DAXPYKERNEL = ../arm/axpy.c -CAXPYKERNEL = ../arm/zaxpy.c -ZAXPYKERNEL = ../arm/zaxpy.c - -SCOPYKERNEL = ../arm/copy.c -DCOPYKERNEL = ../arm/copy.c -CCOPYKERNEL = ../arm/zcopy.c -ZCOPYKERNEL = ../arm/zcopy.c - -SDOTKERNEL = ../arm/dot.c -DDOTKERNEL = ../arm/dot.c -CDOTKERNEL = ../arm/zdot.c -ZDOTKERNEL = ../arm/zdot.c - -SNRM2KERNEL = ../arm/nrm2.c -DNRM2KERNEL = ../arm/nrm2.c -CNRM2KERNEL = ../arm/znrm2.c -ZNRM2KERNEL = ../arm/znrm2.c - -SROTKERNEL = ../arm/rot.c -DROTKERNEL = ../arm/rot.c -CROTKERNEL = ../arm/zrot.c -ZROTKERNEL = ../arm/zrot.c - -SSCALKERNEL = ../arm/scal.c -DSCALKERNEL = ../arm/scal.c -CSCALKERNEL = ../arm/zscal.c -ZSCALKERNEL = ../arm/zscal.c - -SSWAPKERNEL = ../arm/swap.c -DSWAPKERNEL = ../arm/swap.c -CSWAPKERNEL = ../arm/zswap.c -ZSWAPKERNEL = ../arm/zswap.c - -SGEMVNKERNEL = ../arm/gemv_n.c -DGEMVNKERNEL = ../arm/gemv_n.c -CGEMVNKERNEL = ../arm/zgemv_n.c -ZGEMVNKERNEL = ../arm/zgemv_n.c - -SGEMVTKERNEL = ../arm/gemv_t.c -DGEMVTKERNEL = ../arm/gemv_t.c -CGEMVTKERNEL = ../arm/zgemv_t.c -ZGEMVTKERNEL = ../arm/zgemv_t.c - -STRMMKERNEL = ../generic/trmmkernel_4x4.c -DTRMMKERNEL = ../generic/trmmkernel_2x2.c -CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c -ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c - -SGEMMKERNEL = sgemm_kernel_4x4.S -SGEMMONCOPY = ../generic/gemm_ncopy_4.c -SGEMMOTCOPY = ../generic/gemm_tcopy_4.c -SGEMMONCOPYOBJ = sgemm_oncopy.o -SGEMMOTCOPYOBJ = sgemm_otcopy.o - -DGEMMKERNEL = ../generic/gemmkernel_2x2.c -DGEMMONCOPY = ../generic/gemm_ncopy_2.c 
-DGEMMOTCOPY = ../generic/gemm_tcopy_2.c -DGEMMONCOPYOBJ = dgemm_oncopy.o -DGEMMOTCOPYOBJ = dgemm_otcopy.o - -CGEMMKERNEL = ../generic/zgemmkernel_2x2.c -CGEMMONCOPY = ../generic/zgemm_ncopy_2.c -CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -CGEMMONCOPYOBJ = cgemm_oncopy.o -CGEMMOTCOPYOBJ = cgemm_otcopy.o - -ZGEMMKERNEL = ../generic/zgemmkernel_2x2.c -ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c -ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c -ZGEMMONCOPYOBJ = zgemm_oncopy.o -ZGEMMOTCOPYOBJ = zgemm_otcopy.o - STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c @@ -129,6 +40,167 @@ ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +SAMAXKERNEL = amax.S +DAMAXKERNEL = amax.S +CAMAXKERNEL = zamax.S +ZAMAXKERNEL = zamax.S + +SAXPYKERNEL = axpy.S +DAXPYKERNEL = axpy.S +CAXPYKERNEL = zaxpy.S +ZAXPYKERNEL = zaxpy.S + +SROTKERNEL = rot.S +DROTKERNEL = rot.S +CROTKERNEL = zrot.S +ZROTKERNEL = zrot.S + +SSCALKERNEL = scal.S +DSCALKERNEL = scal.S +CSCALKERNEL = zscal.S +ZSCALKERNEL = zscal.S + +SGEMVNKERNEL = gemv_n.S +DGEMVNKERNEL = gemv_n.S +CGEMVNKERNEL = zgemv_n.S +ZGEMVNKERNEL = zgemv_n.S + +SGEMVTKERNEL = gemv_t.S +DGEMVTKERNEL = gemv_t.S +CGEMVTKERNEL = zgemv_t.S +ZGEMVTKERNEL = zgemv_t.S +SASUMKERNEL = asum.S +DASUMKERNEL = asum.S +CASUMKERNEL = casum.S +ZASUMKERNEL = zasum.S +SCOPYKERNEL = copy.S +DCOPYKERNEL = copy.S +CCOPYKERNEL = copy.S +ZCOPYKERNEL = copy.S + +SSWAPKERNEL = swap.S +DSWAPKERNEL = swap.S +CSWAPKERNEL = swap.S +ZSWAPKERNEL = swap.S + +ISAMAXKERNEL = iamax.S +IDAMAXKERNEL = iamax.S +ICAMAXKERNEL = izamax.S +IZAMAXKERNEL = izamax.S + +ifneq ($(OS_DARWIN)$(CROSS),11) +SNRM2KERNEL = nrm2.S +DNRM2KERNEL = nrm2.S +CNRM2KERNEL = znrm2.S +ZNRM2KERNEL = znrm2.S +endif + +DDOTKERNEL = dot.S +SDOTKERNEL = dot.S +CDOTKERNEL = zdot.S +ZDOTKERNEL = zdot.S +DSDOTKERNEL = dot.S + +ifeq ($(OS_DARWIN)$(CROSS),11) + +STRMMKERNEL = ../generic/trmmkernel_2x2.c +DTRMMKERNEL = ../generic/trmmkernel_2x2.c +CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c +ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c + +SGEMMKERNEL = ../generic/gemmkernel_2x2.c +SGEMMONCOPY = ../generic/gemm_ncopy_2.c +SGEMMOTCOPY = ../generic/gemm_tcopy_2.c +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) + +DGEMMKERNEL = ../generic/gemmkernel_2x2.c +DGEMMONCOPY = ../generic/gemm_ncopy_2.c +DGEMMOTCOPY = ../generic/gemm_tcopy_2.c +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) + +CGEMMKERNEL = ../generic/zgemmkernel_2x2.c +CGEMMONCOPY = ../generic/zgemm_ncopy_2.c +CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) + +ZGEMMKERNEL = ../generic/zgemmkernel_2x2.c +ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c +ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c +ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) +ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) + +else +SGEMMKERNEL = sgemm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S +STRMMKERNEL = strmm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S +ifneq ($(SGEMM_UNROLL_M), $(SGEMM_UNROLL_N)) +SGEMMINCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_M).c +SGEMMITCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_M).c +SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) +SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +SGEMMONCOPY = 
../generic/gemm_ncopy_$(SGEMM_UNROLL_N).c +SGEMMOTCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_N).c +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) + +DGEMMKERNEL = dgemm_kernel_$(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N).S +DTRMMKERNEL = dtrmm_kernel_$(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N).S + +ifneq ($(DGEMM_UNROLL_M), $(DGEMM_UNROLL_N)) + +ifeq ($(DGEMM_UNROLL_M), 8) +DGEMMINCOPY = dgemm_ncopy_$(DGEMM_UNROLL_M).S +DGEMMITCOPY = dgemm_tcopy_$(DGEMM_UNROLL_M).S +else +DGEMMINCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_M).c +DGEMMITCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_M).c +endif + +DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX) +DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif + +ifeq ($(DGEMM_UNROLL_N), 4) +DGEMMONCOPY = dgemm_ncopy_$(DGEMM_UNROLL_N).S +DGEMMOTCOPY = dgemm_tcopy_$(DGEMM_UNROLL_N).S +else +DGEMMONCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_N).c +DGEMMOTCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_N).c +endif + +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) + +CGEMMKERNEL = cgemm_kernel_$(CGEMM_UNROLL_M)x$(CGEMM_UNROLL_N).S +CTRMMKERNEL = ctrmm_kernel_$(CGEMM_UNROLL_M)x$(CGEMM_UNROLL_N).S +ifneq ($(CGEMM_UNROLL_M), $(CGEMM_UNROLL_N)) +CGEMMINCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_M).c +CGEMMITCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_M).c +CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) +CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +CGEMMONCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_N).c +CGEMMOTCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_N).c +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) + +ZGEMMKERNEL = zgemm_kernel_$(ZGEMM_UNROLL_M)x$(ZGEMM_UNROLL_N).S +ZTRMMKERNEL = ztrmm_kernel_$(ZGEMM_UNROLL_M)x$(ZGEMM_UNROLL_N).S +ifneq ($(ZGEMM_UNROLL_M), $(ZGEMM_UNROLL_N)) +ZGEMMINCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_M).c +ZGEMMITCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_M).c +ZGEMMINCOPYOBJ = zgemm_incopy$(TSUFFIX).$(SUFFIX) +ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +ZGEMMONCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_N).c +ZGEMMOTCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_N).c +ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) +ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) + +endif diff --git a/kernel/arm64/KERNEL.CORTEXA53 b/kernel/arm64/KERNEL.CORTEXA53 new file mode 100644 index 000000000..c1d33fa3e --- /dev/null +++ b/kernel/arm64/KERNEL.CORTEXA53 @@ -0,0 +1,3 @@ +include $(KERNELDIR)/KERNEL.ARMV8 + + diff --git a/kernel/arm64/KERNEL.CORTEXA57 b/kernel/arm64/KERNEL.CORTEXA57 index 2bf88867e..04d6940d7 100644 --- a/kernel/arm64/KERNEL.CORTEXA57 +++ b/kernel/arm64/KERNEL.CORTEXA57 @@ -1,4 +1,49 @@ -include $(KERNELDIR)/KERNEL.ARMV8 +SAMINKERNEL = ../arm/amin.c +DAMINKERNEL = ../arm/amin.c +CAMINKERNEL = ../arm/zamin.c +ZAMINKERNEL = ../arm/zamin.c + +SMAXKERNEL = ../arm/max.c +DMAXKERNEL = ../arm/max.c + +SMINKERNEL = ../arm/min.c +DMINKERNEL = ../arm/min.c + +ISAMINKERNEL = ../arm/iamin.c +IDAMINKERNEL = ../arm/iamin.c +ICAMINKERNEL = ../arm/izamin.c +IZAMINKERNEL = ../arm/izamin.c + +ISMAXKERNEL = ../arm/imax.c +IDMAXKERNEL = ../arm/imax.c + +ISMINKERNEL = ../arm/imin.c +IDMINKERNEL = ../arm/imin.c + +STRMMKERNEL = ../generic/trmmkernel_4x4.c +DTRMMKERNEL = ../generic/trmmkernel_2x2.c +CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c +ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c + +STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +STRSMKERNEL_LT = 
../generic/trsm_kernel_LT.c +STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c SAMAXKERNEL = amax.S DAMAXKERNEL = amax.S @@ -29,6 +74,7 @@ SDOTKERNEL = dot.S DDOTKERNEL = dot.S CDOTKERNEL = zdot.S ZDOTKERNEL = zdot.S +DSDOTKERNEL = dot.S SNRM2KERNEL = nrm2.S DNRM2KERNEL = nrm2.S @@ -65,13 +111,13 @@ STRMMKERNEL = strmm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S ifneq ($(SGEMM_UNROLL_M), $(SGEMM_UNROLL_N)) SGEMMINCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_M).c SGEMMITCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_M).c -SGEMMINCOPYOBJ = sgemm_incopy.o -SGEMMITCOPYOBJ = sgemm_itcopy.o +SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) +SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) endif SGEMMONCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_N).c SGEMMOTCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_N).c -SGEMMONCOPYOBJ = sgemm_oncopy.o -SGEMMOTCOPYOBJ = sgemm_otcopy.o +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) DGEMMKERNEL = dgemm_kernel_$(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N).S DTRMMKERNEL = dtrmm_kernel_$(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N).S @@ -86,8 +132,8 @@ DGEMMINCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_M).c DGEMMITCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_M).c endif -DGEMMINCOPYOBJ = dgemm_incopy.o -DGEMMITCOPYOBJ = dgemm_itcopy.o +DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX) +DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) endif ifeq ($(DGEMM_UNROLL_N), 4) @@ -98,32 +144,32 @@ DGEMMONCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_N).c DGEMMOTCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_N).c endif -DGEMMONCOPYOBJ = dgemm_oncopy.o -DGEMMOTCOPYOBJ = dgemm_otcopy.o +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) CGEMMKERNEL = cgemm_kernel_$(CGEMM_UNROLL_M)x$(CGEMM_UNROLL_N).S CTRMMKERNEL = ctrmm_kernel_$(CGEMM_UNROLL_M)x$(CGEMM_UNROLL_N).S ifneq ($(CGEMM_UNROLL_M), $(CGEMM_UNROLL_N)) CGEMMINCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_M).c CGEMMITCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_M).c -CGEMMINCOPYOBJ = cgemm_incopy.o -CGEMMITCOPYOBJ = cgemm_itcopy.o +CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) +CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) endif CGEMMONCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_N).c CGEMMOTCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_N).c -CGEMMONCOPYOBJ = cgemm_oncopy.o -CGEMMOTCOPYOBJ = cgemm_otcopy.o +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) ZGEMMKERNEL = zgemm_kernel_$(ZGEMM_UNROLL_M)x$(ZGEMM_UNROLL_N).S ZTRMMKERNEL = ztrmm_kernel_$(ZGEMM_UNROLL_M)x$(ZGEMM_UNROLL_N).S ifneq ($(ZGEMM_UNROLL_M), $(ZGEMM_UNROLL_N)) ZGEMMINCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_M).c ZGEMMITCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_M).c -ZGEMMINCOPYOBJ = zgemm_incopy.o -ZGEMMITCOPYOBJ = zgemm_itcopy.o +ZGEMMINCOPYOBJ = zgemm_incopy$(TSUFFIX).$(SUFFIX) +ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX) 
endif ZGEMMONCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_N).c ZGEMMOTCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_N).c -ZGEMMONCOPYOBJ = zgemm_oncopy.o -ZGEMMOTCOPYOBJ = zgemm_otcopy.o +ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) +ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) diff --git a/kernel/arm64/KERNEL.CORTEXA72 b/kernel/arm64/KERNEL.CORTEXA72 new file mode 100644 index 000000000..007b2ce26 --- /dev/null +++ b/kernel/arm64/KERNEL.CORTEXA72 @@ -0,0 +1,3 @@ +include $(KERNELDIR)/KERNEL.CORTEXA57 + + diff --git a/kernel/arm64/KERNEL.CORTEXA73 b/kernel/arm64/KERNEL.CORTEXA73 new file mode 100644 index 000000000..007b2ce26 --- /dev/null +++ b/kernel/arm64/KERNEL.CORTEXA73 @@ -0,0 +1,3 @@ +include $(KERNELDIR)/KERNEL.CORTEXA57 + + diff --git a/kernel/arm64/KERNEL.FALKOR b/kernel/arm64/KERNEL.FALKOR new file mode 100644 index 000000000..007b2ce26 --- /dev/null +++ b/kernel/arm64/KERNEL.FALKOR @@ -0,0 +1,3 @@ +include $(KERNELDIR)/KERNEL.CORTEXA57 + + diff --git a/kernel/arm64/KERNEL.THUNDERX b/kernel/arm64/KERNEL.THUNDERX index 11b7a2ca8..cb02c7bc5 100644 --- a/kernel/arm64/KERNEL.THUNDERX +++ b/kernel/arm64/KERNEL.THUNDERX @@ -1,6 +1,133 @@ -include $(KERNELDIR)/KERNEL.ARMV8 +SAMAXKERNEL = amax.S +DAMAXKERNEL = amax.S +CAMAXKERNEL = zamax.S +ZAMAXKERNEL = zamax.S + +SAMINKERNEL = ../arm/amin.c +DAMINKERNEL = ../arm/amin.c +CAMINKERNEL = ../arm/zamin.c +ZAMINKERNEL = ../arm/zamin.c + +SMAXKERNEL = ../arm/max.c +DMAXKERNEL = ../arm/max.c + +SMINKERNEL = ../arm/min.c +DMINKERNEL = ../arm/min.c + +ISAMAXKERNEL = iamax.S +IDAMAXKERNEL = iamax.S +ICAMAXKERNEL = izamax.S +IZAMAXKERNEL = izamax.S + +ISAMINKERNEL = ../arm/iamin.c +IDAMINKERNEL = ../arm/iamin.c +ICAMINKERNEL = ../arm/izamin.c +IZAMINKERNEL = ../arm/izamin.c + +ISMAXKERNEL = ../arm/imax.c +IDMAXKERNEL = ../arm/imax.c + +ISMINKERNEL = ../arm/imin.c +IDMINKERNEL = ../arm/imin.c + +SASUMKERNEL = asum.S +DASUMKERNEL = asum.S +CASUMKERNEL = casum.S +ZASUMKERNEL = zasum.S + +SAXPYKERNEL = axpy.S +DAXPYKERNEL = daxpy_thunderx.c +CAXPYKERNEL = zaxpy.S +ZAXPYKERNEL = zaxpy.S + +SCOPYKERNEL = copy.S +DCOPYKERNEL = copy.S +CCOPYKERNEL = copy.S +ZCOPYKERNEL = copy.S + +SDOTKERNEL = dot_thunderx.c +DDOTKERNEL = ddot_thunderx.c +CDOTKERNEL = zdot.S +ZDOTKERNEL = zdot.S +DSDOTKERNEL = dot.S + +SNRM2KERNEL = nrm2.S +DNRM2KERNEL = nrm2.S +CNRM2KERNEL = znrm2.S +ZNRM2KERNEL = znrm2.S + +SROTKERNEL = rot.S +DROTKERNEL = rot.S +CROTKERNEL = zrot.S +ZROTKERNEL = zrot.S + +SSCALKERNEL = scal.S +DSCALKERNEL = scal.S +CSCALKERNEL = zscal.S +ZSCALKERNEL = zscal.S + +SSWAPKERNEL = swap.S +DSWAPKERNEL = swap.S +CSWAPKERNEL = swap.S +ZSWAPKERNEL = swap.S + +SGEMVNKERNEL = gemv_n.S +DGEMVNKERNEL = gemv_n.S +CGEMVNKERNEL = zgemv_n.S +ZGEMVNKERNEL = zgemv_n.S + +SGEMVTKERNEL = gemv_t.S +DGEMVTKERNEL = gemv_t.S +CGEMVTKERNEL = zgemv_t.S +ZGEMVTKERNEL = zgemv_t.S + +STRMMKERNEL = ../generic/trmmkernel_4x4.c +DTRMMKERNEL = ../generic/trmmkernel_2x2.c +CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c +ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c + +SGEMMKERNEL = sgemm_kernel_4x4.S +SGEMMONCOPY = ../generic/gemm_ncopy_4.c +SGEMMOTCOPY = ../generic/gemm_tcopy_4.c +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) + +DGEMMKERNEL = ../generic/gemmkernel_2x2.c +DGEMMONCOPY = ../generic/gemm_ncopy_2.c +DGEMMOTCOPY = ../generic/gemm_tcopy_2.c +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) + +CGEMMKERNEL = ../generic/zgemmkernel_2x2.c +CGEMMONCOPY = 
../generic/zgemm_ncopy_2.c +CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) + +ZGEMMKERNEL = ../generic/zgemmkernel_2x2.c +ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c +ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c +ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) +ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) + +STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c -SDOTKERNEL=dot_thunderx.c -DDOTKERNEL=ddot_thunderx.c -DAXPYKERNEL=daxpy_thunderx.c diff --git a/kernel/arm64/KERNEL.THUNDERX2T99 b/kernel/arm64/KERNEL.THUNDERX2T99 index b66cd0e8b..a20d0d4a6 100644 --- a/kernel/arm64/KERNEL.THUNDERX2T99 +++ b/kernel/arm64/KERNEL.THUNDERX2T99 @@ -1,4 +1,137 @@ -include $(KERNELDIR)/KERNEL.CORTEXA57 +SAMINKERNEL = ../arm/amin.c +DAMINKERNEL = ../arm/amin.c +CAMINKERNEL = ../arm/zamin.c +ZAMINKERNEL = ../arm/zamin.c + +SMAXKERNEL = ../arm/max.c +DMAXKERNEL = ../arm/max.c + +SMINKERNEL = ../arm/min.c +DMINKERNEL = ../arm/min.c + +ISAMINKERNEL = ../arm/iamin.c +IDAMINKERNEL = ../arm/iamin.c +ICAMINKERNEL = ../arm/izamin.c +IZAMINKERNEL = ../arm/izamin.c + +ISMAXKERNEL = ../arm/imax.c +IDMAXKERNEL = ../arm/imax.c + +ISMINKERNEL = ../arm/imin.c +IDMINKERNEL = ../arm/imin.c + +STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c +ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c +ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c +ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c + +SAMAXKERNEL = amax.S +DAMAXKERNEL = amax.S +CAMAXKERNEL = zamax.S +ZAMAXKERNEL = zamax.S + +SAXPYKERNEL = axpy.S +DAXPYKERNEL = daxpy_thunderx2t99.S +CAXPYKERNEL = zaxpy.S +ZAXPYKERNEL = zaxpy.S + +SROTKERNEL = rot.S +DROTKERNEL = rot.S +CROTKERNEL = zrot.S +ZROTKERNEL = zrot.S + +SSCALKERNEL = scal.S +DSCALKERNEL = scal.S +CSCALKERNEL = zscal.S +ZSCALKERNEL = zscal.S + +SGEMVNKERNEL = gemv_n.S +DGEMVNKERNEL = gemv_n.S +CGEMVNKERNEL = zgemv_n.S +ZGEMVNKERNEL = zgemv_n.S + +SGEMVTKERNEL = gemv_t.S +DGEMVTKERNEL = gemv_t.S +CGEMVTKERNEL = zgemv_t.S +ZGEMVTKERNEL = zgemv_t.S + +STRMMKERNEL = strmm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S +ifneq ($(SGEMM_UNROLL_M), $(SGEMM_UNROLL_N)) +SGEMMINCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_M).c +SGEMMITCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_M).c +SGEMMINCOPYOBJ = 
sgemm_incopy$(TSUFFIX).$(SUFFIX) +SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +SGEMMONCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_N).c +SGEMMOTCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_N).c +SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX) +SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) + +DTRMMKERNEL = dtrmm_kernel_$(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N).S + +ifneq ($(DGEMM_UNROLL_M), $(DGEMM_UNROLL_N)) + +ifeq ($(DGEMM_UNROLL_M), 8) +DGEMMINCOPY = dgemm_ncopy_$(DGEMM_UNROLL_M).S +DGEMMITCOPY = dgemm_tcopy_$(DGEMM_UNROLL_M).S +else +DGEMMINCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_M).c +DGEMMITCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_M).c +endif + +DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX) +DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif + +ifeq ($(DGEMM_UNROLL_N), 4) +DGEMMONCOPY = dgemm_ncopy_$(DGEMM_UNROLL_N).S +DGEMMOTCOPY = dgemm_tcopy_$(DGEMM_UNROLL_N).S +else +DGEMMONCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_N).c +DGEMMOTCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_N).c +endif + +DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX) +DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX) + +CTRMMKERNEL = ctrmm_kernel_$(CGEMM_UNROLL_M)x$(CGEMM_UNROLL_N).S +ifneq ($(CGEMM_UNROLL_M), $(CGEMM_UNROLL_N)) +CGEMMINCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_M).c +CGEMMITCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_M).c +CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX) +CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +CGEMMONCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_N).c +CGEMMOTCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_N).c +CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX) +CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX) + +ZTRMMKERNEL = ztrmm_kernel_$(ZGEMM_UNROLL_M)x$(ZGEMM_UNROLL_N).S +ifneq ($(ZGEMM_UNROLL_M), $(ZGEMM_UNROLL_N)) +ZGEMMINCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_M).c +ZGEMMITCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_M).c +ZGEMMINCOPYOBJ = zgemm_incopy$(TSUFFIX).$(SUFFIX) +ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX) +endif +ZGEMMONCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_N).c +ZGEMMOTCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_N).c +ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX) +ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX) SASUMKERNEL = sasum_thunderx2t99.c DASUMKERNEL = dasum_thunderx2t99.c @@ -27,12 +160,12 @@ CNRM2KERNEL = scnrm2_thunderx2t99.c DNRM2KERNEL = dznrm2_thunderx2t99.c ZNRM2KERNEL = dznrm2_thunderx2t99.c -DAXPYKERNEL = daxpy_thunderx2t99.S DDOTKERNEL = dot_thunderx2t99.c SDOTKERNEL = dot_thunderx2t99.c CDOTKERNEL = zdot_thunderx2t99.c ZDOTKERNEL = zdot_thunderx2t99.c +DSDOTKERNEL = dot.S ifeq ($(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N), 8x4) DGEMMKERNEL = dgemm_kernel_8x4_thunderx2t99.S diff --git a/kernel/arm64/KERNEL.VULCAN b/kernel/arm64/KERNEL.VULCAN deleted file mode 100644 index 8b0273951..000000000 --- a/kernel/arm64/KERNEL.VULCAN +++ /dev/null @@ -1,3 +0,0 @@ -include $(KERNELDIR)/KERNEL.THUNDERX2T99 - - diff --git a/kernel/arm64/KERNEL.XGENE1 b/kernel/arm64/KERNEL.XGENE1 deleted file mode 100644 index 6ee0c730c..000000000 --- a/kernel/arm64/KERNEL.XGENE1 +++ /dev/null @@ -1 +0,0 @@ -include $(KERNELDIR)/KERNEL.ARMV8 \ No newline at end of file diff --git a/kernel/arm64/casum_thunderx2t99.c b/kernel/arm64/casum_thunderx2t99.c index cd5d936c5..c6dbb3f77 100644 --- a/kernel/arm64/casum_thunderx2t99.c +++ b/kernel/arm64/casum_thunderx2t99.c @@ -233,13 +233,10 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) FLOAT asum = 0.0; #if defined(SMP) - nthreads = num_cpu_avail(1); - - if 
(inc_x == 0) - nthreads = 1; - - if (n <= 10000) + if (inc_x == 0 || n <= 10000) nthreads = 1; + else + nthreads = num_cpu_avail(1); if (nthreads == 1) { asum = casum_compute(n, x, inc_x); diff --git a/kernel/arm64/copy_thunderx2t99.c b/kernel/arm64/copy_thunderx2t99.c index bd67b48b0..e31876139 100644 --- a/kernel/arm64/copy_thunderx2t99.c +++ b/kernel/arm64/copy_thunderx2t99.c @@ -183,13 +183,10 @@ int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) if (n <= 0) return 0; #if defined(SMP) - nthreads = num_cpu_avail(1); - - if (inc_x == 0) - nthreads = 1; - - if (n <= 10000) + if (inc_x == 0 || n <= 10000) nthreads = 1; + else + nthreads = num_cpu_avail(1); if (nthreads == 1) { do_copy(n, x, inc_x, y, inc_y); diff --git a/kernel/arm64/dasum_thunderx2t99.c b/kernel/arm64/dasum_thunderx2t99.c index ba12fc776..a212c9534 100644 --- a/kernel/arm64/dasum_thunderx2t99.c +++ b/kernel/arm64/dasum_thunderx2t99.c @@ -228,13 +228,10 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) FLOAT asum = 0.0; #if defined(SMP) - nthreads = num_cpu_avail(1); - - if (inc_x == 0) - nthreads = 1; - - if (n <= 10000) + if (inc_x == 0 || n <= 10000) nthreads = 1; + else + nthreads = num_cpu_avail(1); if (nthreads == 1) { asum = dasum_compute(n, x, inc_x); diff --git a/kernel/arm64/dgemm_kernel_8x4_thunderx2t99.S b/kernel/arm64/dgemm_kernel_8x4_thunderx2t99.S index 598db6e0c..d1551ffea 100644 --- a/kernel/arm64/dgemm_kernel_8x4_thunderx2t99.S +++ b/kernel/arm64/dgemm_kernel_8x4_thunderx2t99.S @@ -943,13 +943,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. prfm PLDL1KEEP, [origPB] prfm PLDL1KEEP, [origPA] - - ldr A_PRE_SIZE, =dgemm_prefetch_size_a - ldr A_PRE_SIZE, [A_PRE_SIZE] - ldr B_PRE_SIZE, =dgemm_prefetch_size_b - ldr B_PRE_SIZE, [B_PRE_SIZE] - ldr C_PRE_SIZE, =dgemm_prefetch_size_c - ldr C_PRE_SIZE, [C_PRE_SIZE] + mov A_PRE_SIZE, #3584 + mov B_PRE_SIZE, #512 + mov C_PRE_SIZE, #128 add A_PRE_SIZE_64, A_PRE_SIZE, #64 add B_PRE_SIZE_64, B_PRE_SIZE, #64 diff --git a/kernel/arm64/dot.S b/kernel/arm64/dot.S index a1a5bf20b..3f01385fe 100644 --- a/kernel/arm64/dot.S +++ b/kernel/arm64/dot.S @@ -74,8 +74,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #if !defined(DSDOT) fmadd DOTF, TMPX, TMPY, DOTF #else // DSDOT - fmul TMPX, TMPX, TMPY + fcvt d3, TMPY fcvt d2, TMPX + fmul d2, d2, d3 fadd DOTF, DOTF, d2 #endif .endm @@ -87,12 +88,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #if !defined(DSDOT) fmla v0.4s, v2.4s, v3.4s #else - fmul v2.4s, v2.4s, v3.4s - ext v3.16b, v2.16b, v2.16b, #8 - fcvtl v2.2d, v2.2s + fcvtl2 v5.2d, v3.4s + fcvtl2 v4.2d, v2.4s fcvtl v3.2d, v3.2s + fcvtl v2.2d, v2.2s + fmul v4.2d, v4.2d, v5.2d + fmul v2.2d, v2.2d, v3.2d + fadd v2.2d, v2.2d, v4.2d fadd v0.2d, v0.2d, v2.2d - fadd v0.2d, v0.2d, v3.2d #endif #else //DOUBLE ld1 {v2.2d, v3.2d}, [X], #32 @@ -136,8 +139,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #if !defined(DSDOT) fmadd DOTF, TMPX, TMPY, DOTF #else // DSDOT - fmul TMPX, TMPX, TMPY + fcvt d3, TMPY fcvt d2, TMPX + fmul d2, d2, d3 fadd DOTF, DOTF, d2 #endif .endm diff --git a/kernel/arm64/dot_thunderx2t99.c b/kernel/arm64/dot_thunderx2t99.c index 8eeb94f36..3940acddd 100644 --- a/kernel/arm64/dot_thunderx2t99.c +++ b/kernel/arm64/dot_thunderx2t99.c @@ -199,7 +199,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
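A note on the recurring SMP hunks in this part of the patch: casum, copy, dasum, dot, iamax, izamax, sasum, zasum and zdot under kernel/arm64 all receive the same refactor, collapsing the three-statement thread-count selection into a single if/else so that num_cpu_avail(1) is only queried when the call can actually run in parallel. The practical effect is that the scheduler is no longer consulted on every short-vector invocation, which matters for Level 1 routines dominated by call overhead. A minimal self-contained sketch of the consolidated gate, assuming a positive stride and using vec_compute() and vec_reduce() as stand-ins for the per-kernel routines (both names are illustrative, not OpenBLAS internals):

    /* Stand-in for the per-kernel serial routine (e.g. casum_compute). */
    static double vec_compute(long n, const double *x, long inc_x)
    {
        double acc = 0.0;
        for (long i = 0; i < n; i++)    /* positive strides only, for brevity */
            acc += x[i * inc_x];
        return acc;
    }

    /* Stand-in for OpenBLAS's scheduler query. */
    static int num_cpu_avail(int level) { (void)level; return 4; }

    double vec_reduce(long n, const double *x, long inc_x)
    {
        int nthreads;

        /* The consolidated gate from the patch: a degenerate stride or a
           short vector stays serial; only otherwise ask the scheduler. */
        if (inc_x == 0 || n <= 10000)
            nthreads = 1;
        else
            nthreads = num_cpu_avail(1);

        if (nthreads == 1)
            return vec_compute(n, x, inc_x);

        /* The threaded path (blas_level1_thread in the real kernels) is
           elided; this sketch falls back to the serial routine. */
        return vec_compute(n, x, inc_x);
    }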
" faddp "DOTF", v0.2d \n" #endif /* !defined(DSDOT) */ -#else /* !defined(DOUBLE) */ +#else /* !defined(DOUBLE) */ #define KERNEL_F1 \ " ldr "TMPX", ["X"] \n" \ " ldr "TMPY", ["Y"] \n" \ @@ -384,13 +384,10 @@ RETURN_TYPE CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y RETURN_TYPE dot = 0.0; #if defined(SMP) - nthreads = num_cpu_avail(1); - - if (inc_x == 0 || inc_y == 0) - nthreads = 1; - - if (n <= 10000) + if (inc_x == 0 || inc_y == 0 || n <= 10000) nthreads = 1; + else + nthreads = num_cpu_avail(1); if (nthreads == 1) { dot = dot_compute(n, x, inc_x, y, inc_y); diff --git a/kernel/arm64/dznrm2_thunderx2t99.c b/kernel/arm64/dznrm2_thunderx2t99.c index 2aea9b4a9..b94f0cffc 100644 --- a/kernel/arm64/dznrm2_thunderx2t99.c +++ b/kernel/arm64/dznrm2_thunderx2t99.c @@ -328,10 +328,10 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) if (n <= 0 || inc_x <= 0) return 0.0; #if defined(SMP) - nthreads = num_cpu_avail(1); - if (n <= 10000) nthreads = 1; + else + nthreads = num_cpu_avail(1); if (nthreads == 1) { nrm2_compute(n, x, inc_x, &ssq, &scale); diff --git a/kernel/arm64/dznrm2_thunderx2t99_fast.c b/kernel/arm64/dznrm2_thunderx2t99_fast.c index 8b04a3eb6..8405b388b 100644 --- a/kernel/arm64/dznrm2_thunderx2t99_fast.c +++ b/kernel/arm64/dznrm2_thunderx2t99_fast.c @@ -235,10 +235,10 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) if (n <= 0 || inc_x <= 0) return 0.0; #if defined(SMP) - nthreads = num_cpu_avail(1); - if (n <= 10000) nthreads = 1; + else + nthreads = num_cpu_avail(1); if (nthreads == 1) { nrm2 = nrm2_compute(n, x, inc_x); diff --git a/kernel/arm64/iamax_thunderx2t99.c b/kernel/arm64/iamax_thunderx2t99.c index a11b18419..e3bec4a20 100644 --- a/kernel/arm64/iamax_thunderx2t99.c +++ b/kernel/arm64/iamax_thunderx2t99.c @@ -321,13 +321,10 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) BLASLONG max_index = 0; #if defined(SMP) - nthreads = num_cpu_avail(1); - - if (inc_x == 0) - nthreads = 1; - - if (n <= 10000) + if (inc_x == 0 || n <= 10000) nthreads = 1; + else + nthreads = num_cpu_avail(1); if (nthreads == 1) { max_index = iamax_compute(n, x, inc_x); diff --git a/kernel/arm64/izamax_thunderx2t99.c b/kernel/arm64/izamax_thunderx2t99.c index 8d70b0515..b2e2828f0 100644 --- a/kernel/arm64/izamax_thunderx2t99.c +++ b/kernel/arm64/izamax_thunderx2t99.c @@ -330,13 +330,10 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) BLASLONG max_index = 0; #if defined(SMP) - nthreads = num_cpu_avail(1); - - if (inc_x == 0) - nthreads = 1; - - if (n <= 10000) + if (inc_x == 0 || n <= 10000) nthreads = 1; + else + nthreads = num_cpu_avail(1); if (nthreads == 1) { max_index = izamax_compute(n, x, inc_x); diff --git a/kernel/arm64/sasum_thunderx2t99.c b/kernel/arm64/sasum_thunderx2t99.c index 28fc34c62..014c667ba 100644 --- a/kernel/arm64/sasum_thunderx2t99.c +++ b/kernel/arm64/sasum_thunderx2t99.c @@ -230,13 +230,10 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) FLOAT asum = 0.0; #if defined(SMP) - nthreads = num_cpu_avail(1); - - if (inc_x == 0) - nthreads = 1; - - if (n <= 10000) + if (inc_x == 0 || n <= 10000) nthreads = 1; + else + nthreads = num_cpu_avail(1); if (nthreads == 1) { asum = sasum_compute(n, x, inc_x); diff --git a/kernel/arm64/scnrm2_thunderx2t99.c b/kernel/arm64/scnrm2_thunderx2t99.c index b8df4962b..f96de441e 100644 --- a/kernel/arm64/scnrm2_thunderx2t99.c +++ b/kernel/arm64/scnrm2_thunderx2t99.c @@ -318,10 +318,10 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) if (n <= 0 || inc_x <= 0) return 0.0; #if defined(SMP) - nthreads = 
num_cpu_avail(1); - if (n <= 10000) nthreads = 1; + else + nthreads = num_cpu_avail(1); if (nthreads == 1) { nrm2_double = nrm2_compute(n, x, inc_x); diff --git a/kernel/arm64/zasum_thunderx2t99.c b/kernel/arm64/zasum_thunderx2t99.c index 140e5a741..1d303a9a3 100644 --- a/kernel/arm64/zasum_thunderx2t99.c +++ b/kernel/arm64/zasum_thunderx2t99.c @@ -230,13 +230,10 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) FLOAT asum = 0.0; #if defined(SMP) - nthreads = num_cpu_avail(1); - - if (inc_x == 0) - nthreads = 1; - - if (n <= 10000) + if (inc_x == 0 || n <= 10000) nthreads = 1; + else + nthreads = num_cpu_avail(1); if (nthreads == 1) { asum = zasum_compute(n, x, inc_x); diff --git a/kernel/arm64/zdot_thunderx2t99.c b/kernel/arm64/zdot_thunderx2t99.c index 70d683077..6185bc7d9 100644 --- a/kernel/arm64/zdot_thunderx2t99.c +++ b/kernel/arm64/zdot_thunderx2t99.c @@ -317,13 +317,10 @@ OPENBLAS_COMPLEX_FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLA CIMAG(zdot) = 0.0; #if defined(SMP) - nthreads = num_cpu_avail(1); - - if (inc_x == 0 || inc_y == 0) - nthreads = 1; - - if (n <= 10000) + if (inc_x == 0 || inc_y == 0 || n <= 10000) nthreads = 1; + else + nthreads = num_cpu_avail(1); if (nthreads == 1) { zdot_compute(n, x, inc_x, y, inc_y, &zdot); diff --git a/kernel/generic/trmm_lncopy_16.c b/kernel/generic/trmm_lncopy_16.c index 4c0a76cbd..0f4b0a9f7 100644 --- a/kernel/generic/trmm_lncopy_16.c +++ b/kernel/generic/trmm_lncopy_16.c @@ -661,7 +661,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON b[ 9] = ZERO; b[ 10] = ZERO; b[ 11] = ZERO; - b[ 11] = ZERO; + b[ 12] = ZERO; b[ 13] = ZERO; b[ 14] = ZERO; b[ 15] = ZERO; diff --git a/kernel/generic/trmm_ltcopy_2.c b/kernel/generic/trmm_ltcopy_2.c index 60cdeed1c..e9ad45fa0 100644 --- a/kernel/generic/trmm_ltcopy_2.c +++ b/kernel/generic/trmm_ltcopy_2.c @@ -116,22 +116,34 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON if (m & 1) { if (X > posY) { - /* ao1 += 1; - ao2 += 1; */ + ao1 += 1; + ao2 += 1; b += 2; } else -#ifdef UNIT if (X < posY) { -#endif - b[ 0] = *(ao1 + 0); -#ifdef UNIT + data01 = *(ao1 + 0); + data02 = *(ao1 + 1); + + b[ 0] = data01; + b[ 1] = data02; + ao1 += lda; + b += 2; } else { +#ifdef UNIT + data02 = *(ao1 + 1); b[ 0] = ONE; + b[ 1] = data02; +#else + data01 = *(ao1 + 0); + data02 = *(ao1 + 1); + + b[ 0] = data01; + b[ 1] = data02; +#endif + ao1 += 2; + b += 2; } -#endif - b[ 1] = *(ao1 + 1); - b += 2; } posY += 2; @@ -178,7 +190,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON } while (i > 0); } - // posY += 1; + posY += 1; } return 0; diff --git a/kernel/generic/trmm_utcopy_16.c b/kernel/generic/trmm_utcopy_16.c index 12642e7db..b83989f55 100644 --- a/kernel/generic/trmm_utcopy_16.c +++ b/kernel/generic/trmm_utcopy_16.c @@ -518,7 +518,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON i = (m & 15); if (i > 0) { if (X < posY) { - /* a01 += i; + a01 += i; a02 += i; a03 += i; a04 += i; @@ -533,7 +533,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON a13 += i; a14 += i; a15 += i; - a16 += i; */ + a16 += i; b += 16 * i; } else if (X > posY) { @@ -1130,14 +1130,14 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON i = (m & 7); if (i > 0) { if (X < posY) { - /* a01 += i; + a01 += i; a02 += i; a03 += i; a04 += i; a05 += i; a06 += i; a07 += i; - a08 += i; */ + a08 += i; b += 8 * i; } else if (X > posY) { @@ 
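The SMP hunks above all make the same change: the old sequence first asked num_cpu_avail(1) and then overrode the answer for zero strides or small n, while the new form decides once. A minimal self-contained sketch of the consolidated gate, with a stub standing in for num_cpu_avail() (the stub name and the 4-thread value are illustrative, not OpenBLAS API):

/* Stub for num_cpu_avail(); the real function queries the scheduler. */
static int num_cpu_avail_stub(void) { return 4; }

/* One predicate decides serial vs. threaded: a zero stride or a small
   vector (n <= 10000) stays on one thread, since the fork/join overhead
   would dominate; only larger problems ask how many CPUs are free. */
static int choose_nthreads(long n, long inc_x, long inc_y)
{
    if (inc_x == 0 || inc_y == 0 || n <= 10000)
        return 1;
    return num_cpu_avail_stub();
}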
-1156,13 +1156,13 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON b += 8; } - /* a02 += i * lda; + a02 += i * lda; a03 += i * lda; a04 += i * lda; a05 += i * lda; a06 += i * lda; a07 += i * lda; - a08 += i * lda; */ + a08 += i * lda; } else { #ifdef UNIT b[ 0] = ONE; @@ -1371,10 +1371,10 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON i = (m & 3); if (i > 0) { if (X < posY) { - /* a01 += i; + a01 += i; a02 += i; a03 += i; - a04 += i; */ + a04 += i; b += 4 * i; } else if (X > posY) { @@ -1387,9 +1387,9 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON a01 += lda; b += 4; } - /* a02 += lda; + a02 += lda; a03 += lda; - a04 += lda; */ + a04 += lda; } else { #ifdef UNIT @@ -1487,19 +1487,23 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON if (X < posY) { a01 ++; a02 ++; - } else { -#ifdef UNIT + b += 2; + } else if (X > posY) { -#endif b[ 0] = *(a01 + 0); -#ifdef UNIT + b[ 1] = *(a01 + 1); + a01 += lda; + b += 2; } else { +#ifdef UNIT b[ 0] = ONE; - } + b[ 1] = *(a01 + 1); +#else + b[ 0] = *(a01 + 0); + b[ 1] = *(a01 + 1); #endif - b[ 1] = *(a01 + 1); - } - b += 2; + b += 2; + } } posY += 2; } @@ -1518,25 +1522,28 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON if (i > 0) { do { if (X < posY) { - a01 ++; - } else { -#ifdef UNIT + a01 += 1; + b ++; + } else if (X > posY) { -#endif b[ 0] = *(a01 + 0); -#ifdef UNIT + a01 += lda; + b ++; } else { +#ifdef UNIT b[ 0] = ONE; - } +#else + b[ 0] = *(a01 + 0); #endif - a01 += lda; - } - b ++; - X ++; - i --; + a01 += lda; + b ++; + } + + X += 1; + i --; } while (i > 0); } - // posY += 1; + posY += 1; } return 0; diff --git a/kernel/generic/trmm_utcopy_2.c b/kernel/generic/trmm_utcopy_2.c index 75076c382..ae4a19e32 100644 --- a/kernel/generic/trmm_utcopy_2.c +++ b/kernel/generic/trmm_utcopy_2.c @@ -117,8 +117,8 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON if (m & 1) { if (X < posY) { - /* ao1 += 1; - ao2 += 1; */ + ao1 += 1; + ao2 += 1; b += 2; } else if (X > posY) { @@ -127,7 +127,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON b[ 0] = data01; b[ 1] = data02; - // ao1 += lda; + ao1 += lda; b += 2; } else { #ifdef UNIT @@ -139,7 +139,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON b[ 0] = data01; b[ 1] = ZERO; #endif - // ao1 += lda; + ao1 += lda; b += 2; } } @@ -161,18 +161,27 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON i = m; if (m > 0) { do { + if (X < posY) { + b += 1; + ao1 += 1; + } else + if (X > posY) { + data01 = *(ao1 + 0); + b[ 0] = data01; + b += 1; + ao1 += lda; + } else { #ifdef UNIT - if (X > posY) { + b[ 0] = ONE; +#else + data01 = *(ao1 + 0); + b[ 0] = data01; #endif - b[ 0] = *(ao1 + 0); -#ifdef UNIT - } else { - b[ 0] = ONE; - } -#endif - b ++; - ao1 += lda; - X ++; + b += 1; + ao1 += lda; + } + + X += 1; i --; } while (i > 0); } diff --git a/kernel/generic/trmm_utcopy_4.c b/kernel/generic/trmm_utcopy_4.c index e5844094e..441f7338b 100644 --- a/kernel/generic/trmm_utcopy_4.c +++ b/kernel/generic/trmm_utcopy_4.c @@ -201,18 +201,18 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON if (X < posY) { if (m & 2) { - /* ao1 += 2; + ao1 += 2; ao2 += 2; ao3 += 2; - ao4 += 2; */ + ao4 += 2; b += 8; } if (m & 1) { - /* ao1 += 1; + ao1 += 1; ao2 += 1; ao3 += 1; - ao4 += 1; */ + ao4 += 1; b 
+= 4; } @@ -238,7 +238,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON b[ 7] = data08; ao1 += 2 * lda; - // ao2 += 2 * lda; + ao2 += 2 * lda; b += 8; } @@ -253,7 +253,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON b[ 2] = data03; b[ 3] = data04; - // ao1 += lda; + ao1 += lda; b += 4; } @@ -401,7 +401,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON if (i) { if (X < posY) { - // ao1 += 2; + ao1 += 2; b += 2; } else if (X > posY) { @@ -411,7 +411,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON b[ 0] = data01; b[ 1] = data02; - // ao1 += lda; + ao1 += lda; b += 2; } else { #ifdef UNIT @@ -443,21 +443,26 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON do { if (X < posY) { + b += 1; ao1 += 1; - } else { -#ifdef UNIT + } else if (X > posY) { -#endif - b[ 0] = *(ao1 + 0); -#ifdef UNIT + data01 = *(ao1 + 0); + b[ 0] = data01; + ao1 += lda; + b += 1; } else { +#ifdef UNIT b[ 0] = ONE; - } +#else + data01 = *(ao1 + 0); + b[ 0] = data01; #endif - ao1 += lda; - } - b ++; - X ++; + ao1 += lda; + b += 1; + } + + X += 1; i --; } while (i > 0); } diff --git a/kernel/generic/trsm_ltcopy_4.c b/kernel/generic/trsm_ltcopy_4.c index 07bb137d4..12043eb33 100644 --- a/kernel/generic/trsm_ltcopy_4.c +++ b/kernel/generic/trsm_ltcopy_4.c @@ -206,7 +206,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT } a1 += 2 * lda; - // a2 += 2 * lda; + a2 += 2 * lda; b += 8; ii += 2; diff --git a/kernel/generic/ztrmm_ltcopy_2.c b/kernel/generic/ztrmm_ltcopy_2.c index 7969f4f3d..457890ceb 100644 --- a/kernel/generic/ztrmm_ltcopy_2.c +++ b/kernel/generic/ztrmm_ltcopy_2.c @@ -139,18 +139,48 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON } if (m & 1) { -#ifdef UNIT + + if (X > posY) { + ao1 += 2; + ao2 += 2; + b += 4; + + } else if (X < posY) { -#endif - b[ 0] = *(ao1 + 0); - b[ 1] = *(ao1 + 1); -#ifdef UNIT + data1 = *(ao1 + 0); + data2 = *(ao1 + 1); + data3 = *(ao1 + 2); + data4 = *(ao1 + 3); + + b[ 0] = data1; + b[ 1] = data2; + b[ 2] = data3; + b[ 3] = data4; + + ao1 += lda; + b += 4; } else { +#ifdef UNIT + data3 = *(ao1 + 2); + data4 = *(ao1 + 3); + b[ 0] = ONE; b[ 1] = ZERO; - } + b[ 2] = data3; + b[ 3] = data4; +#else + data1 = *(ao1 + 0); + data2 = *(ao1 + 1); + data3 = *(ao1 + 2); + data4 = *(ao1 + 3); + + b[ 0] = data1; + b[ 1] = data2; + b[ 2] = data3; + b[ 3] = data4; #endif - b += 4; + b += 4; + } } posY += 2; @@ -203,7 +233,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON } while (i > 0); } - // posY += 1; + posY += 1; } return 0; diff --git a/kernel/generic/ztrsm_utcopy_1.c b/kernel/generic/ztrsm_utcopy_1.c index 0e33a7d18..08f85e891 100644 --- a/kernel/generic/ztrsm_utcopy_1.c +++ b/kernel/generic/ztrsm_utcopy_1.c @@ -43,7 +43,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT BLASLONG i, ii, j, jj; - FLOAT data01 = 0.0, data02 = 0.0; + FLOAT data01, data02; FLOAT *a1; lda *= 2; diff --git a/kernel/generic/ztrsm_utcopy_2.c b/kernel/generic/ztrsm_utcopy_2.c index c34d741ee..387bb2532 100644 --- a/kernel/generic/ztrsm_utcopy_2.c +++ b/kernel/generic/ztrsm_utcopy_2.c @@ -43,8 +43,8 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG offset, FLOAT BLASLONG i, ii, j, jj; - FLOAT data01 = 0.0, data02 = 0.0, data03, data04; - FLOAT data05, data06, data07 = 0.0, data08 = 0.0; + 
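The trmm/trsm copy fixes above restore pointer advances that had been commented out and untangle the diagonal case from the #ifdef UNIT nesting. A sketch of the three cases every 1x1 tail element of these triangular copies must cover (illustrative helper, not the OpenBLAS routine itself):

/* Tail handling for one element of an upper-triangular transposed copy:
   the source pointer must advance even when nothing is stored, or the
   next column starts from the wrong place. */
static void copy_tail_1x1(double **ao1, double **b,
                          long X, long posY, long lda, int unit)
{
    if (X < posY) {            /* outside the triangle: skip, keep walking */
        *ao1 += 1;
        *b += 1;
    } else if (X > posY) {     /* strictly inside: copy the element */
        (*b)[0] = (*ao1)[0];
        *ao1 += lda;
        *b += 1;
    } else {                   /* diagonal: ONE for unit-diagonal matrices */
        (*b)[0] = unit ? 1.0 : (*ao1)[0];
        *ao1 += lda;
        *b += 1;
    }
}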
FLOAT data01, data02, data03, data04; + FLOAT data05, data06, data07, data08; FLOAT *a1, *a2; lda *= 2; diff --git a/kernel/mips/KERNEL.1004K b/kernel/mips/KERNEL.1004K new file mode 100644 index 000000000..67135356e --- /dev/null +++ b/kernel/mips/KERNEL.1004K @@ -0,0 +1 @@ +include $(KERNELDIR)/KERNEL.P5600 diff --git a/kernel/mips/KERNEL.P5600 b/kernel/mips/KERNEL.P5600 index 9a16704d5..1ab193069 100644 --- a/kernel/mips/KERNEL.P5600 +++ b/kernel/mips/KERNEL.P5600 @@ -38,8 +38,8 @@ ZASUMKERNEL = ../mips/zasum_msa.c else SASUMKERNEL = ../mips/asum.c DASUMKERNEL = ../mips/asum.c -CASUMKERNEL = ../mips/asum.c -ZASUMKERNEL = ../mips/asum.c +CASUMKERNEL = ../mips/zasum.c +ZASUMKERNEL = ../mips/zasum.c endif ifdef HAVE_MSA @@ -253,4 +253,4 @@ ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c -endif \ No newline at end of file +endif diff --git a/kernel/mips/dgemv_n_msa.c b/kernel/mips/dgemv_n_msa.c index 82c3a96cf..380b94d06 100644 --- a/kernel/mips/dgemv_n_msa.c +++ b/kernel/mips/dgemv_n_msa.c @@ -484,10 +484,10 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *A, FLOAT *pa0, *pa1, *pa2, *pa3, *pa4, *pa5, *pa6, *pa7; FLOAT temp, temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; v2f64 v_alpha; - v2f64 x0, x1, x2, x3, y0 = 0.0, y1 = 0.0, y2 = 0.0, y3 = 0.0; + v2f64 x0, x1, x2, x3, y0 = {0,0}, y1 = {0,0}, y2 = {0,0}, y3 = {0,0}; v2f64 t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15; v2f64 t16, t17, t18, t19, t20, t21, t22, t23, t24, t25, t26, t27, t28, t29; - v2f64 t30, t31, tp0 = 0.0, tp1 = 0.0, tp2 = 0.0, tp3 = 0.0, tp4 = 0.0, tp5 = 0.0, tp6 = 0.0, tp7 = 0.0; + v2f64 t30, t31, tp0 = {0,0}, tp1 = {0,0}, tp2 = {0,0}, tp3 = {0,0}, tp4 = {0,0}, tp5 = {0,0}, tp6 = {0,0}, tp7 = {0,0}; v_alpha = COPY_DOUBLE_TO_VECTOR(alpha); diff --git a/kernel/mips/dot.c b/kernel/mips/dot.c index de7f7167f..89c9f80f6 100644 --- a/kernel/mips/dot.c +++ b/kernel/mips/dot.c @@ -41,8 +41,11 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) while(i < n) { - - dot += y[iy] * x[ix] ; +#if defined(DSDOT) + dot += (double)y[iy] * (double)x[ix] ; +#else + dot += y[iy] * x[ix]; +#endif ix += inc_x ; iy += inc_y ; i++ ; diff --git a/kernel/mips/sgemv_n_msa.c b/kernel/mips/sgemv_n_msa.c index e1ecb5473..66e3adebf 100644 --- a/kernel/mips/sgemv_n_msa.c +++ b/kernel/mips/sgemv_n_msa.c @@ -423,9 +423,9 @@ int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT alpha, FLOAT *A, FLOAT *y_org = y; FLOAT *pa0, *pa1, *pa2, *pa3, *pa4, *pa5, *pa6, *pa7; FLOAT temp, temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; - v4f32 v_alpha, x0, x1, y0 = 0.0, y1 = 0.0; + v4f32 v_alpha, x0, x1, y0 = {0,0,0,0}, y1 = {0,0,0,0}; v4f32 t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15; - v4f32 tp0 = 0.0, tp1 = 0.0, tp2 = 0.0, tp3 = 0.0, tp4 = 0.0, tp5 = 0.0, tp6 = 0.0, tp7 = 0.0; + v4f32 tp0 = {0,0,0,0}, tp1 = {0,0,0,0}, tp2 = {0,0,0,0}, tp3 = {0,0,0,0}, tp4 = {0,0,0,0}, tp5 = {0,0,0,0}, tp6 = {0,0,0,0}, tp7 = {0,0,0,0}; v_alpha = COPY_FLOAT_TO_VECTOR(alpha); diff --git a/kernel/mips64/KERNEL b/kernel/mips64/KERNEL index 6afb2cf13..61da7445f 100644 --- a/kernel/mips64/KERNEL +++ b/kernel/mips64/KERNEL @@ -1,3 +1,13 @@ +CAXPYKERNEL = ../mips/zaxpy.c +ZAXPYKERNEL = ../mips/zaxpy.c +SROTKERNEL = ../mips/rot.c +DROTKERNEL = ../mips/rot.c +CROTKERNEL = ../mips/zrot.c +ZROTKERNEL = ../mips/zrot.c +CSWAPKERNEL = ../mips/zswap.c 
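The mips/dot.c change above promotes each operand to double before multiplying when DSDOT is defined, which is what the BLAS dsdot/sdsdot contract requires: single-precision inputs, double-precision accumulation. A self-contained demonstration of why the promotion matters:

#include <stdio.h>

/* Reference DSDOT: float inputs, double accumulation. */
static double dsdot_ref(long n, const float *x, long inc_x,
                        const float *y, long inc_y)
{
    double dot = 0.0;
    for (long i = 0, ix = 0, iy = 0; i < n; i++, ix += inc_x, iy += inc_y)
        dot += (double)x[ix] * (double)y[iy];   /* promote before multiply */
    return dot;
}

int main(void)
{
    float x[3] = { 1e8f, 1.0f, -1e8f }, y[3] = { 1.0f, 1.0f, 1.0f };
    /* float accumulation would lose the middle term (1e8f + 1.0f == 1e8f);
       double accumulation keeps it. */
    printf("%f\n", dsdot_ref(3, x, 1, y, 1));   /* prints 1.000000 */
    return 0;
}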
+ZSWAPKERNEL = ../mips/zswap.c + + ifndef SNRM2KERNEL SNRM2KERNEL = snrm2.S endif diff --git a/kernel/mips64/KERNEL.LOONGSON3A b/kernel/mips64/KERNEL.LOONGSON3A index 2d03ad7fa..0298faaad 100644 --- a/kernel/mips64/KERNEL.LOONGSON3A +++ b/kernel/mips64/KERNEL.LOONGSON3A @@ -63,6 +63,7 @@ ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c +DSDOTKERNEL = ../mips/dot.c diff --git a/kernel/mips64/axpy_loongson3a.S b/kernel/mips64/axpy_loongson3a.S index 5904bc580..765e5ebbb 100644 --- a/kernel/mips64/axpy_loongson3a.S +++ b/kernel/mips64/axpy_loongson3a.S @@ -270,6 +270,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .align 5 .L20: + beqz INCY, .L27 dsra I, N, 3 move YY, Y @@ -450,5 +451,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. j $31 NOP + .align 3 +.L27: + LD b1, 0 * SIZE(Y) +.L28: + daddiu N, N, -1 + LD a1, 0 * SIZE(X) + daddu X, X, INCX + bgtz N, .L28 + MADD b1, b1, ALPHA, a1 + + j .L999 + ST b1, 0 * SIZE(Y) + EPILOGUE diff --git a/kernel/mips64/daxpy_loongson3a_simd.S b/kernel/mips64/daxpy_loongson3a_simd.S index f54008bc2..23225770a 100644 --- a/kernel/mips64/daxpy_loongson3a_simd.S +++ b/kernel/mips64/daxpy_loongson3a_simd.S @@ -562,6 +562,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //INCX!=1 or INCY != 1 .L20: + beq INCY, $0, .L27 dsra I, N, 3 move YY, Y @@ -754,5 +755,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. j $31 NOP + .align 3 +.L27: + LD b1, 0 * SIZE(Y) +.L28: + daddiu N, N, -1 + LD a1, 0 * SIZE(X) + daddu X, X, INCX + bgtz N, .L28 + MADD b1, b1, ALPHA, a1 + + j .L999 + ST b1, 0 * SIZE(Y) + EPILOGUE diff --git a/kernel/mips64/dot.S b/kernel/mips64/dot.S index cb6fbe99c..a645495f4 100644 --- a/kernel/mips64/dot.S +++ b/kernel/mips64/dot.S @@ -103,35 +103,83 @@ .align 3 .L12: +#ifdef DSDOT +cvt.d.s a1, a1 +cvt.d.s b1, b1 +madd.d s1, s1, a1, b1 +#else MADD s1, s1, a1, b1 +#endif LD a1, 4 * SIZE(X) LD b1, 4 * SIZE(Y) +#ifdef DSDOT +cvt.d.s a2, a2 +cvt.d.s b2, b2 +madd.d s2, s2, a2, b2 +#else MADD s2, s2, a2, b2 +#endif LD a2, 5 * SIZE(X) LD b2, 5 * SIZE(Y) +#ifdef DSDOT +cvt.d.s a3, a3 +cvt.d.s b3, b3 +madd.d s1, s1, a3, b3 +#else MADD s1, s1, a3, b3 +#endif LD a3, 6 * SIZE(X) LD b3, 6 * SIZE(Y) +#ifdef DSDOT +cvt.d.s a4, a4 +cvt.d.s b4, b4 +madd.d s2, s2, a4, b4 +#else MADD s2, s2, a4, b4 +#endif LD a4, 7 * SIZE(X) LD b4, 7 * SIZE(Y) +#ifdef DSDOT +cvt.d.s a1, a1 +cvt.d.s b1, b1 +madd.d s1, s1, a1, b1 +#else MADD s1, s1, a1, b1 +#endif LD a1, 8 * SIZE(X) LD b1, 8 * SIZE(Y) +#ifdef DSDOT +cvt.d.s a2, a2 +cvt.d.s b2, b2 +madd.d s2, s2, a2, b2 +#else MADD s2, s2, a2, b2 +#endif LD a2, 9 * SIZE(X) LD b2, 9 * SIZE(Y) +#ifdef DSDOT +cvt.d.s a3, a3 +cvt.d.s b3, b3 +madd.d s1, s1, a3, b3 +#else MADD s1, s1, a3, b3 +#endif LD a3, 10 * SIZE(X) LD b3, 10 * SIZE(Y) +#ifdef DSDOT +cvt.d.s a4, a4 +cvt.d.s b4, b4 +madd.d s2, s2, a4, b4 +#else MADD s2, s2, a4, b4 +#endif LD a4, 11 * SIZE(X) LD b4, 11 * SIZE(Y) @@ -143,29 +191,77 @@ .align 3 .L13: +#ifdef DSDOT +cvt.d.s a1, a1 +cvt.d.s b1, b1 +madd.d s1, s1, a1, b1 +#else MADD s1, s1, a1, b1 +#endif LD a1, 4 * SIZE(X) LD b1, 4 * SIZE(Y) +#ifdef DSDOT +cvt.d.s a2, a2 +cvt.d.s b2, b2 +madd.d s2, s2, a2, b2 +#else MADD s2, s2, a2, b2 +#endif LD a2, 5 * SIZE(X) LD b2, 5 * SIZE(Y) +#ifdef DSDOT +cvt.d.s a3, a3 +cvt.d.s b3, b3 +madd.d s1, s1, a3, b3 +#else MADD s1, s1, a3, b3 +#endif LD a3, 6 * SIZE(X) LD b3, 6 * SIZE(Y) +#ifdef DSDOT 
+cvt.d.s a4, a4 +cvt.d.s b4, b4 +madd.d s2, s2, a4, b4 +#else MADD s2, s2, a4, b4 +#endif LD a4, 7 * SIZE(X) LD b4, 7 * SIZE(Y) +#ifdef DSDOT +cvt.d.s a1, a1 +cvt.d.s b1, b1 +madd.d s1, s1, a1, b1 +#else MADD s1, s1, a1, b1 +#endif daddiu X, X, 8 * SIZE +#ifdef DSDOT +cvt.d.s a2, a2 +cvt.d.s b2, b2 +madd.d s2, s2, a2, b2 +#else MADD s2, s2, a2, b2 +#endif daddiu Y, Y, 8 * SIZE +#ifdef DSDOT +cvt.d.s a3, a3 +cvt.d.s b3, b3 +madd.d s1, s1, a3, b3 +#else MADD s1, s1, a3, b3 +#endif +#ifdef DSDOT +cvt.d.s a4, a4 +cvt.d.s b4, b4 +madd.d s2, s2, a4, b4 +#else MADD s2, s2, a4, b4 +#endif .align 3 .L15: @@ -179,8 +275,13 @@ LD a1, 0 * SIZE(X) LD b1, 0 * SIZE(Y) +#ifdef DSDOT +cvt.d.s a1, a1 +cvt.d.s b1, b1 +madd.d s1, s1, a1, b1 +#else MADD s1, s1, a1, b1 - +#endif daddiu I, I, -1 daddiu X, X, SIZE @@ -225,50 +326,85 @@ LD b1, 0 * SIZE(Y) dadd Y, Y, INCY +#ifdef DSDOT +cvt.d.s a1, a1 +cvt.d.s b1, b1 +madd.d s1, s1, a1, b1 +#else MADD s1, s1, a1, b1 - +#endif LD a1, 0 * SIZE(X) dadd X, X, INCX LD b1, 0 * SIZE(Y) dadd Y, Y, INCY +#ifdef DSDOT +cvt.d.s a1, a1 +cvt.d.s b1, b1 +madd.d s2, s2, a1, b1 +#else MADD s2, s2, a1, b1 - +#endif LD a1, 0 * SIZE(X) dadd X, X, INCX LD b1, 0 * SIZE(Y) dadd Y, Y, INCY +#ifdef DSDOT +cvt.d.s a1, a1 +cvt.d.s b1, b1 +madd.d s1, s1, a1, b1 +#else MADD s1, s1, a1, b1 - +#endif LD a1, 0 * SIZE(X) dadd X, X, INCX LD b1, 0 * SIZE(Y) dadd Y, Y, INCY +#ifdef DSDOT +cvt.d.s a1, a1 +cvt.d.s b1, b1 +madd.d s2, s2, a1, b1 +#else MADD s2, s2, a1, b1 - +#endif LD a1, 0 * SIZE(X) dadd X, X, INCX LD b1, 0 * SIZE(Y) dadd Y, Y, INCY +#ifdef DSDOT +cvt.d.s a1, a1 +cvt.d.s b1, b1 +madd.d s1, s1, a1, b1 +#else MADD s1, s1, a1, b1 - +#endif LD a1, 0 * SIZE(X) dadd X, X, INCX LD b1, 0 * SIZE(Y) dadd Y, Y, INCY +#ifdef DSDOT +cvt.d.s a1, a1 +cvt.d.s b1, b1 +madd.d s2, s2, a1, b1 +#else MADD s2, s2, a1, b1 - +#endif LD a1, 0 * SIZE(X) dadd X, X, INCX LD b1, 0 * SIZE(Y) dadd Y, Y, INCY +#ifdef DSDOT +cvt.d.s a1, a1 +cvt.d.s b1, b1 +madd.d s1, s1, a1, b1 +#else MADD s1, s1, a1, b1 - +#endif LD a1, 0 * SIZE(X) dadd X, X, INCX LD b1, 0 * SIZE(Y) @@ -277,7 +413,13 @@ daddiu I, I, -1 bgtz I, .L23 +#ifdef DSDOT +cvt.d.s a1, a1 +cvt.d.s b1, b1 +madd.d s2, s2, a1, b1 +#else MADD s2, s2, a1, b1 +#endif .align 3 .L25: @@ -296,13 +438,20 @@ daddiu I, I, -1 bgtz I, .L26 +#ifdef DSDOT +cvt.d.s a1, a1 +cvt.d.s b1, b1 +madd.d s1, s1, a1, b1 +#else MADD s1, s1, a1, b1 +#endif .align 3 .L999: - ADD s1, s1, s2 #ifdef DSDOT - cvt.d.s s1, s1 + add.d s1, s1, s2 +#else + ADD s1, s1, s2 #endif j $31 NOP diff --git a/kernel/mips64/sgemm_kernel_8x4_ps.S b/kernel/mips64/sgemm_kernel_8x4_ps.S index 37b20a880..82703ff5d 100644 --- a/kernel/mips64/sgemm_kernel_8x4_ps.S +++ b/kernel/mips64/sgemm_kernel_8x4_ps.S @@ -146,11 +146,11 @@ sd $21, 40($sp) sd $22, 48($sp) - ST $f24, 56($sp) - ST $f25, 64($sp) - ST $f26, 72($sp) - ST $f27, 80($sp) - ST $f28, 88($sp) + sdc1 $f24, 56($sp) + sdc1 $f25, 64($sp) + sdc1 $f26, 72($sp) + sdc1 $f27, 80($sp) + sdc1 $f28, 88($sp) #if defined(TRMMKERNEL) sd $23, 96($sp) @@ -161,10 +161,10 @@ #endif #ifndef __64BIT__ - ST $f20,120($sp) - ST $f21,128($sp) - ST $f22,136($sp) - ST $f23,144($sp) + sdc1 $f20,120($sp) + sdc1 $f21,128($sp) + sdc1 $f22,136($sp) + sdc1 $f23,144($sp) #endif .align 4 @@ -7766,11 +7766,11 @@ ld $21, 40($sp) ld $22, 48($sp) - LD $f24, 56($sp) - LD $f25, 64($sp) - LD $f26, 72($sp) - LD $f27, 80($sp) - LD $f28, 88($sp) + ldc1 $f24, 56($sp) + ldc1 $f25, 64($sp) + ldc1 $f26, 72($sp) + ldc1 $f27, 80($sp) + ldc1 $f28, 88($sp) #if defined(TRMMKERNEL) ld $23, 96($sp) @@ -7779,10 
+7779,10 @@ #endif #ifndef __64BIT__ - LD $f20,120($sp) - LD $f21,128($sp) - LD $f22,136($sp) - LD $f23,144($sp) + ldc1 $f20,120($sp) + ldc1 $f21,128($sp) + ldc1 $f22,136($sp) + ldc1 $f23,144($sp) #endif daddiu $sp,$sp,STACKSIZE diff --git a/kernel/power/KERNEL.POWER6 b/kernel/power/KERNEL.POWER6 index 344b205fe..e6d2c9a51 100644 --- a/kernel/power/KERNEL.POWER6 +++ b/kernel/power/KERNEL.POWER6 @@ -54,3 +54,6 @@ ZTRSMKERNEL_LN = ztrsm_kernel_power6_LN.S ZTRSMKERNEL_LT = ztrsm_kernel_power6_LT.S ZTRSMKERNEL_RN = ztrsm_kernel_power6_LT.S ZTRSMKERNEL_RT = ztrsm_kernel_power6_RT.S + +CROTKERNEL = ../arm/zrot.c +ZROTKERNEL = ../arm/zrot.c diff --git a/kernel/setparam-ref.c b/kernel/setparam-ref.c index b6c5b54de..6d4028b0b 100644 --- a/kernel/setparam-ref.c +++ b/kernel/setparam-ref.c @@ -294,6 +294,8 @@ gotoblas_t TABLE_NAME = { chemm_outcopyTS, chemm_oltcopyTS, 0, 0, 0, + +#if defined(USE_GEMM3M) #ifdef CGEMM3M_DEFAULT_UNROLL_M CGEMM3M_DEFAULT_UNROLL_M, CGEMM3M_DEFAULT_UNROLL_N, MAX(CGEMM3M_DEFAULT_UNROLL_M, CGEMM3M_DEFAULT_UNROLL_N), #else @@ -324,6 +326,33 @@ gotoblas_t TABLE_NAME = { chemm3m_oucopybTS, chemm3m_olcopybTS, chemm3m_oucopyrTS, chemm3m_olcopyrTS, chemm3m_oucopyiTS, chemm3m_olcopyiTS, +#else + 0, 0, 0, + + NULL, + + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + + NULL, NULL, + NULL, NULL, + NULL, NULL, + + NULL, NULL, + NULL, NULL, + NULL, NULL, +#endif #ifndef NO_LAPACK cneg_tcopyTS, claswp_ncopyTS, @@ -400,6 +429,7 @@ gotoblas_t TABLE_NAME = { zhemm_outcopyTS, zhemm_oltcopyTS, 0, 0, 0, +#if defined(USE_GEMM3M) #ifdef ZGEMM3M_DEFAULT_UNROLL_M ZGEMM3M_DEFAULT_UNROLL_M, ZGEMM3M_DEFAULT_UNROLL_N, MAX(ZGEMM3M_DEFAULT_UNROLL_M, ZGEMM3M_DEFAULT_UNROLL_N), #else @@ -430,6 +460,33 @@ gotoblas_t TABLE_NAME = { zhemm3m_oucopybTS, zhemm3m_olcopybTS, zhemm3m_oucopyrTS, zhemm3m_olcopyrTS, zhemm3m_oucopyiTS, zhemm3m_olcopyiTS, +#else + 0, 0, 0, + + NULL, + + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + + NULL, NULL, + NULL, NULL, + NULL, NULL, + + NULL, NULL, + NULL, NULL, + NULL, NULL, +#endif #ifndef NO_LAPACK zneg_tcopyTS, zlaswp_ncopyTS, @@ -503,6 +560,7 @@ gotoblas_t TABLE_NAME = { xhemm_outcopyTS, xhemm_oltcopyTS, 0, 0, 0, +#if defined(USE_GEMM3M) QGEMM_DEFAULT_UNROLL_M, QGEMM_DEFAULT_UNROLL_N, MAX(QGEMM_DEFAULT_UNROLL_M, QGEMM_DEFAULT_UNROLL_N), xgemm3m_kernelTS, @@ -528,6 +586,33 @@ gotoblas_t TABLE_NAME = { xhemm3m_oucopybTS, xhemm3m_olcopybTS, xhemm3m_oucopyrTS, xhemm3m_olcopyrTS, xhemm3m_oucopyiTS, xhemm3m_olcopyiTS, +#else + 0, 0, 0, + + NULL, + + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + + NULL, NULL, + NULL, NULL, + NULL, NULL, + + NULL, NULL, + NULL, NULL, + NULL, NULL, +#endif #ifndef NO_LAPACK xneg_tcopyTS, xlaswp_ncopyTS, @@ -561,6 +646,78 @@ gotoblas_t TABLE_NAME = { }; +#if defined(ARCH_ARM64) +static void init_parameter(void) { + TABLE_NAME.sgemm_p = SGEMM_DEFAULT_P; + TABLE_NAME.dgemm_p = DGEMM_DEFAULT_P; + TABLE_NAME.cgemm_p = CGEMM_DEFAULT_P; + TABLE_NAME.zgemm_p = ZGEMM_DEFAULT_P; + + TABLE_NAME.sgemm_q = SGEMM_DEFAULT_Q; + TABLE_NAME.dgemm_q = DGEMM_DEFAULT_Q; + TABLE_NAME.cgemm_q = CGEMM_DEFAULT_Q; + TABLE_NAME.zgemm_q = ZGEMM_DEFAULT_Q; + + TABLE_NAME.sgemm_r = SGEMM_DEFAULT_R; + 
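The setparam-ref.c hunks above fill the GEMM3M slots with 0/NULL instead of omitting them when USE_GEMM3M is off: the gotoblas_t dispatch table has a fixed layout, so every field after the 3M block must keep its offset in both configurations. A miniature of the idea (the struct and function names here are illustrative, not the real table):

#include <stddef.h>

typedef void (*kernel_fn)(void);

typedef struct {
    kernel_fn cgemm_kernel;
    kernel_fn cgemm3m_kernel;  /* slot exists in every configuration */
    kernel_fn cneg_tcopy;      /* so this offset never moves */
} dispatch_table;

static void cgemm_impl(void) {}
static void cneg_tcopy_impl(void) {}
#if defined(USE_GEMM3M)
static void cgemm3m_impl(void) {}
#endif

static dispatch_table table = {
    cgemm_impl,
#if defined(USE_GEMM3M)
    cgemm3m_impl,
#else
    NULL,                      /* slot kept, pointer zeroed */
#endif
    cneg_tcopy_impl,
};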
TABLE_NAME.dgemm_r = DGEMM_DEFAULT_R; + TABLE_NAME.cgemm_r = CGEMM_DEFAULT_R; + TABLE_NAME.zgemm_r = ZGEMM_DEFAULT_R; + +#ifdef EXPRECISION + TABLE_NAME.qgemm_p = QGEMM_DEFAULT_P; + TABLE_NAME.xgemm_p = XGEMM_DEFAULT_P; + TABLE_NAME.qgemm_q = QGEMM_DEFAULT_Q; + TABLE_NAME.xgemm_q = XGEMM_DEFAULT_Q; + TABLE_NAME.qgemm_r = QGEMM_DEFAULT_R; + TABLE_NAME.xgemm_r = XGEMM_DEFAULT_R; +#endif + +#if defined(USE_GEMM3M) +#ifdef CGEMM3M_DEFAULT_P + TABLE_NAME.cgemm3m_p = CGEMM3M_DEFAULT_P; +#else + TABLE_NAME.cgemm3m_p = TABLE_NAME.sgemm_p; +#endif + +#ifdef ZGEMM3M_DEFAULT_P + TABLE_NAME.zgemm3m_p = ZGEMM3M_DEFAULT_P; +#else + TABLE_NAME.zgemm3m_p = TABLE_NAME.dgemm_p; +#endif + +#ifdef CGEMM3M_DEFAULT_Q + TABLE_NAME.cgemm3m_q = CGEMM3M_DEFAULT_Q; +#else + TABLE_NAME.cgemm3m_q = TABLE_NAME.sgemm_q; +#endif + +#ifdef ZGEMM3M_DEFAULT_Q + TABLE_NAME.zgemm3m_q = ZGEMM3M_DEFAULT_Q; +#else + TABLE_NAME.zgemm3m_q = TABLE_NAME.dgemm_q; +#endif + +#ifdef CGEMM3M_DEFAULT_R + TABLE_NAME.cgemm3m_r = CGEMM3M_DEFAULT_R; +#else + TABLE_NAME.cgemm3m_r = TABLE_NAME.sgemm_r; +#endif + +#ifdef ZGEMM3M_DEFAULT_R + TABLE_NAME.zgemm3m_r = ZGEMM3M_DEFAULT_R; +#else + TABLE_NAME.zgemm3m_r = TABLE_NAME.dgemm_r; +#endif + +#ifdef EXPRECISION + TABLE_NAME.xgemm3m_p = TABLE_NAME.qgemm_p; + TABLE_NAME.xgemm3m_q = TABLE_NAME.qgemm_q; + TABLE_NAME.xgemm3m_r = TABLE_NAME.qgemm_r; +#endif +#endif + +} +#else // defined(ARCH_ARM64) #ifdef ARCH_X86 static int get_l2_size_old(void){ int i, eax, ebx, ecx, edx, cpuid_level; @@ -647,7 +804,9 @@ static int get_l2_size_old(void){ return 6144; } } - return 0; +// return 0; +fprintf (stderr,"OpenBLAS WARNING - could not determine the L2 cache size on this system, assuming 256k\n"); +return 256; } #endif @@ -660,6 +819,10 @@ static __inline__ int get_l2_size(void){ l2 = BITMASK(ecx, 16, 0xffff); #ifndef ARCH_X86 + if (l2 <= 0) { + fprintf (stderr,"OpenBLAS WARNING - could not determine the L2 cache size on this system, assuming 256k\n"); + return 256; + } return l2; #else @@ -871,6 +1034,22 @@ static void init_parameter(void) { #endif #endif +#ifdef SKYLAKEX + +#ifdef DEBUG + fprintf(stderr, "SkylakeX\n"); +#endif + + TABLE_NAME.sgemm_p = SGEMM_DEFAULT_P; + TABLE_NAME.dgemm_p = DGEMM_DEFAULT_P; + TABLE_NAME.cgemm_p = CGEMM_DEFAULT_P; + TABLE_NAME.zgemm_p = ZGEMM_DEFAULT_P; +#ifdef EXPRECISION + TABLE_NAME.qgemm_p = QGEMM_DEFAULT_P; + TABLE_NAME.xgemm_p = XGEMM_DEFAULT_P; +#endif +#endif + #ifdef OPTERON @@ -1124,3 +1303,4 @@ static void init_parameter(void) { } +#endif //defined(ARCH_ARM64) diff --git a/kernel/x86/KERNEL b/kernel/x86/KERNEL index 39be2ef80..83b51db13 100644 --- a/kernel/x86/KERNEL +++ b/kernel/x86/KERNEL @@ -169,7 +169,7 @@ ifndef ZDOTKERNEL ZDOTKERNEL = ../arm/zdot.c endif -DSDOTKERNEL = ../arm/dot.c +DSDOTKERNEL = ../generic/dot.c # Bug in znrm2 assembler kernel ifndef ZNRM2KERNEL diff --git a/kernel/x86/swap.S b/kernel/x86/swap.S index 54b00b33e..e30c27898 100644 --- a/kernel/x86/swap.S +++ b/kernel/x86/swap.S @@ -138,6 +138,14 @@ /* INCX != 1 or INCY != 1 */ .L14: + cmpl $0, %ebx + jne .L141 + cmpl $0, %ecx + jne .L141 +/* INCX == 0 and INCY == 0 */ + jmp .L27 + +.L141: movl %edx, %eax sarl $2, %eax jle .L28 diff --git a/kernel/x86/trsm_kernel_LN_2x4_penryn.S b/kernel/x86/trsm_kernel_LN_2x4_penryn.S index 0b475afa2..34653d400 100644 --- a/kernel/x86/trsm_kernel_LN_2x4_penryn.S +++ b/kernel/x86/trsm_kernel_LN_2x4_penryn.S @@ -62,7 +62,7 @@ #define PREFETCHSIZE (8 * 21 + 4) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) +#if defined(NEHALEM) || 
defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) #define PREFETCH prefetcht0 #define PREFETCHSIZE (8 * 21 + 4) #endif diff --git a/kernel/x86/trsm_kernel_LN_4x4_penryn.S b/kernel/x86/trsm_kernel_LN_4x4_penryn.S index e98854f34..492f34344 100644 --- a/kernel/x86/trsm_kernel_LN_4x4_penryn.S +++ b/kernel/x86/trsm_kernel_LN_4x4_penryn.S @@ -62,7 +62,7 @@ #define PREFETCHSIZE (8 * 21 + 4) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) #define PREFETCH prefetcht0 #define PREFETCHSIZE (8 * 21 + 4) #endif diff --git a/kernel/x86/trsm_kernel_LT_2x4_penryn.S b/kernel/x86/trsm_kernel_LT_2x4_penryn.S index 086852cfc..6840c54ad 100644 --- a/kernel/x86/trsm_kernel_LT_2x4_penryn.S +++ b/kernel/x86/trsm_kernel_LT_2x4_penryn.S @@ -62,7 +62,7 @@ #define PREFETCHSIZE (8 * 21 + 4) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) #define PREFETCH prefetcht0 #define PREFETCHSIZE (8 * 21 + 4) #endif diff --git a/kernel/x86/trsm_kernel_LT_4x4_penryn.S b/kernel/x86/trsm_kernel_LT_4x4_penryn.S index 2dd8ad08b..e2f731fca 100644 --- a/kernel/x86/trsm_kernel_LT_4x4_penryn.S +++ b/kernel/x86/trsm_kernel_LT_4x4_penryn.S @@ -62,7 +62,7 @@ #define PREFETCHSIZE (8 * 21 + 4) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) #define PREFETCH prefetcht0 #define PREFETCHSIZE (8 * 21 + 4) #endif diff --git a/kernel/x86/trsm_kernel_RT_2x4_penryn.S b/kernel/x86/trsm_kernel_RT_2x4_penryn.S index 154276f6a..11825429e 100644 --- a/kernel/x86/trsm_kernel_RT_2x4_penryn.S +++ b/kernel/x86/trsm_kernel_RT_2x4_penryn.S @@ -62,7 +62,7 @@ #define PREFETCHSIZE (8 * 21 + 4) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) #define PREFETCH prefetcht0 #define PREFETCHSIZE (8 * 21 + 4) #endif diff --git a/kernel/x86/trsm_kernel_RT_4x4_penryn.S b/kernel/x86/trsm_kernel_RT_4x4_penryn.S index acdcd6e22..4c054f399 100644 --- a/kernel/x86/trsm_kernel_RT_4x4_penryn.S +++ b/kernel/x86/trsm_kernel_RT_4x4_penryn.S @@ -62,7 +62,7 @@ #define PREFETCHSIZE (8 * 21 + 4) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) #define PREFETCH prefetcht0 #define PREFETCHSIZE (8 * 21 + 4) #endif diff --git a/kernel/x86/ztrsm_kernel_LN_2x2_penryn.S b/kernel/x86/ztrsm_kernel_LN_2x2_penryn.S index da561b583..e67496736 100644 --- a/kernel/x86/ztrsm_kernel_LN_2x2_penryn.S +++ b/kernel/x86/ztrsm_kernel_LN_2x2_penryn.S @@ -61,7 +61,7 @@ #define PREFETCHSIZE 84 #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) #define PREFETCH prefetcht1 #define PREFETCHSIZE 84 #endif diff --git a/kernel/x86/ztrsm_kernel_LT_1x2_penryn.S b/kernel/x86/ztrsm_kernel_LT_1x2_penryn.S index a11b0286a..498057697 100644 --- a/kernel/x86/ztrsm_kernel_LT_1x2_penryn.S +++ b/kernel/x86/ztrsm_kernel_LT_1x2_penryn.S @@ -63,7 +63,7 @@ #define PREFETCHSIZE 84 #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) #define PREFETCH 
prefetcht1 #define PREFETCHSIZE 84 #endif diff --git a/kernel/x86/ztrsm_kernel_LT_2x2_penryn.S b/kernel/x86/ztrsm_kernel_LT_2x2_penryn.S index 787ab5982..f3072983d 100644 --- a/kernel/x86/ztrsm_kernel_LT_2x2_penryn.S +++ b/kernel/x86/ztrsm_kernel_LT_2x2_penryn.S @@ -61,7 +61,7 @@ #define PREFETCHSIZE 84 #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) #define PREFETCH prefetcht1 #define PREFETCHSIZE 84 #endif diff --git a/kernel/x86/ztrsm_kernel_RT_1x2_penryn.S b/kernel/x86/ztrsm_kernel_RT_1x2_penryn.S index 9a3b0cbd7..879ae9c38 100644 --- a/kernel/x86/ztrsm_kernel_RT_1x2_penryn.S +++ b/kernel/x86/ztrsm_kernel_RT_1x2_penryn.S @@ -63,7 +63,7 @@ #define PREFETCHSIZE 84 #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) #define PREFETCH prefetcht1 #define PREFETCHSIZE 84 #endif diff --git a/kernel/x86/ztrsm_kernel_RT_2x2_penryn.S b/kernel/x86/ztrsm_kernel_RT_2x2_penryn.S index bd7a78b5a..6c308197b 100644 --- a/kernel/x86/ztrsm_kernel_RT_2x2_penryn.S +++ b/kernel/x86/ztrsm_kernel_RT_2x2_penryn.S @@ -61,7 +61,7 @@ #define PREFETCHSIZE 84 #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined (SKYLAKEX) #define PREFETCH prefetcht1 #define PREFETCHSIZE 84 #endif diff --git a/kernel/x86_64/KERNEL.HASWELL b/kernel/x86_64/KERNEL.HASWELL index 848de38df..f98728a41 100644 --- a/kernel/x86_64/KERNEL.HASWELL +++ b/kernel/x86_64/KERNEL.HASWELL @@ -33,9 +33,10 @@ ZAXPYKERNEL = zaxpy.c STRMMKERNEL = sgemm_kernel_16x4_haswell.S SGEMMKERNEL = sgemm_kernel_16x4_haswell.S +SGEMM_BETA = sgemm_beta_skylakex.c SGEMMINCOPY = ../generic/gemm_ncopy_16.c SGEMMITCOPY = ../generic/gemm_tcopy_16.c -SGEMMONCOPY = ../generic/gemm_ncopy_4.c +SGEMMONCOPY = sgemm_ncopy_4_skylakex.c SGEMMOTCOPY = ../generic/gemm_tcopy_4.c SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX) SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX) @@ -44,9 +45,10 @@ SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX) DTRMMKERNEL = dtrmm_kernel_4x8_haswell.c DGEMMKERNEL = dgemm_kernel_4x8_haswell.S +DGEMM_BETA = dgemm_beta_skylakex.c DGEMMINCOPY = ../generic/gemm_ncopy_4.c DGEMMITCOPY = ../generic/gemm_tcopy_4.c -DGEMMONCOPY = ../generic/gemm_ncopy_8.c +DGEMMONCOPY = dgemm_ncopy_8_skylakex.c DGEMMOTCOPY = ../generic/gemm_tcopy_8.c DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX) DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX) diff --git a/kernel/x86_64/KERNEL.SKYLAKEX b/kernel/x86_64/KERNEL.SKYLAKEX new file mode 100644 index 000000000..acc6356d6 --- /dev/null +++ b/kernel/x86_64/KERNEL.SKYLAKEX @@ -0,0 +1,18 @@ +include $(KERNELDIR)/KERNEL.HASWELL + +SGEMMKERNEL = sgemm_kernel_16x4_skylakex.c + +SGEMMINCOPY = ../generic/gemm_ncopy_16.c +SGEMMITCOPY = sgemm_tcopy_16_skylakex.c +SGEMMONCOPY = sgemm_ncopy_4_skylakex.c +SGEMMOTCOPY = ../generic/gemm_tcopy_4.c + +DGEMMKERNEL = dgemm_kernel_4x8_skylakex.c + +DGEMMINCOPY = dgemm_ncopy_8_skylakex.c +DGEMMITCOPY = dgemm_tcopy_8_skylakex.c +DGEMMONCOPY = dgemm_ncopy_8_skylakex.c +DGEMMOTCOPY = dgemm_tcopy_8_skylakex.c + +SGEMM_BETA = sgemm_beta_skylakex.c +DGEMM_BETA = dgemm_beta_skylakex.c diff --git a/kernel/x86_64/caxpy.c b/kernel/x86_64/caxpy.c index b1ec19bd3..586d05ac2 100644 --- a/kernel/x86_64/caxpy.c +++ b/kernel/x86_64/caxpy.c @@ -33,7 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
POSSIBILITY OF SUCH DAMAGE. #include "caxpy_microk_steamroller-2.c" #elif defined(BULLDOZER) #include "caxpy_microk_bulldozer-2.c" -#elif defined(HASWELL) || defined(ZEN) +#elif defined(HASWELL) || defined(ZEN) || defined(SKYLAKEX) #include "caxpy_microk_haswell-2.c" #elif defined(SANDYBRIDGE) #include "caxpy_microk_sandy-2.c" diff --git a/kernel/x86_64/caxpy_microk_haswell-2.c b/kernel/x86_64/caxpy_microk_haswell-2.c index 7a9fc1b95..00e2e6a42 100644 --- a/kernel/x86_64/caxpy_microk_haswell-2.c +++ b/kernel/x86_64/caxpy_microk_haswell-2.c @@ -50,11 +50,11 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "vmulps (%5), %%ymm0 , %%ymm0 \n\t" #endif - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%2,%0,4), %%ymm5 \n\t" // 4 complex values from x - ".align 2 \n\t" + ".p2align 1 \n\t" "vmovups 32(%2,%0,4), %%ymm7 \n\t" // 4 complex values from x "vmovups 64(%2,%0,4), %%ymm9 \n\t" // 4 complex values from x "vmovups 96(%2,%0,4), %%ymm11 \n\t" // 4 complex values from x @@ -70,7 +70,7 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "vpermilps $0xb1 , %%ymm11, %%ymm10 \n\t" // exchange real and imag part "vfmadd213ps (%3,%0,4), %%ymm0 , %%ymm5 \n\t" - ".align 2 \n\t" + ".p2align 1 \n\t" "vfmadd213ps 32(%3,%0,4), %%ymm0 , %%ymm7 \n\t" "vfmadd213ps 64(%3,%0,4), %%ymm0 , %%ymm9 \n\t" "vfmadd213ps 96(%3,%0,4), %%ymm0 , %%ymm11 \n\t" @@ -96,7 +96,7 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "vfmadd231ps %%ymm1 , %%ymm10, %%ymm15 \n\t" "vmovups %%ymm5 , (%3,%0,4) \n\t" - ".align 2 \n\t" + ".p2align 1 \n\t" "vmovups %%ymm7 , 32(%3,%0,4) \n\t" "vmovups %%ymm9 , 64(%3,%0,4) \n\t" "vmovups %%ymm11, 96(%3,%0,4) \n\t" diff --git a/kernel/x86_64/caxpy_microk_sandy-2.c b/kernel/x86_64/caxpy_microk_sandy-2.c index dbfce208f..a798fd977 100644 --- a/kernel/x86_64/caxpy_microk_sandy-2.c +++ b/kernel/x86_64/caxpy_microk_sandy-2.c @@ -50,11 +50,11 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "vmulps (%5), %%ymm0 , %%ymm0 \n\t" #endif - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%2,%0,4), %%ymm5 \n\t" // 4 complex values from x - ".align 2 \n\t" + ".p2align 1 \n\t" "vmovups 32(%2,%0,4), %%ymm7 \n\t" // 4 complex values from x "vmovups 64(%2,%0,4), %%ymm9 \n\t" // 4 complex values from x "vmovups 96(%2,%0,4), %%ymm11 \n\t" // 4 complex values from x @@ -85,7 +85,7 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "vaddps %%ymm10, %%ymm11, %%ymm11 \n\t" "vmovups %%ymm5 , (%3,%0,4) \n\t" - ".align 2 \n\t" + ".p2align 1 \n\t" "vmovups %%ymm7 , 32(%3,%0,4) \n\t" "vmovups %%ymm9 , 64(%3,%0,4) \n\t" "vmovups %%ymm11, 96(%3,%0,4) \n\t" diff --git a/kernel/x86_64/cdot.c b/kernel/x86_64/cdot.c index 5f01f7eeb..93fca0a0d 100644 --- a/kernel/x86_64/cdot.c +++ b/kernel/x86_64/cdot.c @@ -34,7 +34,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "cdot_microk_bulldozer-2.c" #elif defined(STEAMROLLER) || defined(PILEDRIVER) || defined(EXCAVATOR) #include "cdot_microk_steamroller-2.c" -#elif defined(HASWELL) || defined(ZEN) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #include "cdot_microk_haswell-2.c" #elif defined(SANDYBRIDGE) #include "cdot_microk_sandy-2.c" diff --git a/kernel/x86_64/cdot_microk_haswell-2.c b/kernel/x86_64/cdot_microk_haswell-2.c index fc76b138a..fe195a63b 100644 --- a/kernel/x86_64/cdot_microk_haswell-2.c +++ b/kernel/x86_64/cdot_microk_haswell-2.c @@ -46,7 +46,7 @@ static void cdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) "vxorps %%ymm6, %%ymm6, %%ymm6 \n\t" "vxorps %%ymm7, %%ymm7, %%ymm7 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%2,%0,4), %%ymm8 \n\t" // 2 * x "vmovups 32(%2,%0,4), %%ymm9 \n\t" // 2 * x diff --git a/kernel/x86_64/cdot_microk_sandy-2.c b/kernel/x86_64/cdot_microk_sandy-2.c index 22cd79e2e..01816917d 100644 --- a/kernel/x86_64/cdot_microk_sandy-2.c +++ b/kernel/x86_64/cdot_microk_sandy-2.c @@ -46,7 +46,7 @@ static void cdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) "vxorps %%ymm6, %%ymm6, %%ymm6 \n\t" "vxorps %%ymm7, %%ymm7, %%ymm7 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%2,%0,4), %%ymm8 \n\t" // 2 * x "vmovups 32(%2,%0,4), %%ymm9 \n\t" // 2 * x diff --git a/kernel/x86_64/cgemv_n_4.c b/kernel/x86_64/cgemv_n_4.c index 770c955b2..d81766cd4 100644 --- a/kernel/x86_64/cgemv_n_4.c +++ b/kernel/x86_64/cgemv_n_4.c @@ -29,7 +29,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include #include "common.h" -#if defined(HASWELL) || defined(ZEN) +#if defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #include "cgemv_n_microk_haswell-4.c" #elif defined(BULLDOZER) || defined(PILEDRIVER) || defined(STEAMROLLER) || defined(EXCAVATOR) #include "cgemv_n_microk_bulldozer-4.c" diff --git a/kernel/x86_64/cgemv_t_4.c b/kernel/x86_64/cgemv_t_4.c index d75e58fdd..6bdea6787 100644 --- a/kernel/x86_64/cgemv_t_4.c +++ b/kernel/x86_64/cgemv_t_4.c @@ -28,7 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" -#if defined(HASWELL) || defined(ZEN) +#if defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #include "cgemv_t_microk_haswell-4.c" #elif defined(BULLDOZER) || defined(PILEDRIVER) || defined(STEAMROLLER) || defined(EXCAVATOR) #include "cgemv_t_microk_bulldozer-4.c" diff --git a/kernel/x86_64/cscal.c b/kernel/x86_64/cscal.c index 9b9179da0..72af99809 100644 --- a/kernel/x86_64/cscal.c +++ b/kernel/x86_64/cscal.c @@ -28,7 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" -#if defined(HASWELL) || defined(ZEN) +#if defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #include "cscal_microk_haswell-2.c" #elif defined(BULLDOZER) || defined(PILEDRIVER) #include "cscal_microk_bulldozer-2.c" diff --git a/kernel/x86_64/cscal_microk_haswell-2.c b/kernel/x86_64/cscal_microk_haswell-2.c index 48e339501..0a4eb683c 100644 --- a/kernel/x86_64/cscal_microk_haswell-2.c +++ b/kernel/x86_64/cscal_microk_haswell-2.c @@ -54,7 +54,7 @@ static void cscal_kernel_16( BLASLONG n, FLOAT *alpha, FLOAT *x) "subq $16, %0 \n\t" "jz 2f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" //"prefetcht0 128(%1) \n\t" @@ -156,7 +156,7 @@ static void cscal_kernel_16_zero_r( BLASLONG n, FLOAT *alpha, FLOAT *x) "subq $16, %0 \n\t" "jz 2f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" //"prefetcht0 128(%1) \n\t" @@ -245,7 +245,7 @@ static void cscal_kernel_16_zero_i( BLASLONG n, FLOAT *alpha, FLOAT *x) "subq $16, %0 \n\t" "jz 2f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" //"prefetcht0 128(%1) \n\t" @@ -312,7 +312,7 @@ static void cscal_kernel_16_zero( BLASLONG n, FLOAT *alpha, FLOAT *x) "addq $128, %1 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" //"prefetcht0 128(%1) \n\t" diff --git a/kernel/x86_64/daxpy.c b/kernel/x86_64/daxpy.c index 20075b815..cde5bdaa6 100644 --- a/kernel/x86_64/daxpy.c +++ b/kernel/x86_64/daxpy.c @@ -33,17 +33,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "daxpy_microk_nehalem-2.c" #elif defined(BULLDOZER) #include "daxpy_microk_bulldozer-2.c" -/* -these appear to be broken, see issue 1332 #elif defined(STEAMROLLER) || defined(EXCAVATOR) #include "daxpy_microk_steamroller-2.c" #elif defined(PILEDRIVER) #include "daxpy_microk_piledriver-2.c" #elif defined(HASWELL) || defined(ZEN) #include "daxpy_microk_haswell-2.c" -*/ -#elif defined(HASWELL) || defined(ZEN) || defined(PILEDRIVER) || defined(STEAMROLLER) || defined(EXCAVATOR) -#include "daxpy_microk_sandy-2.c" +#elif defined (SKYLAKEX) +#include "daxpy_microk_skylakex-2.c" #elif defined(SANDYBRIDGE) #include "daxpy_microk_sandy-2.c" #endif diff --git a/kernel/x86_64/daxpy_microk_haswell-2.c b/kernel/x86_64/daxpy_microk_haswell-2.c index db117a8ba..bbe8b9550 100644 --- a/kernel/x86_64/daxpy_microk_haswell-2.c +++ b/kernel/x86_64/daxpy_microk_haswell-2.c @@ -38,7 +38,7 @@ static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) ( "vbroadcastsd (%4), %%ymm0 \n\t" // alpha - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%3,%0,8), %%ymm12 \n\t" // 4 * y diff --git a/kernel/x86_64/daxpy_microk_nehalem-2.c b/kernel/x86_64/daxpy_microk_nehalem-2.c index 38472c520..943d893af 100644 --- a/kernel/x86_64/daxpy_microk_nehalem-2.c +++ b/kernel/x86_64/daxpy_microk_nehalem-2.c @@ -39,7 +39,7 @@ static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "movsd (%4), %%xmm0 \n\t" // alpha "shufpd $0, %%xmm0, %%xmm0 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" // "prefetcht0 192(%2,%0,8) \n\t" // "prefetcht0 192(%3,%0,8) \n\t" diff --git a/kernel/x86_64/daxpy_microk_sandy-2.c b/kernel/x86_64/daxpy_microk_sandy-2.c index 522e084dc..85e038cef 100644 --- a/kernel/x86_64/daxpy_microk_sandy-2.c +++ b/kernel/x86_64/daxpy_microk_sandy-2.c @@ -50,7 +50,7 @@ static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "subq $16, %1 \n\t" "jz 2f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmulpd %%ymm4, %%ymm0, %%ymm4 \n\t" diff --git a/kernel/x86_64/daxpy_microk_skylakex-2.c 
b/kernel/x86_64/daxpy_microk_skylakex-2.c new file mode 100644 index 000000000..e785a39f1 --- /dev/null +++ b/kernel/x86_64/daxpy_microk_skylakex-2.c @@ -0,0 +1,71 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + + +/* need a new enough GCC for avx512 support */ +#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX2__)) || (defined(__clang__) && __clang_major__ >= 6)) + +#include <immintrin.h> + +#define HAVE_KERNEL_8 1 + +static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) +{ + BLASLONG i = 0; + + __m256d __alpha; + + __alpha = _mm256_broadcastsd_pd(_mm_load_sd(alpha)); + +#ifdef __AVX512CD__ + BLASLONG n32; + __m512d __alpha5; + __alpha5 = _mm512_broadcastsd_pd(_mm_load_sd(alpha)); + + n32 = n & ~31; + + for (; i < n32; i+= 32) { + _mm512_storeu_pd(&y[i + 0], _mm512_loadu_pd(&y[i + 0]) + __alpha5 * _mm512_loadu_pd(&x[i + 0])); + _mm512_storeu_pd(&y[i + 8], _mm512_loadu_pd(&y[i + 8]) + __alpha5 * _mm512_loadu_pd(&x[i + 8])); + _mm512_storeu_pd(&y[i + 16], _mm512_loadu_pd(&y[i + 16]) + __alpha5 * _mm512_loadu_pd(&x[i + 16])); + _mm512_storeu_pd(&y[i + 24], _mm512_loadu_pd(&y[i + 24]) + __alpha5 * _mm512_loadu_pd(&x[i + 24])); + } + +#endif + + for (; i < n; i+= 16) { + _mm256_storeu_pd(&y[i + 0], _mm256_loadu_pd(&y[i + 0]) + __alpha * _mm256_loadu_pd(&x[i + 0])); + _mm256_storeu_pd(&y[i + 4], _mm256_loadu_pd(&y[i + 4]) + __alpha * _mm256_loadu_pd(&x[i + 4])); + _mm256_storeu_pd(&y[i + 8], _mm256_loadu_pd(&y[i + 8]) + __alpha * _mm256_loadu_pd(&x[i + 8])); + _mm256_storeu_pd(&y[i + 12], _mm256_loadu_pd(&y[i + 12]) + __alpha * _mm256_loadu_pd(&x[i + 12])); + } +} +#else +#include "daxpy_microk_haswell-2.c" +#endif + + diff --git a/kernel/x86_64/ddot.c b/kernel/x86_64/ddot.c index 0a20564cf..969357614 100644 --- a/kernel/x86_64/ddot.c +++ b/kernel/x86_64/ddot.c @@ -29,20 +29,28 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
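In the new daxpy micro-kernel above, n & ~31 rounds the trip count down to a multiple of 32 for the 512-bit loop (four 8-wide vectors per iteration), and the 256-bit loop continues from wherever i stopped. A small check of the arithmetic:

#include <stdio.h>

int main(void)
{
    long n = 1000;
    long n32 = n & ~31L;   /* clear the low 5 bits: 1000 -> 992 */
    printf("512-bit iterations: %ld, doubles left over: %ld\n",
           n32 / 32, n - n32);   /* 31 and 8 */
    return 0;
}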
#include "common.h" -#if defined(BULLDOZER) +#if defined(BULLDOZER) #include "ddot_microk_bulldozer-2.c" #elif defined(STEAMROLLER) || defined(EXCAVATOR) #include "ddot_microk_steamroller-2.c" #elif defined(PILEDRIVER) #include "ddot_microk_piledriver-2.c" -#elif defined(NEHALEM) +#elif defined(NEHALEM) #include "ddot_microk_nehalem-2.c" #elif defined(HASWELL) || defined(ZEN) #include "ddot_microk_haswell-2.c" +#elif defined (SKYLAKEX) +#include "ddot_microk_skylakex-2.c" #elif defined(SANDYBRIDGE) #include "ddot_microk_sandy-2.c" #endif +#if !defined(DSDOT) +#define RETURN_TYPE FLOAT +#else +#define RETURN_TYPE double +#endif + #ifndef HAVE_KERNEL_8 @@ -71,7 +79,7 @@ static void ddot_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *d) #endif -FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) +static FLOAT dot_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) { BLASLONG i=0; BLASLONG ix=0,iy=0; @@ -104,7 +112,7 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) FLOAT temp1 = 0.0; FLOAT temp2 = 0.0; - BLASLONG n1 = n & -4; + BLASLONG n1 = n & -4; while(i < n1) { @@ -139,4 +147,60 @@ FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) } +#if defined(SMP) +static int dot_thread_function(BLASLONG n, BLASLONG dummy0, + BLASLONG dummy1, FLOAT dummy2, FLOAT *x, BLASLONG inc_x, FLOAT *y, + BLASLONG inc_y, RETURN_TYPE *result, BLASLONG dummy3) +{ + *(RETURN_TYPE *)result = dot_compute(n, x, inc_x, y, inc_y); + return 0; +} + +extern int blas_level1_thread_with_return_value(int mode, BLASLONG m, BLASLONG n, + BLASLONG k, void *alpha, void *a, BLASLONG lda, void *b, BLASLONG ldb, + void *c, BLASLONG ldc, int (*function)(), int nthreads); +#endif + +FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) +{ +#if defined(SMP) + int nthreads; + FLOAT dummy_alpha; +#endif + FLOAT dot = 0.0; + +#if defined(SMP) + if (inc_x == 0 || inc_y == 0 || n <= 10000) + nthreads = 1; + else + nthreads = num_cpu_avail(1); + + if (nthreads == 1) { + dot = dot_compute(n, x, inc_x, y, inc_y); + } else { + int mode, i; + char result[MAX_CPU_NUMBER * sizeof(double) * 2]; + RETURN_TYPE *ptr; + +#if !defined(DOUBLE) + mode = BLAS_SINGLE | BLAS_REAL; +#else + mode = BLAS_DOUBLE | BLAS_REAL; +#endif + blas_level1_thread_with_return_value(mode, n, 0, 0, &dummy_alpha, + x, inc_x, y, inc_y, result, 0, + ( void *)dot_thread_function, nthreads); + + ptr = (RETURN_TYPE *)result; + for (i = 0; i < nthreads; i++) { + dot = dot + (*ptr); + ptr = (RETURN_TYPE *)(((char *)ptr) + sizeof(double) * 2); + } + } +#else + dot = dot_compute(n, x, inc_x, y, inc_y); +#endif + + return dot; +} diff --git a/kernel/x86_64/ddot_microk_haswell-2.c b/kernel/x86_64/ddot_microk_haswell-2.c index d36577af3..365737363 100644 --- a/kernel/x86_64/ddot_microk_haswell-2.c +++ b/kernel/x86_64/ddot_microk_haswell-2.c @@ -41,7 +41,7 @@ static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) "vxorpd %%ymm6, %%ymm6, %%ymm6 \n\t" "vxorpd %%ymm7, %%ymm7, %%ymm7 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%2,%0,8), %%ymm12 \n\t" // 2 * x "vmovups 32(%2,%0,8), %%ymm13 \n\t" // 2 * x diff --git a/kernel/x86_64/ddot_microk_nehalem-2.c b/kernel/x86_64/ddot_microk_nehalem-2.c index 1d10fc2d7..fb5ec9bca 100644 --- a/kernel/x86_64/ddot_microk_nehalem-2.c +++ b/kernel/x86_64/ddot_microk_nehalem-2.c @@ -41,7 +41,7 @@ static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) "xorpd %%xmm6, %%xmm6 \n\t" "xorpd 
%%xmm7, %%xmm7 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "movups (%2,%0,8), %%xmm12 \n\t" // 2 * x diff --git a/kernel/x86_64/ddot_microk_sandy-2.c b/kernel/x86_64/ddot_microk_sandy-2.c index e2e6701c7..160f95604 100644 --- a/kernel/x86_64/ddot_microk_sandy-2.c +++ b/kernel/x86_64/ddot_microk_sandy-2.c @@ -41,7 +41,7 @@ static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) "vxorpd %%ymm6, %%ymm6, %%ymm6 \n\t" "vxorpd %%ymm7, %%ymm7, %%ymm7 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%2,%0,8), %%ymm12 \n\t" // 2 * x "vmovups 32(%2,%0,8), %%ymm13 \n\t" // 2 * x diff --git a/kernel/x86_64/ddot_microk_skylakex-2.c b/kernel/x86_64/ddot_microk_skylakex-2.c new file mode 100644 index 000000000..8eabf225a --- /dev/null +++ b/kernel/x86_64/ddot_microk_skylakex-2.c @@ -0,0 +1,96 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
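The .align -> .p2align swap repeated through these micro-kernels removes an ambiguity: GAS interprets .align's argument as a byte count on some targets and as a power-of-two exponent on others, while .p2align N always means 2^N bytes, so .p2align 4 pads the hot loop head to 16 bytes everywhere. A minimal x86-64 GCC/Clang inline-asm user of the directive (illustrative sketch, assumes n >= 1):

static long count_up(long n)
{
    long i = 0;
    __asm__ volatile (
        ".p2align 4   \n\t"   /* align the loop head to 2^4 = 16 bytes */
        "1:           \n\t"
        "addq $1, %0  \n\t"
        "cmpq %1, %0  \n\t"
        "jl 1b        \n\t"
        : "+r" (i)
        : "r" (n)
        : "cc");
    return i;   /* == n for n >= 1 */
}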
+*****************************************************************************/ + +/* need a new enough GCC for avx512 support */ +#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX2__)) || (defined(__clang__) && __clang_major__ >= 6)) + +#define HAVE_KERNEL_8 1 + +#include <immintrin.h> + +static void ddot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) +{ + int i = 0; + __m256d accum_0, accum_1, accum_2, accum_3; + + accum_0 = _mm256_setzero_pd(); + accum_1 = _mm256_setzero_pd(); + accum_2 = _mm256_setzero_pd(); + accum_3 = _mm256_setzero_pd(); + +#ifdef __AVX512CD__ + __m512d accum_05, accum_15, accum_25, accum_35; + int n32; + n32 = n & (~31); + + accum_05 = _mm512_setzero_pd(); + accum_15 = _mm512_setzero_pd(); + accum_25 = _mm512_setzero_pd(); + accum_35 = _mm512_setzero_pd(); + + for (; i < n32; i += 32) { + accum_05 += _mm512_loadu_pd(&x[i+ 0]) * _mm512_loadu_pd(&y[i+ 0]); + accum_15 += _mm512_loadu_pd(&x[i+ 8]) * _mm512_loadu_pd(&y[i+ 8]); + accum_25 += _mm512_loadu_pd(&x[i+16]) * _mm512_loadu_pd(&y[i+16]); + accum_35 += _mm512_loadu_pd(&x[i+24]) * _mm512_loadu_pd(&y[i+24]); + } + + /* + * we need to fold our 512 bit wide accumulator vectors into 256 bit wide vectors so that the AVX2 code + * below can continue using the intermediate results in its loop + */ + accum_0 = _mm512_extractf64x4_pd(accum_05, 0) + _mm512_extractf64x4_pd(accum_05, 1); + accum_1 = _mm512_extractf64x4_pd(accum_15, 0) + _mm512_extractf64x4_pd(accum_15, 1); + accum_2 = _mm512_extractf64x4_pd(accum_25, 0) + _mm512_extractf64x4_pd(accum_25, 1); + accum_3 = _mm512_extractf64x4_pd(accum_35, 0) + _mm512_extractf64x4_pd(accum_35, 1); + +#endif + for (; i < n; i += 16) { + accum_0 += _mm256_loadu_pd(&x[i+ 0]) * _mm256_loadu_pd(&y[i+ 0]); + accum_1 += _mm256_loadu_pd(&x[i+ 4]) * _mm256_loadu_pd(&y[i+ 4]); + accum_2 += _mm256_loadu_pd(&x[i+ 8]) * _mm256_loadu_pd(&y[i+ 8]); + accum_3 += _mm256_loadu_pd(&x[i+12]) * _mm256_loadu_pd(&y[i+12]); + } + + /* we now have the partial sums of the dot product in the 4 accumulation vectors, time to consolidate */ + + accum_0 = accum_0 + accum_1 + accum_2 + accum_3; + + __m128d half_accum0; + + /* Add upper half to lower half of each of the 256 bit vector to get a 128 bit vector */ + half_accum0 = _mm_add_pd(_mm256_extractf128_pd(accum_0, 0), _mm256_extractf128_pd(accum_0, 1)); + + /* in 128 bit land there is a hadd operation to do the rest of the element-wise sum in one go */ + half_accum0 = _mm_hadd_pd(half_accum0, half_accum0); + + *dot = half_accum0[0]; +} + +#else +#include "ddot_microk_haswell-2.c" +#endif diff --git a/kernel/x86_64/dgemm_beta_skylakex.c b/kernel/x86_64/dgemm_beta_skylakex.c new file mode 100644 index 000000000..5cd001920 --- /dev/null +++ b/kernel/x86_64/dgemm_beta_skylakex.c @@ -0,0 +1,160 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. 
*/ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" + +#include + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT beta, + FLOAT *dummy2, BLASLONG dummy3, FLOAT *dummy4, BLASLONG dummy5, + FLOAT *c, BLASLONG ldc){ + + BLASLONG i, j; + FLOAT *c_offset1, *c_offset; + FLOAT ctemp1, ctemp2, ctemp3, ctemp4; + FLOAT ctemp5, ctemp6, ctemp7, ctemp8; + + /* fast path.. just zero the whole matrix */ + if (m == ldc && beta == ZERO) { + memset(c, 0, m * n * sizeof(FLOAT)); + return 0; + } + + if (m == 0 || n == 0) + return 0; + + c_offset = c; + + if (beta == ZERO){ + + j = n; + do { + c_offset1 = c_offset; + c_offset += ldc; + + i = m; +#ifdef __AVX2__ +#ifdef __AVX512CD__ + while (i >= 32) { + __m512d z_zero = _mm512_setzero_pd(); + _mm512_storeu_pd(c_offset1, z_zero); + _mm512_storeu_pd(c_offset1 + 8, z_zero); + _mm512_storeu_pd(c_offset1 + 16, z_zero); + _mm512_storeu_pd(c_offset1 + 24 , z_zero); + c_offset1 += 32; + i -= 32; + } +#endif + while (i >= 8) { +#ifdef __AVX512CD__ + __m512d z_zero = _mm512_setzero_pd(); + _mm512_storeu_pd(c_offset1, z_zero); +#else + __m256d y_zero = _mm256_setzero_pd(); + _mm256_storeu_pd(c_offset1, y_zero); + _mm256_storeu_pd(c_offset1 + 4, y_zero); +#endif + c_offset1 += 8; + i -= 8; + } +#endif + while (i > 0) { + *c_offset1 = ZERO; + c_offset1 ++; + i --; + } + j --; + } while (j > 0); + + } else { + + j = n; + do { + c_offset1 = c_offset; + c_offset += ldc; + + i = (m >> 3); + if (i > 0){ + do { + ctemp1 = *(c_offset1 + 0); + ctemp2 = *(c_offset1 + 1); + ctemp3 = *(c_offset1 + 2); + ctemp4 = *(c_offset1 + 3); + ctemp5 = *(c_offset1 + 4); + ctemp6 = *(c_offset1 + 5); + ctemp7 = *(c_offset1 + 6); + ctemp8 = *(c_offset1 + 7); + + ctemp1 *= beta; + ctemp2 *= beta; + ctemp3 *= beta; + ctemp4 *= beta; + ctemp5 *= beta; + ctemp6 *= beta; + ctemp7 *= beta; + ctemp8 *= beta; + + *(c_offset1 + 0) = ctemp1; + *(c_offset1 + 1) = ctemp2; + *(c_offset1 + 2) = ctemp3; + *(c_offset1 + 3) = ctemp4; + *(c_offset1 + 4) = ctemp5; + *(c_offset1 + 5) = ctemp6; + *(c_offset1 + 6) = ctemp7; + *(c_offset1 + 7) = ctemp8; + c_offset1 += 8; + i --; + } while (i > 0); + } + + i = (m & 7); + if (i > 0){ + do { + ctemp1 = *c_offset1; + ctemp1 *= beta; + *c_offset1 = ctemp1; + c_offset1 ++; + i --; + } while (i > 0); + } + j --; + } while (j > 0); + + } + return 0; +}; diff --git a/kernel/x86_64/dgemm_kernel_16x2_skylakex.S 
b/kernel/x86_64/dgemm_kernel_16x2_skylakex.S new file mode 100644 index 000000000..91ac51280 --- /dev/null +++ b/kernel/x86_64/dgemm_kernel_16x2_skylakex.S @@ -0,0 +1,5138 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +/********************************************************************* +* 2013/10/20 Saar +* BLASTEST : OK +* CTEST : OK +* TEST : OK + +* +* +* 2013/10/20 Saar +* Parameter: +* DGEMM_DEFAULT_UNROLL_N 2 +* DGEMM_DEFAULT_UNROLL_M 16 +* DGEMM_DEFAULT_P 192 +* DGEMM_DEFAULT_Q 128 +* A_PR1 512 +* +* +* Performance without prefetch of B: +* 1 thread: 45.8 GFLOPS (MKL: 45) +* 2 threads: 80.0 GFLOPS (MKL: 91) +* 4 threads: 135.0 GFLOPS (MKL: 135) +*********************************************************************/ + + +#define ASSEMBLER +#include "common.h" + +#define OLD_M %rdi +#define OLD_N %rsi +#define M %r13 +#define J %r14 +#define OLD_K %rdx + +#define A %rcx +#define B %r8 +#define C %r9 +#define LDC %r10 + +#define I %r11 +#define AO %rdi +#define BO %rsi +#define CO1 %r15 +#define K %r12 +#define BI %rbp +#define SP %rbx + +#define BO1 %rdi +#define BO2 %r15 + +#ifndef WINDOWS_ABI + +#define STACKSIZE 96 + +#else + +#define STACKSIZE 256 + +#define OLD_A 40 + STACKSIZE(%rsp) +#define OLD_B 48 + STACKSIZE(%rsp) +#define OLD_C 56 + STACKSIZE(%rsp) +#define OLD_LDC 64 + STACKSIZE(%rsp) +#define OLD_OFFSET 72 + STACKSIZE(%rsp) + +#endif + +#define L_BUFFER_SIZE 512*8*4 +#define LB2_OFFSET 512*8*2 + +#define Ndiv6 24(%rsp) +#define Nmod6 32(%rsp) +#define N 40(%rsp) +#define ALPHA 48(%rsp) +#define OFFSET 56(%rsp) +#define KK 64(%rsp) +#define KKK 72(%rsp) +#define BUFFER1 128(%rsp) +#define BUFFER2 LB2_OFFSET+128(%rsp) + +#if defined(OS_WINDOWS) +#if L_BUFFER_SIZE > 16384 +#define STACK_TOUCH \ + movl $0, 4096 * 4(%rsp);\ + movl $0, 4096 * 3(%rsp);\ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 12288 +#define STACK_TOUCH \ + movl $0, 4096 * 3(%rsp);\ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 8192 +#define STACK_TOUCH \ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 4096 +#define STACK_TOUCH \ + movl $0, 4096 * 1(%rsp); +#else +#define STACK_TOUCH +#endif +#else +#define STACK_TOUCH +#endif + +#if defined(BULLDOZER) + +.macro VFMADD231PD_ y0,y1,y2 + vfmaddpd \y0,\y1,\y2,\y0 +.endm + +.macro VFMADD231SD_ x0,x1,x2 + vfmaddsd \x0,\x1,\x2,\x0 +.endm + +#else + +.macro VFMADD231PD_ y0,y1,y2 + vfmadd231pd \y2,\y1,\y0 +.endm + +.macro VFMADD231SD_ x0,x1,x2 + vfmadd231sd \x2,\x1,\x0 +.endm + +#endif + + +#define A_PR1 1024 +#define B_PR1 256 + +/******************************************************************************************* +* 3 lines of N +*******************************************************************************************/ + +.macro KERNEL16x3_SUBN + vbroadcastsd -12 * SIZE(BO), %zmm1 + vbroadcastsd -11 * SIZE(BO), %zmm2 + vbroadcastsd -10 * SIZE(BO), %zmm3 + + vmovaps -16 * SIZE(AO), %zmm0 + VFMADD231PD_ %zmm4,%zmm1,%zmm0 + VFMADD231PD_ %zmm5,%zmm2,%zmm0 + VFMADD231PD_ %zmm6,%zmm3,%zmm0 + + vmovaps -8 * SIZE(AO), %zmm9 + VFMADD231PD_ %zmm10,%zmm1,%zmm9 + VFMADD231PD_ %zmm11,%zmm2,%zmm9 + VFMADD231PD_ %zmm12,%zmm3,%zmm9 + addq $ 3*SIZE , BO + addq $ 16*SIZE, AO +.endm + + +.macro KERNEL8x3_SUBN + vbroadcastsd -12 * SIZE(BO), %ymm1 + vmovaps -16 * SIZE(AO), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd -11 * SIZE(BO), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vbroadcastsd -10 * SIZE(BO), %ymm3 + VFMADD231PD_ %ymm6,%ymm3,%ymm0 + vmovaps -12 * SIZE(AO), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + VFMADD231PD_ %ymm8,%ymm2,%ymm0 + VFMADD231PD_ %ymm9,%ymm3,%ymm0 + prefetcht0 B_PR1(BO) + addq $ 3*SIZE , 
BO + addq $ 8*SIZE, AO +.endm + +.macro KERNEL4x3_SUBN + vbroadcastsd -12 * SIZE(BO), %ymm1 + vmovaps -16 * SIZE(AO), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd -11 * SIZE(BO), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vbroadcastsd -10 * SIZE(BO), %ymm3 + VFMADD231PD_ %ymm6,%ymm3,%ymm0 + addq $ 3*SIZE , BO + addq $ 4*SIZE, AO +.endm + +.macro KERNEL2x3_SUBN + vmovsd -12 * SIZE(BO), %xmm1 + vmovsd -16 * SIZE(AO), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -11 * SIZE(BO), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd -10 * SIZE(BO), %xmm3 + VFMADD231SD_ %xmm6,%xmm3,%xmm0 + vmovsd -15 * SIZE(AO), %xmm0 + VFMADD231SD_ %xmm8,%xmm1,%xmm0 + VFMADD231SD_ %xmm10,%xmm2,%xmm0 + VFMADD231SD_ %xmm12,%xmm3,%xmm0 + addq $ 3*SIZE , BO + addq $ 2*SIZE, AO +.endm + +.macro KERNEL1x3_SUBN + vmovsd -12 * SIZE(BO), %xmm1 + vmovsd -16 * SIZE(AO), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -11 * SIZE(BO), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd -10 * SIZE(BO), %xmm3 + VFMADD231SD_ %xmm6,%xmm3,%xmm0 + addq $ 3*SIZE , BO + addq $ 1*SIZE, AO +.endm + + + + + + +/******************************************************************************************/ + +.macro KERNEL16x3_1 + vbroadcastsd -6 * SIZE(BO, BI, SIZE), %zmm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %zmm0 + VFMADD231PD_ %zmm4,%zmm1,%zmm0 + vbroadcastsd -5 * SIZE(BO, BI, SIZE), %zmm2 + VFMADD231PD_ %zmm5,%zmm2,%zmm0 + vbroadcastsd -4 * SIZE(BO, BI, SIZE), %zmm3 + VFMADD231PD_ %zmm6,%zmm3,%zmm0 + vmovups -24 * SIZE(AO, %rax, SIZE), %zmm0 + VFMADD231PD_ %zmm10,%zmm1,%zmm0 + VFMADD231PD_ %zmm11,%zmm2,%zmm0 + VFMADD231PD_ %zmm12,%zmm3,%zmm0 + vbroadcastsd -3 * SIZE(BO, BI, SIZE), %zmm1 + vbroadcastsd -2 * SIZE(BO, BI, SIZE), %zmm2 +.endm + + + + +.macro KERNEL16x3_2 + vmovups -16 * SIZE(AO, %rax, SIZE), %zmm0 + VFMADD231PD_ %zmm4,%zmm1,%zmm0 + VFMADD231PD_ %zmm5,%zmm2,%zmm0 + vbroadcastsd -1 * SIZE(BO, BI, SIZE), %zmm3 + VFMADD231PD_ %zmm6,%zmm3,%zmm0 + vmovups -8 * SIZE(AO, %rax, SIZE), %zmm0 + VFMADD231PD_ %zmm10,%zmm1,%zmm0 + VFMADD231PD_ %zmm11,%zmm2,%zmm0 + VFMADD231PD_ %zmm12,%zmm3,%zmm0 + vbroadcastsd 0 * SIZE(BO, BI, SIZE), %zmm1 + vbroadcastsd 1 * SIZE(BO, BI, SIZE), %zmm2 +.endm + +.macro KERNEL16x3_3 + vmovups 0 * SIZE(AO, %rax, SIZE), %zmm0 + VFMADD231PD_ %zmm4,%zmm1,%zmm0 + VFMADD231PD_ %zmm5,%zmm2,%zmm0 + vbroadcastsd 2 * SIZE(BO, BI, SIZE), %zmm3 + VFMADD231PD_ %zmm6,%zmm3,%zmm0 + vmovups 8 * SIZE(AO, %rax, SIZE), %zmm0 + VFMADD231PD_ %zmm10,%zmm1,%zmm0 + VFMADD231PD_ %zmm11,%zmm2,%zmm0 + VFMADD231PD_ %zmm12,%zmm3,%zmm0 + vbroadcastsd 3 * SIZE(BO, BI, SIZE), %zmm1 + vbroadcastsd 4 * SIZE(BO, BI, SIZE), %zmm2 +.endm + +.macro KERNEL16x3_4 + vmovups 16 * SIZE(AO, %rax, SIZE), %zmm0 + VFMADD231PD_ %zmm4,%zmm1,%zmm0 + VFMADD231PD_ %zmm5,%zmm2,%zmm0 + vbroadcastsd 5 * SIZE(BO, BI, SIZE), %zmm3 + VFMADD231PD_ %zmm6,%zmm3,%zmm0 + vmovups 24 * SIZE(AO, %rax, SIZE), %zmm0 + VFMADD231PD_ %zmm10,%zmm1,%zmm0 + VFMADD231PD_ %zmm11,%zmm2,%zmm0 + addq $12, BI + VFMADD231PD_ %zmm12,%zmm3,%zmm0 + addq $64, %rax +.endm + +.macro KERNEL16x3_SUB + vbroadcastsd -6 * SIZE(BO, BI, SIZE), %zmm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %zmm0 + VFMADD231PD_ %zmm4,%zmm1,%zmm0 + vbroadcastsd -5 * SIZE(BO, BI, SIZE), %zmm2 + VFMADD231PD_ %zmm5,%zmm2,%zmm0 + vbroadcastsd -4 * SIZE(BO, BI, SIZE), %zmm3 + VFMADD231PD_ %zmm6,%zmm3,%zmm0 + vmovups -24 * SIZE(AO, %rax, SIZE), %zmm0 + VFMADD231PD_ %zmm10,%zmm1,%zmm0 + VFMADD231PD_ %zmm11,%zmm2,%zmm0 + VFMADD231PD_ %zmm12,%zmm3,%zmm0 + addq $3 , BI + addq $16, %rax +.endm + +.macro SAVE16x3 + + vbroadcastsd 
ALPHA, %zmm0 + + vmulpd %zmm0 , %zmm4 , %zmm4 + vmulpd %zmm0 , %zmm10, %zmm10 + + vmulpd %zmm0 , %zmm5 , %zmm5 + vmulpd %zmm0 , %zmm11, %zmm11 + + vmulpd %zmm0 , %zmm6 , %zmm6 + vmulpd %zmm0 , %zmm12, %zmm12 + +#if !defined(TRMMKERNEL) + + vaddpd (CO1), %zmm4,%zmm4 + vaddpd 8 * SIZE(CO1), %zmm10,%zmm10 + + vaddpd (CO1, LDC), %zmm5,%zmm5 + vaddpd 8 * SIZE(CO1, LDC), %zmm11,%zmm11 + + vaddpd (CO1, LDC, 2), %zmm6,%zmm6 + vaddpd 8 * SIZE(CO1, LDC, 2), %zmm12,%zmm12 + +#endif + + vmovups %zmm4 , (CO1) + vmovups %zmm10, 8 * SIZE(CO1) + + vmovups %zmm5 , (CO1, LDC) + vmovups %zmm11, 8 * SIZE(CO1, LDC) + + vmovups %zmm6 , (CO1, LDC, 2) + vmovups %zmm12, 8 * SIZE(CO1, LDC, 2) + +.endm + + + +/*******************************************************************************************/ + +.macro KERNEL8x3_1 + prefetcht0 A_PR1(AO, %rax, SIZE) + vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm3 + VFMADD231PD_ %ymm6,%ymm3,%ymm0 + vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + VFMADD231PD_ %ymm8,%ymm2,%ymm0 + VFMADD231PD_ %ymm9,%ymm3,%ymm0 +.endm + +.macro KERNEL8x3_2 + prefetcht0 64+A_PR1(AO, %rax, SIZE) + vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm3 + VFMADD231PD_ %ymm6,%ymm3,%ymm0 + vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + VFMADD231PD_ %ymm8,%ymm2,%ymm0 + VFMADD231PD_ %ymm9,%ymm3,%ymm0 +.endm + +.macro KERNEL8x3_3 + prefetcht0 128+A_PR1(AO, %rax, SIZE) + vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm3 + VFMADD231PD_ %ymm6,%ymm3,%ymm0 + vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + VFMADD231PD_ %ymm8,%ymm2,%ymm0 + VFMADD231PD_ %ymm9,%ymm3,%ymm0 +.endm + +.macro KERNEL8x3_4 + prefetcht0 192+A_PR1(AO, %rax, SIZE) + vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd 4 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vbroadcastsd 5 * SIZE(BO, BI, SIZE), %ymm3 + VFMADD231PD_ %ymm6,%ymm3,%ymm0 + vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + VFMADD231PD_ %ymm8,%ymm2,%ymm0 + VFMADD231PD_ %ymm9,%ymm3,%ymm0 + addq $12, BI + addq $32, %rax +.endm + +.macro KERNEL8x3_SUB + vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm3 + VFMADD231PD_ %ymm6,%ymm3,%ymm0 + vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + VFMADD231PD_ %ymm8,%ymm2,%ymm0 + VFMADD231PD_ %ymm9,%ymm3,%ymm0 + addq $3 , BI + addq $8 , %rax +.endm + +.macro SAVE8x3 + + vbroadcastsd ALPHA, %ymm0 + + vmulpd %ymm0 , %ymm4 , %ymm4 + vmulpd %ymm0 , %ymm7 , %ymm7 + + vmulpd %ymm0 , %ymm5 , %ymm5 + vmulpd %ymm0 , %ymm8 , %ymm8 + + vmulpd %ymm0 , %ymm6 , %ymm6 + vmulpd %ymm0 , %ymm9 , %ymm9 + +#if !defined(TRMMKERNEL) + + vaddpd (CO1), 
%ymm4,%ymm4 + vaddpd 4 * SIZE(CO1), %ymm7,%ymm7 + + vaddpd (CO1, LDC), %ymm5,%ymm5 + vaddpd 4 * SIZE(CO1, LDC), %ymm8,%ymm8 + + vaddpd (CO1, LDC, 2), %ymm6,%ymm6 + vaddpd 4 * SIZE(CO1, LDC, 2), %ymm9,%ymm9 + +#endif + + vmovups %ymm4 , (CO1) + vmovups %ymm7 , 4 * SIZE(CO1) + + vmovups %ymm5 , (CO1, LDC) + vmovups %ymm8 , 4 * SIZE(CO1, LDC) + + vmovups %ymm6 , (CO1, LDC, 2) + vmovups %ymm9 , 4 * SIZE(CO1, LDC, 2) + +.endm + + + +/*******************************************************************************************/ + +.macro KERNEL4x3_1 + prefetcht0 A_PR1(AO, %rax, SIZE) + vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm3 + VFMADD231PD_ %ymm6,%ymm3,%ymm0 +.endm + +.macro KERNEL4x3_2 + vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm3 + VFMADD231PD_ %ymm6,%ymm3,%ymm0 +.endm + +.macro KERNEL4x3_3 + prefetcht0 A_PR1(AO, %rax, SIZE) + vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm3 + VFMADD231PD_ %ymm6,%ymm3,%ymm0 +.endm + +.macro KERNEL4x3_4 + vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd 4 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vbroadcastsd 5 * SIZE(BO, BI, SIZE), %ymm3 + VFMADD231PD_ %ymm6,%ymm3,%ymm0 + addq $12, BI + addq $16, %rax +.endm + +.macro KERNEL4x3_SUB + vbroadcastsd -6 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd -5 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm3 + VFMADD231PD_ %ymm6,%ymm3,%ymm0 + addq $3 , BI + addq $4 , %rax +.endm + +.macro SAVE4x3 + + vbroadcastsd ALPHA, %ymm0 + + vmulpd %ymm0 , %ymm4 , %ymm4 + vmulpd %ymm0 , %ymm5 , %ymm5 + vmulpd %ymm0 , %ymm6 , %ymm6 + +#if !defined(TRMMKERNEL) + + vaddpd (CO1), %ymm4,%ymm4 + vaddpd (CO1, LDC), %ymm5,%ymm5 + vaddpd (CO1, LDC, 2), %ymm6,%ymm6 + +#endif + + vmovups %ymm4 , (CO1) + vmovups %ymm5 , (CO1, LDC) + vmovups %ymm6 , (CO1, LDC, 2) + +.endm + + +/*******************************************************************************************/ + +.macro KERNEL2x3_1 + prefetcht0 A_PR1(AO, %rax, SIZE) + vmovsd -6 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -5 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd -4 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231SD_ %xmm6,%xmm3,%xmm0 + vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm8,%xmm1,%xmm0 + VFMADD231SD_ %xmm10,%xmm2,%xmm0 + VFMADD231SD_ %xmm12,%xmm3,%xmm0 +.endm + +.macro KERNEL2x3_2 + vmovsd -3 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -2 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd -1 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231SD_ %xmm6,%xmm3,%xmm0 + vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm8,%xmm1,%xmm0 + VFMADD231SD_ %xmm10,%xmm2,%xmm0 + 
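
The VFMADD231PD_/VFMADD231SD_ macros used throughout these kernels expand to the four-operand FMA4 instructions (vfmaddpd/vfmaddsd) on Bulldozer and to the FMA3 forms (vfmadd231pd/vfmadd231sd) everywhere else; in both cases the effect is acc += a * b. A minimal C-intrinsics sketch of the same accumulate step, assuming an FMA3-capable compiler invoked with -mavx2 -mfma (the helper names are illustrative, not part of the source):

#include <immintrin.h>

/* acc += a * b, the operation every VFMADD231PD_ expansion performs */
static inline __m256d fma_accumulate(__m256d acc, __m256d a, __m256d b)
{
	return _mm256_fmadd_pd(a, b, acc);	/* FMA3: a*b + acc */
}

/* one broadcast-and-accumulate step in the style of the KERNELnx3
 * macros: splat a single B element against four packed A elements */
static inline __m256d rank1_step(__m256d acc, const double *a, const double *b)
{
	__m256d a0 = _mm256_loadu_pd(a);	/* four doubles of A */
	__m256d b0 = _mm256_broadcast_sd(b);	/* one B value in all lanes */
	return fma_accumulate(acc, a0, b0);
}
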
VFMADD231SD_ %xmm12,%xmm3,%xmm0 +.endm + +.macro KERNEL2x3_3 + vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -28 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd 1 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd 2 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231SD_ %xmm6,%xmm3,%xmm0 + vmovsd -27 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm8,%xmm1,%xmm0 + VFMADD231SD_ %xmm10,%xmm2,%xmm0 + VFMADD231SD_ %xmm12,%xmm3,%xmm0 +.endm + +.macro KERNEL2x3_4 + vmovsd 3 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -26 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd 4 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd 5 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231SD_ %xmm6,%xmm3,%xmm0 + vmovsd -25 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm8,%xmm1,%xmm0 + VFMADD231SD_ %xmm10,%xmm2,%xmm0 + VFMADD231SD_ %xmm12,%xmm3,%xmm0 + addq $12, BI + addq $8, %rax +.endm + +.macro KERNEL2x3_SUB + vmovsd -6 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -5 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd -4 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231SD_ %xmm6,%xmm3,%xmm0 + vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm8,%xmm1,%xmm0 + VFMADD231SD_ %xmm10,%xmm2,%xmm0 + VFMADD231SD_ %xmm12,%xmm3,%xmm0 + addq $3 , BI + addq $2 , %rax +.endm + +.macro SAVE2x3 + + vmovsd ALPHA, %xmm0 + + vmulsd %xmm0 , %xmm4 , %xmm4 + vmulsd %xmm0 , %xmm8 , %xmm8 + vmulsd %xmm0 , %xmm5 , %xmm5 + vmulsd %xmm0 , %xmm10, %xmm10 + vmulsd %xmm0 , %xmm6 , %xmm6 + vmulsd %xmm0 , %xmm12, %xmm12 + +#if !defined(TRMMKERNEL) + + vaddsd (CO1), %xmm4,%xmm4 + vaddsd 1 * SIZE(CO1), %xmm8,%xmm8 + vaddsd (CO1, LDC), %xmm5,%xmm5 + vaddsd 1 * SIZE(CO1, LDC), %xmm10,%xmm10 + vaddsd (CO1, LDC, 2), %xmm6,%xmm6 + vaddsd 1 * SIZE(CO1, LDC, 2), %xmm12,%xmm12 + +#endif + + vmovsd %xmm4 , (CO1) + vmovsd %xmm8 , 1 * SIZE(CO1) + vmovsd %xmm5 , (CO1, LDC) + vmovsd %xmm10, 1 * SIZE(CO1, LDC) + vmovsd %xmm6 , (CO1, LDC, 2) + vmovsd %xmm12, 1 * SIZE(CO1, LDC, 2) + +.endm + +/*******************************************************************************************/ + +.macro KERNEL1x3_1 + vmovsd -6 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -5 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd -4 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231SD_ %xmm6,%xmm3,%xmm0 +.endm + +.macro KERNEL1x3_2 + vmovsd -3 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -2 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd -1 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231SD_ %xmm6,%xmm3,%xmm0 +.endm + +.macro KERNEL1x3_3 + vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd 1 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd 2 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231SD_ %xmm6,%xmm3,%xmm0 +.endm + +.macro KERNEL1x3_4 + vmovsd 3 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd 4 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd 5 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231SD_ %xmm6,%xmm3,%xmm0 + addq $12, BI + addq $4, %rax +.endm + +.macro KERNEL1x3_SUB + vmovsd -6 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -5 * SIZE(BO, BI, 
SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd -4 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231SD_ %xmm6,%xmm3,%xmm0 + addq $3 , BI + addq $1 , %rax +.endm + +.macro SAVE1x3 + + vmovsd ALPHA, %xmm0 + + vmulsd %xmm0 , %xmm4 , %xmm4 + vmulsd %xmm0 , %xmm5 , %xmm5 + vmulsd %xmm0 , %xmm6 , %xmm6 + +#if !defined(TRMMKERNEL) + + vaddsd (CO1), %xmm4,%xmm4 + vaddsd (CO1, LDC), %xmm5,%xmm5 + vaddsd (CO1, LDC, 2), %xmm6,%xmm6 + +#endif + + vmovsd %xmm4 , (CO1) + vmovsd %xmm5 , (CO1, LDC) + vmovsd %xmm6 , (CO1, LDC, 2) + +.endm + + +/*******************************************************************************************/ + +/******************************************************************************************* +* 2 lines of N +*******************************************************************************************/ + +.macro KERNEL16x2_1 + prefetcht0 A_PR1(AO, %rax, SIZE) + vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + prefetcht0 64+A_PR1(AO, %rax, SIZE) + VFMADD231PD_ %ymm8,%ymm2,%ymm0 + vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm10,%ymm1,%ymm0 + VFMADD231PD_ %ymm11,%ymm2,%ymm0 + vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm13,%ymm1,%ymm0 + VFMADD231PD_ %ymm14,%ymm2,%ymm0 +.endm + +.macro KERNEL16x2_2 + prefetcht0 128+A_PR1(AO, %rax, SIZE) + vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + prefetcht0 192+A_PR1(AO, %rax, SIZE) + VFMADD231PD_ %ymm8,%ymm2,%ymm0 + vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm10,%ymm1,%ymm0 + VFMADD231PD_ %ymm11,%ymm2,%ymm0 + vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm13,%ymm1,%ymm0 + VFMADD231PD_ %ymm14,%ymm2,%ymm0 +.endm + +.macro KERNEL16x2_3 + prefetcht0 256+A_PR1(AO, %rax, SIZE) + vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1 + vmovups 0 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vmovups 4 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + prefetcht0 320+A_PR1(AO, %rax, SIZE) + VFMADD231PD_ %ymm8,%ymm2,%ymm0 + vmovups 8 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm10,%ymm1,%ymm0 + VFMADD231PD_ %ymm11,%ymm2,%ymm0 + vmovups 12 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm13,%ymm1,%ymm0 + VFMADD231PD_ %ymm14,%ymm2,%ymm0 +.endm + +.macro KERNEL16x2_4 + prefetcht0 384+A_PR1(AO, %rax, SIZE) + vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm1 + vmovups 16 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vmovups 20 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + prefetcht0 448+A_PR1(AO, %rax, SIZE) + VFMADD231PD_ %ymm8,%ymm2,%ymm0 + vmovups 24 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm10,%ymm1,%ymm0 + VFMADD231PD_ %ymm11,%ymm2,%ymm0 + vmovups 28 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm13,%ymm1,%ymm0 + VFMADD231PD_ %ymm14,%ymm2,%ymm0 + addq $8, BI + addq $64, %rax +.endm + +.macro KERNEL16x2_SUB + vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ 
%ymm4,%ymm1,%ymm0 + vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + VFMADD231PD_ %ymm8,%ymm2,%ymm0 + vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm10,%ymm1,%ymm0 + VFMADD231PD_ %ymm11,%ymm2,%ymm0 + vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm13,%ymm1,%ymm0 + VFMADD231PD_ %ymm14,%ymm2,%ymm0 + addq $2, BI + addq $16, %rax +.endm + +.macro SAVE16x2 + + vbroadcastsd ALPHA, %ymm0 + + vmulpd %ymm0 , %ymm4 , %ymm4 + vmulpd %ymm0 , %ymm7 , %ymm7 + vmulpd %ymm0 , %ymm10, %ymm10 + vmulpd %ymm0 , %ymm13, %ymm13 + + vmulpd %ymm0 , %ymm5 , %ymm5 + vmulpd %ymm0 , %ymm8 , %ymm8 + vmulpd %ymm0 , %ymm11, %ymm11 + vmulpd %ymm0 , %ymm14, %ymm14 + +#if !defined(TRMMKERNEL) + + vaddpd (CO1), %ymm4,%ymm4 + vaddpd 4 * SIZE(CO1), %ymm7,%ymm7 + vaddpd 8 * SIZE(CO1), %ymm10,%ymm10 + vaddpd 12 * SIZE(CO1), %ymm13,%ymm13 + + vaddpd (CO1, LDC), %ymm5,%ymm5 + vaddpd 4 * SIZE(CO1, LDC), %ymm8,%ymm8 + vaddpd 8 * SIZE(CO1, LDC), %ymm11,%ymm11 + vaddpd 12 * SIZE(CO1, LDC), %ymm14,%ymm14 + +#endif + + vmovups %ymm4 , (CO1) + vmovups %ymm7 , 4 * SIZE(CO1) + vmovups %ymm10, 8 * SIZE(CO1) + vmovups %ymm13,12 * SIZE(CO1) + + vmovups %ymm5 , (CO1, LDC) + vmovups %ymm8 , 4 * SIZE(CO1, LDC) + vmovups %ymm11, 8 * SIZE(CO1, LDC) + vmovups %ymm14,12 * SIZE(CO1, LDC) + +.endm + + + +/*******************************************************************************************/ + +.macro KERNEL8x2_1 + prefetcht0 A_PR1(AO, %rax, SIZE) + vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + VFMADD231PD_ %ymm8,%ymm2,%ymm0 +.endm + +.macro KERNEL8x2_2 + prefetcht0 64+A_PR1(AO, %rax, SIZE) + vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + VFMADD231PD_ %ymm8,%ymm2,%ymm0 +.endm + +.macro KERNEL8x2_3 + prefetcht0 128+A_PR1(AO, %rax, SIZE) + vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + VFMADD231PD_ %ymm8,%ymm2,%ymm0 +.endm + +.macro KERNEL8x2_4 + prefetcht0 192+A_PR1(AO, %rax, SIZE) + vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + VFMADD231PD_ %ymm8,%ymm2,%ymm0 + addq $8, BI + addq $32, %rax +.endm + +.macro KERNEL8x2_SUB + vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + VFMADD231PD_ %ymm8,%ymm2,%ymm0 + addq $2, BI + addq $8 , %rax +.endm + +.macro SAVE8x2 + + vbroadcastsd ALPHA, %ymm0 + + vmulpd %ymm0 , %ymm4 , %ymm4 + vmulpd %ymm0 , %ymm7 , %ymm7 + + vmulpd %ymm0 , %ymm5 , %ymm5 + 
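
The SAVEmxn macros that follow each kernel loop implement the update C := alpha*(A*B) + C: broadcast ALPHA, scale every accumulator register, add the existing C values (skipped for TRMM kernels, where C is write-only), and store back. A short C-intrinsics sketch of one 4-wide slice of that epilogue, assuming the non-TRMM path and that beta has already been applied by the separate gemm_beta kernel (the dgemm_beta_skylakex.c added earlier in this patch):

#include <immintrin.h>

/* store one 4-double slice of C: c[0..3] = alpha*acc + c[0..3] */
static inline void save_slice(double *c, __m256d acc, const double *alpha)
{
	__m256d valpha = _mm256_broadcast_sd(alpha);	/* like vbroadcastsd ALPHA */
	acc = _mm256_mul_pd(valpha, acc);		/* vmulpd: scale by alpha */
#ifndef TRMMKERNEL
	acc = _mm256_add_pd(_mm256_loadu_pd(c), acc);	/* vaddpd (CO1): add old C */
#endif
	_mm256_storeu_pd(c, acc);			/* vmovups: write back */
}
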
vmulpd %ymm0 , %ymm8 , %ymm8 + +#if !defined(TRMMKERNEL) + + vaddpd (CO1), %ymm4,%ymm4 + vaddpd 4 * SIZE(CO1), %ymm7,%ymm7 + + vaddpd (CO1, LDC), %ymm5,%ymm5 + vaddpd 4 * SIZE(CO1, LDC), %ymm8,%ymm8 + +#endif + + vmovups %ymm4 , (CO1) + vmovups %ymm7 , 4 * SIZE(CO1) + + vmovups %ymm5 , (CO1, LDC) + vmovups %ymm8 , 4 * SIZE(CO1, LDC) + +.endm + + + +/*******************************************************************************************/ + +.macro KERNEL4x2_1 + prefetcht0 A_PR1(AO, %rax, SIZE) + vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 +.endm + +.macro KERNEL4x2_2 + vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 +.endm + +.macro KERNEL4x2_3 + prefetcht0 64+A_PR1(AO, %rax, SIZE) + vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 +.endm + +.macro KERNEL4x2_4 + vbroadcastsd 2 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd 3 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + addq $8, BI + addq $16, %rax +.endm + +.macro KERNEL4x2_SUB + vbroadcastsd -4 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vbroadcastsd -3 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PD_ %ymm5,%ymm2,%ymm0 + addq $2, BI + addq $4 , %rax +.endm + +.macro SAVE4x2 + + vbroadcastsd ALPHA, %ymm0 + + vmulpd %ymm0 , %ymm4 , %ymm4 + vmulpd %ymm0 , %ymm5 , %ymm5 + +#if !defined(TRMMKERNEL) + + vaddpd (CO1), %ymm4,%ymm4 + vaddpd (CO1, LDC), %ymm5,%ymm5 + +#endif + + vmovups %ymm4 , (CO1) + vmovups %ymm5 , (CO1, LDC) + +.endm + + +/*******************************************************************************************/ + +.macro KERNEL2x2_1 + prefetcht0 A_PR1(AO, %rax, SIZE) + vmovsd -4 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -3 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm8,%xmm1,%xmm0 + VFMADD231SD_ %xmm10,%xmm2,%xmm0 +.endm + +.macro KERNEL2x2_2 + vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -1 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm8,%xmm1,%xmm0 + VFMADD231SD_ %xmm10,%xmm2,%xmm0 +.endm + +.macro KERNEL2x2_3 + vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -28 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd 1 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd -27 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm8,%xmm1,%xmm0 + VFMADD231SD_ %xmm10,%xmm2,%xmm0 +.endm + +.macro KERNEL2x2_4 + vmovsd 2 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -26 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd 3 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd -25 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm8,%xmm1,%xmm0 + VFMADD231SD_ %xmm10,%xmm2,%xmm0 + addq $8, BI + addq $8, %rax +.endm + +.macro KERNEL2x2_SUB + vmovsd -4 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd 
-32 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -3 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm8,%xmm1,%xmm0 + VFMADD231SD_ %xmm10,%xmm2,%xmm0 + addq $2, BI + addq $2, %rax +.endm + +.macro SAVE2x2 + + vmovsd ALPHA, %xmm0 + + vmulsd %xmm0 , %xmm4 , %xmm4 + vmulsd %xmm0 , %xmm8 , %xmm8 + vmulsd %xmm0 , %xmm5 , %xmm5 + vmulsd %xmm0 , %xmm10, %xmm10 + +#if !defined(TRMMKERNEL) + + vaddsd (CO1), %xmm4,%xmm4 + vaddsd 1 * SIZE(CO1), %xmm8,%xmm8 + vaddsd (CO1, LDC), %xmm5,%xmm5 + vaddsd 1 * SIZE(CO1, LDC), %xmm10,%xmm10 + +#endif + + vmovsd %xmm4 , (CO1) + vmovsd %xmm8 , 1 * SIZE(CO1) + vmovsd %xmm5 , (CO1, LDC) + vmovsd %xmm10, 1 * SIZE(CO1, LDC) + +.endm + + +/*******************************************************************************************/ + +.macro KERNEL1x2_1 + vmovsd -4 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -3 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 +.endm + +.macro KERNEL1x2_2 + vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -1 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 +.endm + +.macro KERNEL1x2_3 + vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd 1 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 +.endm + +.macro KERNEL1x2_4 + vmovsd 2 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd 3 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + addq $8, BI + addq $4, %rax +.endm + +.macro KERNEL1x2_SUB + vmovsd -4 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -3 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SD_ %xmm5,%xmm2,%xmm0 + addq $2, BI + addq $1, %rax +.endm + +.macro SAVE1x2 + + vmovsd ALPHA, %xmm0 + + vmulsd %xmm0 , %xmm4 , %xmm4 + vmulsd %xmm0 , %xmm5 , %xmm5 + +#if !defined(TRMMKERNEL) + + vaddsd (CO1), %xmm4,%xmm4 + vaddsd (CO1, LDC), %xmm5,%xmm5 + +#endif + + vmovsd %xmm4 , (CO1) + vmovsd %xmm5 , (CO1, LDC) + +.endm + + +/*******************************************************************************************/ + +/******************************************************************************************* +* 1 line of N +*******************************************************************************************/ + +.macro KERNEL16x1_1 + vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm10,%ymm1,%ymm0 + vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm13,%ymm1,%ymm0 +.endm + +.macro KERNEL16x1_2 + vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm10,%ymm1,%ymm0 + vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm13,%ymm1,%ymm0 +.endm + +.macro KERNEL16x1_3 + vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1 + vmovups 0 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vmovups 4 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ 
%ymm7,%ymm1,%ymm0 + vmovups 8 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm10,%ymm1,%ymm0 + vmovups 12 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm13,%ymm1,%ymm0 +.endm + +.macro KERNEL16x1_4 + vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm1 + vmovups 16 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vmovups 20 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + vmovups 24 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm10,%ymm1,%ymm0 + vmovups 28 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm13,%ymm1,%ymm0 + addq $4, BI + addq $64, %rax +.endm + +.macro KERNEL16x1_SUB + vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm10,%ymm1,%ymm0 + vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm13,%ymm1,%ymm0 + addq $1, BI + addq $16, %rax +.endm + +.macro SAVE16x1 + + vbroadcastsd ALPHA, %ymm0 + + vmulpd %ymm0 , %ymm4 , %ymm4 + vmulpd %ymm0 , %ymm7 , %ymm7 + vmulpd %ymm0 , %ymm10, %ymm10 + vmulpd %ymm0 , %ymm13, %ymm13 + +#if !defined(TRMMKERNEL) + + vaddpd (CO1), %ymm4,%ymm4 + vaddpd 4 * SIZE(CO1), %ymm7,%ymm7 + vaddpd 8 * SIZE(CO1), %ymm10,%ymm10 + vaddpd 12 * SIZE(CO1), %ymm13,%ymm13 + +#endif + + vmovups %ymm4 , (CO1) + vmovups %ymm7 , 4 * SIZE(CO1) + vmovups %ymm10, 8 * SIZE(CO1) + vmovups %ymm13,12 * SIZE(CO1) + +.endm + + + +/*******************************************************************************************/ + +.macro KERNEL8x1_1 + vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 +.endm + +.macro KERNEL8x1_2 + vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 +.endm + +.macro KERNEL8x1_3 + vbroadcastsd 0 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vmovups -12 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 +.endm + +.macro KERNEL8x1_4 + vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -8 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vmovups -4 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + addq $4, BI + addq $32, %rax +.endm + +.macro KERNEL8x1_SUB + vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm7,%ymm1,%ymm0 + addq $1, BI + addq $8 , %rax +.endm + +.macro SAVE8x1 + + vbroadcastsd ALPHA, %ymm0 + + vmulpd %ymm0 , %ymm4 , %ymm4 + vmulpd %ymm0 , %ymm7 , %ymm7 + +#if !defined(TRMMKERNEL) + + vaddpd (CO1), %ymm4,%ymm4 + vaddpd 4 * SIZE(CO1), %ymm7,%ymm7 + +#endif + + vmovups %ymm4 , (CO1) + vmovups %ymm7 , 4 * SIZE(CO1) + +.endm + + + +/*******************************************************************************************/ + +.macro KERNEL4x1_1 + vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 +.endm + +.macro KERNEL4x1_2 + vbroadcastsd -1 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -28 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 +.endm + +.macro KERNEL4x1_3 + vbroadcastsd 0 * SIZE(BO, BI, SIZE), 
%ymm1 + vmovups -24 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 +.endm + +.macro KERNEL4x1_4 + vbroadcastsd 1 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -20 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + addq $4, BI + addq $16, %rax +.endm + +.macro KERNEL4x1_SUB + vbroadcastsd -2 * SIZE(BO, BI, SIZE), %ymm1 + vmovups -32 * SIZE(AO, %rax, SIZE), %ymm0 + VFMADD231PD_ %ymm4,%ymm1,%ymm0 + addq $1, BI + addq $4 , %rax +.endm + +.macro SAVE4x1 + + vbroadcastsd ALPHA, %ymm0 + + vmulpd %ymm0 , %ymm4 , %ymm4 + +#if !defined(TRMMKERNEL) + + vaddpd (CO1), %ymm4,%ymm4 + +#endif + + vmovups %ymm4 , (CO1) + +.endm + + +/*******************************************************************************************/ + +.macro KERNEL2x1_1 + vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm8,%xmm1,%xmm0 +.endm + +.macro KERNEL2x1_2 + vmovsd -1 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm8,%xmm1,%xmm0 +.endm + +.macro KERNEL2x1_3 + vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -28 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -27 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm8,%xmm1,%xmm0 +.endm + +.macro KERNEL2x1_4 + vmovsd 1 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -26 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -25 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm8,%xmm1,%xmm0 + addq $4, BI + addq $8, %rax +.endm + +.macro KERNEL2x1_SUB + vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm8,%xmm1,%xmm0 + addq $1, BI + addq $2 , %rax +.endm + +.macro SAVE2x1 + + vmovsd ALPHA, %xmm0 + + vmulsd %xmm0 , %xmm4 , %xmm4 + vmulsd %xmm0 , %xmm8 , %xmm8 + +#if !defined(TRMMKERNEL) + + vaddsd (CO1), %xmm4,%xmm4 + vaddsd 1 * SIZE(CO1), %xmm8,%xmm8 + +#endif + + vmovsd %xmm4 , (CO1) + vmovsd %xmm8 , 1 * SIZE(CO1) + +.endm + + +/*******************************************************************************************/ + +.macro KERNEL1x1_1 + vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 +.endm + +.macro KERNEL1x1_2 + vmovsd -1 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -31 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 +.endm + +.macro KERNEL1x1_3 + vmovsd 0 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -30 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 +.endm + +.macro KERNEL1x1_4 + vmovsd 1 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -29 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + addq $ 4, BI + addq $ 4, %rax +.endm + +.macro KERNEL1x1_SUB + vmovsd -2 * SIZE(BO, BI, SIZE), %xmm1 + vmovsd -32 * SIZE(AO, %rax, SIZE), %xmm0 + VFMADD231SD_ %xmm4,%xmm1,%xmm0 + addq $ 1, BI + addq $ 1 , %rax +.endm + +.macro SAVE1x1 + + vmovsd ALPHA, %xmm0 + + vmulsd %xmm0 , %xmm4 , %xmm4 + +#if !defined(TRMMKERNEL) + + vaddsd (CO1), %xmm4,%xmm4 + +#endif + + vmovsd %xmm4 , (CO1) + +.endm + + +/*******************************************************************************************/ + +#if !defined(TRMMKERNEL) + + + PROLOGUE + PROFCODE + + subq $STACKSIZE, %rsp + movq %rbx, (%rsp) + movq %rbp, 8(%rsp) + movq %r12, 16(%rsp) + movq %r13, 24(%rsp) + movq %r14, 32(%rsp) + movq %r15, 40(%rsp) 
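
The prologue below reserves 128 + L_BUFFER_SIZE bytes of packing buffer on the stack and then runs STACK_TOUCH. That macro exists because Windows commits stack memory one guard page at a time: after a large subq, each newly reserved 4 KiB page must be touched in order before it can be used, which is why the macro defined above is a chain of movl $0, N*4096(%rsp) stores. A sketch of the idea in C, for illustration only:

/* touch each freshly reserved stack page, nearest the old stack top
 * first, so the OS guard-page mechanism can commit them one by one */
static void stack_touch(volatile char *sp, unsigned long reserved)
{
	for (unsigned long off = reserved & ~4095UL; off > 0; off -= 4096)
		sp[off] = 0;
}
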
+ + vzeroupper + +#ifdef WINDOWS_ABI + movq %rdi, 48(%rsp) + movq %rsi, 56(%rsp) + movups %xmm6, 64(%rsp) + movups %xmm7, 80(%rsp) + movups %xmm8, 96(%rsp) + movups %xmm9, 112(%rsp) + movups %xmm10, 128(%rsp) + movups %xmm11, 144(%rsp) + movups %xmm12, 160(%rsp) + movups %xmm13, 176(%rsp) + movups %xmm14, 192(%rsp) + movups %xmm15, 208(%rsp) + + movq ARG1, OLD_M + movq ARG2, OLD_N + movq ARG3, OLD_K + movq OLD_A, A + movq OLD_B, B + movq OLD_C, C + movq OLD_LDC, LDC + + vmovaps %xmm3, %xmm0 + +#else + movq STACKSIZE + 8(%rsp), LDC + +#endif + + movq %rsp, SP # save old stack + subq $128 + L_BUFFER_SIZE, %rsp + andq $-4096, %rsp # align stack + + STACK_TOUCH + + cmpq $0, OLD_M + je .L999 + + cmpq $0, OLD_N + je .L999 + + cmpq $0, OLD_K + je .L999 + + movq OLD_M, M + movq OLD_N, N + movq OLD_K, K + + vmovsd %xmm0, ALPHA + + salq $BASE_SHIFT, LDC + + movq N, %rax + xorq %rdx, %rdx + movq $6, %rdi + divq %rdi // N / 6 + movq %rax, Ndiv6 // N / 6 + movq %rdx, Nmod6 // N % 6 + + + movq Ndiv6, J + cmpq $0, J + je .L2_0 + ALIGN_4 + +.L6_01: + // copy to sub buffer + movq K, %rax + salq $1,%rax // K * 2 ; read 2 values + movq B, BO1 + leaq (B,%rax, SIZE), BO2 // next offset to BO2 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + sarq $3 , %rax // K / 8 + jz .L6_01a_2 + ALIGN_4 + +.L6_01a_1: + + prefetcht0 512(BO1) + prefetcht0 512(BO2) + prefetchw 512(BO) + + + vmovups 0 * SIZE(BO1), %xmm0 + vmovups 2 * SIZE(BO1), %xmm2 + vmovups 4 * SIZE(BO1), %xmm4 + vmovups 6 * SIZE(BO1), %xmm6 + vmovsd 0 * SIZE(BO2), %xmm1 + vmovsd 2 * SIZE(BO2), %xmm3 + vmovsd 4 * SIZE(BO2), %xmm5 + vmovsd 6 * SIZE(BO2), %xmm7 + vmovups %xmm0, 0*SIZE(BO) + vmovsd %xmm1, 2*SIZE(BO) + vmovups %xmm2, 3*SIZE(BO) + vmovsd %xmm3, 5*SIZE(BO) + vmovups %xmm4, 6*SIZE(BO) + vmovsd %xmm5, 8*SIZE(BO) + vmovups %xmm6, 9*SIZE(BO) + vmovsd %xmm7,11*SIZE(BO) + addq $ 8*SIZE,BO1 + addq $ 8*SIZE,BO2 + addq $ 12*SIZE,BO + + vmovups 0 * SIZE(BO1), %xmm0 + vmovups 2 * SIZE(BO1), %xmm2 + vmovups 4 * SIZE(BO1), %xmm4 + vmovups 6 * SIZE(BO1), %xmm6 + vmovsd 0 * SIZE(BO2), %xmm1 + vmovsd 2 * SIZE(BO2), %xmm3 + vmovsd 4 * SIZE(BO2), %xmm5 + vmovsd 6 * SIZE(BO2), %xmm7 + vmovups %xmm0, 0*SIZE(BO) + vmovsd %xmm1, 2*SIZE(BO) + vmovups %xmm2, 3*SIZE(BO) + vmovsd %xmm3, 5*SIZE(BO) + vmovups %xmm4, 6*SIZE(BO) + vmovsd %xmm5, 8*SIZE(BO) + vmovups %xmm6, 9*SIZE(BO) + vmovsd %xmm7,11*SIZE(BO) + addq $ 8*SIZE,BO1 + addq $ 8*SIZE,BO2 + addq $ 12*SIZE,BO + + decq %rax + jnz .L6_01a_1 + + + +.L6_01a_2: + + movq K, %rax + andq $7, %rax // K % 8 + jz .L6_02c + ALIGN_4 + + +.L6_02b: + + vmovups 0 * SIZE(BO1), %xmm0 + vmovsd 0 * SIZE(BO2), %xmm2 + vmovups %xmm0, 0*SIZE(BO) + vmovsd %xmm2, 2*SIZE(BO) + addq $ 2*SIZE,BO1 + addq $ 2*SIZE,BO2 + addq $ 3*SIZE,BO + decq %rax + jnz .L6_02b + +.L6_02c: + + movq K, %rax + salq $1,%rax // K * 2 + leaq (B,%rax, SIZE), BO1 // next offset to BO1 + leaq (BO1,%rax, SIZE), BO2 // next offset to BO2 + leaq BUFFER2, BO // second buffer to BO + movq K, %rax + sarq $3 , %rax // K / 8 + jz .L6_02c_2 + ALIGN_4 + +.L6_02c_1: + + prefetcht0 512(BO2) + prefetchw 512(BO) + + vmovups 0 * SIZE(BO2), %xmm0 + vmovups 2 * SIZE(BO2), %xmm2 + vmovups 4 * SIZE(BO2), %xmm4 + vmovups 6 * SIZE(BO2), %xmm6 + vmovsd 1 * SIZE(BO1), %xmm1 + vmovsd 3 * SIZE(BO1), %xmm3 + vmovsd 5 * SIZE(BO1), %xmm5 + vmovsd 7 * SIZE(BO1), %xmm7 + vmovsd %xmm1, 0*SIZE(BO) + vmovups %xmm0, 1*SIZE(BO) + vmovsd %xmm3, 3*SIZE(BO) + vmovups %xmm2, 4*SIZE(BO) + vmovsd %xmm5, 6*SIZE(BO) + vmovups %xmm4, 7*SIZE(BO) + vmovsd %xmm7, 9*SIZE(BO) + vmovups %xmm6,10*SIZE(BO) + addq 
$8*SIZE,BO1 + addq $8*SIZE,BO2 + addq $12*SIZE,BO + + + vmovups 0 * SIZE(BO2), %xmm0 + vmovups 2 * SIZE(BO2), %xmm2 + vmovups 4 * SIZE(BO2), %xmm4 + vmovups 6 * SIZE(BO2), %xmm6 + vmovsd 1 * SIZE(BO1), %xmm1 + vmovsd 3 * SIZE(BO1), %xmm3 + vmovsd 5 * SIZE(BO1), %xmm5 + vmovsd 7 * SIZE(BO1), %xmm7 + vmovsd %xmm1, 0*SIZE(BO) + vmovups %xmm0, 1*SIZE(BO) + vmovsd %xmm3, 3*SIZE(BO) + vmovups %xmm2, 4*SIZE(BO) + vmovsd %xmm5, 6*SIZE(BO) + vmovups %xmm4, 7*SIZE(BO) + vmovsd %xmm7, 9*SIZE(BO) + vmovups %xmm6,10*SIZE(BO) + addq $8*SIZE,BO1 + addq $8*SIZE,BO2 + addq $12*SIZE,BO + + decq %rax + jnz .L6_02c_1 + + +.L6_02c_2: + + movq K, %rax + andq $7, %rax // K % 8 + jz .L6_03c + ALIGN_4 + +.L6_03b: + + vmovsd 1*SIZE(BO1), %xmm0 + vmovups 0*SIZE(BO2), %xmm1 + vmovsd %xmm0, 0*SIZE(BO) + vmovups %xmm1, 1*SIZE(BO) + addq $2*SIZE,BO1 + addq $2*SIZE,BO2 + addq $3*SIZE,BO + decq %rax + jnz .L6_03b + + +.L6_03c: + + movq BO2, B // next offset of B + +.L6_10: + movq C, CO1 + leaq (C, LDC, 2), C + leaq (C, LDC, 1), C // c += 3 * ldc + + + movq A, AO // aoffset = a + addq $16 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L6_20 + + ALIGN_4 + +.L6_11: + leaq BUFFER1, BO // first buffer to BO + addq $12 * SIZE, BO + + prefetcht0 (CO1) + prefetcht0 (CO1,LDC,1) + prefetcht0 (CO1,LDC,2) + prefetcht0 64(CO1) + prefetcht0 64(CO1,LDC,1) + prefetcht0 64(CO1,LDC,2) + + vzeroall + + movq K, %rax + + sarq $1, %rax // K / 8 + je .L6_16 + + ALIGN_5 + +.L6_12: +/* + prefetcht0 B_PR1(BO) + prefetcht0 B_PR1+64(BO) + prefetcht0 B_PR1+128(BO) +*/ + KERNEL16x3_SUBN + KERNEL16x3_SUBN +/* + KERNEL16x3_SUBN + KERNEL16x3_SUBN + + KERNEL16x3_SUBN + KERNEL16x3_SUBN + KERNEL16x3_SUBN + KERNEL16x3_SUBN +*/ + dec %rax + jne .L6_12 + +.L6_16: + movq K, %rax + + andq $1, %rax # if (k & 1) + je .L6_19 + + ALIGN_4 + +.L6_17: + + KERNEL16x3_SUBN + + dec %rax + jne .L6_17 + ALIGN_4 + + +.L6_19: + + SAVE16x3 + + addq $16 * SIZE, CO1 # coffset += 16 + decq I # i -- + jg .L6_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L6_20: + // Test rest of M + + testq $15, M + jz .L7_10 // to next 3 lines of N + + testq $8, M + jz .L6_21pre + ALIGN_4 + +/**************************************************************************/ + +.L6_20_1: + leaq BUFFER1, BO // first buffer to BO + addq $12 * SIZE, BO + + vzeroall + + movq K, %rax + + sarq $3, %rax + je .L6_20_6 + + ALIGN_4 + +.L6_20_2: + + KERNEL8x3_SUBN + KERNEL8x3_SUBN + KERNEL8x3_SUBN + KERNEL8x3_SUBN + + KERNEL8x3_SUBN + KERNEL8x3_SUBN + KERNEL8x3_SUBN + KERNEL8x3_SUBN + dec %rax + jne .L6_20_2 + ALIGN_4 + +.L6_20_6: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L6_20_9 + + + ALIGN_4 + +.L6_20_7: + + KERNEL8x3_SUBN + + dec %rax + jne .L6_20_7 + ALIGN_4 + + +.L6_20_9: + + SAVE8x3 + + addq $8 * SIZE, CO1 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L6_21pre: + + testq $4, M + jz .L6_30 + ALIGN_4 + +.L6_21: + leaq BUFFER1, BO // first buffer to BO + addq $12 * SIZE, BO + + vzeroall + + movq K, %rax + + sarq $3, %rax + je .L6_26 + + ALIGN_4 + +.L6_22: + + KERNEL4x3_SUBN + KERNEL4x3_SUBN + KERNEL4x3_SUBN + KERNEL4x3_SUBN + + KERNEL4x3_SUBN + KERNEL4x3_SUBN + KERNEL4x3_SUBN + KERNEL4x3_SUBN + dec %rax + jne .L6_22 + ALIGN_4 + +.L6_26: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L6_29 + + ALIGN_4 + +.L6_27: + + KERNEL4x3_SUBN + + dec %rax + jne .L6_27 + ALIGN_4 + + +.L6_29: + + 
SAVE4x3 + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L6_30: + testq $2, M + jz .L6_40 + + ALIGN_4 + +.L6_31: + leaq BUFFER1, BO // first buffer to BO + addq $12 * SIZE, BO + + vzeroall + + movq K, %rax + + sarq $3, %rax + je .L6_36 + ALIGN_4 + +.L6_32: + + KERNEL2x3_SUBN + KERNEL2x3_SUBN + KERNEL2x3_SUBN + KERNEL2x3_SUBN + + KERNEL2x3_SUBN + KERNEL2x3_SUBN + KERNEL2x3_SUBN + KERNEL2x3_SUBN + dec %rax + jne .L6_32 + ALIGN_4 + +.L6_36: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L6_39 + + ALIGN_4 + +.L6_37: + + KERNEL2x3_SUBN + + dec %rax + jne .L6_37 + ALIGN_4 + + +.L6_39: + + SAVE2x3 + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L6_40: + testq $1, M + jz .L7_10 // to next 3 lines of N + + ALIGN_4 + +.L6_41: + leaq BUFFER1, BO // first buffer to BO + addq $12 * SIZE, BO + + vzeroall + + movq K, %rax + + sarq $3,%rax + je .L6_46 + + ALIGN_4 + +.L6_42: + + KERNEL1x3_SUBN + KERNEL1x3_SUBN + KERNEL1x3_SUBN + KERNEL1x3_SUBN + + KERNEL1x3_SUBN + KERNEL1x3_SUBN + KERNEL1x3_SUBN + KERNEL1x3_SUBN + + dec %rax + jne .L6_42 + ALIGN_4 + +.L6_46: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L6_49 + + ALIGN_4 + +.L6_47: + + KERNEL1x3_SUBN + + dec %rax + jne .L6_47 + ALIGN_4 + + +.L6_49: + + SAVE1x3 + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + + + +/***************************************************************************************************************/ + +.L7_10: + movq C, CO1 + leaq (C, LDC, 2), C + leaq (C, LDC, 1), C // c += 3 * ldc + + + movq A, AO // aoffset = a + addq $16 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L7_20 + + ALIGN_4 + +.L7_11: + leaq BUFFER2, BO // second buffer to BO + addq $12 * SIZE, BO + + prefetcht0 (CO1) + prefetcht0 (CO1,LDC,1) + prefetcht0 (CO1,LDC,2) + prefetcht0 64(CO1) + prefetcht0 64(CO1,LDC,1) + prefetcht0 64(CO1,LDC,2) + + vzeroall + + movq K, %rax + + sarq $3, %rax // K / 8 + je .L7_16 + ALIGN_5 + +.L7_12: +/* + prefetcht0 B_PR1(BO) + prefetcht0 B_PR1+64(BO) + prefetcht0 B_PR1+128(BO) +*/ + KERNEL16x3_SUBN + KERNEL16x3_SUBN + KERNEL16x3_SUBN + KERNEL16x3_SUBN + + KERNEL16x3_SUBN + KERNEL16x3_SUBN + KERNEL16x3_SUBN + KERNEL16x3_SUBN + dec %rax + jne .L7_12 + ALIGN_4 + +.L7_16: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L7_19 + + ALIGN_5 + +.L7_17: + + KERNEL16x3_SUBN + + dec %rax + jne .L7_17 + + +.L7_19: + + SAVE16x3 + + addq $16 * SIZE, CO1 # coffset += 16 + decq I # i -- + jg .L7_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L7_20: + // Test rest of M + + testq $15, M + jz .L7_60 // to next 3 lines of N + + testq $8, M + jz .L7_21pre + ALIGN_4 + +/**************************************************************************/ + +.L7_20_1: + leaq BUFFER2, BO // first buffer to BO + addq $12 * SIZE, BO + + vzeroall + + movq K, %rax + + sarq $3, %rax + je .L7_20_6 + + ALIGN_4 + +.L7_20_2: + + KERNEL8x3_SUBN + KERNEL8x3_SUBN + KERNEL8x3_SUBN + KERNEL8x3_SUBN + + KERNEL8x3_SUBN + KERNEL8x3_SUBN + KERNEL8x3_SUBN + KERNEL8x3_SUBN + + dec %rax + jne .L7_20_2 + ALIGN_4 + +.L7_20_6: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L7_20_9 + + ALIGN_4 + +.L7_20_7: + + KERNEL8x3_SUBN + + dec %rax + jne .L7_20_7 + ALIGN_4 + +.L7_20_9: + + SAVE8x3 + + addq $8 * SIZE, CO1 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L7_21pre: + + testq $4, M + jz .L7_30 + ALIGN_4 + +.L7_21: + leaq BUFFER2, BO // second 
buffer to BO + addq $12 * SIZE, BO + + vzeroall + + movq K, %rax + + sarq $3, %rax + je .L7_26 + + ALIGN_4 + +.L7_22: + + KERNEL4x3_SUBN + KERNEL4x3_SUBN + KERNEL4x3_SUBN + KERNEL4x3_SUBN + + KERNEL4x3_SUBN + KERNEL4x3_SUBN + KERNEL4x3_SUBN + KERNEL4x3_SUBN + + dec %rax + jne .L7_22 + ALIGN_4 + +.L7_26: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L7_29 + + ALIGN_4 + +.L7_27: + + KERNEL4x3_SUBN + + dec %rax + jne .L7_27 + ALIGN_4 + + +.L7_29: + + SAVE4x3 + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L7_30: + testq $2, M + jz .L7_40 + + ALIGN_4 + +.L7_31: + leaq BUFFER2, BO // second buffer to BO + addq $12 * SIZE, BO + + vzeroall + + movq K, %rax + + sarq $3, %rax + je .L7_36 + + ALIGN_4 + +.L7_32: + + KERNEL2x3_SUBN + KERNEL2x3_SUBN + KERNEL2x3_SUBN + KERNEL2x3_SUBN + + KERNEL2x3_SUBN + KERNEL2x3_SUBN + KERNEL2x3_SUBN + KERNEL2x3_SUBN + + dec %rax + jne .L7_32 + ALIGN_4 + +.L7_36: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L7_39 + + ALIGN_4 + +.L7_37: + + KERNEL2x3_SUBN + + dec %rax + jne .L7_37 + ALIGN_4 + + +.L7_39: + + SAVE2x3 + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L7_40: + testq $1, M + jz .L7_60 // to next 3 lines of N + + ALIGN_4 + +.L7_41: + leaq BUFFER2, BO // second buffer to BO + addq $12 * SIZE, BO + + vzeroall + + movq K, %rax + + sarq $3, %rax + je .L7_46 + + ALIGN_4 + +.L7_42: + KERNEL1x3_SUBN + KERNEL1x3_SUBN + KERNEL1x3_SUBN + KERNEL1x3_SUBN + + KERNEL1x3_SUBN + KERNEL1x3_SUBN + KERNEL1x3_SUBN + KERNEL1x3_SUBN + + dec %rax + jne .L7_42 + ALIGN_4 + +.L7_46: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L7_49 + + ALIGN_4 + +.L7_47: + + KERNEL1x3_SUBN + + dec %rax + jne .L7_47 + ALIGN_4 + + +.L7_49: + + SAVE1x3 + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + + +.L7_60: + + decq J // j -- + jg .L6_01 + + +.L2_0: + cmpq $0, Nmod6 // N % 6 == 0 + je .L999 + +/************************************************************************************************ +* Loop for Nmod6 / 2 > 0 +*************************************************************************************************/ + + movq Nmod6, J + sarq $1, J // j = j / 2 + je .L1_0 + ALIGN_4 + +.L2_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + sarq $2, %rax // K / 4 + jz .L2_01b + ALIGN_4 + +.L2_01a: + prefetcht0 512(BO1) + prefetchw 512(BO) + + vmovups (BO1), %xmm0 + vmovups 2*SIZE(BO1), %xmm1 + vmovups 4*SIZE(BO1), %xmm2 + vmovups 6*SIZE(BO1), %xmm3 + + vmovups %xmm0, (BO) + vmovups %xmm1, 2*SIZE(BO) + vmovups %xmm2, 4*SIZE(BO) + vmovups %xmm3, 6*SIZE(BO) + + addq $8*SIZE,BO1 + addq $8*SIZE,BO + decq %rax + jnz .L2_01a + + +.L2_01b: + + movq K, %rax + andq $3, %rax // K % 4 + jz .L2_02d + ALIGN_4 + +.L2_02c: + + vmovups (BO1), %xmm0 + vmovups %xmm0, (BO) + addq $2*SIZE,BO1 + addq $2*SIZE,BO + decq %rax + jnz .L2_02c + +.L2_02d: + + movq BO1, B // next offset of B + +.L2_10: + movq C, CO1 + leaq (C, LDC, 2), C // c += 2 * ldc + + + movq A, AO // aoffset = a + addq $32 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L2_20 + + ALIGN_4 + +.L2_11: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax // K = K - ( K % 8 ) + je .L2_16 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_12: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL16x2_1 + KERNEL16x2_2 + KERNEL16x2_3 + KERNEL16x2_4 + + 
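
These two-column loops use a common OpenBLAS idiom: AO and BO are first advanced past the end of the packed panels (leaq (AO, %rax, SIZE), AO), then the counters are negated (negq %rax; negq BI), so every KERNEL16x2_* access uses a negative offset that counts up toward zero and the loop can exit on a plain flag test (je/jl). A scalar C sketch of the same idiom, illustrative only (it ignores the packed 16x2 panel layout the real kernel works on):

/* dot product written with the past-the-end pointer / negative-index
 * trick: the induction variable runs -k, -k+1, ..., -1 and the loop
 * condition is just a sign test, mirroring the jl/je exits above */
static double dot_negidx(const double *a, const double *b, long k)
{
	const double *ae = a + k;	/* point one element past the end */
	const double *be = b + k;
	double acc = 0.0;
	for (long i = -k; i < 0; i++)
		acc += ae[i] * be[i];
	return acc;
}
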
prefetcht0 B_PR1(BO,BI,8) + KERNEL16x2_1 + KERNEL16x2_2 + KERNEL16x2_3 + KERNEL16x2_4 + + je .L2_16 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL16x2_1 + KERNEL16x2_2 + KERNEL16x2_3 + KERNEL16x2_4 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL16x2_1 + KERNEL16x2_2 + KERNEL16x2_3 + KERNEL16x2_4 + + je .L2_16 + + jmp .L2_12 + ALIGN_4 + +.L2_16: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L2_19 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_17: + + KERNEL16x2_SUB + + jl .L2_17 + ALIGN_4 + + +.L2_19: + + SAVE16x2 + + addq $16 * SIZE, CO1 # coffset += 16 + decq I # i -- + jg .L2_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L2_20: + // Test rest of M + + testq $15, M + jz .L2_60 // to next 3 lines of N + + testq $8, M + jz .L2_21pre + ALIGN_4 + +/**************************************************************************/ + +.L2_20_1: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L2_20_6 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_20_2: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x2_1 + KERNEL8x2_2 + KERNEL8x2_3 + KERNEL8x2_4 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x2_1 + KERNEL8x2_2 + KERNEL8x2_3 + KERNEL8x2_4 + + je .L2_20_6 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x2_1 + KERNEL8x2_2 + KERNEL8x2_3 + KERNEL8x2_4 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x2_1 + KERNEL8x2_2 + KERNEL8x2_3 + KERNEL8x2_4 + + je .L2_20_6 + + jmp .L2_20_2 + ALIGN_4 + +.L2_20_6: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L2_20_9 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_20_7: + + KERNEL8x2_SUB + + jl .L2_20_7 + ALIGN_4 + + +.L2_20_9: + + SAVE8x2 + + addq $8 * SIZE, CO1 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L2_21pre: + + testq $4, M + jz .L2_30 + ALIGN_4 + +.L2_21: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L2_26 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 1 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_22: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x2_1 + KERNEL4x2_2 + KERNEL4x2_3 + KERNEL4x2_4 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x2_1 + KERNEL4x2_2 + KERNEL4x2_3 + KERNEL4x2_4 + + je .L2_26 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x2_1 + KERNEL4x2_2 + KERNEL4x2_3 + KERNEL4x2_4 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x2_1 + KERNEL4x2_2 + KERNEL4x2_3 + KERNEL4x2_4 + + je .L2_26 + + jmp .L2_22 + ALIGN_4 + +.L2_26: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L2_29 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq 
%rax + ALIGN_4 + +.L2_27: + + KERNEL4x2_SUB + + jl .L2_27 + ALIGN_4 + + +.L2_29: + + SAVE4x2 + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L2_30: + testq $2, M + jz .L2_40 + + ALIGN_4 + +.L2_31: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L2_36 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_32: + + KERNEL2x2_1 + KERNEL2x2_2 + KERNEL2x2_3 + KERNEL2x2_4 + + KERNEL2x2_1 + KERNEL2x2_2 + KERNEL2x2_3 + KERNEL2x2_4 + + je .L2_36 + + KERNEL2x2_1 + KERNEL2x2_2 + KERNEL2x2_3 + KERNEL2x2_4 + + KERNEL2x2_1 + KERNEL2x2_2 + KERNEL2x2_3 + KERNEL2x2_4 + + je .L2_36 + + jmp .L2_32 + ALIGN_4 + +.L2_36: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L2_39 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_37: + + KERNEL2x2_SUB + + jl .L2_37 + ALIGN_4 + + +.L2_39: + + SAVE2x2 + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L2_40: + testq $1, M + jz .L2_60 // to next 2 lines of N + + ALIGN_4 + +.L2_41: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L2_46 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_42: + + KERNEL1x2_1 + KERNEL1x2_2 + KERNEL1x2_3 + KERNEL1x2_4 + + KERNEL1x2_1 + KERNEL1x2_2 + KERNEL1x2_3 + KERNEL1x2_4 + + je .L2_46 + + KERNEL1x2_1 + KERNEL1x2_2 + KERNEL1x2_3 + KERNEL1x2_4 + + KERNEL1x2_1 + KERNEL1x2_2 + KERNEL1x2_3 + KERNEL1x2_4 + + je .L2_46 + + jmp .L2_42 + ALIGN_4 + +.L2_46: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L2_49 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_47: + + KERNEL1x2_SUB + + jl .L2_47 + ALIGN_4 + + +.L2_49: + + SAVE1x2 + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + +.L2_60: + + decq J // j -- + jg .L2_01 // next 2 lines of N + + + +.L1_0: + +/************************************************************************************************ +* Loop for Nmod6 % 2 > 0 +*************************************************************************************************/ + + movq Nmod6, J + andq $1, J // j % 2 + je .L999 + ALIGN_4 + +.L1_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + ALIGN_4 + +.L1_02b: + + vmovsd (BO1), %xmm0 + vmovsd %xmm0, (BO) + addq $1*SIZE,BO1 + addq $1*SIZE,BO + decq %rax + jnz .L1_02b + +.L1_02c: + + movq BO1, B // next offset of B + +.L1_10: + movq C, CO1 + leaq (C, LDC, 1), C // c += 1 * ldc + + + movq A, AO // aoffset = a + addq $32 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L1_20 + + ALIGN_4 + +.L1_11: + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax // K = K - ( K % 8 ) + je .L1_16 + movq %rax, BI // Index for BO + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_12: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL16x1_1 + KERNEL16x1_2 
+ KERNEL16x1_3 + KERNEL16x1_4 + + KERNEL16x1_1 + KERNEL16x1_2 + KERNEL16x1_3 + KERNEL16x1_4 + + je .L1_16 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL16x1_1 + KERNEL16x1_2 + KERNEL16x1_3 + KERNEL16x1_4 + + KERNEL16x1_1 + KERNEL16x1_2 + KERNEL16x1_3 + KERNEL16x1_4 + + je .L1_16 + + jmp .L1_12 + ALIGN_4 + +.L1_16: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L1_19 + + movq %rax, BI // Index for BO + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_17: + + KERNEL16x1_SUB + + jl .L1_17 + ALIGN_4 + + +.L1_19: + + SAVE16x1 + + addq $16 * SIZE, CO1 # coffset += 16 + decq I # i -- + jg .L1_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L1_20: + // Test rest of M + + testq $15, M + jz .L999 + + testq $8, M + jz .L1_21pre + ALIGN_4 + +/**************************************************************************/ + +.L1_20_1: + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L1_20_6 + movq %rax, BI // Index for BO + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_20_2: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x1_1 + KERNEL8x1_2 + KERNEL8x1_3 + KERNEL8x1_4 + + KERNEL8x1_1 + KERNEL8x1_2 + KERNEL8x1_3 + KERNEL8x1_4 + + je .L1_20_6 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x1_1 + KERNEL8x1_2 + KERNEL8x1_3 + KERNEL8x1_4 + + KERNEL8x1_1 + KERNEL8x1_2 + KERNEL8x1_3 + KERNEL8x1_4 + + je .L1_20_6 + + jmp .L1_20_2 + ALIGN_4 + +.L1_20_6: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L1_20_9 + + movq %rax, BI // Index for BO + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_20_7: + + KERNEL8x1_SUB + + jl .L1_20_7 + ALIGN_4 + + +.L1_20_9: + + SAVE8x1 + + addq $8 * SIZE, CO1 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L1_21pre: + + testq $4, M + jz .L1_30 + ALIGN_4 + +.L1_21: + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L1_26 + movq %rax, BI // Index for BO + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_22: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x1_1 + KERNEL4x1_2 + KERNEL4x1_3 + KERNEL4x1_4 + + KERNEL4x1_1 + KERNEL4x1_2 + KERNEL4x1_3 + KERNEL4x1_4 + + je .L1_26 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x1_1 + KERNEL4x1_2 + KERNEL4x1_3 + KERNEL4x1_4 + + KERNEL4x1_1 + KERNEL4x1_2 + KERNEL4x1_3 + KERNEL4x1_4 + + je .L1_26 + + jmp .L1_22 + ALIGN_4 + +.L1_26: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L1_29 + + movq %rax, BI // Index for BO + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_27: + + KERNEL4x1_SUB + + jl .L1_27 + ALIGN_4 + + +.L1_29: + + SAVE4x1 + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L1_30: + testq $2, M + jz .L1_40 + + ALIGN_4 + +.L1_31: + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L1_36 + movq %rax, BI // Index for BO + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, 
SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_32: + + KERNEL2x1_1 + KERNEL2x1_2 + KERNEL2x1_3 + KERNEL2x1_4 + + KERNEL2x1_1 + KERNEL2x1_2 + KERNEL2x1_3 + KERNEL2x1_4 + + je .L1_36 + + KERNEL2x1_1 + KERNEL2x1_2 + KERNEL2x1_3 + KERNEL2x1_4 + + KERNEL2x1_1 + KERNEL2x1_2 + KERNEL2x1_3 + KERNEL2x1_4 + + je .L1_36 + + jmp .L1_32 + ALIGN_4 + +.L1_36: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L1_39 + + movq %rax, BI // Index for BO + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_37: + + KERNEL2x1_SUB + + jl .L1_37 + ALIGN_4 + + +.L1_39: + + SAVE2x1 + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L1_40: + testq $1, M + jz .L999 + + ALIGN_4 + +.L1_41: + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L1_46 + movq %rax, BI // Index for BO + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_42: + + KERNEL1x1_1 + KERNEL1x1_2 + KERNEL1x1_3 + KERNEL1x1_4 + + KERNEL1x1_1 + KERNEL1x1_2 + KERNEL1x1_3 + KERNEL1x1_4 + + je .L1_46 + + KERNEL1x1_1 + KERNEL1x1_2 + KERNEL1x1_3 + KERNEL1x1_4 + + KERNEL1x1_1 + KERNEL1x1_2 + KERNEL1x1_3 + KERNEL1x1_4 + + je .L1_46 + + jmp .L1_42 + ALIGN_4 + +.L1_46: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L1_49 + + movq %rax, BI // Index for BO + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_47: + + KERNEL1x1_SUB + + jl .L1_47 + ALIGN_4 + + +.L1_49: + + SAVE1x1 + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + +.L999: + movq SP, %rsp + movq (%rsp), %rbx + movq 8(%rsp), %rbp + movq 16(%rsp), %r12 + movq 24(%rsp), %r13 + movq 32(%rsp), %r14 + movq 40(%rsp), %r15 + +#ifdef WINDOWS_ABI + movq 48(%rsp), %rdi + movq 56(%rsp), %rsi + movups 64(%rsp), %xmm6 + movups 80(%rsp), %xmm7 + movups 96(%rsp), %xmm8 + movups 112(%rsp), %xmm9 + movups 128(%rsp), %xmm10 + movups 144(%rsp), %xmm11 + movups 160(%rsp), %xmm12 + movups 176(%rsp), %xmm13 + movups 192(%rsp), %xmm14 + movups 208(%rsp), %xmm15 +#endif + + addq $STACKSIZE, %rsp + ret + + EPILOGUE + + +#else +/************************************************************************************* +* TRMM Kernel +*************************************************************************************/ + + + PROLOGUE + PROFCODE + + subq $STACKSIZE, %rsp + movq %rbx, (%rsp) + movq %rbp, 8(%rsp) + movq %r12, 16(%rsp) + movq %r13, 24(%rsp) + movq %r14, 32(%rsp) + movq %r15, 40(%rsp) + + vzeroupper + +#ifdef WINDOWS_ABI + movq %rdi, 48(%rsp) + movq %rsi, 56(%rsp) + movups %xmm6, 64(%rsp) + movups %xmm7, 80(%rsp) + movups %xmm8, 96(%rsp) + movups %xmm9, 112(%rsp) + movups %xmm10, 128(%rsp) + movups %xmm11, 144(%rsp) + movups %xmm12, 160(%rsp) + movups %xmm13, 176(%rsp) + movups %xmm14, 192(%rsp) + movups %xmm15, 208(%rsp) + + movq ARG1, OLD_M + movq ARG2, OLD_N + movq ARG3, OLD_K + movq OLD_A, A + movq OLD_B, B + movq OLD_C, C + movq OLD_LDC, LDC +#ifdef TRMMKERNEL + movsd OLD_OFFSET, %xmm12 +#endif + vmovaps %xmm3, %xmm0 + +#else + movq STACKSIZE + 8(%rsp), LDC +#ifdef TRMMKERNEL + movsd STACKSIZE + 16(%rsp), %xmm12 +#endif + +#endif + + movq %rsp, SP # save old stack + subq $128 + L_BUFFER_SIZE, %rsp + andq $-4096, %rsp # align stack + + STACK_TOUCH + + cmpq $0, OLD_M + je .L999 + + cmpq $0, OLD_N + je .L999 + + cmpq $0, OLD_K + je .L999 + + movq OLD_M, M + movq OLD_N, N + movq OLD_K, K + + vmovsd %xmm0, ALPHA + + salq $BASE_SHIFT, LDC + + movq N, 
%rax + xorq %rdx, %rdx + movq $2, %rdi + divq %rdi // N / 6 + movq %rax, Ndiv6 // N / 6 + movq %rdx, Nmod6 // N % 6 + + + +#ifdef TRMMKERNEL + vmovsd %xmm12, OFFSET + vmovsd %xmm12, KK +#ifndef LEFT + negq KK +#endif +#endif + + movq Ndiv6, J + cmpq $0, J + je .L1_0 + ALIGN_4 + +.L2_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + sarq $2, %rax // K / 4 + jz .L2_01b + ALIGN_4 + +.L2_01a: + prefetcht0 512(BO1) + prefetchw 512(BO) + + vmovups (BO1), %xmm0 + vmovups 2*SIZE(BO1), %xmm1 + vmovups 4*SIZE(BO1), %xmm2 + vmovups 6*SIZE(BO1), %xmm3 + + vmovups %xmm0, (BO) + vmovups %xmm1, 2*SIZE(BO) + vmovups %xmm2, 4*SIZE(BO) + vmovups %xmm3, 6*SIZE(BO) + + addq $8*SIZE,BO1 + addq $8*SIZE,BO + decq %rax + jnz .L2_01a + + +.L2_01b: + + movq K, %rax + andq $3, %rax // K % 4 + jz .L2_02d + ALIGN_4 + +.L2_02c: + + vmovups (BO1), %xmm0 + vmovups %xmm0, (BO) + addq $2*SIZE,BO1 + addq $2*SIZE,BO + decq %rax + jnz .L2_02c + +.L2_02d: + + movq BO1, B // next offset of B + +.L2_10: + movq C, CO1 + leaq (C, LDC, 2), C // c += 2 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $32 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L2_20 + + ALIGN_4 + +.L2_11: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $16, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax // K = K - ( K % 8 ) + je .L2_16 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_12: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL16x2_1 + KERNEL16x2_2 + KERNEL16x2_3 + KERNEL16x2_4 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL16x2_1 + KERNEL16x2_2 + KERNEL16x2_3 + KERNEL16x2_4 + + je .L2_16 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL16x2_1 + KERNEL16x2_2 + KERNEL16x2_3 + KERNEL16x2_4 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL16x2_1 + KERNEL16x2_2 + KERNEL16x2_3 + KERNEL16x2_4 + + je .L2_16 + + jmp .L2_12 + ALIGN_4 + +.L2_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_19 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_17: + + KERNEL16x2_SUB + + jl .L2_17 + ALIGN_4 + + +.L2_19: + + SAVE16x2 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, 
SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $16, KK +#endif + + addq $16 * SIZE, CO1 # coffset += 16 + decq I # i -- + jg .L2_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L2_20: + // Test rest of M + + testq $15, M + jz .L2_60 // to next 3 lines of N + + testq $8, M + jz .L2_21pre + ALIGN_4 + +/**************************************************************************/ + +.L2_20_1: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $8, %rax // number of values in A +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L2_20_6 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_20_2: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x2_1 + KERNEL8x2_2 + KERNEL8x2_3 + KERNEL8x2_4 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x2_1 + KERNEL8x2_2 + KERNEL8x2_3 + KERNEL8x2_4 + + je .L2_20_6 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x2_1 + KERNEL8x2_2 + KERNEL8x2_3 + KERNEL8x2_4 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x2_1 + KERNEL8x2_2 + KERNEL8x2_3 + KERNEL8x2_4 + + je .L2_20_6 + + jmp .L2_20_2 + ALIGN_4 + +.L2_20_6: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_20_9 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_20_7: + + KERNEL8x2_SUB + + jl .L2_20_7 + ALIGN_4 + + +.L2_20_9: + + SAVE8x2 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $8, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L2_21pre: + + testq $4, M + jz .L2_30 + ALIGN_4 + +.L2_21: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index 
for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in A +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L2_26 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 1 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_22: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x2_1 + KERNEL4x2_2 + KERNEL4x2_3 + KERNEL4x2_4 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x2_1 + KERNEL4x2_2 + KERNEL4x2_3 + KERNEL4x2_4 + + je .L2_26 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x2_1 + KERNEL4x2_2 + KERNEL4x2_3 + KERNEL4x2_4 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x2_1 + KERNEL4x2_2 + KERNEL4x2_3 + KERNEL4x2_4 + + je .L2_26 + + jmp .L2_22 + ALIGN_4 + +.L2_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_29 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_27: + + KERNEL4x2_SUB + + jl .L2_27 + ALIGN_4 + + +.L2_29: + + SAVE4x2 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $4, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L2_30: + testq $2, M + jz .L2_40 + + ALIGN_4 + +.L2_31: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L2_36 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_32: + + KERNEL2x2_1 + KERNEL2x2_2 + KERNEL2x2_3 + KERNEL2x2_4 + + KERNEL2x2_1 + KERNEL2x2_2 + KERNEL2x2_3 + KERNEL2x2_4 + + je .L2_36 + + KERNEL2x2_1 + KERNEL2x2_2 + KERNEL2x2_3 + KERNEL2x2_4 + + KERNEL2x2_1 + KERNEL2x2_2 + KERNEL2x2_3 + KERNEL2x2_4 + + je .L2_36 + + jmp .L2_32 + 
ALIGN_4 + +.L2_36: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_39 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_37: + + KERNEL2x2_SUB + + jl .L2_37 + ALIGN_4 + + +.L2_39: + + SAVE2x2 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L2_40: + testq $1, M + jz .L2_60 // to next 2 lines of N + + ALIGN_4 + +.L2_41: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax + je .L2_46 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_42: + + KERNEL1x2_1 + KERNEL1x2_2 + KERNEL1x2_3 + KERNEL1x2_4 + + KERNEL1x2_1 + KERNEL1x2_2 + KERNEL1x2_3 + KERNEL1x2_4 + + je .L2_46 + + KERNEL1x2_1 + KERNEL1x2_2 + KERNEL1x2_3 + KERNEL1x2_4 + + KERNEL1x2_1 + KERNEL1x2_2 + KERNEL1x2_3 + KERNEL1x2_4 + + je .L2_46 + + jmp .L2_42 + ALIGN_4 + +.L2_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_49 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_47: + + KERNEL1x2_SUB + + jl .L2_47 + ALIGN_4 + + +.L2_49: + + SAVE1x2 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + + + + +.L2_60: +#if defined(TRMMKERNEL) && !defined(LEFT) + addq $2, KK +#endif + + decq J // j -- + jg .L2_01 // next 2 lines of N + + + +.L1_0: + +/************************************************************************************************ +* Loop for Nmod6 % 2 > 0 +*************************************************************************************************/ + + movq Nmod6, J + andq $1, J // j % 2 + je .L999 + ALIGN_4 + +.L1_01: 
+ // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + ALIGN_4 + +.L1_02b: + + vmovsd (BO1), %xmm0 + vmovsd %xmm0, (BO) + addq $1*SIZE,BO1 + addq $1*SIZE,BO + decq %rax + jnz .L1_02b + +.L1_02c: + + movq BO1, B // next offset of B + +.L1_10: + movq C, CO1 + leaq (C, LDC, 1), C // c += 1 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $32 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L1_20 + + ALIGN_4 + +.L1_11: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $16, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax // K = K - ( K % 8 ) + je .L1_16 + movq %rax, BI // Index for BO + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_12: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL16x1_1 + KERNEL16x1_2 + KERNEL16x1_3 + KERNEL16x1_4 + + KERNEL16x1_1 + KERNEL16x1_2 + KERNEL16x1_3 + KERNEL16x1_4 + + je .L1_16 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL16x1_1 + KERNEL16x1_2 + KERNEL16x1_3 + KERNEL16x1_4 + + KERNEL16x1_1 + KERNEL16x1_2 + KERNEL16x1_3 + KERNEL16x1_4 + + je .L1_16 + + jmp .L1_12 + ALIGN_4 + +.L1_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_19 + + movq %rax, BI // Index for BO + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_17: + + KERNEL16x1_SUB + + jl .L1_17 + ALIGN_4 + + +.L1_19: + + SAVE16x1 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $16, KK +#endif + + addq $16 * SIZE, CO1 # coffset += 16 + decq I # i -- + jg .L1_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L1_20: + // Test rest of M + + testq $15, M + jz .L999 + + testq $8, M + jz .L1_21pre + ALIGN_4 + +/**************************************************************************/ + +.L1_20_1: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + 
leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $8, %rax // number of values in A +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L1_20_6 + movq %rax, BI // Index for BO + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_20_2: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x1_1 + KERNEL8x1_2 + KERNEL8x1_3 + KERNEL8x1_4 + + KERNEL8x1_1 + KERNEL8x1_2 + KERNEL8x1_3 + KERNEL8x1_4 + + je .L1_20_6 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL8x1_1 + KERNEL8x1_2 + KERNEL8x1_3 + KERNEL8x1_4 + + KERNEL8x1_1 + KERNEL8x1_2 + KERNEL8x1_3 + KERNEL8x1_4 + + je .L1_20_6 + + jmp .L1_20_2 + ALIGN_4 + +.L1_20_6: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_20_9 + + movq %rax, BI // Index for BO + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_20_7: + + KERNEL8x1_SUB + + jl .L1_20_7 + ALIGN_4 + + +.L1_20_9: + + SAVE8x1 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $8, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L1_21pre: + + testq $4, M + jz .L1_30 + ALIGN_4 + +.L1_21: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in A +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L1_26 + movq %rax, BI // Index for BO + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_22: + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x1_1 + KERNEL4x1_2 + KERNEL4x1_3 + KERNEL4x1_4 + + KERNEL4x1_1 + KERNEL4x1_2 + KERNEL4x1_3 + KERNEL4x1_4 + + je .L1_26 + + prefetcht0 B_PR1(BO,BI,8) + KERNEL4x1_1 + KERNEL4x1_2 + KERNEL4x1_3 + KERNEL4x1_4 + + KERNEL4x1_1 + KERNEL4x1_2 + KERNEL4x1_3 + KERNEL4x1_4 + + je .L1_26 + + jmp .L1_22 + ALIGN_4 + +.L1_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_29 + + movq %rax, BI // Index for BO + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_27: 
+ + KERNEL4x1_SUB + + jl .L1_27 + ALIGN_4 + + +.L1_29: + + SAVE4x1 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $4, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L1_30: + testq $2, M + jz .L1_40 + + ALIGN_4 + +.L1_31: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L1_36 + movq %rax, BI // Index for BO + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_32: + + KERNEL2x1_1 + KERNEL2x1_2 + KERNEL2x1_3 + KERNEL2x1_4 + + KERNEL2x1_1 + KERNEL2x1_2 + KERNEL2x1_3 + KERNEL2x1_4 + + je .L1_36 + + KERNEL2x1_1 + KERNEL2x1_2 + KERNEL2x1_3 + KERNEL2x1_4 + + KERNEL2x1_1 + KERNEL2x1_2 + KERNEL2x1_3 + KERNEL2x1_4 + + je .L1_36 + + jmp .L1_32 + ALIGN_4 + +.L1_36: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_39 + + movq %rax, BI // Index for BO + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_37: + + KERNEL2x1_SUB + + jl .L1_37 + ALIGN_4 + + +.L1_39: + + SAVE2x1 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L1_40: + testq $1, M + jz .L999 + + ALIGN_4 + +.L1_41: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $2 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax + je .L1_46 + movq %rax, BI // Index for BO + + leaq (AO, %rax, SIZE), AO + leaq (BO, 
BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_42: + + KERNEL1x1_1 + KERNEL1x1_2 + KERNEL1x1_3 + KERNEL1x1_4 + + KERNEL1x1_1 + KERNEL1x1_2 + KERNEL1x1_3 + KERNEL1x1_4 + + je .L1_46 + + KERNEL1x1_1 + KERNEL1x1_2 + KERNEL1x1_3 + KERNEL1x1_4 + + KERNEL1x1_1 + KERNEL1x1_2 + KERNEL1x1_3 + KERNEL1x1_4 + + je .L1_46 + + jmp .L1_42 + ALIGN_4 + +.L1_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_49 + + movq %rax, BI // Index for BO + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_47: + + KERNEL1x1_SUB + + jl .L1_47 + ALIGN_4 + + +.L1_49: + + SAVE1x1 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + +.L999: + movq SP, %rsp + movq (%rsp), %rbx + movq 8(%rsp), %rbp + movq 16(%rsp), %r12 + movq 24(%rsp), %r13 + movq 32(%rsp), %r14 + movq 40(%rsp), %r15 + +#ifdef WINDOWS_ABI + movq 48(%rsp), %rdi + movq 56(%rsp), %rsi + movups 64(%rsp), %xmm6 + movups 80(%rsp), %xmm7 + movups 96(%rsp), %xmm8 + movups 112(%rsp), %xmm9 + movups 128(%rsp), %xmm10 + movups 144(%rsp), %xmm11 + movups 160(%rsp), %xmm12 + movups 176(%rsp), %xmm13 + movups 192(%rsp), %xmm14 + movups 208(%rsp), %xmm15 +#endif + + addq $STACKSIZE, %rsp + ret + + EPILOGUE + + + + + +#endif diff --git a/kernel/x86_64/dgemm_kernel_4x8_skylakex.c b/kernel/x86_64/dgemm_kernel_4x8_skylakex.c new file mode 100644 index 000000000..6257e569e --- /dev/null +++ b/kernel/x86_64/dgemm_kernel_4x8_skylakex.c @@ -0,0 +1,1565 @@ +/********************************************************************************* +Copyright (c) 2015, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+**********************************************************************************/
+
+/*
+ * This file is based on dgemm_kernel_4x8_haswell.s (original copyright above).
+ * The content was translated from assembly to C with intrinsics, significantly
+ * simplified, and AVX512 support was added by Arjan van de Ven
+ */
+
+
+#include "common.h"
+#include <immintrin.h>
+
+
+/*******************************************************************************************
+* Macro definitions
+*******************************************************************************************/
+
+
+/******************************************************************************************/
+
+
+#define INIT4x8() \
+	ymm4 = _mm256_setzero_pd(); \
+	ymm5 = _mm256_setzero_pd(); \
+	ymm6 = _mm256_setzero_pd(); \
+	ymm7 = _mm256_setzero_pd(); \
+	ymm8 = _mm256_setzero_pd(); \
+	ymm9 = _mm256_setzero_pd(); \
+	ymm10 = _mm256_setzero_pd(); \
+	ymm11 = _mm256_setzero_pd(); \
+
+
+#define KERNEL4x8_SUB() \
+	ymm0 = _mm256_loadu_pd(AO - 16); \
+/*	ymm0 [ A B C D ] */ \
+	ymm1 = _mm256_loadu_pd(BO - 12); \
+	ymm2 = _mm256_loadu_pd(BO - 8); \
+/*	ymm1 [ 1 2 3 4 ] */ \
+/*	ymm2 [ 5 6 7 8 ] */ \
+	\
+	ymm4 += ymm0 * ymm1; \
+/*	ymm4 += [ A*1 | B*2 | C*3 | D*4 ] */ \
+	ymm8 += ymm0 * ymm2; \
+/*	ymm8 += [ A*5 | B*6 | C*7 | D*8 ] */ \
+	\
+	ymm0 = _mm256_permute4x64_pd(ymm0, 0xb1); \
+/*	ymm0 [ B A D C ] */ \
+	ymm5 += ymm0 * ymm1; \
+/*	ymm5 += [ B*1 | A*2 | D*3 | C*4 ] */ \
+	ymm9 += ymm0 * ymm2; \
+/*	ymm9 += [ B*5 | A*6 | D*7 | C*8 ] */ \
+	\
+	ymm0 = _mm256_permute4x64_pd(ymm0, 0x1b); \
+/*	ymm0 [ C D A B ] */ \
+	ymm6 += ymm0 * ymm1; \
+/*	ymm6 += [ C*1 | D*2 | A*3 | B*4 ] */ \
+	ymm10 += ymm0 * ymm2; \
+/*	ymm10 += [ C*5 | D*6 | A*7 | B*8 ] */ \
+	\
+	ymm0 = _mm256_permute4x64_pd(ymm0, 0xb1); \
+/*	ymm0 [ D C B A ] */ \
+	ymm7 += ymm0 * ymm1; \
+/*	ymm7 += [ D*1 | C*2 | B*3 | A*4 ] */ \
+	ymm11 += ymm0 * ymm2; \
+/*	ymm11 += [ D*5 | C*6 | B*7 | A*8 ] */ \
+	AO += 4; \
+	BO += 8;
+
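[Editor's note: the three _mm256_permute4x64_pd steps above avoid broadcasting each element of B. Instead of eight broadcasts per k-step, the four A values are rotated through the lanes, so each accumulator holds one product per lane in a scrambled order that SAVE4x8 later untangles. The following standalone check of that schedule is a sketch in plain C, not part of the patch; all names in it are illustrative.]

    #include <stdio.h>

    int main(void)
    {
        /* one k-step: a = 4 values of A, b = 4 values of B (columns 1-4) */
        const double a[4] = {1.0, 2.0, 3.0, 4.0};
        const double b[4] = {5.0, 6.0, 7.0, 8.0};
        /* lane order of ymm0 across the four multiplies: identity, then
           permute 0xb1 (swap within pairs), then 0x1b, then 0xb1 again */
        const int perm[4][4] = {
            {0, 1, 2, 3},       /* ymm4: [ A B C D ] */
            {1, 0, 3, 2},       /* ymm5: [ B A D C ] */
            {2, 3, 0, 1},       /* ymm6: [ C D A B ] */
            {3, 2, 1, 0},       /* ymm7: [ D C B A ] */
        };
        double acc[4][4], got[4][4] = {{0}};

        for (int r = 0; r < 4; r++)             /* the four lane-wise multiplies */
            for (int j = 0; j < 4; j++)
                acc[r][j] = a[perm[r][j]] * b[j];

        for (int r = 0; r < 4; r++)             /* what SAVE4x8 unscrambles */
            for (int j = 0; j < 4; j++)
                got[perm[r][j]][j] += acc[r][j];

        for (int i = 0; i < 4; i++)             /* compare against a * b^T */
            for (int j = 0; j < 4; j++)
                if (got[i][j] != a[i] * b[j]) {
                    printf("mismatch at (%d,%d)\n", i, j);
                    return 1;
                }
        printf("rotation schedule covers all 16 products exactly once\n");
        return 0;
    }

Each column of perm is a permutation of {0,1,2,3}, which is why three in-register permutes per k-step suffice in place of eight broadcasts.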
+
+#define SAVE4x8(ALPHA) \
+	ymm0 = _mm256_set1_pd(ALPHA); \
+	ymm4 *= ymm0; \
+	ymm5 *= ymm0; \
+	ymm6 *= ymm0; \
+	ymm7 *= ymm0; \
+	ymm8 *= ymm0; \
+	ymm9 *= ymm0; \
+	ymm10 *= ymm0; \
+	ymm11 *= ymm0; \
+	\
+/* Entry values: */ \
+/*	ymm4 = a [ A*1 | B*2 | C*3 | D*4 ] */ \
+/*	ymm5 = a [ B*1 | A*2 | D*3 | C*4 ] */ \
+/*	ymm6 = a [ C*1 | D*2 | A*3 | B*4 ] */ \
+/*	ymm7 = a [ D*1 | C*2 | B*3 | A*4 ] */ \
+/*	ymm8 = a [ A*5 | B*6 | C*7 | D*8 ] */ \
+/*	ymm9 = a [ B*5 | A*6 | D*7 | C*8 ] */ \
+/*	ymm10 = a [ C*5 | D*6 | A*7 | B*8 ] */ \
+/*	ymm11 = a [ D*5 | C*6 | B*7 | A*8 ] */ \
+	\
+	ymm5 = _mm256_permute4x64_pd(ymm5, 0xb1); \
+/*	ymm5 = a [ A*2 | B*1 | C*4 | D*3 ] */ \
+	ymm7 = _mm256_permute4x64_pd(ymm7, 0xb1); \
+/*	ymm7 = a [ C*2 | D*1 | A*4 | B*3 ] */ \
+	\
+	ymm0 = _mm256_blend_pd(ymm4, ymm5, 0x0a); \
+	ymm1 = _mm256_blend_pd(ymm4, ymm5, 0x05); \
+/*	ymm0 = a [ A*1 | B*1 | C*3 | D*3 ] */ \
+/*	ymm1 = a [ A*2 | B*2 | C*4 | D*4 ] */ \
+	ymm2 = _mm256_blend_pd(ymm6, ymm7, 0x0a); \
+	ymm3 = _mm256_blend_pd(ymm6, ymm7, 0x05); \
+/*	ymm2 = a [ C*1 | D*1 | A*3 | B*3 ] */ \
+/*	ymm3 = a [ C*2 | D*2 | A*4 | B*4 ] */ \
+	\
+	ymm2 = _mm256_permute4x64_pd(ymm2, 0x1b); \
+	ymm3 = _mm256_permute4x64_pd(ymm3, 0x1b); \
+/*	ymm2 = a [ B*3 | A*3 | D*1 | C*1 ] */ \
+/*	ymm3 = a [ B*4 | A*4 | D*2 | C*2 ] */ \
+	ymm2 = _mm256_permute4x64_pd(ymm2, 0xb1); \
+	ymm3 = _mm256_permute4x64_pd(ymm3, 0xb1); \
+/*	ymm2 = a [ A*3 | B*3 | C*1 | D*1 ] */ \
+/*	ymm3 = a [ A*4 | B*4 | C*2 | D*2 ] */ \
+	\
+	ymm4 = _mm256_blend_pd(ymm2, ymm0, 0x03); \
+	ymm5 = _mm256_blend_pd(ymm3, ymm1, 0x03); \
+/*	ymm4 = a [ A*1 | B*1 | C*1 | D*1 ] */ \
+/*	ymm5 = a [ A*2 | B*2 | C*2 | D*2 ] */ \
+	ymm6 = _mm256_blend_pd(ymm0, ymm2, 0x03); \
+	ymm7 = _mm256_blend_pd(ymm1, ymm3, 0x03); \
+/*	ymm6 = a [ A*3 | B*3 | C*3 | D*3 ] */ \
+/*	ymm7 = a [ A*4 | B*4 | C*4 | D*4 ] */ \
+	\
+	ymm4 += _mm256_loadu_pd(CO1 + (0 * ldc)); \
+	ymm5 += _mm256_loadu_pd(CO1 + (1 * ldc)); \
+	ymm6 += _mm256_loadu_pd(CO1 + (2 * ldc)); \
+	ymm7 += _mm256_loadu_pd(CO1 + (3 * ldc)); \
+	_mm256_storeu_pd(CO1 + (0 * ldc), ymm4); \
+	_mm256_storeu_pd(CO1 + (1 * ldc), ymm5); \
+	_mm256_storeu_pd(CO1 + (2 * ldc), ymm6); \
+	_mm256_storeu_pd(CO1 + (3 * ldc), ymm7); \
+	\
+/* same un-permute sequence for the accumulators of columns 5 - 8 */ \
+	ymm9 = _mm256_permute4x64_pd(ymm9, 0xb1); \
+	ymm11 = _mm256_permute4x64_pd(ymm11, 0xb1); \
+	\
+	ymm0 = _mm256_blend_pd(ymm8, ymm9, 0x0a); \
+	ymm1 = _mm256_blend_pd(ymm8, ymm9, 0x05); \
+	ymm2 = _mm256_blend_pd(ymm10, ymm11, 0x0a); \
+	ymm3 = _mm256_blend_pd(ymm10, ymm11, 0x05); \
+	\
+	ymm2 = _mm256_permute4x64_pd(ymm2, 0x1b); \
+	ymm3 = _mm256_permute4x64_pd(ymm3, 0x1b); \
+	ymm2 = _mm256_permute4x64_pd(ymm2, 0xb1); \
+	ymm3 = _mm256_permute4x64_pd(ymm3, 0xb1); \
+	\
+	ymm4 = _mm256_blend_pd(ymm2, ymm0, 0x03); \
+	ymm5 = _mm256_blend_pd(ymm3, ymm1, 0x03); \
+	ymm6 = _mm256_blend_pd(ymm0, ymm2, 0x03); \
+	ymm7 = _mm256_blend_pd(ymm1, ymm3, 0x03); \
+	\
+	ymm4 += _mm256_loadu_pd(CO1 + (4 * ldc)); \
+	ymm5 += _mm256_loadu_pd(CO1 + (5 * ldc)); \
+	ymm6 += _mm256_loadu_pd(CO1 + (6 * ldc)); \
+	ymm7 += _mm256_loadu_pd(CO1 + (7 * ldc)); \
+	_mm256_storeu_pd(CO1 + (4 * ldc), ymm4); \
+	_mm256_storeu_pd(CO1 + (5 * ldc), ymm5); \
+	_mm256_storeu_pd(CO1 + (6 * ldc), ymm6); \
+	_mm256_storeu_pd(CO1 + (7 * ldc), ymm7); \
+	\
+	CO1 += 4;
+
+/******************************************************************************************/
+
+#define INIT2x8() \
+	xmm4 = _mm_setzero_pd(); \
+	xmm5 = _mm_setzero_pd(); \
+	xmm6 = _mm_setzero_pd(); \
+	xmm7 = _mm_setzero_pd(); \
+	xmm8 = _mm_setzero_pd(); \
+	xmm9 = _mm_setzero_pd(); \
+	xmm10 = _mm_setzero_pd(); \
+	xmm11 = _mm_setzero_pd(); \
+
+
+#define KERNEL2x8_SUB() \
+	xmm0 = _mm_loadu_pd(AO - 16); \
+	xmm1 = _mm_set1_pd(*(BO - 12)); \
+	xmm2 = _mm_set1_pd(*(BO - 11)); \
+	xmm3 = _mm_set1_pd(*(BO - 10)); \
+	xmm4 += xmm0 * xmm1; \
+	xmm1 = _mm_set1_pd(*(BO - 9)); \
+	xmm5 += xmm0 * xmm2; \
+	xmm2 = _mm_set1_pd(*(BO - 8)); \
+	xmm6 += xmm0 * xmm3; \
+	xmm3 = _mm_set1_pd(*(BO - 7)); \
+	xmm7 += xmm0 * xmm1; \
+	xmm1 = _mm_set1_pd(*(BO - 6)); \
+	xmm8 += xmm0 * xmm2; \
+	xmm2 = _mm_set1_pd(*(BO - 5)); \
+	xmm9 += xmm0 * xmm3; \
+	xmm10 += xmm0 * xmm1; \
+	xmm11 += xmm0 * xmm2; \
+	BO += 8; \
+	AO += 2;
+
+#define SAVE2x8(ALPHA) \
+	xmm0 = _mm_set1_pd(ALPHA); \
+	xmm4 *= xmm0; \
+	xmm5 *= xmm0; \
+	xmm6 *= xmm0; \
+	xmm7 *= xmm0; \
+	xmm8 *= xmm0; \
+	xmm9 *= xmm0; \
+	xmm10 *= xmm0; \
+	xmm11 *= xmm0; \
+	\
+	xmm4 += _mm_loadu_pd(CO1 + (0 * ldc)); \
+	xmm5 += _mm_loadu_pd(CO1 + (1 * ldc)); \
+	xmm6 += _mm_loadu_pd(CO1 + (2 * ldc)); \
+	xmm7 += _mm_loadu_pd(CO1 + (3 * ldc)); \
+	\
+	_mm_storeu_pd(CO1 + (0 * ldc), xmm4); \
+	_mm_storeu_pd(CO1 + (1 * ldc), xmm5); \
+	_mm_storeu_pd(CO1 + (2 * ldc), xmm6); \
+	_mm_storeu_pd(CO1 + (3 * ldc), xmm7); \
+	\
+	xmm8 += _mm_loadu_pd(CO1 + (4 * ldc)); \
+	xmm9 += _mm_loadu_pd(CO1 + (5 * ldc)); \
+	xmm10 += _mm_loadu_pd(CO1 + (6 * ldc)); \
+	xmm11 += _mm_loadu_pd(CO1 + (7 * ldc)); \
+	_mm_storeu_pd(CO1 + (4 * ldc), xmm8); \
+	_mm_storeu_pd(CO1 + (5 * ldc), xmm9); \
+	_mm_storeu_pd(CO1 + (6 * ldc), xmm10); \
+	_mm_storeu_pd(CO1 + (7 * ldc), xmm11); \
+	CO1 += 2;
+
+
+
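[Editor's note: every tile width in this file follows the same INIT / KERNEL*_SUB / SAVE triad: INIT clears the accumulator tile, one KERNEL*_SUB consumes a single k-step from the packed A and B panels, and SAVE scales by alpha and adds the tile into C. The following scalar sketch of the pattern these macros vectorize is not part of the patch; names such as tile4x8 and packed_a are illustrative.]

    #include <stddef.h>

    /* C[0..3, 0..7] += alpha * (K x 4 panel of A) * (K x 8 panel of B) */
    static void tile4x8(size_t K, double alpha,
                        const double *packed_a,   /* 4 values per k-step */
                        const double *packed_b,   /* 8 values per k-step */
                        double *C, size_t ldc)    /* column-major C tile */
    {
        double acc[8][4] = {{0}};                 /* INIT4x8() */

        for (size_t k = 0; k < K; k++)            /* one KERNEL4x8_SUB() per k */
            for (size_t j = 0; j < 8; j++)
                for (size_t i = 0; i < 4; i++)
                    acc[j][i] += packed_a[4 * k + i] * packed_b[8 * k + j];

        for (size_t j = 0; j < 8; j++)            /* SAVE4x8(alpha) */
            for (size_t i = 0; i < 4; i++)
                C[j * ldc + i] += alpha * acc[j][i];
    }

The intrinsic versions keep acc entirely in registers (ymm4-ymm11 for the 4x8 tile) and pay for the rotated accumulator layout once per tile in SAVE rather than once per k-step in the kernel.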
+/******************************************************************************************/ + +#define INIT1x8() \ + dbl4 = 0; \ + dbl5 = 0; \ + dbl6 = 0; \ + dbl7 = 0; \ + dbl8 = 0; \ + dbl9 = 0; \ + dbl10 = 0; \ + dbl11 = 0; + + +#define KERNEL1x8_SUB() \ + dbl0 = *(AO - 16); \ + dbl1 = *(BO - 12); \ + dbl2 = *(BO - 11); \ + dbl3 = *(BO - 10); \ + dbl4 += dbl0 * dbl1; \ + dbl1 = *(BO - 9); \ + dbl5 += dbl0 * dbl2; \ + dbl2 = *(BO - 8); \ + dbl6 += dbl0 * dbl3; \ + dbl3 = *(BO - 7); \ + dbl7 += dbl0 * dbl1; \ + dbl1 = *(BO - 6); \ + dbl8 += dbl0 * dbl2; \ + dbl2 = *(BO - 5); \ + dbl9 += dbl0 * dbl3; \ + dbl10 += dbl0 * dbl1; \ + dbl11 += dbl0 * dbl2; \ + BO += 8; \ + AO += 1; + + +#define SAVE1x8(ALPHA) \ + dbl0 = ALPHA; \ + dbl4 *= dbl0; \ + dbl5 *= dbl0; \ + dbl6 *= dbl0; \ + dbl7 *= dbl0; \ + dbl8 *= dbl0; \ + dbl9 *= dbl0; \ + dbl10 *= dbl0; \ + dbl11 *= dbl0; \ + \ + dbl4 += *(CO1 + (0 * ldc)); \ + dbl5 += *(CO1 + (1 * ldc)); \ + dbl6 += *(CO1 + (2 * ldc)); \ + dbl7 += *(CO1 + (3 * ldc)); \ + *(CO1 + (0 * ldc)) = dbl4; \ + *(CO1 + (1 * ldc)) = dbl5; \ + *(CO1 + (2 * ldc)) = dbl6; \ + *(CO1 + (3 * ldc)) = dbl7; \ + \ + dbl8 += *(CO1 + (4 * ldc)); \ + dbl9 += *(CO1 + (5 * ldc)); \ + dbl10 += *(CO1 + (6 * ldc)); \ + dbl11 += *(CO1 + (7 * ldc)); \ + *(CO1 + (4 * ldc)) = dbl8; \ + *(CO1 + (5 * ldc)) = dbl9; \ + *(CO1 + (6 * ldc)) = dbl10; \ + *(CO1 + (7 * ldc)) = dbl11; \ + \ + CO1 += 1; + + + + + + +/******************************************************************************************/ + +#define INIT4x4() \ + ymm4 = _mm256_setzero_pd(); \ + ymm5 = _mm256_setzero_pd(); \ + ymm6 = _mm256_setzero_pd(); \ + ymm7 = _mm256_setzero_pd(); \ + + +#define KERNEL4x4_SUB() \ + ymm0 = _mm256_loadu_pd(AO - 16); \ + ymm1 = _mm256_broadcastsd_pd(_mm_load_sd(BO - 12)); \ + \ + ymm4 += ymm0 * ymm1; \ + \ + ymm1 = _mm256_broadcastsd_pd(_mm_load_sd(BO - 11)); \ + ymm5 += ymm0 * ymm1; \ + \ + ymm1 = _mm256_broadcastsd_pd(_mm_load_sd(BO - 10)); \ + ymm6 += ymm0 * ymm1; \ + \ + ymm1 = _mm256_broadcastsd_pd(_mm_load_sd(BO - 9)); \ + ymm7 += ymm0 * ymm1; \ + AO += 4; \ + BO += 4; + + +#define SAVE4x4(ALPHA) \ + ymm0 = _mm256_set1_pd(ALPHA); \ + ymm4 *= ymm0; \ + ymm5 *= ymm0; \ + ymm6 *= ymm0; \ + ymm7 *= ymm0; \ + \ + ymm4 += _mm256_loadu_pd(CO1 + (0 * ldc)); \ + ymm5 += _mm256_loadu_pd(CO1 + (1 * ldc)); \ + ymm6 += _mm256_loadu_pd(CO1 + (2 * ldc)); \ + ymm7 += _mm256_loadu_pd(CO1 + (3 * ldc)); \ + _mm256_storeu_pd(CO1 + (0 * ldc), ymm4); \ + _mm256_storeu_pd(CO1 + (1 * ldc), ymm5); \ + _mm256_storeu_pd(CO1 + (2 * ldc), ymm6); \ + _mm256_storeu_pd(CO1 + (3 * ldc), ymm7); \ + \ + CO1 += 4; + + +/******************************************************************************************/ +/******************************************************************************************/ + +#define INIT2x4() \ + xmm4 = _mm_setzero_pd(); \ + xmm5 = _mm_setzero_pd(); \ + xmm6 = _mm_setzero_pd(); \ + xmm7 = _mm_setzero_pd(); \ + + + +#define KERNEL2x4_SUB() \ + xmm0 = _mm_loadu_pd(AO - 16); \ + xmm1 = _mm_set1_pd(*(BO - 12)); \ + xmm2 = _mm_set1_pd(*(BO - 11)); \ + xmm3 = _mm_set1_pd(*(BO - 10)); \ + xmm4 += xmm0 * xmm1; \ + xmm1 = _mm_set1_pd(*(BO - 9)); \ + xmm5 += xmm0 * xmm2; \ + xmm6 += xmm0 * xmm3; \ + xmm7 += xmm0 * xmm1; \ + BO += 4; \ + AO += 2; + + + +#define SAVE2x4(ALPHA) \ + xmm0 = _mm_set1_pd(ALPHA); \ + xmm4 *= xmm0; \ + xmm5 *= xmm0; \ + xmm6 *= xmm0; \ + xmm7 *= xmm0; \ + \ + xmm4 += _mm_loadu_pd(CO1 + (0 * ldc)); \ + xmm5 += _mm_loadu_pd(CO1 + (1 * ldc)); \ + xmm6 += _mm_loadu_pd(CO1 + (2 * ldc)); \ 
+ xmm7 += _mm_loadu_pd(CO1 + (3 * ldc)); \ + \ + _mm_storeu_pd(CO1 + (0 * ldc), xmm4); \ + _mm_storeu_pd(CO1 + (1 * ldc), xmm5); \ + _mm_storeu_pd(CO1 + (2 * ldc), xmm6); \ + _mm_storeu_pd(CO1 + (3 * ldc), xmm7); \ + \ + CO1 += 2; + +/******************************************************************************************/ +/******************************************************************************************/ + +#define INIT1x4() \ + dbl4 = 0; \ + dbl5 = 0; \ + dbl6 = 0; \ + dbl7 = 0; \ + +#define KERNEL1x4_SUB() \ + dbl0 = *(AO - 16); \ + dbl1 = *(BO - 12); \ + dbl2 = *(BO - 11); \ + dbl3 = *(BO - 10); \ + dbl8 = *(BO - 9); \ + \ + dbl4 += dbl0 * dbl1; \ + dbl5 += dbl0 * dbl2; \ + dbl6 += dbl0 * dbl3; \ + dbl7 += dbl0 * dbl8; \ + BO += 4; \ + AO += 1; + + +#define SAVE1x4(ALPHA) \ + dbl0 = ALPHA; \ + dbl4 *= dbl0; \ + dbl5 *= dbl0; \ + dbl6 *= dbl0; \ + dbl7 *= dbl0; \ + \ + dbl4 += *(CO1 + (0 * ldc)); \ + dbl5 += *(CO1 + (1 * ldc)); \ + dbl6 += *(CO1 + (2 * ldc)); \ + dbl7 += *(CO1 + (3 * ldc)); \ + *(CO1 + (0 * ldc)) = dbl4; \ + *(CO1 + (1 * ldc)) = dbl5; \ + *(CO1 + (2 * ldc)) = dbl6; \ + *(CO1 + (3 * ldc)) = dbl7; \ + \ + \ + CO1 += 1; + + +/******************************************************************************************/ +/******************************************************************************************/ + +#define INIT8x4() \ + ymm10 = _mm256_setzero_pd(); \ + ymm11 = _mm256_setzero_pd(); \ + ymm12 = _mm256_setzero_pd(); \ + ymm13 = _mm256_setzero_pd(); \ + ymm14 = _mm256_setzero_pd(); \ + ymm15 = _mm256_setzero_pd(); \ + ymm16 = _mm256_setzero_pd(); \ + ymm17 = _mm256_setzero_pd(); \ + + +#define KERNEL8x4_SUB() \ + ymm0 = _mm256_loadu_pd(AO - 16); \ + ymm1 = _mm256_loadu_pd(AO - 12); \ + ymm2 = _mm256_set1_pd(*(BO - 12)); \ + ymm3 = _mm256_set1_pd(*(BO - 11)); \ + ymm4 = _mm256_set1_pd(*(BO - 10)); \ + ymm5 = _mm256_set1_pd(*(BO - 9)); \ + ymm10 += ymm0 * ymm2; \ + ymm11 += ymm1 * ymm2; \ + ymm12 += ymm0 * ymm3; \ + ymm13 += ymm1 * ymm3; \ + ymm14 += ymm0 * ymm4; \ + ymm15 += ymm1 * ymm4; \ + ymm16 += ymm0 * ymm5; \ + ymm17 += ymm1 * ymm5; \ + BO += 4; \ + AO += 8; + + + +#define SAVE8x4(ALPHA) \ + ymm0 = _mm256_set1_pd(ALPHA); \ + ymm10 *= ymm0; \ + ymm11 *= ymm0; \ + ymm12 *= ymm0; \ + ymm13 *= ymm0; \ + ymm14 *= ymm0; \ + ymm15 *= ymm0; \ + ymm16 *= ymm0; \ + ymm17 *= ymm0; \ + \ + ymm10 += _mm256_loadu_pd(CO1); \ + ymm11 += _mm256_loadu_pd(CO1 + 4); \ + ymm12 += _mm256_loadu_pd(CO1 + (ldc)); \ + ymm13 += _mm256_loadu_pd(CO1 + (ldc) + 4); \ + ymm14 += _mm256_loadu_pd(CO1 + (ldc*2)); \ + ymm15 += _mm256_loadu_pd(CO1 + (ldc*2) + 4); \ + ymm16 += _mm256_loadu_pd(CO1 + (ldc*3)); \ + ymm17 += _mm256_loadu_pd(CO1 + (ldc*3) + 4); \ + \ + _mm256_storeu_pd(CO1, ymm10); \ + _mm256_storeu_pd(CO1 + 4, ymm11); \ + _mm256_storeu_pd(CO1 + ldc, ymm12); \ + _mm256_storeu_pd(CO1 + ldc + 4, ymm13); \ + _mm256_storeu_pd(CO1 + ldc*2, ymm14); \ + _mm256_storeu_pd(CO1 + ldc*2 + 4, ymm15); \ + _mm256_storeu_pd(CO1 + ldc*3, ymm16); \ + _mm256_storeu_pd(CO1 + ldc*3 + 4, ymm17); \ + \ + CO1 += 8; + + +/******************************************************************************************/ +/******************************************************************************************/ +#define INIT8x2() \ + ymm4 = _mm256_setzero_pd(); \ + ymm5 = _mm256_setzero_pd(); \ + ymm6 = _mm256_setzero_pd(); \ + ymm7 = _mm256_setzero_pd(); \ + + +#define KERNEL8x2_SUB() \ + ymm0 = _mm256_loadu_pd(AO - 16); \ + ymm1 = _mm256_loadu_pd(AO - 12); \ + ymm2 = _mm256_set1_pd(*(BO - 12)); \ + 
ymm3 = _mm256_set1_pd(*(BO - 11)); \ + ymm4 += ymm0 * ymm2; \ + ymm5 += ymm1 * ymm2; \ + ymm6 += ymm0 * ymm3; \ + ymm7 += ymm1 * ymm3; \ + BO += 2; \ + AO += 8; + + + +#define SAVE8x2(ALPHA) \ + ymm0 = _mm256_set1_pd(ALPHA); \ + ymm4 *= ymm0; \ + ymm5 *= ymm0; \ + ymm6 *= ymm0; \ + ymm7 *= ymm0; \ + \ + ymm4 += _mm256_loadu_pd(CO1); \ + ymm5 += _mm256_loadu_pd(CO1 + 4); \ + ymm6 += _mm256_loadu_pd(CO1 + (ldc)); \ + ymm7 += _mm256_loadu_pd(CO1 + (ldc) + 4); \ + \ + _mm256_storeu_pd(CO1, ymm4); \ + _mm256_storeu_pd(CO1 + 4, ymm5); \ + _mm256_storeu_pd(CO1 + ldc, ymm6); \ + _mm256_storeu_pd(CO1 + ldc + 4, ymm7); \ + \ + CO1 += 8; + + +/******************************************************************************************/ +/******************************************************************************************/ +#define INIT4x2() \ + xmm4 = _mm_setzero_pd(); \ + xmm5 = _mm_setzero_pd(); \ + xmm6 = _mm_setzero_pd(); \ + xmm7 = _mm_setzero_pd(); \ + + +#define KERNEL4x2_SUB() \ + xmm0 = _mm_loadu_pd(AO - 16); \ + xmm1 = _mm_loadu_pd(AO - 14); \ + xmm2 = _mm_set1_pd(*(BO - 12)); \ + xmm3 = _mm_set1_pd(*(BO - 11)); \ + xmm4 += xmm0 * xmm2; \ + xmm5 += xmm1 * xmm2; \ + xmm6 += xmm0 * xmm3; \ + xmm7 += xmm1 * xmm3; \ + BO += 2; \ + AO += 4; + + + +#define SAVE4x2(ALPHA) \ + xmm0 = _mm_set1_pd(ALPHA); \ + xmm4 *= xmm0; \ + xmm5 *= xmm0; \ + xmm6 *= xmm0; \ + xmm7 *= xmm0; \ + \ + xmm4 += _mm_loadu_pd(CO1); \ + xmm5 += _mm_loadu_pd(CO1 + 2); \ + xmm6 += _mm_loadu_pd(CO1 + (ldc)); \ + xmm7 += _mm_loadu_pd(CO1 + (ldc) + 2); \ + \ + _mm_storeu_pd(CO1, xmm4); \ + _mm_storeu_pd(CO1 + 2, xmm5); \ + _mm_storeu_pd(CO1 + ldc, xmm6); \ + _mm_storeu_pd(CO1 + ldc + 2, xmm7); \ + \ + CO1 += 4; + + +/******************************************************************************************/ +/******************************************************************************************/ + +#define INIT2x2() \ + xmm4 = _mm_setzero_pd(); \ + xmm6 = _mm_setzero_pd(); \ + + + +#define KERNEL2x2_SUB() \ + xmm2 = _mm_set1_pd(*(BO - 12)); \ + xmm0 = _mm_loadu_pd(AO - 16); \ + xmm3 = _mm_set1_pd(*(BO - 11)); \ + xmm4 += xmm0 * xmm2; \ + xmm6 += xmm0 * xmm3; \ + BO += 2; \ + AO += 2; + + +#define SAVE2x2(ALPHA) \ + xmm0 = _mm_set1_pd(ALPHA); \ + xmm4 *= xmm0; \ + xmm6 *= xmm0; \ + \ + xmm4 += _mm_loadu_pd(CO1); \ + xmm6 += _mm_loadu_pd(CO1 + ldc); \ + \ + _mm_storeu_pd(CO1, xmm4); \ + _mm_storeu_pd(CO1 + ldc, xmm6); \ + \ + CO1 += 2; + + +/******************************************************************************************/ +/******************************************************************************************/ + +#define INIT1x2() \ + dbl4 = 0; \ + dbl5 = 0; + + +#define KERNEL1x2_SUB() \ + dbl0 = *(AO - 16); \ + dbl1 = *(BO - 12); \ + dbl2 = *(BO - 11); \ + dbl4 += dbl0 * dbl1; \ + dbl5 += dbl0 * dbl2; \ + BO += 2; \ + AO += 1; + + +#define SAVE1x2(ALPHA) \ + dbl0 = ALPHA; \ + dbl4 *= dbl0; \ + dbl5 *= dbl0; \ + \ + dbl4 += *(CO1 + (0 * ldc)); \ + dbl5 += *(CO1 + (1 * ldc)); \ + *(CO1 + (0 * ldc)) = dbl4; \ + *(CO1 + (1 * ldc)) = dbl5; \ + \ + \ + CO1 += 1; + + + +/******************************************************************************************/ +/******************************************************************************************/ + +#define INIT4x1() \ + ymm4 = _mm256_setzero_pd(); \ + ymm5 = _mm256_setzero_pd(); \ + ymm6 = _mm256_setzero_pd(); \ + ymm7 = _mm256_setzero_pd(); + + +#define KERNEL4x1() \ + ymm0 = _mm256_set1_pd(*(BO - 12)); \ + ymm1 = _mm256_set1_pd(*(BO - 11)); \ + ymm2 = 
_mm256_set1_pd(*(BO - 10)); \ + ymm3 = _mm256_set1_pd(*(BO - 9)); \ + \ + ymm4 += _mm256_loadu_pd(AO - 16) * ymm0; \ + ymm5 += _mm256_loadu_pd(AO - 12) * ymm1; \ + \ + ymm0 = _mm256_set1_pd(*(BO - 8)); \ + ymm1 = _mm256_set1_pd(*(BO - 7)); \ + \ + ymm6 += _mm256_loadu_pd(AO - 8) * ymm2; \ + ymm7 += _mm256_loadu_pd(AO - 4) * ymm3; \ + \ + ymm2 = _mm256_set1_pd(*(BO - 6)); \ + ymm3 = _mm256_set1_pd(*(BO - 5)); \ + \ + ymm4 += _mm256_loadu_pd(AO + 0) * ymm0; \ + ymm5 += _mm256_loadu_pd(AO + 4) * ymm1; \ + ymm6 += _mm256_loadu_pd(AO + 8) * ymm2; \ + ymm7 += _mm256_loadu_pd(AO + 12) * ymm3; \ + \ + BO += 8; \ + AO += 32; + + +#define INIT8x1() \ + zmm4 = _mm512_setzero_pd(); \ + + +#define KERNEL8x1_SUB() \ + zmm2 = _mm512_set1_pd(*(BO - 12)); \ + zmm0 = _mm512_loadu_pd(AO - 16); \ + zmm4 += zmm0 * zmm2; \ + BO += 1; \ + AO += 8; + + +#define SAVE8x1(ALPHA) \ + zmm0 = _mm512_set1_pd(ALPHA); \ + zmm4 *= zmm0; \ + \ + zmm4 += _mm512_loadu_pd(CO1); \ + _mm512_storeu_pd(CO1, zmm4); \ + CO1 += 8; + +#define KERNEL4x1_SUB() \ + ymm2 = _mm256_set1_pd(*(BO - 12)); \ + ymm0 = _mm256_loadu_pd(AO - 16); \ + ymm4 += ymm0 * ymm2; \ + BO += 1; \ + AO += 4; + + +#define SAVE4x1(ALPHA) \ + ymm0 = _mm256_set1_pd(ALPHA); \ + ymm4 += ymm5; \ + ymm6 += ymm7; \ + ymm4 += ymm6; \ + ymm4 *= ymm0; \ + \ + ymm4 += _mm256_loadu_pd(CO1); \ + _mm256_storeu_pd(CO1, ymm4); \ + CO1 += 4; + + +/******************************************************************************************/ +/******************************************************************************************/ + +#define INIT2x1() \ + xmm4 = _mm_setzero_pd(); + + +#define KERNEL2x1_SUB() \ + xmm2 = _mm_set1_pd(*(BO - 12)); \ + xmm0 = _mm_loadu_pd(AO - 16); \ + xmm4 += xmm0 * xmm2; \ + BO += 1; \ + AO += 2; + + +#define SAVE2x1(ALPHA) \ + xmm0 = _mm_set1_pd(ALPHA); \ + xmm4 *= xmm0; \ + \ + xmm4 += _mm_loadu_pd(CO1); \ + \ + _mm_storeu_pd(CO1, xmm4); \ + \ + CO1 += 2; + + +/******************************************************************************************/ +/******************************************************************************************/ + +#define INIT1x1() \ + dbl4 = 0; + +#define KERNEL1x1_SUB() \ + dbl1 = *(BO - 12); \ + dbl0 = *(AO - 16); \ + dbl4 += dbl0 * dbl1; \ + BO += 1; \ + AO += 1; + +#define SAVE1x1(ALPHA) \ + dbl0 = ALPHA; \ + dbl4 *= dbl0; \ + dbl4 += *CO1; \ + *CO1 = dbl4; \ + CO1 += 1; + + +/*******************************************************************************************/ + +/* START */ + + +int __attribute__ ((noinline)) +CNAME(BLASLONG m, BLASLONG n, BLASLONG k, double alpha, double * __restrict__ A, double * __restrict__ B, double * __restrict__ C, BLASLONG ldc) +{ + unsigned long M=m, N=n, K=k; + + + if (M == 0) + return 0; + if (N == 0) + return 0; + if (K == 0) + return 0; + + while (N >= 8) { + double *CO1; + double *AO; + int i; + + CO1 = C; + C += 8 * ldc; + + AO = A + 16; + + i = m; + + while (i >= 24) { + double *BO; + double *A1, *A2; + int kloop = K; + + BO = B + 12; + A1 = AO + 8 * K; + A2 = AO + 16 * K; + /* + * This is the inner loop for the hot hot path + * Written in inline asm because compilers like GCC 8 and earlier + * struggle with register allocation and are not good at using + * the AVX512 built in broadcast ability (1to8) + */ + asm( + "vxorpd %%zmm1, %%zmm1, %%zmm1\n" + "vmovapd %%zmm1, %%zmm2\n" + "vmovapd %%zmm1, %%zmm3\n" + "vmovapd %%zmm1, %%zmm4\n" + "vmovapd %%zmm1, %%zmm5\n" + "vmovapd %%zmm1, %%zmm6\n" + "vmovapd %%zmm1, %%zmm7\n" + "vmovapd %%zmm1, %%zmm8\n" + "vmovapd %%zmm1, 
%%zmm11\n" + "vmovapd %%zmm1, %%zmm12\n" + "vmovapd %%zmm1, %%zmm13\n" + "vmovapd %%zmm1, %%zmm14\n" + "vmovapd %%zmm1, %%zmm15\n" + "vmovapd %%zmm1, %%zmm16\n" + "vmovapd %%zmm1, %%zmm17\n" + "vmovapd %%zmm1, %%zmm18\n" + "vmovapd %%zmm1, %%zmm21\n" + "vmovapd %%zmm1, %%zmm22\n" + "vmovapd %%zmm1, %%zmm23\n" + "vmovapd %%zmm1, %%zmm24\n" + "vmovapd %%zmm1, %%zmm25\n" + "vmovapd %%zmm1, %%zmm26\n" + "vmovapd %%zmm1, %%zmm27\n" + "vmovapd %%zmm1, %%zmm28\n" + "jmp .label24\n" + ".p2align 5\n" + /* Inner math loop */ + ".label24:\n" + "vmovupd -128(%[AO]),%%zmm0\n" + "vmovupd -128(%[A1]),%%zmm10\n" + "vmovupd -128(%[A2]),%%zmm20\n" + + "vbroadcastsd -96(%[BO]), %%zmm9\n" + "vfmadd231pd %%zmm9, %%zmm0, %%zmm1\n" + "vfmadd231pd %%zmm9, %%zmm10, %%zmm11\n" + "vfmadd231pd %%zmm9, %%zmm20, %%zmm21\n" + + "vbroadcastsd -88(%[BO]), %%zmm9\n" + "vfmadd231pd %%zmm9, %%zmm0, %%zmm2\n" + "vfmadd231pd %%zmm9, %%zmm10, %%zmm12\n" + "vfmadd231pd %%zmm9, %%zmm20, %%zmm22\n" + + "vbroadcastsd -80(%[BO]), %%zmm9\n" + "vfmadd231pd %%zmm9, %%zmm0, %%zmm3\n" + "vfmadd231pd %%zmm9, %%zmm10, %%zmm13\n" + "vfmadd231pd %%zmm9, %%zmm20, %%zmm23\n" + + "vbroadcastsd -72(%[BO]), %%zmm9\n" + "vfmadd231pd %%zmm9, %%zmm0, %%zmm4\n" + "vfmadd231pd %%zmm9, %%zmm10, %%zmm14\n" + "vfmadd231pd %%zmm9, %%zmm20, %%zmm24\n" + + "vbroadcastsd -64(%[BO]), %%zmm9\n" + "vfmadd231pd %%zmm9, %%zmm0, %%zmm5\n" + "vfmadd231pd %%zmm9, %%zmm10, %%zmm15\n" + "vfmadd231pd %%zmm9, %%zmm20, %%zmm25\n" + + "vbroadcastsd -56(%[BO]), %%zmm9\n" + "vfmadd231pd %%zmm9, %%zmm0, %%zmm6\n" + "vfmadd231pd %%zmm9, %%zmm10, %%zmm16\n" + "vfmadd231pd %%zmm9, %%zmm20, %%zmm26\n" + + "vbroadcastsd -48(%[BO]), %%zmm9\n" + "vfmadd231pd %%zmm9, %%zmm0, %%zmm7\n" + "vfmadd231pd %%zmm9, %%zmm10, %%zmm17\n" + "vfmadd231pd %%zmm9, %%zmm20, %%zmm27\n" + + "vbroadcastsd -40(%[BO]), %%zmm9\n" + "vfmadd231pd %%zmm9, %%zmm0, %%zmm8\n" + "vfmadd231pd %%zmm9, %%zmm10, %%zmm18\n" + "vfmadd231pd %%zmm9, %%zmm20, %%zmm28\n" + "add $64, %[AO]\n" + "add $64, %[A1]\n" + "add $64, %[A2]\n" + "add $64, %[BO]\n" + "prefetch 512(%[AO])\n" + "prefetch 512(%[A1])\n" + "prefetch 512(%[A2])\n" + "prefetch 512(%[BO])\n" + "subl $1, %[kloop]\n" + "jg .label24\n" + /* multiply the result by alpha */ + "vbroadcastsd (%[alpha]), %%zmm9\n" + /* And store additively in C */ + "vfmadd213pd (%[C0]), %%zmm9, %%zmm1\n" + "vfmadd213pd (%[C1]), %%zmm9, %%zmm2\n" + "vfmadd213pd (%[C2]), %%zmm9, %%zmm3\n" + "vfmadd213pd (%[C3]), %%zmm9, %%zmm4\n" + "vfmadd213pd (%[C4]), %%zmm9, %%zmm5\n" + "vfmadd213pd (%[C5]), %%zmm9, %%zmm6\n" + "vfmadd213pd (%[C6]), %%zmm9, %%zmm7\n" + "vfmadd213pd (%[C7]), %%zmm9, %%zmm8\n" + "vmovupd %%zmm1, (%[C0])\n" + "vmovupd %%zmm2, (%[C1])\n" + "vmovupd %%zmm3, (%[C2])\n" + "vmovupd %%zmm4, (%[C3])\n" + "vmovupd %%zmm5, (%[C4])\n" + "vmovupd %%zmm6, (%[C5])\n" + "vmovupd %%zmm7, (%[C6])\n" + "vmovupd %%zmm8, (%[C7])\n" + + "vfmadd213pd 64(%[C0]), %%zmm9, %%zmm11\n" + "vfmadd213pd 64(%[C1]), %%zmm9, %%zmm12\n" + "vfmadd213pd 64(%[C2]), %%zmm9, %%zmm13\n" + "vfmadd213pd 64(%[C3]), %%zmm9, %%zmm14\n" + "vfmadd213pd 64(%[C4]), %%zmm9, %%zmm15\n" + "vfmadd213pd 64(%[C5]), %%zmm9, %%zmm16\n" + "vfmadd213pd 64(%[C6]), %%zmm9, %%zmm17\n" + "vfmadd213pd 64(%[C7]), %%zmm9, %%zmm18\n" + "vmovupd %%zmm11, 64(%[C0])\n" + "vmovupd %%zmm12, 64(%[C1])\n" + "vmovupd %%zmm13, 64(%[C2])\n" + "vmovupd %%zmm14, 64(%[C3])\n" + "vmovupd %%zmm15, 64(%[C4])\n" + "vmovupd %%zmm16, 64(%[C5])\n" + "vmovupd %%zmm17, 64(%[C6])\n" + "vmovupd %%zmm18, 64(%[C7])\n" + + "vfmadd213pd 128(%[C0]), %%zmm9, 
%%zmm21\n" + "vfmadd213pd 128(%[C1]), %%zmm9, %%zmm22\n" + "vfmadd213pd 128(%[C2]), %%zmm9, %%zmm23\n" + "vfmadd213pd 128(%[C3]), %%zmm9, %%zmm24\n" + "vfmadd213pd 128(%[C4]), %%zmm9, %%zmm25\n" + "vfmadd213pd 128(%[C5]), %%zmm9, %%zmm26\n" + "vfmadd213pd 128(%[C6]), %%zmm9, %%zmm27\n" + "vfmadd213pd 128(%[C7]), %%zmm9, %%zmm28\n" + "vmovupd %%zmm21, 128(%[C0])\n" + "vmovupd %%zmm22, 128(%[C1])\n" + "vmovupd %%zmm23, 128(%[C2])\n" + "vmovupd %%zmm24, 128(%[C3])\n" + "vmovupd %%zmm25, 128(%[C4])\n" + "vmovupd %%zmm26, 128(%[C5])\n" + "vmovupd %%zmm27, 128(%[C6])\n" + "vmovupd %%zmm28, 128(%[C7])\n" + + : + [AO] "+r" (AO), + [A1] "+r" (A1), + [A2] "+r" (A2), + [BO] "+r" (BO), + [C0] "+r" (CO1), + [kloop] "+r" (kloop) + : + [alpha] "r" (&alpha), + [C1] "r" (CO1 + 1 * ldc), + [C2] "r" (CO1 + 2 * ldc), + [C3] "r" (CO1 + 3 * ldc), + [C4] "r" (CO1 + 4 * ldc), + [C5] "r" (CO1 + 5 * ldc), + [C6] "r" (CO1 + 6 * ldc), + [C7] "r" (CO1 + 7 * ldc) + + : "memory", "zmm0", "zmm1", "zmm2", "zmm3", "zmm4", "zmm5", "zmm6", "zmm7", "zmm8", "zmm9", + "zmm10", "zmm11", "zmm12", "zmm13", "zmm14", "zmm15", "zmm16", "zmm17", "zmm18", + "zmm20", "zmm21", "zmm22", "zmm23", "zmm24", "zmm25", "zmm26", "zmm27", "zmm28" + ); + CO1 += 24; + AO += 16 * K; + i-= 24; + } + + + while (i >= 16) { + double *BO; + double *A1; + int kloop = K; + + BO = B + 12; + A1 = AO + 8 * K; + /* + * This is the inner loop for the hot hot path + * Written in inline asm because compilers like GCC 8 and earlier + * struggle with register allocation and are not good at using + * the AVX512 built in broadcast ability (1to8) + */ + asm( + "vxorpd %%zmm1, %%zmm1, %%zmm1\n" + "vmovapd %%zmm1, %%zmm2\n" + "vmovapd %%zmm1, %%zmm3\n" + "vmovapd %%zmm1, %%zmm4\n" + "vmovapd %%zmm1, %%zmm5\n" + "vmovapd %%zmm1, %%zmm6\n" + "vmovapd %%zmm1, %%zmm7\n" + "vmovapd %%zmm1, %%zmm8\n" + "vmovapd %%zmm1, %%zmm11\n" + "vmovapd %%zmm1, %%zmm12\n" + "vmovapd %%zmm1, %%zmm13\n" + "vmovapd %%zmm1, %%zmm14\n" + "vmovapd %%zmm1, %%zmm15\n" + "vmovapd %%zmm1, %%zmm16\n" + "vmovapd %%zmm1, %%zmm17\n" + "vmovapd %%zmm1, %%zmm18\n" + "jmp .label16\n" + ".p2align 5\n" + /* Inner math loop */ + ".label16:\n" + "vmovupd -128(%[AO]),%%zmm0\n" + "vmovupd -128(%[A1]),%%zmm10\n" + + "vbroadcastsd -96(%[BO]), %%zmm9\n" + "vfmadd231pd %%zmm9, %%zmm0, %%zmm1\n" + "vfmadd231pd %%zmm9, %%zmm10, %%zmm11\n" + + "vbroadcastsd -88(%[BO]), %%zmm9\n" + "vfmadd231pd %%zmm9, %%zmm0, %%zmm2\n" + "vfmadd231pd %%zmm9, %%zmm10, %%zmm12\n" + + "vbroadcastsd -80(%[BO]), %%zmm9\n" + "vfmadd231pd %%zmm9, %%zmm0, %%zmm3\n" + "vfmadd231pd %%zmm9, %%zmm10, %%zmm13\n" + + "vbroadcastsd -72(%[BO]), %%zmm9\n" + "vfmadd231pd %%zmm9, %%zmm0, %%zmm4\n" + "vfmadd231pd %%zmm9, %%zmm10, %%zmm14\n" + + "vbroadcastsd -64(%[BO]), %%zmm9\n" + "vfmadd231pd %%zmm9, %%zmm0, %%zmm5\n" + "vfmadd231pd %%zmm9, %%zmm10, %%zmm15\n" + + "vbroadcastsd -56(%[BO]), %%zmm9\n" + "vfmadd231pd %%zmm9, %%zmm0, %%zmm6\n" + "vfmadd231pd %%zmm9, %%zmm10, %%zmm16\n" + + "vbroadcastsd -48(%[BO]), %%zmm9\n" + "vfmadd231pd %%zmm9, %%zmm0, %%zmm7\n" + "vfmadd231pd %%zmm9, %%zmm10, %%zmm17\n" + + "vbroadcastsd -40(%[BO]), %%zmm9\n" + "vfmadd231pd %%zmm9, %%zmm0, %%zmm8\n" + "vfmadd231pd %%zmm9, %%zmm10, %%zmm18\n" + "add $64, %[AO]\n" + "add $64, %[A1]\n" + "add $64, %[BO]\n" + "prefetch 512(%[AO])\n" + "prefetch 512(%[A1])\n" + "prefetch 512(%[BO])\n" + "subl $1, %[kloop]\n" + "jg .label16\n" + /* multiply the result by alpha */ + "vbroadcastsd (%[alpha]), %%zmm9\n" + /* And store additively in C */ + "vfmadd213pd (%[C0]), %%zmm9, %%zmm1\n" 
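+ /* note: vfmadd213pd computes zmm = alpha * zmm + C here, so the alpha */ + /* scaling and the additive update of C happen in a single instruction */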
+ "vfmadd213pd (%[C1]), %%zmm9, %%zmm2\n" + "vfmadd213pd (%[C2]), %%zmm9, %%zmm3\n" + "vfmadd213pd (%[C3]), %%zmm9, %%zmm4\n" + "vfmadd213pd (%[C4]), %%zmm9, %%zmm5\n" + "vfmadd213pd (%[C5]), %%zmm9, %%zmm6\n" + "vfmadd213pd (%[C6]), %%zmm9, %%zmm7\n" + "vfmadd213pd (%[C7]), %%zmm9, %%zmm8\n" + "vmovupd %%zmm1, (%[C0])\n" + "vmovupd %%zmm2, (%[C1])\n" + "vmovupd %%zmm3, (%[C2])\n" + "vmovupd %%zmm4, (%[C3])\n" + "vmovupd %%zmm5, (%[C4])\n" + "vmovupd %%zmm6, (%[C5])\n" + "vmovupd %%zmm7, (%[C6])\n" + "vmovupd %%zmm8, (%[C7])\n" + + "vfmadd213pd 64(%[C0]), %%zmm9, %%zmm11\n" + "vfmadd213pd 64(%[C1]), %%zmm9, %%zmm12\n" + "vfmadd213pd 64(%[C2]), %%zmm9, %%zmm13\n" + "vfmadd213pd 64(%[C3]), %%zmm9, %%zmm14\n" + "vfmadd213pd 64(%[C4]), %%zmm9, %%zmm15\n" + "vfmadd213pd 64(%[C5]), %%zmm9, %%zmm16\n" + "vfmadd213pd 64(%[C6]), %%zmm9, %%zmm17\n" + "vfmadd213pd 64(%[C7]), %%zmm9, %%zmm18\n" + "vmovupd %%zmm11, 64(%[C0])\n" + "vmovupd %%zmm12, 64(%[C1])\n" + "vmovupd %%zmm13, 64(%[C2])\n" + "vmovupd %%zmm14, 64(%[C3])\n" + "vmovupd %%zmm15, 64(%[C4])\n" + "vmovupd %%zmm16, 64(%[C5])\n" + "vmovupd %%zmm17, 64(%[C6])\n" + "vmovupd %%zmm18, 64(%[C7])\n" + + : + [AO] "+r" (AO), + [A1] "+r" (A1), + [BO] "+r" (BO), + [C0] "+r" (CO1), + [kloop] "+r" (kloop) + : + [alpha] "r" (&alpha), + [C1] "r" (CO1 + 1 * ldc), + [C2] "r" (CO1 + 2 * ldc), + [C3] "r" (CO1 + 3 * ldc), + [C4] "r" (CO1 + 4 * ldc), + [C5] "r" (CO1 + 5 * ldc), + [C6] "r" (CO1 + 6 * ldc), + [C7] "r" (CO1 + 7 * ldc) + + : "memory", "zmm0", "zmm1", "zmm2", "zmm3", "zmm4", "zmm5", "zmm6", "zmm7", "zmm8", "zmm9", + "zmm10", "zmm11", "zmm12", "zmm13", "zmm14", "zmm15", "zmm16", "zmm17", "zmm18" + ); + CO1 += 16; + AO += 8 * K; + i-= 16; + } + + while (i >= 8) { + double *BO; + int kloop = K; + + BO = B + 12; + /* + * This is the inner loop for the hot hot path + * Written in inline asm because compilers like GCC 8 and earlier + * struggle with register allocation and are not good at using + * the AVX512 built in broadcast ability (1to8) + */ + asm( + "vxorpd %%zmm1, %%zmm1, %%zmm1\n" + "vmovapd %%zmm1, %%zmm2\n" + "vmovapd %%zmm1, %%zmm3\n" + "vmovapd %%zmm1, %%zmm4\n" + "vmovapd %%zmm1, %%zmm5\n" + "vmovapd %%zmm1, %%zmm6\n" + "vmovapd %%zmm1, %%zmm7\n" + "vmovapd %%zmm1, %%zmm8\n" + "vbroadcastsd (%[alpha]), %%zmm9\n" + "jmp .label1\n" + ".p2align 5\n" + /* Inner math loop */ + ".label1:\n" + "vmovupd -128(%[AO]),%%zmm0\n" + "vfmadd231pd -96(%[BO])%{1to8%}, %%zmm0, %%zmm1\n" + "vfmadd231pd -88(%[BO])%{1to8%}, %%zmm0, %%zmm2\n" + "vfmadd231pd -80(%[BO])%{1to8%}, %%zmm0, %%zmm3\n" + "vfmadd231pd -72(%[BO])%{1to8%}, %%zmm0, %%zmm4\n" + "vfmadd231pd -64(%[BO])%{1to8%}, %%zmm0, %%zmm5\n" + "vfmadd231pd -56(%[BO])%{1to8%}, %%zmm0, %%zmm6\n" + "vfmadd231pd -48(%[BO])%{1to8%}, %%zmm0, %%zmm7\n" + "vfmadd231pd -40(%[BO])%{1to8%}, %%zmm0, %%zmm8\n" + "add $64, %[AO]\n" + "add $64, %[BO]\n" + "subl $1, %[kloop]\n" + "jg .label1\n" + /* multiply the result by alpha and add to the memory */ + "vfmadd213pd (%[C0]), %%zmm9, %%zmm1\n" + "vfmadd213pd (%[C1]), %%zmm9, %%zmm2\n" + "vfmadd213pd (%[C2]), %%zmm9, %%zmm3\n" + "vfmadd213pd (%[C3]), %%zmm9, %%zmm4\n" + "vfmadd213pd (%[C4]), %%zmm9, %%zmm5\n" + "vfmadd213pd (%[C5]), %%zmm9, %%zmm6\n" + "vfmadd213pd (%[C6]), %%zmm9, %%zmm7\n" + "vfmadd213pd (%[C7]), %%zmm9, %%zmm8\n" + "vmovupd %%zmm1, (%[C0])\n" + "vmovupd %%zmm2, (%[C1])\n" + "vmovupd %%zmm3, (%[C2])\n" + "vmovupd %%zmm4, (%[C3])\n" + "vmovupd %%zmm5, (%[C4])\n" + "vmovupd %%zmm6, (%[C5])\n" + "vmovupd %%zmm7, (%[C6])\n" + "vmovupd %%zmm8, (%[C7])\n" + : 
+ [AO] "+r" (AO), + [BO] "+r" (BO), + [C0] "+r" (CO1), + [kloop] "+r" (kloop) + : + [alpha] "r" (&alpha), + [C1] "r" (CO1 + 1 * ldc), + [C2] "r" (CO1 + 2 * ldc), + [C3] "r" (CO1 + 3 * ldc), + [C4] "r" (CO1 + 4 * ldc), + [C5] "r" (CO1 + 5 * ldc), + [C6] "r" (CO1 + 6 * ldc), + [C7] "r" (CO1 + 7 * ldc) + + : "memory", "zmm0", "zmm1", "zmm2", "zmm3", "zmm4", "zmm5", "zmm6", "zmm7", "zmm8", "zmm9" + ); + CO1 += 8; + i-= 8; + } + + + + while (i >= 4) { + double *BO; + __m256d ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7, ymm8, ymm9, ymm10, ymm11; + int kloop = K; + + BO = B + 12; + INIT4x8() + + while (kloop > 0) { + KERNEL4x8_SUB() + kloop--; + } + SAVE4x8(alpha) + i-= 4; + } + + + while (i >= 2) { + double *BO; + __m128d xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11; + int kloop = K; + + BO = B + 12; + INIT2x8() + + while (kloop > 0) { + KERNEL2x8_SUB() + kloop--; + } + SAVE2x8(alpha) + i -= 2; + } + + while (i >= 1) { + double *BO; + double dbl0, dbl1, dbl2, dbl3, dbl4, dbl5, dbl6, dbl7, dbl8, dbl9, dbl10, dbl11; + int kloop = K; + + BO = B + 12; + INIT1x8() + + while (kloop > 0) { + KERNEL1x8_SUB() + kloop--; + } + SAVE1x8(alpha) + i -= 1; + } + B += K * 8; + N -= 8; + } + + if (N == 0) + return 0; + + + + // L8_0 + while (N >= 4) { + double *CO1; + double *AO; + int i; + // L8_10 + CO1 = C; + C += 4 * ldc; + + AO = A + 16; + + i = m; + while (i >= 8) { + double *BO; + // L8_11 + __m256d ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm10, ymm11,ymm12,ymm13,ymm14,ymm15,ymm16,ymm17; + BO = B + 12; + int kloop = K; + + INIT8x4() + + while (kloop > 0) { + // L12_17 + KERNEL8x4_SUB() + kloop--; + } + // L8_19 + SAVE8x4(alpha) + + i -= 8; + } + while (i >= 4) { + // L8_11 + double *BO; + __m256d ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7; + BO = B + 12; + int kloop = K; + + INIT4x4() + // L8_16 + while (kloop > 0) { + // L12_17 + KERNEL4x4_SUB() + kloop--; + } + // L8_19 + SAVE4x4(alpha) + + i -= 4; + } + +/************************************************************************** +* Rest of M +***************************************************************************/ + + while (i >= 2) { + double *BO; + __m128d xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7; + BO = B; + BO += 12; + + INIT2x4() + int kloop = K; + + while (kloop > 0) { + KERNEL2x4_SUB() + kloop--; + } + SAVE2x4(alpha) + i -= 2; + } + // L13_40 + while (i >= 1) { + double *BO; + double dbl0, dbl1, dbl2, dbl3, dbl4, dbl5, dbl6, dbl7, dbl8; + int kloop = K; + BO = B + 12; + INIT1x4() + + while (kloop > 0) { + KERNEL1x4_SUB() + kloop--; + } + SAVE1x4(alpha) + i -= 1; + } + + B += K * 4; + N -= 4; + } + +/**************************************************************************************************/ + + // L8_0 + while (N >= 2) { + double *CO1; + double *AO; + int i; + // L8_10 + CO1 = C; + C += 2 * ldc; + + AO = A + 16; + + i = m; + while (i >= 8) { + double *BO; + __m256d ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7; + // L8_11 + BO = B + 12; + int kloop = K; + + INIT8x2() + + // L8_16 + while (kloop > 0) { + // L12_17 + KERNEL8x2_SUB() + kloop--; + } + // L8_19 + SAVE8x2(alpha) + + i-=8; + } + + while (i >= 4) { + double *BO; + __m128d xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7; + // L8_11 + BO = B + 12; + int kloop = K; + + INIT4x2() + + // L8_16 + while (kloop > 0) { + // L12_17 + KERNEL4x2_SUB() + kloop--; + } + // L8_19 + SAVE4x2(alpha) + + i-=4; + } + +/************************************************************************** +* Rest of M 
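+* (rows not covered by the 8x2 and 4x2 kernels above fall through to +* the 2x2 and 1x2 kernels below)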
+***************************************************************************/ + + while (i >= 2) { + double *BO; + __m128d xmm0, xmm2, xmm3, xmm4, xmm6; + int kloop = K; + BO = B + 12; + + INIT2x2() + + while (kloop > 0) { + KERNEL2x2_SUB() + kloop--; + } + SAVE2x2(alpha) + i -= 2; + } + // L13_40 + while (i >= 1) { + double *BO; + double dbl0, dbl1, dbl2, dbl4, dbl5; + int kloop = K; + BO = B + 12; + + INIT1x2() + + while (kloop > 0) { + KERNEL1x2_SUB() + kloop--; + } + SAVE1x2(alpha) + i -= 1; + } + + B += K * 2; + N -= 2; + } + + // L8_0 + while (N >= 1) { + // L8_10 + double *CO1; + double *AO; + int i; + + CO1 = C; + C += ldc; + + AO = A + 16; + + i = m; + while (i >= 8) { + double *BO; + __m512d zmm0, zmm2, zmm4; + // L8_11 + BO = B + 12; + int kloop = K; + + INIT8x1() + // L8_16 + while (kloop > 0) { + // L12_17 + KERNEL8x1_SUB() + kloop--; + } + // L8_19 + SAVE8x1(alpha) + + i-= 8; + } + while (i >= 4) { + double *BO; + __m256d ymm0, ymm2, ymm4, ymm5, ymm6, ymm7; + // L8_11 + BO = B + 12; + int kloop = K; + + INIT4x1() + // L8_16 + while (kloop > 0) { + // L12_17 + KERNEL4x1_SUB() + kloop--; + } + // L8_19 + SAVE4x1(alpha) + + i-= 4; + } + +/************************************************************************** +* Rest of M +***************************************************************************/ + + while (i >= 2) { + double *BO; + __m128d xmm0, xmm2, xmm4; + int kloop = K; + BO = B; + BO += 12; + + INIT2x1() + + while (kloop > 0) { + KERNEL2x1_SUB() + kloop--; + } + SAVE2x1(alpha) + i -= 2; + } + // L13_40 + while (i >= 1) { + double *BO; + double dbl0, dbl1, dbl4; + int kloop = K; + + BO = B; + BO += 12; + INIT1x1() + + + while (kloop > 0) { + KERNEL1x1_SUB() + kloop--; + } + SAVE1x1(alpha) + i -= 1; + } + + B += K * 1; + N -= 1; + } + + + return 0; +} diff --git a/kernel/x86_64/dgemm_ncopy_8_skylakex.c b/kernel/x86_64/dgemm_ncopy_8_skylakex.c new file mode 100644 index 000000000..74b336f3d --- /dev/null +++ b/kernel/x86_64/dgemm_ncopy_8_skylakex.c @@ -0,0 +1,421 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. 
IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include <stdio.h> +#include "common.h" +#include <immintrin.h> + +int CNAME(BLASLONG m, BLASLONG n, FLOAT * __restrict a, BLASLONG lda, FLOAT * __restrict b){ + BLASLONG i, j; + + FLOAT *aoffset; + FLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4; + FLOAT *aoffset5, *aoffset6, *aoffset7, *aoffset8; + + FLOAT *boffset; + FLOAT ctemp01, ctemp02, ctemp03, ctemp04; + FLOAT ctemp05, ctemp06, ctemp07, ctemp08; + FLOAT ctemp09, ctemp10, ctemp11, ctemp12; + FLOAT ctemp13, ctemp14, ctemp15, ctemp16; + FLOAT ctemp17, ctemp18, ctemp19, ctemp20; + FLOAT ctemp21, ctemp22, ctemp23, ctemp24; + FLOAT ctemp25, ctemp26, ctemp27, ctemp28; + FLOAT ctemp29, ctemp30, ctemp31, ctemp32; + FLOAT ctemp33, ctemp34, ctemp35, ctemp36; + FLOAT ctemp37, ctemp38, ctemp39, ctemp40; + FLOAT ctemp41, ctemp42, ctemp43, ctemp44; + FLOAT ctemp45, ctemp46, ctemp47, ctemp48; + FLOAT ctemp49, ctemp50, ctemp51, ctemp52; + FLOAT ctemp53, ctemp54, ctemp55, ctemp56; + FLOAT ctemp57, ctemp58, ctemp59, ctemp60; + FLOAT ctemp61, ctemp62, ctemp63, ctemp64; + + + aoffset = a; + boffset = b; + + j = (n >> 3); + if (j > 0){ + do{ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset5 = aoffset4 + lda; + aoffset6 = aoffset5 + lda; + aoffset7 = aoffset6 + lda; + aoffset8 = aoffset7 + lda; + aoffset += 8 * lda; + + i = (m >> 3); + if (i > 0){ + do{ + __m128d xmm0, xmm1; + xmm0 = _mm_load_pd1(aoffset2 + 0); + xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 0); + _mm_storeu_pd(boffset + 0, xmm0); + + ctemp07 = *(aoffset1 + 6); + ctemp08 = *(aoffset1 + 7); + + xmm1 = _mm_load_pd1(aoffset4 + 0); + xmm1 = _mm_loadl_pd(xmm1, aoffset3 + 0); + _mm_storeu_pd(boffset + 2, xmm1); + + xmm0 = _mm_load_pd1(aoffset6 + 0); + xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 0); + _mm_storeu_pd(boffset + 4, xmm0); + + xmm0 = _mm_load_pd1(aoffset8 + 0); + xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 0); + _mm_storeu_pd(boffset + 6, xmm0); + + ctemp15 = *(aoffset2 + 6); + ctemp16 = *(aoffset2 + 7); + + xmm0 = _mm_load_pd1(aoffset2 + 1); + xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 1); + _mm_storeu_pd(boffset + 8, xmm0); + + xmm0 = _mm_load_pd1(aoffset4 + 1); + xmm0 = _mm_loadl_pd(xmm0, aoffset3 + 1); + _mm_storeu_pd(boffset + 10, xmm0); + + xmm0 = _mm_load_pd1(aoffset6 + 1); + xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 1); + _mm_storeu_pd(boffset + 12, xmm0); + + xmm0 = _mm_load_pd1(aoffset8 + 1); + xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 1); + _mm_storeu_pd(boffset + 14, xmm0); + + xmm0 = _mm_load_pd1(aoffset2 + 2); + xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 2); + _mm_storeu_pd(boffset + 16, xmm0); + + xmm0 = _mm_load_pd1(aoffset4 + 2); + xmm0 = 
_mm_loadl_pd(xmm0, aoffset3 + 2); + _mm_storeu_pd(boffset + 18, xmm0); + + xmm0 = _mm_load_pd1(aoffset6 + 2); + xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 2); + _mm_storeu_pd(boffset + 20, xmm0); + + xmm0 = _mm_load_pd1(aoffset8 + 2); + xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 2); + _mm_storeu_pd(boffset + 22, xmm0); + + ctemp23 = *(aoffset3 + 6); + ctemp24 = *(aoffset3 + 7); + + xmm0 = _mm_load_pd1(aoffset2 + 3); + xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 3); + _mm_storeu_pd(boffset + 24, xmm0); + + xmm0 = _mm_load_pd1(aoffset4 + 3); + xmm0 = _mm_loadl_pd(xmm0, aoffset3 + 3); + _mm_storeu_pd(boffset + 26, xmm0); + + xmm0 = _mm_load_pd1(aoffset6 + 3); + xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 3); + _mm_storeu_pd(boffset + 28, xmm0); + + xmm0 = _mm_load_pd1(aoffset8 + 3); + xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 3); + _mm_storeu_pd(boffset + 30, xmm0); + + ctemp31 = *(aoffset4 + 6); + ctemp32 = *(aoffset4 + 7); + + + xmm0 = _mm_load_pd1(aoffset2 + 4); + xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 4); + _mm_storeu_pd(boffset + 32, xmm0); + + xmm0 = _mm_load_pd1(aoffset4 + 4); + xmm0 = _mm_loadl_pd(xmm0, aoffset3 + 4); + _mm_storeu_pd(boffset + 34, xmm0); + + xmm0 = _mm_load_pd1(aoffset6 + 4); + xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 4); + _mm_storeu_pd(boffset + 36, xmm0); + + xmm0 = _mm_load_pd1(aoffset8 + 4); + xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 4); + _mm_storeu_pd(boffset + 38, xmm0); + + ctemp39 = *(aoffset5 + 6); + ctemp40 = *(aoffset5 + 7); + + xmm0 = _mm_load_pd1(aoffset2 + 5); + xmm0 = _mm_loadl_pd(xmm0, aoffset1 + 5); + _mm_storeu_pd(boffset + 40, xmm0); + + xmm0 = _mm_load_pd1(aoffset4 + 5); + xmm0 = _mm_loadl_pd(xmm0, aoffset3 + 5); + _mm_storeu_pd(boffset + 42, xmm0); + + xmm0 = _mm_load_pd1(aoffset6 + 5); + xmm0 = _mm_loadl_pd(xmm0, aoffset5 + 5); + _mm_storeu_pd(boffset + 44, xmm0); + + xmm0 = _mm_load_pd1(aoffset8 + 5); + xmm0 = _mm_loadl_pd(xmm0, aoffset7 + 5); + _mm_storeu_pd(boffset + 46, xmm0); + + + ctemp47 = *(aoffset6 + 6); + ctemp48 = *(aoffset6 + 7); + + ctemp55 = *(aoffset7 + 6); + ctemp56 = *(aoffset7 + 7); + + ctemp63 = *(aoffset8 + 6); + ctemp64 = *(aoffset8 + 7); + + + *(boffset + 48) = ctemp07; + *(boffset + 49) = ctemp15; + *(boffset + 50) = ctemp23; + *(boffset + 51) = ctemp31; + *(boffset + 52) = ctemp39; + *(boffset + 53) = ctemp47; + *(boffset + 54) = ctemp55; + *(boffset + 55) = ctemp63; + + *(boffset + 56) = ctemp08; + *(boffset + 57) = ctemp16; + *(boffset + 58) = ctemp24; + *(boffset + 59) = ctemp32; + *(boffset + 60) = ctemp40; + *(boffset + 61) = ctemp48; + *(boffset + 62) = ctemp56; + *(boffset + 63) = ctemp64; + + aoffset1 += 8; + aoffset2 += 8; + aoffset3 += 8; + aoffset4 += 8; + aoffset5 += 8; + aoffset6 += 8; + aoffset7 += 8; + aoffset8 += 8; + boffset += 64; + i --; + }while(i > 0); + } + + i = (m & 7); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp09 = *(aoffset2 + 0); + ctemp17 = *(aoffset3 + 0); + ctemp25 = *(aoffset4 + 0); + ctemp33 = *(aoffset5 + 0); + ctemp41 = *(aoffset6 + 0); + ctemp49 = *(aoffset7 + 0); + ctemp57 = *(aoffset8 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp09; + *(boffset + 2) = ctemp17; + *(boffset + 3) = ctemp25; + *(boffset + 4) = ctemp33; + *(boffset + 5) = ctemp41; + *(boffset + 6) = ctemp49; + *(boffset + 7) = ctemp57; + + aoffset1 ++; + aoffset2 ++; + aoffset3 ++; + aoffset4 ++; + aoffset5 ++; + aoffset6 ++; + aoffset7 ++; + aoffset8 ++; + + boffset += 8; + i --; + }while(i > 0); + } + j--; + }while(j > 0); + } /* end of if(j > 0) */ + + if (n & 4){ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = 
aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset += 4 * lda; + + i = (m >> 2); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + + ctemp05 = *(aoffset2 + 0); + ctemp06 = *(aoffset2 + 1); + ctemp07 = *(aoffset2 + 2); + ctemp08 = *(aoffset2 + 3); + + ctemp09 = *(aoffset3 + 0); + ctemp10 = *(aoffset3 + 1); + ctemp11 = *(aoffset3 + 2); + ctemp12 = *(aoffset3 + 3); + + ctemp13 = *(aoffset4 + 0); + ctemp14 = *(aoffset4 + 1); + ctemp15 = *(aoffset4 + 2); + ctemp16 = *(aoffset4 + 3); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp05; + *(boffset + 2) = ctemp09; + *(boffset + 3) = ctemp13; + + *(boffset + 4) = ctemp02; + *(boffset + 5) = ctemp06; + *(boffset + 6) = ctemp10; + *(boffset + 7) = ctemp14; + + *(boffset + 8) = ctemp03; + *(boffset + 9) = ctemp07; + *(boffset + 10) = ctemp11; + *(boffset + 11) = ctemp15; + + *(boffset + 12) = ctemp04; + *(boffset + 13) = ctemp08; + *(boffset + 14) = ctemp12; + *(boffset + 15) = ctemp16; + + aoffset1 += 4; + aoffset2 += 4; + aoffset3 += 4; + aoffset4 += 4; + boffset += 16; + i --; + }while(i > 0); + } + + i = (m & 3); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset2 + 0); + ctemp03 = *(aoffset3 + 0); + ctemp04 = *(aoffset4 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + *(boffset + 2) = ctemp03; + *(boffset + 3) = ctemp04; + + aoffset1 ++; + aoffset2 ++; + aoffset3 ++; + aoffset4 ++; + + boffset += 4; + i --; + }while(i > 0); + } + } /* end of if(j > 0) */ + + if (n & 2){ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset += 2 * lda; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset2 + 0); + ctemp04 = *(aoffset2 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp03; + *(boffset + 2) = ctemp02; + *(boffset + 3) = ctemp04; + + aoffset1 += 2; + aoffset2 += 2; + boffset += 4; + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset2 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + + aoffset1 ++; + aoffset2 ++; + boffset += 2; + } + } /* end of if(j > 0) */ + + if (n & 1){ + aoffset1 = aoffset; + + i = m; + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + + *(boffset + 0) = ctemp01; + + aoffset1 ++; + boffset ++; + i --; + }while(i > 0); + } + + } /* end of if(j > 0) */ + + return 0; +} diff --git a/kernel/x86_64/dgemm_tcopy_8_skylakex.c b/kernel/x86_64/dgemm_tcopy_8_skylakex.c new file mode 100644 index 000000000..472ad6349 --- /dev/null +++ b/kernel/x86_64/dgemm_tcopy_8_skylakex.c @@ -0,0 +1,417 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. 
*/ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include <stdio.h> +#include "common.h" +#include <immintrin.h> + +int CNAME(BLASLONG m, BLASLONG n, FLOAT * __restrict a, BLASLONG lda, FLOAT * __restrict b){ + + BLASLONG i, j; + + FLOAT *aoffset; + FLOAT *aoffset1, *aoffset2, *aoffset3, *aoffset4; + FLOAT *aoffset5, *aoffset6, *aoffset7, *aoffset8; + + FLOAT *boffset, *boffset1, *boffset2, *boffset3, *boffset4; + + FLOAT ctemp01, ctemp02, ctemp03, ctemp04; + FLOAT ctemp05, ctemp06, ctemp07, ctemp08; + + aoffset = a; + boffset = b; + +#if 0 + fprintf(stderr, "M = %d N = %d\n", m, n); +#endif + + boffset2 = b + m * (n & ~7); + boffset3 = b + m * (n & ~3); + boffset4 = b + m * (n & ~1); + + j = (m >> 3); + if (j > 0){ + do{ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset5 = aoffset4 + lda; + aoffset6 = aoffset5 + lda; + aoffset7 = aoffset6 + lda; + aoffset8 = aoffset7 + lda; + aoffset += 8 * lda; + + boffset1 = boffset; + boffset += 64; + + i = (n >> 3); + if (i > 0){ + do{ + __m512d row1, row2, row3, row4, row5, row6, row7, row8; + row1 = _mm512_loadu_pd(aoffset1); + aoffset1 += 8; + row2 = _mm512_loadu_pd(aoffset2); + aoffset2 += 8; + row3 = _mm512_loadu_pd(aoffset3); + aoffset3 += 8; + row4 = _mm512_loadu_pd(aoffset4); + aoffset4 += 8; + row5 = _mm512_loadu_pd(aoffset5); + aoffset5 += 8; + row6 = _mm512_loadu_pd(aoffset6); + aoffset6 += 8; + row7 = _mm512_loadu_pd(aoffset7); + aoffset7 += 8; + row8 = _mm512_loadu_pd(aoffset8); + aoffset8 += 8; + + _mm512_storeu_pd(boffset1 + 0, row1); + _mm512_storeu_pd(boffset1 + 8, row2); + _mm512_storeu_pd(boffset1 + 16, row3); + _mm512_storeu_pd(boffset1 + 24, row4); + _mm512_storeu_pd(boffset1 + 32, row5); + _mm512_storeu_pd(boffset1 + 40, row6); + _mm512_storeu_pd(boffset1 + 48, row7); + _mm512_storeu_pd(boffset1 + 56, row8); + boffset1 += m * 8; + i --; + }while(i > 0); + } + + if (n & 4){ + __m256d row1, row2, row3, row4, row5, row6, row7, row8; + row1 = _mm256_loadu_pd(aoffset1); + aoffset1 += 4; + row2 = _mm256_loadu_pd(aoffset2); + aoffset2 += 4; + row3 = _mm256_loadu_pd(aoffset3); + aoffset3 += 4; + row4 = _mm256_loadu_pd(aoffset4); + aoffset4 += 4; + row5 = _mm256_loadu_pd(aoffset5); + aoffset5 += 4; + row6 = _mm256_loadu_pd(aoffset6); + aoffset6 += 4; + row7 = _mm256_loadu_pd(aoffset7); + aoffset7 += 4; + row8 = _mm256_loadu_pd(aoffset8); + aoffset8 += 4; 
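+ /* the 4-wide column tails of all eight rows are packed into their own */ + /* sub-panel at boffset2, placed after the full 8-wide panels */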
+ + _mm256_storeu_pd(boffset2 + 0, row1); + _mm256_storeu_pd(boffset2 + 4, row2); + _mm256_storeu_pd(boffset2 + 8, row3); + _mm256_storeu_pd(boffset2 + 12, row4); + _mm256_storeu_pd(boffset2 + 16, row5); + _mm256_storeu_pd(boffset2 + 20, row6); + _mm256_storeu_pd(boffset2 + 24, row7); + _mm256_storeu_pd(boffset2 + 28, row8); + boffset2 += 32; + } + + if (n & 2){ + __m128d row1, row2, row3, row4, row5, row6, row7, row8; + row1 = _mm_loadu_pd(aoffset1); + aoffset1 += 2; + + row2 = _mm_loadu_pd(aoffset2); + aoffset2 += 2; + + row3 = _mm_loadu_pd(aoffset3); + aoffset3 += 2; + + row4 = _mm_loadu_pd(aoffset4); + aoffset4 += 2; + + row5 = _mm_loadu_pd(aoffset5); + aoffset5 += 2; + + row6 = _mm_loadu_pd(aoffset6); + aoffset6 += 2; + + row7 = _mm_loadu_pd(aoffset7); + aoffset7 += 2; + + row8 = _mm_loadu_pd(aoffset8); + aoffset8 += 2; + + _mm_storeu_pd(boffset3 + 0, row1); + _mm_storeu_pd(boffset3 + 2, row2); + _mm_storeu_pd(boffset3 + 4, row3); + _mm_storeu_pd(boffset3 + 6, row4); + _mm_storeu_pd(boffset3 + 8, row5); + _mm_storeu_pd(boffset3 + 10, row6); + _mm_storeu_pd(boffset3 + 12, row7); + _mm_storeu_pd(boffset3 + 14, row8); + boffset3 += 16; + } + + if (n & 1){ + ctemp01 = *(aoffset1 + 0); + aoffset1 ++; + ctemp02 = *(aoffset2 + 0); + aoffset2 ++; + ctemp03 = *(aoffset3 + 0); + aoffset3 ++; + ctemp04 = *(aoffset4 + 0); + aoffset4 ++; + ctemp05 = *(aoffset5 + 0); + aoffset5 ++; + ctemp06 = *(aoffset6 + 0); + aoffset6 ++; + ctemp07 = *(aoffset7 + 0); + aoffset7 ++; + ctemp08 = *(aoffset8 + 0); + aoffset8 ++; + + *(boffset4 + 0) = ctemp01; + *(boffset4 + 1) = ctemp02; + *(boffset4 + 2) = ctemp03; + *(boffset4 + 3) = ctemp04; + *(boffset4 + 4) = ctemp05; + *(boffset4 + 5) = ctemp06; + *(boffset4 + 6) = ctemp07; + *(boffset4 + 7) = ctemp08; + boffset4 += 8; + } + + j--; + }while(j > 0); + } + + if (m & 4){ + + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset3 = aoffset2 + lda; + aoffset4 = aoffset3 + lda; + aoffset += 4 * lda; + + boffset1 = boffset; + boffset += 32; + + i = (n >> 3); + if (i > 0){ + + do{ + __m512d row1, row2, row3, row4; + row1 = _mm512_loadu_pd(aoffset1); + aoffset1 += 8; + row2 = _mm512_loadu_pd(aoffset2); + aoffset2 += 8; + row3 = _mm512_loadu_pd(aoffset3); + aoffset3 += 8; + row4 = _mm512_loadu_pd(aoffset4); + aoffset4 += 8; + + _mm512_storeu_pd(boffset1 + 0, row1); + _mm512_storeu_pd(boffset1 + 8, row2); + _mm512_storeu_pd(boffset1 + 16, row3); + _mm512_storeu_pd(boffset1 + 24, row4); + + boffset1 += 8 * m; + i --; + }while(i > 0); + } + + if (n & 4) { + __m256d row1, row2, row3, row4; + row1 = _mm256_loadu_pd(aoffset1); + aoffset1 += 4; + row2 = _mm256_loadu_pd(aoffset2); + aoffset2 += 4; + row3 = _mm256_loadu_pd(aoffset3); + aoffset3 += 4; + row4 = _mm256_loadu_pd(aoffset4); + aoffset4 += 4; + _mm256_storeu_pd(boffset2 + 0, row1); + _mm256_storeu_pd(boffset2 + 4, row2); + _mm256_storeu_pd(boffset2 + 8, row3); + _mm256_storeu_pd(boffset2 + 12, row4); + boffset2 += 16; + } + + if (n & 2){ + __m128d row1, row2, row3, row4; + row1 = _mm_loadu_pd(aoffset1); + aoffset1 += 2; + + row2 = _mm_loadu_pd(aoffset2); + aoffset2 += 2; + + row3 = _mm_loadu_pd(aoffset3); + aoffset3 += 2; + + row4 = _mm_loadu_pd(aoffset4); + aoffset4 += 2; + + + _mm_storeu_pd(boffset3 + 0, row1); + _mm_storeu_pd(boffset3 + 2, row2); + _mm_storeu_pd(boffset3 + 4, row3); + _mm_storeu_pd(boffset3 + 6, row4); + boffset3 += 8; + } + + if (n & 1){ + ctemp01 = *(aoffset1 + 0); + aoffset1 ++; + ctemp02 = *(aoffset2 + 0); + aoffset2 ++; + ctemp03 = *(aoffset3 + 0); + aoffset3 ++; + ctemp04 = *(aoffset4 + 
0); + aoffset4 ++; + + *(boffset4 + 0) = ctemp01; + *(boffset4 + 1) = ctemp02; + *(boffset4 + 2) = ctemp03; + *(boffset4 + 3) = ctemp04; + boffset4 += 4; + } + } + + if (m & 2){ + aoffset1 = aoffset; + aoffset2 = aoffset1 + lda; + aoffset += 2 * lda; + + boffset1 = boffset; + boffset += 16; + + i = (n >> 3); + if (i > 0){ + do{ + __m512d row1, row2; + row1 = _mm512_loadu_pd(aoffset1); + aoffset1 += 8; + row2 = _mm512_loadu_pd(aoffset2); + aoffset2 += 8; + + _mm512_storeu_pd(boffset1 + 0, row1); + _mm512_storeu_pd(boffset1 + 8, row2); + boffset1 += 8 * m; + i --; + }while(i > 0); + } + + if (n & 4){ + __m256d row1, row2; + row1 = _mm256_loadu_pd(aoffset1); + aoffset1 += 4; + row2 = _mm256_loadu_pd(aoffset2); + aoffset2 += 4; + _mm256_storeu_pd(boffset2 + 0, row1); + _mm256_storeu_pd(boffset2 + 4, row2); + boffset2 += 8; + } + + if (n & 2){ + __m128d row1, row2; + row1 = _mm_loadu_pd(aoffset1); + aoffset1 += 2; + + row2 = _mm_loadu_pd(aoffset2); + aoffset2 += 2; + + + _mm_storeu_pd(boffset3 + 0, row1); + _mm_storeu_pd(boffset3 + 2, row2); + boffset3 += 4; + } + + if (n & 1){ + ctemp01 = *(aoffset1 + 0); + aoffset1 ++; + ctemp02 = *(aoffset2 + 0); + aoffset2 ++; + + *(boffset4 + 0) = ctemp01; + *(boffset4 + 1) = ctemp02; + boffset4 += 2; + } + } + + if (m & 1){ + aoffset1 = aoffset; + // aoffset += lda; + + boffset1 = boffset; + // boffset += 8; + + i = (n >> 3); + if (i > 0){ + do{ + __m512d row1; + row1 = _mm512_loadu_pd(aoffset1); + aoffset1 += 8; + + _mm512_storeu_pd(boffset1 + 0, row1); + boffset1 += 8 * m; + i --; + }while(i > 0); + } + + if (n & 4){ + __m256d row1; + row1 = _mm256_loadu_pd(aoffset1); + aoffset1 += 4; + _mm256_storeu_pd(boffset2 + 0, row1); + // boffset2 += 4; + } + + if (n & 2){ + __m128d row1; + row1 = _mm_loadu_pd(aoffset1); + aoffset1 += 2; + + _mm_storeu_pd(boffset3 + 0, row1); + + // boffset3 += 2; + } + + if (n & 1){ + ctemp01 = *(aoffset1 + 0); + aoffset1 ++; + *(boffset4 + 0) = ctemp01; + boffset4 ++; + } + } + + return 0; +} diff --git a/kernel/x86_64/dgemv_n_4.c b/kernel/x86_64/dgemv_n_4.c index 1b9ca7a60..6d2530e81 100644 --- a/kernel/x86_64/dgemv_n_4.c +++ b/kernel/x86_64/dgemv_n_4.c @@ -33,6 +33,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "dgemv_n_microk_nehalem-4.c" #elif defined(HASWELL) || defined(ZEN) || defined(STEAMROLLER) || defined(EXCAVATOR) #include "dgemv_n_microk_haswell-4.c" +#elif defined (SKYLAKEX) +#include "dgemv_n_microk_skylakex-4.c" #endif diff --git a/kernel/x86_64/dgemv_n_microk_nehalem-4.c b/kernel/x86_64/dgemv_n_microk_nehalem-4.c index 09be7c2bb..641a6d898 100644 --- a/kernel/x86_64/dgemv_n_microk_nehalem-4.c +++ b/kernel/x86_64/dgemv_n_microk_nehalem-4.c @@ -62,7 +62,7 @@ static void dgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT "subq $4 , %1 \n\t" "jz 2f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "xorpd %%xmm4 , %%xmm4 \n\t" diff --git a/kernel/x86_64/dgemv_n_microk_skylakex-4.c b/kernel/x86_64/dgemv_n_microk_skylakex-4.c new file mode 100644 index 000000000..4030399ab --- /dev/null +++ b/kernel/x86_64/dgemv_n_microk_skylakex-4.c @@ -0,0 +1,126 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. 
Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +/* need a new enough GCC for avx512 support */ +#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX2__)) || (defined(__clang__) && __clang_major__ >= 6)) + +#define HAVE_KERNEL_4x4 1 + +#include <immintrin.h> + +static void dgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT *alpha) +{ + + int i = 0; + + __m256d x0, x1, x2, x3; + __m256d __alpha; + + x0 = _mm256_broadcastsd_pd(_mm_load_sd(&x[0])); + x1 = _mm256_broadcastsd_pd(_mm_load_sd(&x[1])); + x2 = _mm256_broadcastsd_pd(_mm_load_sd(&x[2])); + x3 = _mm256_broadcastsd_pd(_mm_load_sd(&x[3])); + + __alpha = _mm256_broadcastsd_pd(_mm_load_sd(alpha)); + +#ifdef __AVX512CD__ + int n5; + __m512d x05, x15, x25, x35; + __m512d __alpha5; + n5 = n & ~7; + + x05 = _mm512_broadcastsd_pd(_mm_load_sd(&x[0])); + x15 = _mm512_broadcastsd_pd(_mm_load_sd(&x[1])); + x25 = _mm512_broadcastsd_pd(_mm_load_sd(&x[2])); + x35 = _mm512_broadcastsd_pd(_mm_load_sd(&x[3])); + + __alpha5 = _mm512_broadcastsd_pd(_mm_load_sd(alpha)); + + for (; i < n5; i+= 8) { + __m512d tempY; + __m512d sum; + + sum = _mm512_loadu_pd(&ap[0][i]) * x05 + + _mm512_loadu_pd(&ap[1][i]) * x15 + + _mm512_loadu_pd(&ap[2][i]) * x25 + + _mm512_loadu_pd(&ap[3][i]) * x35; + + tempY = _mm512_loadu_pd(&y[i]); + tempY += sum * __alpha5; + _mm512_storeu_pd(&y[i], tempY); + } +#endif + + for (; i < n; i+= 4) { + __m256d tempY; + __m256d sum; + + sum = _mm256_loadu_pd(&ap[0][i]) * x0 + + _mm256_loadu_pd(&ap[1][i]) * x1 + + _mm256_loadu_pd(&ap[2][i]) * x2 + + _mm256_loadu_pd(&ap[3][i]) * x3; + + tempY = _mm256_loadu_pd(&y[i]); + tempY += sum * __alpha; + _mm256_storeu_pd(&y[i], tempY); + } + +} + + +#define HAVE_KERNEL_4x2 + +static void dgemv_kernel_4x2( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT *alpha) +{ + + int i = 0; + + __m256d x0, x1; + __m256d __alpha; + + x0 = _mm256_broadcastsd_pd(_mm_load_sd(&x[0])); + x1 = _mm256_broadcastsd_pd(_mm_load_sd(&x[1])); + + __alpha = _mm256_broadcastsd_pd(_mm_load_sd(alpha)); + + + for (i = 0; i < n; i+= 4) { + __m256d tempY; + __m256d sum; + + sum = _mm256_loadu_pd(&ap[0][i]) * x0 + _mm256_loadu_pd(&ap[1][i]) * x1; + + tempY = _mm256_loadu_pd(&y[i]); + tempY += sum * __alpha; + 
_mm256_storeu_pd(&y[i], tempY); + } + +} + +#else +#include "dgemv_n_microk_haswell-4.c" +#endif diff --git a/kernel/x86_64/dgemv_t_4.c b/kernel/x86_64/dgemv_t_4.c index 6b99d6fdd..a7478e3a8 100644 --- a/kernel/x86_64/dgemv_t_4.c +++ b/kernel/x86_64/dgemv_t_4.c @@ -28,7 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" -#if defined(HASWELL) || defined(ZEN) || defined(STEAMROLLER) || defined(EXCAVATOR) +#if defined(HASWELL) || defined(ZEN) || defined(STEAMROLLER) || defined(EXCAVATOR) || defined (SKYLAKEX) #include "dgemv_t_microk_haswell-4.c" #endif diff --git a/kernel/x86_64/dger_microk_sandy-2.c b/kernel/x86_64/dger_microk_sandy-2.c index 564f1356d..2bf966a5f 100644 --- a/kernel/x86_64/dger_microk_sandy-2.c +++ b/kernel/x86_64/dger_microk_sandy-2.c @@ -53,7 +53,7 @@ static void dger_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "subq $8, %1 \n\t" "jz 2f \n\t" - ".align 8 \n\t" + ".p2align 3 \n\t" "1: \n\t" "vmulpd %%xmm4, %%xmm0, %%xmm4 \n\t" diff --git a/kernel/x86_64/dscal.c b/kernel/x86_64/dscal.c index 78ad52179..ef9a0a6ba 100644 --- a/kernel/x86_64/dscal.c +++ b/kernel/x86_64/dscal.c @@ -33,6 +33,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "dscal_microk_sandy-2.c" #elif defined(HASWELL) || defined(ZEN) #include "dscal_microk_haswell-2.c" +#elif defined (SKYLAKEX) +#include "dscal_microk_skylakex-2.c" #endif @@ -99,7 +101,7 @@ static void dscal_kernel_inc_8(BLASLONG n, FLOAT *alpha, FLOAT *x, BLASLONG inc_ "leaq (%1,%4,4), %2 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "movsd (%1) , %%xmm4 \n\t" diff --git a/kernel/x86_64/dscal_microk_haswell-2.c b/kernel/x86_64/dscal_microk_haswell-2.c index 07a9c804c..e732a2718 100644 --- a/kernel/x86_64/dscal_microk_haswell-2.c +++ b/kernel/x86_64/dscal_microk_haswell-2.c @@ -58,7 +58,7 @@ static void dscal_kernel_8( BLASLONG n, FLOAT *alpha, FLOAT *x) "subq $1 , %0 \n\t" "jz 2f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" // "prefetcht0 640(%1) \n\t" @@ -156,7 +156,7 @@ static void dscal_kernel_8_zero( BLASLONG n, FLOAT *alpha, FLOAT *x) "cmpq $0, %0 \n\t" "je 2f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups %%xmm0 ,-128(%1) \n\t" diff --git a/kernel/x86_64/dscal_microk_sandy-2.c b/kernel/x86_64/dscal_microk_sandy-2.c index f5bf5932f..8d855072b 100644 --- a/kernel/x86_64/dscal_microk_sandy-2.c +++ b/kernel/x86_64/dscal_microk_sandy-2.c @@ -58,7 +58,7 @@ static void dscal_kernel_8( BLASLONG n, FLOAT *alpha, FLOAT *x) "subq $1 , %0 \n\t" "jz 2f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "prefetcht0 640(%1) \n\t" @@ -156,7 +156,7 @@ static void dscal_kernel_8_zero( BLASLONG n, FLOAT *alpha, FLOAT *x) "cmpq $0, %0 \n\t" "je 2f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups %%xmm0 ,-128(%1) \n\t" diff --git a/kernel/x86_64/dscal_microk_skylakex-2.c b/kernel/x86_64/dscal_microk_skylakex-2.c new file mode 100644 index 000000000..e0598272e --- /dev/null +++ b/kernel/x86_64/dscal_microk_skylakex-2.c @@ -0,0 +1,77 @@ +/*************************************************************************** +Copyright (c) 2014-2015, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + +/* need a new enough GCC for avx512 support */ +#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX2__)) || (defined(__clang__) && __clang_major__ >= 6)) + +#include <immintrin.h> + +#define HAVE_KERNEL_8 1 + +static void dscal_kernel_8( BLASLONG n, FLOAT *alpha, FLOAT *x) +{ + int i = 0; + +#ifdef __AVX512CD__ + __m512d __alpha5 = _mm512_broadcastsd_pd(_mm_load_sd(alpha)); + for (; i < n; i += 8) { + _mm512_storeu_pd(&x[i + 0], __alpha5 * _mm512_loadu_pd(&x[i + 0])); + } +#else + __m256d __alpha = _mm256_broadcastsd_pd(_mm_load_sd(alpha)); + for (; i < n; i += 8) { + _mm256_storeu_pd(&x[i + 0], __alpha * _mm256_loadu_pd(&x[i + 0])); + _mm256_storeu_pd(&x[i + 4], __alpha * _mm256_loadu_pd(&x[i + 4])); + } +#endif +} + + +static void dscal_kernel_8_zero( BLASLONG n, FLOAT *alpha, FLOAT *x) +{ + int i = 0; + + /* question to self: Why is this not just memset() */ + +#ifdef __AVX512CD__ + __m512d zero = _mm512_setzero_pd(); + for (; i < n; i += 8) { + _mm512_storeu_pd(&x[i], zero); + } +#else + __m256d zero = _mm256_setzero_pd(); + for (; i < n; i += 8) { + _mm256_storeu_pd(&x[i + 0], zero); + _mm256_storeu_pd(&x[i + 4], zero); + } +#endif + +} + +#else +#include "dscal_microk_haswell-2.c" +#endif diff --git a/kernel/x86_64/dsymv_L.c b/kernel/x86_64/dsymv_L.c index 3e8db3fa3..a722cc9df 100644 --- a/kernel/x86_64/dsymv_L.c +++ b/kernel/x86_64/dsymv_L.c @@ -32,6 +32,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "dsymv_L_microk_bulldozer-2.c" #elif defined(HASWELL) || defined(ZEN) #include "dsymv_L_microk_haswell-2.c" +#elif defined (SKYLAKEX) +#include "dsymv_L_microk_skylakex-2.c" #elif defined(SANDYBRIDGE) #include "dsymv_L_microk_sandy-2.c" #elif defined(NEHALEM) diff --git a/kernel/x86_64/dsymv_L_microk_haswell-2.c b/kernel/x86_64/dsymv_L_microk_haswell-2.c index bc5ec6b87..866782ee6 100644 --- a/kernel/x86_64/dsymv_L_microk_haswell-2.c +++ b/kernel/x86_64/dsymv_L_microk_haswell-2.c @@ -44,7 +44,7 @@ static void dsymv_kernel_4x4(BLASLONG from, BLASLONG to, FLOAT **a, FLOAT *x, FL "vbroadcastsd 16(%8), %%ymm6 \n\t" // temp1[1] "vbroadcastsd 24(%8), %%ymm7 \n\t" // temp1[1] - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%3,%0,8), %%ymm9 \n\t" // 2 * y diff --git a/kernel/x86_64/dsymv_L_microk_nehalem-2.c b/kernel/x86_64/dsymv_L_microk_nehalem-2.c index f7f7954b2..38479f77a 100644 --- a/kernel/x86_64/dsymv_L_microk_nehalem-2.c +++ b/kernel/x86_64/dsymv_L_microk_nehalem-2.c @@ -47,7 +47,7 @@ static void dsymv_kernel_4x4(BLASLONG from, BLASLONG to, FLOAT **a, FLOAT *x, FL "shufpd $0, %%xmm6, %%xmm6 \n\t" "shufpd $0, %%xmm7, %%xmm7 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "movups (%4,%0,8), %%xmm12 \n\t" // 2 * a "movups (%2,%0,8), %%xmm8 \n\t" // 2 * x diff --git a/kernel/x86_64/dsymv_L_microk_sandy-2.c b/kernel/x86_64/dsymv_L_microk_sandy-2.c index c87084915..b4e6ab369 100644 --- a/kernel/x86_64/dsymv_L_microk_sandy-2.c +++ b/kernel/x86_64/dsymv_L_microk_sandy-2.c @@ -44,7 +44,7 @@ static void dsymv_kernel_4x4(BLASLONG from, BLASLONG to, FLOAT **a, FLOAT *x, FL "vbroadcastsd 16(%8), %%ymm6 \n\t" // temp1[1] "vbroadcastsd 24(%8), %%ymm7 \n\t" // temp1[1] - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%3,%0,8), %%ymm9 \n\t" // 2 * y diff --git a/kernel/x86_64/dsymv_L_microk_skylakex-2.c b/kernel/x86_64/dsymv_L_microk_skylakex-2.c new file mode 100644 index 000000000..8244dffa1 --- /dev/null +++ b/kernel/x86_64/dsymv_L_microk_skylakex-2.c @@ -0,0 +1,161 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*****************************************************************************/ + + +/* need a new enough GCC for avx512 support */ +#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX2__)) || (defined(__clang__) && __clang_major__ >= 6)) + +#include <immintrin.h> + +#define HAVE_KERNEL_4x4 1 + +static void dsymv_kernel_4x4(BLASLONG from, BLASLONG to, FLOAT **a, FLOAT *x, FLOAT *y, FLOAT *temp1, FLOAT *temp2) +{ + + + __m256d accum_0, accum_1, accum_2, accum_3; + __m256d temp1_0, temp1_1, temp1_2, temp1_3; + + /* the 256 bit wide accumulator vectors start out as zero */ + accum_0 = _mm256_setzero_pd(); + accum_1 = _mm256_setzero_pd(); + accum_2 = _mm256_setzero_pd(); + accum_3 = _mm256_setzero_pd(); + + temp1_0 = _mm256_broadcastsd_pd(_mm_load_sd(&temp1[0])); + temp1_1 = _mm256_broadcastsd_pd(_mm_load_sd(&temp1[1])); + temp1_2 = _mm256_broadcastsd_pd(_mm_load_sd(&temp1[2])); + temp1_3 = _mm256_broadcastsd_pd(_mm_load_sd(&temp1[3])); + +#ifdef __AVX512CD__ + __m512d accum_05, accum_15, accum_25, accum_35; + __m512d temp1_05, temp1_15, temp1_25, temp1_35; + BLASLONG to2; + int delta; + + /* the 512 bit wide accumulator vectors start out as zero */ + accum_05 = _mm512_setzero_pd(); + accum_15 = _mm512_setzero_pd(); + accum_25 = _mm512_setzero_pd(); + accum_35 = _mm512_setzero_pd(); + + temp1_05 = _mm512_broadcastsd_pd(_mm_load_sd(&temp1[0])); + temp1_15 = _mm512_broadcastsd_pd(_mm_load_sd(&temp1[1])); + temp1_25 = _mm512_broadcastsd_pd(_mm_load_sd(&temp1[2])); + temp1_35 = _mm512_broadcastsd_pd(_mm_load_sd(&temp1[3])); + + delta = (to - from) & ~7; + to2 = from + delta; + + + for (; from < to2; from += 8) { + __m512d _x, _y; + __m512d a0, a1, a2, a3; + + _y = _mm512_loadu_pd(&y[from]); + _x = _mm512_loadu_pd(&x[from]); + + a0 = _mm512_loadu_pd(&a[0][from]); + a1 = _mm512_loadu_pd(&a[1][from]); + a2 = _mm512_loadu_pd(&a[2][from]); + a3 = _mm512_loadu_pd(&a[3][from]); + + _y += temp1_05 * a0 + temp1_15 * a1 + temp1_25 * a2 + temp1_35 * a3; + + accum_05 += _x * a0; + accum_15 += _x * a1; + accum_25 += _x * a2; + accum_35 += _x * a3; + + _mm512_storeu_pd(&y[from], _y); + + }; + + /* + * we need to fold our 512 bit wide accumulator vectors into 256 bit wide vectors so that the AVX2 code + * below can continue using the intermediate results in its loop + */ + accum_0 = _mm256_add_pd(_mm512_extractf64x4_pd(accum_05, 0), _mm512_extractf64x4_pd(accum_05, 1)); + accum_1 = _mm256_add_pd(_mm512_extractf64x4_pd(accum_15, 0), _mm512_extractf64x4_pd(accum_15, 1)); + accum_2 = _mm256_add_pd(_mm512_extractf64x4_pd(accum_25, 0), _mm512_extractf64x4_pd(accum_25, 1)); + accum_3 = _mm256_add_pd(_mm512_extractf64x4_pd(accum_35, 0), _mm512_extractf64x4_pd(accum_35, 1)); + +#endif + + for (; from != to; from += 4) { + __m256d _x, _y; + __m256d a0, a1, a2, a3; + + _y = _mm256_loadu_pd(&y[from]); + _x = _mm256_loadu_pd(&x[from]); + + /* load 4 rows of matrix data */ + a0 = _mm256_loadu_pd(&a[0][from]); + a1 = _mm256_loadu_pd(&a[1][from]); + a2 = _mm256_loadu_pd(&a[2][from]); + a3 = _mm256_loadu_pd(&a[3][from]); + 
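+ /* forward update: y += temp1 * A; the accumulators gather the x dot A */ + /* products that are reduced into temp2 once the loop is done */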
+ _y += temp1_0 * a0 + temp1_1 * a1 + temp1_2 * a2 + temp1_3 * a3; + + accum_0 += _x * a0; + accum_1 += _x * a1; + accum_2 += _x * a2; + accum_3 += _x * a3; + + _mm256_storeu_pd(&y[from], _y); + + }; + + /* + * we now have 4 accumulator vectors. Each vector needs to be summed up element wise and stored in the temp2 + * output array. There is no direct instruction for this in 256 bit space, only in 128 space. + */ + + __m128d half_accum0, half_accum1, half_accum2, half_accum3; + + + /* Add upper half to lower half of each of the four 256 bit vectors to get to four 128 bit vectors */ + half_accum0 = _mm_add_pd(_mm256_extractf128_pd(accum_0, 0), _mm256_extractf128_pd(accum_0, 1)); + half_accum1 = _mm_add_pd(_mm256_extractf128_pd(accum_1, 0), _mm256_extractf128_pd(accum_1, 1)); + half_accum2 = _mm_add_pd(_mm256_extractf128_pd(accum_2, 0), _mm256_extractf128_pd(accum_2, 1)); + half_accum3 = _mm_add_pd(_mm256_extractf128_pd(accum_3, 0), _mm256_extractf128_pd(accum_3, 1)); + + /* in 128 bit land there is a hadd operation to do the rest of the element-wise sum in one go */ + half_accum0 = _mm_hadd_pd(half_accum0, half_accum0); + half_accum1 = _mm_hadd_pd(half_accum1, half_accum1); + half_accum2 = _mm_hadd_pd(half_accum2, half_accum2); + half_accum3 = _mm_hadd_pd(half_accum3, half_accum3); + + /* and store the lowest double value from each of these vectors in the temp2 output */ + temp2[0] += half_accum0[0]; + temp2[1] += half_accum1[0]; + temp2[2] += half_accum2[0]; + temp2[3] += half_accum3[0]; +} +#else +#include "dsymv_L_microk_haswell-2.c" +#endif \ No newline at end of file diff --git a/kernel/x86_64/dsymv_U.c b/kernel/x86_64/dsymv_U.c index 61cb77a64..431e4bb3f 100644 --- a/kernel/x86_64/dsymv_U.c +++ b/kernel/x86_64/dsymv_U.c @@ -31,7 +31,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#if defined(BULLDOZER) || defined(PILEDRIVER) || defined(STEAMROLLER) || defined(EXCAVATOR) #include "dsymv_U_microk_bulldozer-2.c" -#elif defined(HASWELL) || defined(ZEN) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #include "dsymv_U_microk_haswell-2.c" #elif defined(SANDYBRIDGE) #include "dsymv_U_microk_sandy-2.c" diff --git a/kernel/x86_64/dsymv_U_microk_haswell-2.c b/kernel/x86_64/dsymv_U_microk_haswell-2.c index 6ce384f93..d83d20f8e 100644 --- a/kernel/x86_64/dsymv_U_microk_haswell-2.c +++ b/kernel/x86_64/dsymv_U_microk_haswell-2.c @@ -46,7 +46,7 @@ static void dsymv_kernel_4x4(BLASLONG n, FLOAT *a0, FLOAT *a1, FLOAT *a2, FLOAT "vbroadcastsd 24(%8), %%ymm7 \n\t" // temp1[1] "xorq %0,%0 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%3,%0,8), %%ymm9 \n\t" // 2 * y diff --git a/kernel/x86_64/dsymv_U_microk_nehalem-2.c b/kernel/x86_64/dsymv_U_microk_nehalem-2.c index 75e3d02d1..1344c75f7 100644 --- a/kernel/x86_64/dsymv_U_microk_nehalem-2.c +++ b/kernel/x86_64/dsymv_U_microk_nehalem-2.c @@ -50,7 +50,7 @@ static void dsymv_kernel_4x4(BLASLONG n, FLOAT *a0, FLOAT *a1, FLOAT *a2, FLOAT "xorq %0,%0 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "movups (%4,%0,8), %%xmm12 \n\t" // 2 * a "movups (%2,%0,8), %%xmm8 \n\t" // 2 * x diff --git a/kernel/x86_64/dsymv_U_microk_sandy-2.c b/kernel/x86_64/dsymv_U_microk_sandy-2.c index 212d4cf7b..1ef6fbafd 100644 --- a/kernel/x86_64/dsymv_U_microk_sandy-2.c +++ b/kernel/x86_64/dsymv_U_microk_sandy-2.c @@ -46,7 +46,7 @@ static void dsymv_kernel_4x4(BLASLONG n, FLOAT *a0, FLOAT *a1, FLOAT *a2, FLOAT "vbroadcastsd 24(%8), %%ymm7 \n\t" // temp1[1] "xorq %0,%0 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%3,%0,8), %%ymm9 \n\t" // 2 * y diff --git a/kernel/x86_64/dtrmm_kernel_4x8_haswell.c b/kernel/x86_64/dtrmm_kernel_4x8_haswell.c index 289af772e..651736b89 100644 --- a/kernel/x86_64/dtrmm_kernel_4x8_haswell.c +++ b/kernel/x86_64/dtrmm_kernel_4x8_haswell.c @@ -24,7 +24,7 @@ static void dtrmm_kernel_4x8( BLASLONG n, FLOAT *alpha ,FLOAT *a, FLOAT *b, FLOA " cmp $0, %1 \n\t" " jz 2f \n\t" - " .align 16 \n\t" + " .p2align 4 \n\t" "1: \n\t" " vmovups (%2,%0,4) , %%ymm0 \n\t" " vmovups (%3,%0,8) , %%ymm1 \n\t" diff --git a/kernel/x86_64/dtrsm_kernel_RN_haswell.c b/kernel/x86_64/dtrsm_kernel_RN_haswell.c index da90e40c7..fcab8e2c7 100644 --- a/kernel/x86_64/dtrsm_kernel_RN_haswell.c +++ b/kernel/x86_64/dtrsm_kernel_RN_haswell.c @@ -128,7 +128,7 @@ static void dtrsm_RN_solve_opt(BLASLONG n, FLOAT *a, FLOAT *b, FLOAT *c, BLASLON " cmpq %1, %0 \n\t" " je 21f \n\t" - " .align 16 \n\t" + " .p2align 4 \n\t" "1: \n\t" " vmovups (%2,%1,4), %%ymm4 \n\t" // read a diff --git a/kernel/x86_64/saxpy.c b/kernel/x86_64/saxpy.c index d89fe408a..e1349da58 100644 --- a/kernel/x86_64/saxpy.c +++ b/kernel/x86_64/saxpy.c @@ -33,6 +33,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "saxpy_microk_nehalem-2.c" #elif defined(HASWELL) || defined(ZEN) #include "saxpy_microk_haswell-2.c" +#elif defined (SKYLAKEX) +#include "saxpy_microk_skylakex-2.c" #elif defined(SANDYBRIDGE) #include "saxpy_microk_sandy-2.c" #elif defined(PILEDRIVER) || defined(STEAMROLLER) || defined(EXCAVATOR) diff --git a/kernel/x86_64/saxpy_microk_haswell-2.c b/kernel/x86_64/saxpy_microk_haswell-2.c index 496424207..3a743d64c 100644 --- a/kernel/x86_64/saxpy_microk_haswell-2.c +++ b/kernel/x86_64/saxpy_microk_haswell-2.c @@ -38,7 +38,7 @@ static void saxpy_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) ( "vbroadcastss (%4), %%ymm0 \n\t" // alpha - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%3,%0,4), %%ymm12 \n\t" // 8 * y diff --git a/kernel/x86_64/saxpy_microk_nehalem-2.c b/kernel/x86_64/saxpy_microk_nehalem-2.c index a09494935..68f68ea3a 100644 --- a/kernel/x86_64/saxpy_microk_nehalem-2.c +++ b/kernel/x86_64/saxpy_microk_nehalem-2.c @@ -39,7 +39,7 @@ static void saxpy_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "movss (%4), %%xmm0 \n\t" // alpha "shufps $0, %%xmm0, %%xmm0 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" // "prefetcht0 192(%2,%0,4) \n\t" // "prefetcht0 192(%3,%0,4) \n\t" diff --git a/kernel/x86_64/saxpy_microk_sandy-2.c b/kernel/x86_64/saxpy_microk_sandy-2.c index 159a23175..0a6bef046 100644 --- a/kernel/x86_64/saxpy_microk_sandy-2.c +++ b/kernel/x86_64/saxpy_microk_sandy-2.c @@ -50,7 +50,7 @@ static void saxpy_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "subq $32, %1 \n\t" "jz 2f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmulps %%ymm4, %%ymm0, %%ymm4 \n\t" diff --git a/kernel/x86_64/saxpy_microk_skylakex-2.c b/kernel/x86_64/saxpy_microk_skylakex-2.c new file mode 100644 index 000000000..950f10ba2 --- /dev/null +++ b/kernel/x86_64/saxpy_microk_skylakex-2.c @@ -0,0 +1,69 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +/* need a new enough GCC for avx512 support */ +#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX2__)) || (defined(__clang__) && __clang_major__ >= 6)) + +#define HAVE_KERNEL_16 1 + +#include <immintrin.h> + +static void saxpy_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) +{ + BLASLONG i = 0; + + __m256 __alpha; + + __alpha = _mm256_broadcastss_ps(_mm_load_ss(alpha)); + +#ifdef __AVX512CD__ + BLASLONG n64; + __m512 __alpha5; + __alpha5 = _mm512_broadcastss_ps(_mm_load_ss(alpha)); + + n64 = n & ~63; + + for (; i < n64; i+= 64) { + _mm512_storeu_ps(&y[i + 0], _mm512_loadu_ps(&y[i + 0]) + __alpha5 * _mm512_loadu_ps(&x[i + 0])); + _mm512_storeu_ps(&y[i + 16], _mm512_loadu_ps(&y[i + 16]) + __alpha5 * _mm512_loadu_ps(&x[i + 16])); + _mm512_storeu_ps(&y[i + 32], _mm512_loadu_ps(&y[i + 32]) + __alpha5 * _mm512_loadu_ps(&x[i + 32])); + _mm512_storeu_ps(&y[i + 48], _mm512_loadu_ps(&y[i + 48]) + __alpha5 * _mm512_loadu_ps(&x[i + 48])); + } + +#endif + + for (; i < n; i+= 32) { + _mm256_storeu_ps(&y[i + 0], _mm256_loadu_ps(&y[i + 0]) + __alpha * _mm256_loadu_ps(&x[i + 0])); + _mm256_storeu_ps(&y[i + 8], _mm256_loadu_ps(&y[i + 8]) + __alpha * _mm256_loadu_ps(&x[i + 8])); + _mm256_storeu_ps(&y[i + 16], _mm256_loadu_ps(&y[i + 16]) + __alpha * _mm256_loadu_ps(&x[i + 16])); + _mm256_storeu_ps(&y[i + 24], _mm256_loadu_ps(&y[i + 24]) + __alpha * _mm256_loadu_ps(&x[i + 24])); + } +} +#else +#include "saxpy_microk_haswell-2.c" +#endif + diff --git a/kernel/x86_64/sdot.c b/kernel/x86_64/sdot.c index b6f3c21af..3536afc9e 100644 --- a/kernel/x86_64/sdot.c +++ b/kernel/x86_64/sdot.c @@ -36,6 +36,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "sdot_microk_nehalem-2.c" #elif defined(HASWELL) || defined(ZEN) #include "sdot_microk_haswell-2.c" +#elif defined (SKYLAKEX) +#include "sdot_microk_skylakex-2.c" #elif defined(SANDYBRIDGE) #include "sdot_microk_sandy-2.c" #endif diff --git a/kernel/x86_64/sdot_microk_haswell-2.c b/kernel/x86_64/sdot_microk_haswell-2.c index 3248c408c..df367b61f 100644 --- a/kernel/x86_64/sdot_microk_haswell-2.c +++ b/kernel/x86_64/sdot_microk_haswell-2.c @@ -41,7 +41,7 @@ static void sdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) "vxorps %%ymm6, %%ymm6, %%ymm6 \n\t" "vxorps %%ymm7, %%ymm7, %%ymm7 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%2,%0,4), %%ymm12 \n\t" // 2 * x "vmovups 32(%2,%0,4), %%ymm13 \n\t" // 2 * x diff --git a/kernel/x86_64/sdot_microk_nehalem-2.c b/kernel/x86_64/sdot_microk_nehalem-2.c index b5f6a1c91..1a27177f5 100644 --- a/kernel/x86_64/sdot_microk_nehalem-2.c +++ b/kernel/x86_64/sdot_microk_nehalem-2.c @@ -41,7 +41,7 @@ static void sdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) "xorps %%xmm6, %%xmm6 \n\t" "xorps %%xmm7, %%xmm7 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "movups (%2,%0,4), %%xmm12 \n\t" // 4 * x "movups (%3,%0,4), %%xmm8 \n\t" // 4 * x diff --git a/kernel/x86_64/sdot_microk_sandy-2.c b/kernel/x86_64/sdot_microk_sandy-2.c index e265d16bd..ca13536f2 100644 --- a/kernel/x86_64/sdot_microk_sandy-2.c +++ b/kernel/x86_64/sdot_microk_sandy-2.c @@ -41,7 +41,7 @@ static void sdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) "vxorps %%ymm6, %%ymm6, %%ymm6 \n\t" "vxorps %%ymm7, %%ymm7, %%ymm7 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%2,%0,4), %%ymm12 \n\t" // 2 * x "vmovups 32(%2,%0,4), %%ymm13 \n\t" // 2 * x diff --git 
a/kernel/x86_64/sdot_microk_skylakex-2.c b/kernel/x86_64/sdot_microk_skylakex-2.c new file mode 100644 index 000000000..1fcb7f27c --- /dev/null +++ b/kernel/x86_64/sdot_microk_skylakex-2.c @@ -0,0 +1,98 @@ +/*************************************************************************** +Copyright (c) 2014, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*****************************************************************************/ + +/* need a new enough GCC for avx512 support */ +#if (( defined(__GNUC__) && __GNUC__ > 6 && defined(__AVX2__)) || (defined(__clang__) && __clang_major__ >= 6)) + +#define HAVE_KERNEL_16 1 + +#include <immintrin.h> + +static void sdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) +{ + BLASLONG i = 0; + __m256 accum_0, accum_1, accum_2, accum_3; + + accum_0 = _mm256_setzero_ps(); + accum_1 = _mm256_setzero_ps(); + accum_2 = _mm256_setzero_ps(); + accum_3 = _mm256_setzero_ps(); + +#ifdef __AVX512CD__ + __m512 accum_05, accum_15, accum_25, accum_35; + BLASLONG n64; + n64 = n & (~63); + + accum_05 = _mm512_setzero_ps(); + accum_15 = _mm512_setzero_ps(); + accum_25 = _mm512_setzero_ps(); + accum_35 = _mm512_setzero_ps(); + + for (; i < n64; i += 64) { + accum_05 += _mm512_loadu_ps(&x[i+ 0]) * _mm512_loadu_ps(&y[i+ 0]); + accum_15 += _mm512_loadu_ps(&x[i+16]) * _mm512_loadu_ps(&y[i+16]); + accum_25 += _mm512_loadu_ps(&x[i+32]) * _mm512_loadu_ps(&y[i+32]); + accum_35 += _mm512_loadu_ps(&x[i+48]) * _mm512_loadu_ps(&y[i+48]); + } + + /* + * we need to fold our 512 bit wide accumulator vectors into 256 bit wide vectors so that the AVX2 code + * below can continue using the intermediate results in its loop + */ + accum_0 = _mm512_extractf32x8_ps(accum_05, 0) + _mm512_extractf32x8_ps(accum_05, 1); + accum_1 = _mm512_extractf32x8_ps(accum_15, 0) + _mm512_extractf32x8_ps(accum_15, 1); + accum_2 = _mm512_extractf32x8_ps(accum_25, 0) + _mm512_extractf32x8_ps(accum_25, 1); + accum_3 = _mm512_extractf32x8_ps(accum_35, 0) + _mm512_extractf32x8_ps(accum_35, 1); + +#endif + for (; i < n; i += 32) { + accum_0 += _mm256_loadu_ps(&x[i+ 0]) * _mm256_loadu_ps(&y[i+ 0]); + accum_1 += _mm256_loadu_ps(&x[i+ 8]) * _mm256_loadu_ps(&y[i+ 8]); + accum_2 += _mm256_loadu_ps(&x[i+16]) * _mm256_loadu_ps(&y[i+16]); + accum_3 += _mm256_loadu_ps(&x[i+24]) * _mm256_loadu_ps(&y[i+24]); + } + + /* we now have the partial sums of the dot product in the 4 accumulation vectors, time to consolidate */ + + accum_0 = accum_0 + accum_1 + accum_2 + accum_3; + + __m128 half_accum0; + + /* Add upper half to lower half of each of the 256 bit vector to get a 128 bit vector */ + half_accum0 = _mm256_extractf128_ps(accum_0, 0) + _mm256_extractf128_ps(accum_0, 1); + + /* in 128 bit land there is a hadd operation to do the rest of the element-wise sum in one go */ + half_accum0 = _mm_hadd_ps(half_accum0, half_accum0); + half_accum0 = _mm_hadd_ps(half_accum0, half_accum0); + + *dot = half_accum0[0]; +} + +#else +#include "sdot_microk_haswell-2.c" +#endif diff --git a/kernel/x86_64/sgemm_beta_skylakex.c b/kernel/x86_64/sgemm_beta_skylakex.c new file mode 100644 index 000000000..1c29c1168 --- /dev/null +++ b/kernel/x86_64/sgemm_beta_skylakex.c @@ -0,0 +1,158 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. 
*/ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include "common.h" + +#include <immintrin.h> + +int CNAME(BLASLONG m, BLASLONG n, BLASLONG dummy1, FLOAT beta, + FLOAT *dummy2, BLASLONG dummy3, FLOAT *dummy4, BLASLONG dummy5, + FLOAT *c, BLASLONG ldc){ + + BLASLONG i, j; + FLOAT *c_offset1, *c_offset; + FLOAT ctemp1, ctemp2, ctemp3, ctemp4; + FLOAT ctemp5, ctemp6, ctemp7, ctemp8; + + /* fast path.. just zero the whole matrix */ + if (m == ldc && beta == ZERO) { + memset(c, 0, m * n * sizeof(FLOAT)); + return 0; + } + + if (n == 0 || m == 0) + return 0; + + c_offset = c; + + if (beta == ZERO){ + + j = n; + do { + c_offset1 = c_offset; + c_offset += ldc; + + i = m; +#ifdef __AVX2__ + while (i >= 32) { +#ifdef __AVX512CD__ + __m512 z_zero = _mm512_setzero_ps(); + _mm512_storeu_ps(c_offset1, z_zero); + _mm512_storeu_ps(c_offset1 + 16, z_zero); +#else + __m256 y_zero = _mm256_setzero_ps(); + _mm256_storeu_ps(c_offset1, y_zero); + _mm256_storeu_ps(c_offset1 + 8, y_zero); + _mm256_storeu_ps(c_offset1 + 16, y_zero); + _mm256_storeu_ps(c_offset1 + 24, y_zero); +#endif + c_offset1 += 32; + i -= 32; + } + while (i >= 8) { + __m256 y_zero = _mm256_setzero_ps(); + _mm256_storeu_ps(c_offset1, y_zero); + c_offset1 += 8; + i -= 8; + } +#endif + while (i > 0) { + *c_offset1 = ZERO; + c_offset1 ++; + i --; + } + j --; + } while (j > 0); + + } else { + + j = n; + do { + c_offset1 = c_offset; + c_offset += ldc; + + i = (m >> 3); + if (i > 0){ + do { + ctemp1 = *(c_offset1 + 0); + ctemp2 = *(c_offset1 + 1); + ctemp3 = *(c_offset1 + 2); + ctemp4 = *(c_offset1 + 3); + ctemp5 = *(c_offset1 + 4); + ctemp6 = *(c_offset1 + 5); + ctemp7 = *(c_offset1 + 6); + ctemp8 = *(c_offset1 + 7); + + ctemp1 *= beta; + ctemp2 *= beta; + ctemp3 *= beta; + ctemp4 *= beta; + ctemp5 *= beta; + ctemp6 *= beta; + ctemp7 *= beta; + ctemp8 *= beta; + + *(c_offset1 + 0) = ctemp1; + *(c_offset1 + 1) = ctemp2; + *(c_offset1 + 2) = ctemp3; + *(c_offset1 + 3) = ctemp4; + *(c_offset1 + 4) = ctemp5; + *(c_offset1 + 5) = ctemp6; + *(c_offset1 + 6) = ctemp7; + *(c_offset1 + 7) = ctemp8; + c_offset1 += 8; + i --; + } while (i > 0); + } + + i = (m & 7); + if (i > 0){ + do { + ctemp1 = *c_offset1; + ctemp1 *= beta; + *c_offset1 = ctemp1; + c_offset1 ++; + i --; + } while (i > 0); + } + j --; + } while (j > 0); + + } + return 0; +}; diff --git a/kernel/x86_64/sgemm_kernel_16x4_skylakex.S 
b/kernel/x86_64/sgemm_kernel_16x4_skylakex.S new file mode 100644 index 000000000..ac4421252 --- /dev/null +++ b/kernel/x86_64/sgemm_kernel_16x4_skylakex.S @@ -0,0 +1,6811 @@ +/********************************************************************************* +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +**********************************************************************************/ + +/********************************************************************* +* 2014/07/28 Saar +* BLASTEST : OK +* CTEST : OK +* TEST : OK +* +* 2013/10/28 Saar +* Parameter: +* SGEMM_DEFAULT_UNROLL_N 4 +* SGEMM_DEFAULT_UNROLL_M 16 +* SGEMM_DEFAULT_P 768 +* SGEMM_DEFAULT_Q 384 +* A_PR1 512 +* B_PR1 512 +* +* +* 2014/07/28 Saar +* Performance at 9216x9216x9216: +* 1 thread: 102 GFLOPS (SANDYBRIDGE: 59) (MKL: 83) +* 2 threads: 195 GFLOPS (SANDYBRIDGE: 116) (MKL: 155) +* 3 threads: 281 GFLOPS (SANDYBRIDGE: 165) (MKL: 230) +* 4 threads: 366 GFLOPS (SANDYBRIDGE: 223) (MKL: 267) +* +*********************************************************************/ + +#define ASSEMBLER +#include "common.h" + +#define OLD_M %rdi +#define OLD_N %rsi +#define M %r13 +#define J %r14 +#define OLD_K %rdx + +#define A %rcx +#define B %r8 +#define C %r9 +#define LDC %r10 + +#define I %r11 +#define AO %rdi +#define BO %rsi +#define CO1 %r15 +#define K %r12 +#define BI %rbp +#define BO2 %rbp +#define SP %rbx + +#define BO1 %rdi +#define CO2 %rdx + +#ifndef WINDOWS_ABI + +#define STACKSIZE 96 + +#else + +#define STACKSIZE 256 + +#define OLD_A 40 + STACKSIZE(%rsp) +#define OLD_B 48 + STACKSIZE(%rsp) +#define OLD_C 56 + STACKSIZE(%rsp) +#define OLD_LDC 64 + STACKSIZE(%rsp) +#define OLD_OFFSET 72 + STACKSIZE(%rsp) + +#endif + +#if defined(OS_WINDOWS) +#define L_BUFFER_SIZE 8192 +#else +#define L_BUFFER_SIZE 12288 +#endif + +#define Ndiv6 24(%rsp) +#define Nmod6 32(%rsp) +#define N 40(%rsp) +#define ALPHA 48(%rsp) +#define OFFSET 56(%rsp) +#define KK 64(%rsp) +#define KKK 72(%rsp) +#define BUFFER1 128(%rsp) + +#if defined(OS_WINDOWS) +#if L_BUFFER_SIZE > 16384 +#define STACK_TOUCH \ + movl $0, 4096 * 
4(%rsp);\ + movl $0, 4096 * 3(%rsp);\ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 12288 +#define STACK_TOUCH \ + movl $0, 4096 * 3(%rsp);\ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 8192 +#define STACK_TOUCH \ + movl $0, 4096 * 2(%rsp);\ + movl $0, 4096 * 1(%rsp); +#elif L_BUFFER_SIZE > 4096 +#define STACK_TOUCH \ + movl $0, 4096 * 1(%rsp); +#else +#define STACK_TOUCH +#endif +#else +#define STACK_TOUCH +#endif + +#if defined(BULLDOZER) + +#define VFMADD231PS_( y0,y1,y2 ) vfmaddps y0,y1,y2,y0 + +#define VFMADD231SS_( x0,x1,x2 ) vfmaddss x0,x1,x2,x0 + +#else + +#define VFMADD231PS_( y0,y1,y2 ) vfmadd231ps y1,y2,y0 + +#define VFMADD231SS_( x0,x1,x2 ) vfmadd231ss x1,x2,x0 + +#endif + + +#define A_PR1 512 +#define B_PR1 512 + +/******************************************************************************************* +* 6 lines of N +*******************************************************************************************/ + +.macro KERNEL16x6_SUB + vmovups -16 * SIZE(AO), %zmm0 + vbroadcastss -4 * SIZE(BO), %zmm2 + vbroadcastss -3 * SIZE(BO), %zmm3 +# prefetcht0 A_PR1(AO) + + VFMADD231PS_( %zmm4,%zmm2,%zmm0 ) + VFMADD231PS_( %zmm6,%zmm3,%zmm0 ) + + vbroadcastss -2 * SIZE(BO), %zmm2 + vbroadcastss -1 * SIZE(BO), %zmm3 + VFMADD231PS_( %zmm8,%zmm2,%zmm0 ) + VFMADD231PS_( %zmm10,%zmm3,%zmm0 ) + + vbroadcastss 0 * SIZE(BO), %zmm2 + vbroadcastss 1 * SIZE(BO), %zmm3 + VFMADD231PS_( %zmm12,%zmm2,%zmm0 ) + VFMADD231PS_( %zmm14,%zmm3,%zmm0 ) + + addq $ 6*SIZE, BO + addq $ 16*SIZE, AO + decq %rax +.endm + +.macro KERNEL16x6_SUB4 + vmovups -16 * SIZE(AO), %zmm0 + vbroadcastss -4 * SIZE(BO), %zmm2 + vbroadcastss -3 * SIZE(BO), %zmm3 + + VFMADD231PS_( %zmm4,%zmm2,%zmm0 ) + VFMADD231PS_( %zmm6,%zmm3,%zmm0 ) + + vbroadcastss -2 * SIZE(BO), %zmm7 + vbroadcastss -1 * SIZE(BO), %zmm9 + VFMADD231PS_( %zmm8,%zmm7,%zmm0 ) + VFMADD231PS_( %zmm10,%zmm9,%zmm0 ) + + vbroadcastss 0 * SIZE(BO), %zmm11 + vbroadcastss 1 * SIZE(BO), %zmm13 + VFMADD231PS_( %zmm12,%zmm11,%zmm0 ) + VFMADD231PS_( %zmm14,%zmm13,%zmm0 ) + + addq $ 6*SIZE, BO + addq $ 16*SIZE, AO + decq %rax + vmovups -16 * SIZE(AO), %zmm0 + vbroadcastss -4 * SIZE(BO), %zmm16 + vbroadcastss -3 * SIZE(BO), %zmm17 + + VFMADD231PS_( %zmm4,%zmm16,%zmm0 ) + VFMADD231PS_( %zmm6,%zmm17,%zmm0 ) + + vbroadcastss -2 * SIZE(BO), %zmm18 + vbroadcastss -1 * SIZE(BO), %zmm19 + VFMADD231PS_( %zmm8,%zmm18,%zmm0 ) + VFMADD231PS_( %zmm10,%zmm19,%zmm0 ) + + vbroadcastss 0 * SIZE(BO), %zmm20 + vbroadcastss 1 * SIZE(BO), %zmm21 + VFMADD231PS_( %zmm12,%zmm20,%zmm0 ) + VFMADD231PS_( %zmm14,%zmm21,%zmm0 ) + + addq $ 6*SIZE, BO + addq $ 16*SIZE, AO + decq %rax + + vmovups -16 * SIZE(AO), %zmm0 + vbroadcastss -4 * SIZE(BO), %zmm22 + vbroadcastss -3 * SIZE(BO), %zmm23 + + VFMADD231PS_( %zmm4,%zmm22,%zmm0 ) + VFMADD231PS_( %zmm6,%zmm23,%zmm0 ) + + vbroadcastss -2 * SIZE(BO), %zmm24 + vbroadcastss -1 * SIZE(BO), %zmm25 + VFMADD231PS_( %zmm8,%zmm24,%zmm0 ) + VFMADD231PS_( %zmm10,%zmm25,%zmm0 ) + + vbroadcastss 0 * SIZE(BO), %zmm26 + vbroadcastss 1 * SIZE(BO), %zmm27 + VFMADD231PS_( %zmm12,%zmm26,%zmm0 ) + VFMADD231PS_( %zmm14,%zmm27,%zmm0 ) + + addq $ 6*SIZE, BO + addq $ 16*SIZE, AO + decq %rax + vmovups -16 * SIZE(AO), %zmm0 + vbroadcastss -4 * SIZE(BO), %zmm28 + vbroadcastss -3 * SIZE(BO), %zmm29 + + VFMADD231PS_( %zmm4,%zmm28,%zmm0 ) + VFMADD231PS_( %zmm6,%zmm29,%zmm0 ) + + vbroadcastss -2 * SIZE(BO), %zmm30 + vbroadcastss -1 * SIZE(BO), %zmm31 + VFMADD231PS_( %zmm8,%zmm30,%zmm0 ) + VFMADD231PS_( %zmm10,%zmm31,%zmm0 ) 
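+ /* the dedicated broadcast registers run out at zmm31, so the final pair of column broadcasts reuses zmm1 and zmm5 */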
+ + vbroadcastss 0 * SIZE(BO), %zmm1 + vbroadcastss 1 * SIZE(BO), %zmm5 + VFMADD231PS_( %zmm12,%zmm1,%zmm0 ) + VFMADD231PS_( %zmm14,%zmm5,%zmm0 ) + + addq $ 6*SIZE, BO + addq $ 16*SIZE, AO + decq %rax +.endm + +.macro SAVE16x6 + + vbroadcastss ALPHA, %zmm0 + + vmulps %zmm0 , %zmm4 , %zmm4 + vmulps %zmm0 , %zmm6 , %zmm6 + vmulps %zmm0 , %zmm8 , %zmm8 + vmulps %zmm0 , %zmm10, %zmm10 + vmulps %zmm0 , %zmm12, %zmm12 + vmulps %zmm0 , %zmm14, %zmm14 + + +#if !defined(TRMMKERNEL) + + vaddps (CO1), %zmm4,%zmm4 + + vaddps (CO1, LDC), %zmm6,%zmm6 + + vaddps (CO1, LDC,2), %zmm8,%zmm8 + + vaddps (CO2), %zmm10,%zmm10 + + vaddps (CO2, LDC), %zmm12,%zmm12 + + vaddps (CO2, LDC,2), %zmm14,%zmm14 + +#endif + + vmovups %zmm4 , (CO1) + + vmovups %zmm6 , (CO1, LDC) + + vmovups %zmm8 , (CO1, LDC,2) + + vmovups %zmm10, (CO2) + + vmovups %zmm12, (CO2, LDC) + + vmovups %zmm14, (CO2, LDC,2) + +.endm + + + + +/*******************************************************************************************/ + +.macro KERNEL8x6_SUB + vmovups -16 * SIZE(AO), %ymm0 + vbroadcastss -4 * SIZE(BO), %ymm2 + vbroadcastss -3 * SIZE(BO), %ymm3 + + VFMADD231PS_( %ymm4,%ymm2,%ymm0 ) + VFMADD231PS_( %ymm6,%ymm3,%ymm0 ) + + vbroadcastss -2 * SIZE(BO), %ymm2 + vbroadcastss -1 * SIZE(BO), %ymm3 + VFMADD231PS_( %ymm8,%ymm2,%ymm0 ) + VFMADD231PS_( %ymm10,%ymm3,%ymm0 ) + + vbroadcastss 0 * SIZE(BO), %ymm2 + vbroadcastss 1 * SIZE(BO), %ymm3 + VFMADD231PS_( %ymm12,%ymm2,%ymm0 ) + VFMADD231PS_( %ymm14,%ymm3,%ymm0 ) + + addq $ 6*SIZE, BO + addq $ 8*SIZE, AO + decq %rax +.endm + +.macro SAVE8x6 + + vbroadcastss ALPHA, %ymm0 + + vmulps %ymm0 , %ymm4 , %ymm4 + vmulps %ymm0 , %ymm6 , %ymm6 + vmulps %ymm0 , %ymm8 , %ymm8 + vmulps %ymm0 , %ymm10, %ymm10 + vmulps %ymm0 , %ymm12, %ymm12 + vmulps %ymm0 , %ymm14, %ymm14 + + +#if !defined(TRMMKERNEL) + + vaddps (CO1), %ymm4,%ymm4 + vaddps (CO1, LDC), %ymm6,%ymm6 + vaddps (CO1, LDC,2), %ymm8,%ymm8 + vaddps (CO2), %ymm10,%ymm10 + vaddps (CO2, LDC), %ymm12,%ymm12 + vaddps (CO2, LDC,2), %ymm14,%ymm14 + +#endif + + vmovups %ymm4 , (CO1) + vmovups %ymm6 , (CO1, LDC) + vmovups %ymm8 , (CO1, LDC,2) + vmovups %ymm10, (CO2) + vmovups %ymm12, (CO2, LDC) + vmovups %ymm14, (CO2, LDC,2) + +.endm + + + +/*******************************************************************************************/ + +.macro KERNEL4x6_SUB + vmovups -16 * SIZE(AO), %xmm0 + vbroadcastss -4 * SIZE(BO), %xmm2 + vbroadcastss -3 * SIZE(BO), %xmm3 + + VFMADD231PS_( %xmm4,%xmm2,%xmm0 ) + VFMADD231PS_( %xmm6,%xmm3,%xmm0 ) + + vbroadcastss -2 * SIZE(BO), %xmm2 + vbroadcastss -1 * SIZE(BO), %xmm3 + VFMADD231PS_( %xmm8,%xmm2,%xmm0 ) + VFMADD231PS_( %xmm10,%xmm3,%xmm0 ) + + vbroadcastss 0 * SIZE(BO), %xmm2 + vbroadcastss 1 * SIZE(BO), %xmm3 + VFMADD231PS_( %xmm12,%xmm2,%xmm0 ) + VFMADD231PS_( %xmm14,%xmm3,%xmm0 ) + + addq $ 6*SIZE, BO + addq $ 4*SIZE, AO + decq %rax +.endm + +.macro SAVE4x6 + + vbroadcastss ALPHA, %xmm0 + + vmulps %xmm0 , %xmm4 , %xmm4 + vmulps %xmm0 , %xmm6 , %xmm6 + vmulps %xmm0 , %xmm8 , %xmm8 + vmulps %xmm0 , %xmm10, %xmm10 + vmulps %xmm0 , %xmm12, %xmm12 + vmulps %xmm0 , %xmm14, %xmm14 + + +#if !defined(TRMMKERNEL) + + vaddps (CO1), %xmm4,%xmm4 + vaddps (CO1, LDC), %xmm6,%xmm6 + vaddps (CO1, LDC,2), %xmm8,%xmm8 + vaddps (CO2), %xmm10,%xmm10 + vaddps (CO2, LDC), %xmm12,%xmm12 + vaddps (CO2, LDC,2), %xmm14,%xmm14 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm6 , (CO1, LDC) + vmovups %xmm8 , (CO1, LDC,2) + vmovups %xmm10, (CO2) + vmovups %xmm12, (CO2, LDC) + vmovups %xmm14, (CO2, LDC,2) + +.endm + + 
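+/* The 2x6 and 1x6 kernels below drop to scalar loads (vmovss) and scalar FMAs via VFMADD231SS_, keeping one xmm accumulator per element of C. */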
+/*******************************************************************************************/ + +.macro KERNEL2x6_SUB + vmovss -16 * SIZE(AO), %xmm0 + vmovss -15 * SIZE(AO), %xmm1 + vmovss -4 * SIZE(BO), %xmm2 + vmovss -3 * SIZE(BO), %xmm3 + + VFMADD231SS_( %xmm4,%xmm2,%xmm0 ) + VFMADD231SS_( %xmm5,%xmm2,%xmm1 ) + VFMADD231SS_( %xmm6,%xmm3,%xmm0 ) + VFMADD231SS_( %xmm7,%xmm3,%xmm1 ) + + vmovss -2 * SIZE(BO), %xmm2 + vmovss -1 * SIZE(BO), %xmm3 + VFMADD231SS_( %xmm8,%xmm2,%xmm0 ) + VFMADD231SS_( %xmm9,%xmm2,%xmm1 ) + VFMADD231SS_( %xmm10,%xmm3,%xmm0 ) + VFMADD231SS_( %xmm11,%xmm3,%xmm1 ) + + vmovss 0 * SIZE(BO), %xmm2 + vmovss 1 * SIZE(BO), %xmm3 + VFMADD231SS_( %xmm12,%xmm2,%xmm0 ) + VFMADD231SS_( %xmm13,%xmm2,%xmm1 ) + VFMADD231SS_( %xmm14,%xmm3,%xmm0 ) + VFMADD231SS_( %xmm15,%xmm3,%xmm1 ) + + addq $ 6*SIZE, BO + addq $ 2*SIZE, AO + decq %rax +.endm + +.macro SAVE2x6 + + vmovss ALPHA, %xmm0 + + vmulss %xmm0 , %xmm4 , %xmm4 + vmulss %xmm0 , %xmm5 , %xmm5 + vmulss %xmm0 , %xmm6 , %xmm6 + vmulss %xmm0 , %xmm7 , %xmm7 + vmulss %xmm0 , %xmm8 , %xmm8 + vmulss %xmm0 , %xmm9 , %xmm9 + vmulss %xmm0 , %xmm10, %xmm10 + vmulss %xmm0 , %xmm11, %xmm11 + vmulss %xmm0 , %xmm12, %xmm12 + vmulss %xmm0 , %xmm13, %xmm13 + vmulss %xmm0 , %xmm14, %xmm14 + vmulss %xmm0 , %xmm15, %xmm15 + + +#if !defined(TRMMKERNEL) + + vaddss (CO1), %xmm4,%xmm4 + vaddss 1 * SIZE(CO1), %xmm5,%xmm5 + + vaddss (CO1, LDC), %xmm6,%xmm6 + vaddss 1 * SIZE(CO1, LDC), %xmm7,%xmm7 + + vaddss (CO1, LDC,2), %xmm8,%xmm8 + vaddss 1 * SIZE(CO1, LDC,2), %xmm9,%xmm9 + + vaddss (CO2), %xmm10,%xmm10 + vaddss 1 * SIZE(CO2), %xmm11,%xmm11 + + vaddss (CO2, LDC), %xmm12,%xmm12 + vaddss 1 * SIZE(CO2, LDC), %xmm13,%xmm13 + + vaddss (CO2, LDC,2), %xmm14,%xmm14 + vaddss 1 * SIZE(CO2, LDC,2), %xmm15,%xmm15 + +#endif + + vmovss %xmm4 , (CO1) + vmovss %xmm5 , 1 * SIZE(CO1) + + vmovss %xmm6 , (CO1, LDC) + vmovss %xmm7 , 1 * SIZE(CO1, LDC) + + vmovss %xmm8 , (CO1, LDC,2) + vmovss %xmm9 , 1 * SIZE(CO1, LDC,2) + + vmovss %xmm10, (CO2) + vmovss %xmm11, 1 * SIZE(CO2) + + vmovss %xmm12, (CO2, LDC) + vmovss %xmm13, 1 * SIZE(CO2, LDC) + + vmovss %xmm14, (CO2, LDC,2) + vmovss %xmm15, 1 * SIZE(CO2, LDC,2) + +.endm + + +/*******************************************************************************************/ + +.macro KERNEL1x6_SUB + vmovss -16 * SIZE(AO), %xmm0 + vmovss -4 * SIZE(BO), %xmm2 + vmovss -3 * SIZE(BO), %xmm3 + + VFMADD231SS_( %xmm4,%xmm2,%xmm0 ) + VFMADD231SS_( %xmm6,%xmm3,%xmm0 ) + + vmovss -2 * SIZE(BO), %xmm2 + vmovss -1 * SIZE(BO), %xmm3 + VFMADD231SS_( %xmm8,%xmm2,%xmm0 ) + VFMADD231SS_( %xmm10,%xmm3,%xmm0 ) + + vmovss 0 * SIZE(BO), %xmm2 + vmovss 1 * SIZE(BO), %xmm3 + VFMADD231SS_( %xmm12,%xmm2,%xmm0 ) + VFMADD231SS_( %xmm14,%xmm3,%xmm0 ) + + addq $ 6*SIZE, BO + addq $ 1*SIZE, AO + decq %rax +.endm + +.macro SAVE1x6 + + vmovss ALPHA, %xmm0 + + vmulss %xmm0 , %xmm4 , %xmm4 + vmulss %xmm0 , %xmm6 , %xmm6 + vmulss %xmm0 , %xmm8 , %xmm8 + vmulss %xmm0 , %xmm10, %xmm10 + vmulss %xmm0 , %xmm12, %xmm12 + vmulss %xmm0 , %xmm14, %xmm14 + +#if !defined(TRMMKERNEL) + + vaddss (CO1), %xmm4,%xmm4 + vaddss (CO1, LDC), %xmm6,%xmm6 + vaddss (CO1, LDC,2), %xmm8,%xmm8 + vaddss (CO2), %xmm10,%xmm10 + vaddss (CO2, LDC), %xmm12,%xmm12 + vaddss (CO2, LDC,2), %xmm14,%xmm14 + +#endif + + vmovss %xmm4 , (CO1) + vmovss %xmm6 , (CO1, LDC) + vmovss %xmm8 , (CO1, LDC,2) + vmovss %xmm10, (CO2) + vmovss %xmm12, (CO2, LDC) + vmovss %xmm14, (CO2, LDC,2) + +.endm + + +/*******************************************************************************************/ + + 
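+/* Note the addressing change from here on: the 6-column kernels above advance AO and BO directly, while the 4-, 2- and 1-column kernels below index A through %rax and B through BI so the TRMM variants can offset and rewind them. */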
+/******************************************************************************************* +* 4 lines of N +*******************************************************************************************/ + +.macro KERNEL16x4_SUB + vmovups -16 * SIZE(AO, %rax, SIZE), %zmm0 + vbroadcastss -4 * SIZE(BO, BI, SIZE), %zmm2 + vbroadcastss -3 * SIZE(BO, BI, SIZE), %zmm3 + VFMADD231PS_( %zmm4,%zmm2,%zmm0 ) + VFMADD231PS_( %zmm6,%zmm3,%zmm0 ) + vbroadcastss -2 * SIZE(BO, BI, SIZE), %zmm2 + vbroadcastss -1 * SIZE(BO, BI, SIZE), %zmm3 + VFMADD231PS_( %zmm8,%zmm2,%zmm0 ) + VFMADD231PS_( %zmm10,%zmm3,%zmm0 ) + addq $ 4 , BI + addq $ 16, %rax +.endm + +.macro SAVE16x4 + + vbroadcastss ALPHA, %zmm0 + + vmulps %zmm0 , %zmm4 , %zmm4 + vmulps %zmm0 , %zmm6 , %zmm6 + vmulps %zmm0 , %zmm8 , %zmm8 + vmulps %zmm0 , %zmm10, %zmm10 + + +#if !defined(TRMMKERNEL) + + vaddps (CO1), %zmm4,%zmm4 + + vaddps (CO1, LDC), %zmm6,%zmm6 + + vaddps (CO2), %zmm8,%zmm8 + + vaddps (CO2, LDC), %zmm10,%zmm10 + +#endif + + vmovups %zmm4 , (CO1) + + vmovups %zmm6 , (CO1, LDC) + + vmovups %zmm8 , (CO2) + + vmovups %zmm10, (CO2, LDC) + + prefetcht0 64(CO1) + prefetcht0 64(CO1, LDC) + prefetcht0 64(CO2) + prefetcht0 64(CO2, LDC) + +.endm + + + +/*******************************************************************************************/ + +.macro KERNEL8x4_SUB + vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0 + vbroadcastss -4 * SIZE(BO, BI, SIZE), %ymm2 + vbroadcastss -3 * SIZE(BO, BI, SIZE), %ymm3 + VFMADD231PS_( %ymm4,%ymm2,%ymm0 ) + VFMADD231PS_( %ymm6,%ymm3,%ymm0 ) + vbroadcastss -2 * SIZE(BO, BI, SIZE), %ymm2 + vbroadcastss -1 * SIZE(BO, BI, SIZE), %ymm3 + VFMADD231PS_( %ymm8,%ymm2,%ymm0 ) + VFMADD231PS_( %ymm10,%ymm3,%ymm0 ) + addq $ 4 , BI + addq $ 8 , %rax +.endm + +.macro SAVE8x4 + + vbroadcastss ALPHA, %ymm0 + + vmulps %ymm0 , %ymm4 , %ymm4 + vmulps %ymm0 , %ymm6 , %ymm6 + vmulps %ymm0 , %ymm8 , %ymm8 + vmulps %ymm0 , %ymm10, %ymm10 + + +#if !defined(TRMMKERNEL) + + vaddps (CO1), %ymm4,%ymm4 + vaddps (CO1, LDC), %ymm6,%ymm6 + vaddps (CO2), %ymm8,%ymm8 + vaddps (CO2, LDC), %ymm10,%ymm10 + +#endif + + vmovups %ymm4 , (CO1) + vmovups %ymm6 , (CO1, LDC) + vmovups %ymm8 , (CO2) + vmovups %ymm10, (CO2, LDC) + +.endm + + + +/*******************************************************************************************/ + +.macro KERNEL4x4_SUB + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm2 + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231PS_( %xmm4,%xmm2,%xmm0 ) + VFMADD231PS_( %xmm6,%xmm3,%xmm0 ) + vbroadcastss -2 * SIZE(BO, BI, SIZE), %xmm2 + vbroadcastss -1 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231PS_( %xmm8,%xmm2,%xmm0 ) + VFMADD231PS_( %xmm10,%xmm3,%xmm0 ) + addq $ 4 , BI + addq $ 4 , %rax +.endm + +.macro SAVE4x4 + + vbroadcastss ALPHA, %xmm0 + + vmulps %xmm0 , %xmm4 , %xmm4 + vmulps %xmm0 , %xmm6 , %xmm6 + vmulps %xmm0 , %xmm8 , %xmm8 + vmulps %xmm0 , %xmm10, %xmm10 + + +#if !defined(TRMMKERNEL) + + vaddps (CO1), %xmm4,%xmm4 + vaddps (CO1, LDC), %xmm6,%xmm6 + vaddps (CO2), %xmm8,%xmm8 + vaddps (CO2, LDC), %xmm10,%xmm10 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm6 , (CO1, LDC) + vmovups %xmm8 , (CO2) + vmovups %xmm10, (CO2, LDC) + +.endm + + +/*******************************************************************************************/ + +.macro KERNEL2x4_SUB + vmovss -16 * SIZE(AO, %rax, SIZE), %xmm0 + vmovss -15 * SIZE(AO, %rax, SIZE), %xmm1 + vmovss -4 * SIZE(BO, BI, SIZE), %xmm2 + vmovss -3 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231SS_( %xmm4,%xmm2,%xmm0 ) + VFMADD231SS_( 
%xmm5,%xmm2,%xmm1 ) + VFMADD231SS_( %xmm6,%xmm3,%xmm0 ) + VFMADD231SS_( %xmm7,%xmm3,%xmm1 ) + vmovss -2 * SIZE(BO, BI, SIZE), %xmm2 + vmovss -1 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231SS_( %xmm8,%xmm2,%xmm0 ) + VFMADD231SS_( %xmm9,%xmm2,%xmm1 ) + VFMADD231SS_( %xmm10,%xmm3,%xmm0 ) + VFMADD231SS_( %xmm11,%xmm3,%xmm1 ) + addq $ 4 , BI + addq $ 2, %rax +.endm + +.macro SAVE2x4 + + vmovss ALPHA, %xmm0 + + vmulss %xmm0 , %xmm4 , %xmm4 + vmulss %xmm0 , %xmm5 , %xmm5 + vmulss %xmm0 , %xmm6 , %xmm6 + vmulss %xmm0 , %xmm7 , %xmm7 + vmulss %xmm0 , %xmm8 , %xmm8 + vmulss %xmm0 , %xmm9 , %xmm9 + vmulss %xmm0 , %xmm10, %xmm10 + vmulss %xmm0 , %xmm11, %xmm11 + + +#if !defined(TRMMKERNEL) + + vaddss (CO1), %xmm4,%xmm4 + vaddss 1 * SIZE(CO1), %xmm5,%xmm5 + + vaddss (CO1, LDC), %xmm6,%xmm6 + vaddss 1 * SIZE(CO1, LDC), %xmm7,%xmm7 + + vaddss (CO2), %xmm8,%xmm8 + vaddss 1 * SIZE(CO2), %xmm9,%xmm9 + + vaddss (CO2, LDC), %xmm10,%xmm10 + vaddss 1 * SIZE(CO2, LDC), %xmm11,%xmm11 + +#endif + + vmovss %xmm4 , (CO1) + vmovss %xmm5 , 1 * SIZE(CO1) + + vmovss %xmm6 , (CO1, LDC) + vmovss %xmm7 , 1 * SIZE(CO1, LDC) + + vmovss %xmm8 , (CO2) + vmovss %xmm9 , 1 * SIZE(CO2) + + vmovss %xmm10, (CO2, LDC) + vmovss %xmm11, 1 * SIZE(CO2, LDC) + +.endm + + +/*******************************************************************************************/ + +.macro KERNEL1x4_SUB + vmovss -16 * SIZE(AO, %rax, SIZE), %xmm0 + vmovss -4 * SIZE(BO, BI, SIZE), %xmm2 + vmovss -3 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231SS_( %xmm4,%xmm2,%xmm0 ) + VFMADD231SS_( %xmm6,%xmm3,%xmm0 ) + vmovss -2 * SIZE(BO, BI, SIZE), %xmm2 + vmovss -1 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231SS_( %xmm8,%xmm2,%xmm0 ) + VFMADD231SS_( %xmm10,%xmm3,%xmm0 ) + addq $ 4 , BI + addq $ 1, %rax +.endm + +.macro SAVE1x4 + + vmovss ALPHA, %xmm0 + + vmulss %xmm0 , %xmm4 , %xmm4 + vmulss %xmm0 , %xmm6 , %xmm6 + vmulss %xmm0 , %xmm8 , %xmm8 + vmulss %xmm0 , %xmm10, %xmm10 + + +#if !defined(TRMMKERNEL) + + vaddss (CO1), %xmm4,%xmm4 + vaddss (CO1, LDC), %xmm6,%xmm6 + vaddss (CO2), %xmm8,%xmm8 + vaddss (CO2, LDC), %xmm10,%xmm10 + +#endif + + vmovss %xmm4 , (CO1) + vmovss %xmm6 , (CO1, LDC) + vmovss %xmm8 , (CO2) + vmovss %xmm10, (CO2, LDC) + +.endm + + +/*******************************************************************************************/ + +/******************************************************************************************* +* 2 lines of N +*******************************************************************************************/ + +.macro KERNEL16x2_SUB + vmovups -16 * SIZE(AO, %rax, SIZE), %zmm0 + vbroadcastss -4 * SIZE(BO, BI, SIZE), %zmm2 + vbroadcastss -3 * SIZE(BO, BI, SIZE), %zmm3 + VFMADD231PS_( %zmm4,%zmm2,%zmm0 ) + VFMADD231PS_( %zmm6,%zmm3,%zmm0 ) + addq $ 2 , BI + addq $ 16, %rax +.endm + +.macro SAVE16x2 + + vbroadcastss ALPHA, %zmm0 + + vmulps %zmm0 , %zmm4 , %zmm4 + vmulps %zmm0 , %zmm6 , %zmm6 + + +#if !defined(TRMMKERNEL) + + vaddps (CO1), %zmm4,%zmm4 + + vaddps (CO1, LDC), %zmm6,%zmm6 + +#endif + + vmovups %zmm4 , (CO1) + + vmovups %zmm6 , (CO1, LDC) + +.endm + + + +/*******************************************************************************************/ + +.macro KERNEL8x2_SUB + vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0 + vbroadcastss -4 * SIZE(BO, BI, SIZE), %ymm2 + vbroadcastss -3 * SIZE(BO, BI, SIZE), %ymm3 + VFMADD231PS_( %ymm4,%ymm2,%ymm0 ) + VFMADD231PS_( %ymm6,%ymm3,%ymm0 ) + addq $ 2 , BI + addq $ 8 , %rax +.endm + +.macro SAVE8x2 + + vbroadcastss ALPHA, %ymm0 + + vmulps %ymm0 , %ymm4 , %ymm4 + vmulps %ymm0 , %ymm6 , %ymm6 + +#if 
!defined(TRMMKERNEL) + + vaddps (CO1), %ymm4,%ymm4 + vaddps (CO1, LDC), %ymm6,%ymm6 + +#endif + + vmovups %ymm4 , (CO1) + vmovups %ymm6 , (CO1, LDC) + +.endm + + + +/*******************************************************************************************/ + +.macro KERNEL4x2_SUB + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm2 + vbroadcastss -3 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231PS_( %xmm4,%xmm2,%xmm0 ) + VFMADD231PS_( %xmm6,%xmm3,%xmm0 ) + addq $ 2 , BI + addq $ 4 , %rax +.endm + +.macro SAVE4x2 + + vbroadcastss ALPHA, %xmm0 + + vmulps %xmm0 , %xmm4 , %xmm4 + vmulps %xmm0 , %xmm6 , %xmm6 + +#if !defined(TRMMKERNEL) + + vaddps (CO1), %xmm4,%xmm4 + vaddps (CO1, LDC), %xmm6,%xmm6 + +#endif + + vmovups %xmm4 , (CO1) + vmovups %xmm6 , (CO1, LDC) + +.endm + + +/*******************************************************************************************/ + +.macro KERNEL2x2_SUB + vmovss -16 * SIZE(AO, %rax, SIZE), %xmm0 + vmovss -15 * SIZE(AO, %rax, SIZE), %xmm1 + vmovss -4 * SIZE(BO, BI, SIZE), %xmm2 + vmovss -3 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231SS_( %xmm4,%xmm2,%xmm0 ) + VFMADD231SS_( %xmm5,%xmm2,%xmm1 ) + VFMADD231SS_( %xmm6,%xmm3,%xmm0 ) + VFMADD231SS_( %xmm7,%xmm3,%xmm1 ) + addq $ 2 , BI + addq $ 2, %rax +.endm + +.macro SAVE2x2 + + vmovss ALPHA, %xmm0 + + vmulss %xmm0 , %xmm4 , %xmm4 + vmulss %xmm0 , %xmm5 , %xmm5 + vmulss %xmm0 , %xmm6 , %xmm6 + vmulss %xmm0 , %xmm7 , %xmm7 + + +#if !defined(TRMMKERNEL) + + vaddss (CO1), %xmm4,%xmm4 + vaddss 1 * SIZE(CO1), %xmm5,%xmm5 + + vaddss (CO1, LDC), %xmm6,%xmm6 + vaddss 1 * SIZE(CO1, LDC), %xmm7,%xmm7 + +#endif + + vmovss %xmm4 , (CO1) + vmovss %xmm5 , 1 * SIZE(CO1) + + vmovss %xmm6 , (CO1, LDC) + vmovss %xmm7 , 1 * SIZE(CO1, LDC) + +.endm + + +/*******************************************************************************************/ + +.macro KERNEL1x2_SUB + vmovss -16 * SIZE(AO, %rax, SIZE), %xmm0 + vmovss -4 * SIZE(BO, BI, SIZE), %xmm2 + vmovss -3 * SIZE(BO, BI, SIZE), %xmm3 + VFMADD231SS_( %xmm4,%xmm2,%xmm0 ) + VFMADD231SS_( %xmm6,%xmm3,%xmm0 ) + addq $ 2 , BI + addq $ 1, %rax +.endm + +.macro SAVE1x2 + + vmovss ALPHA, %xmm0 + + vmulss %xmm0 , %xmm4 , %xmm4 + vmulss %xmm0 , %xmm6 , %xmm6 + +#if !defined(TRMMKERNEL) + + vaddss (CO1), %xmm4,%xmm4 + vaddss (CO1, LDC), %xmm6,%xmm6 + +#endif + + vmovss %xmm4 , (CO1) + vmovss %xmm6 , (CO1, LDC) + +.endm + + +/*******************************************************************************************/ + +/******************************************************************************************* +* 1 line of N +*******************************************************************************************/ + +.macro KERNEL16x1_SUB + vmovups -16 * SIZE(AO, %rax, SIZE), %zmm0 + vbroadcastss -4 * SIZE(BO, BI, SIZE), %zmm2 + VFMADD231PS_( %zmm4,%zmm2,%zmm0 ) + addq $ 1 , BI + addq $ 16, %rax +.endm + +.macro SAVE16x1 + + vbroadcastss ALPHA, %zmm0 + + vmulps %zmm0 , %zmm4 , %zmm4 + +#if !defined(TRMMKERNEL) + + vaddps (CO1), %zmm4,%zmm4 + +#endif + + vmovups %zmm4 , (CO1) + +.endm + + +/*******************************************************************************************/ + +.macro KERNEL8x1_SUB + vmovups -16 * SIZE(AO, %rax, SIZE), %ymm0 + vbroadcastss -4 * SIZE(BO, BI, SIZE), %ymm2 + VFMADD231PS_( %ymm4,%ymm2,%ymm0 ) + addq $ 1 , BI + addq $ 8 , %rax +.endm + +.macro SAVE8x1 + + vbroadcastss ALPHA, %ymm0 + + vmulps %ymm0 , %ymm4 , %ymm4 + +#if !defined(TRMMKERNEL) + + vaddps (CO1), %ymm4,%ymm4 + +#endif + + vmovups %ymm4 , (CO1) + +.endm + + + 
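+/* The remaining Nx1 kernels follow the same single-column pattern: one strip of C is updated as c += alpha * A * b, a slice of a matrix-vector product. */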
+/*******************************************************************************************/ + +.macro KERNEL4x1_SUB + vmovups -16 * SIZE(AO, %rax, SIZE), %xmm0 + vbroadcastss -4 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231PS_( %xmm4,%xmm2,%xmm0 ) + addq $ 1 , BI + addq $ 4 , %rax +.endm + +.macro SAVE4x1 + + vbroadcastss ALPHA, %xmm0 + + vmulps %xmm0 , %xmm4 , %xmm4 + +#if !defined(TRMMKERNEL) + + vaddps (CO1), %xmm4,%xmm4 + +#endif + + vmovups %xmm4 , (CO1) + +.endm + + +/*******************************************************************************************/ + +.macro KERNEL2x1_SUB + vmovss -16 * SIZE(AO, %rax, SIZE), %xmm0 + vmovss -15 * SIZE(AO, %rax, SIZE), %xmm1 + vmovss -4 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SS_( %xmm4,%xmm2,%xmm0 ) + VFMADD231SS_( %xmm5,%xmm2,%xmm1 ) + addq $ 1 , BI + addq $ 2 , %rax +.endm + +.macro SAVE2x1 + + vmovss ALPHA, %xmm0 + + vmulss %xmm0 , %xmm4 , %xmm4 + vmulss %xmm0 , %xmm5 , %xmm5 + +#if !defined(TRMMKERNEL) + + vaddss (CO1), %xmm4,%xmm4 + vaddss 1 * SIZE(CO1), %xmm5,%xmm5 + +#endif + + vmovss %xmm4 , (CO1) + vmovss %xmm5 , 1 * SIZE(CO1) + +.endm + + +/*******************************************************************************************/ + +.macro KERNEL1x1_SUB + vmovss -16 * SIZE(AO, %rax, SIZE), %xmm0 + vmovss -4 * SIZE(BO, BI, SIZE), %xmm2 + VFMADD231SS_( %xmm4,%xmm2,%xmm0 ) + addq $ 1 , BI + addq $ 1 , %rax +.endm + +.macro SAVE1x1 + + vmovss ALPHA, %xmm0 + + vmulss %xmm0 , %xmm4 , %xmm4 + +#if !defined(TRMMKERNEL) + + vaddss (CO1), %xmm4,%xmm4 + +#endif + + vmovss %xmm4 , (CO1) + +.endm + + +/*******************************************************************************************/ + +#if !defined(TRMMKERNEL) + +/************************************************************************************* +* GEMM Kernel +*************************************************************************************/ + + + PROLOGUE + PROFCODE + + subq $STACKSIZE, %rsp + movq %rbx, (%rsp) + movq %rbp, 8(%rsp) + movq %r12, 16(%rsp) + movq %r13, 24(%rsp) + movq %r14, 32(%rsp) + movq %r15, 40(%rsp) + + vzeroupper + +#ifdef WINDOWS_ABI + movq %rdi, 48(%rsp) + movq %rsi, 56(%rsp) + movups %xmm6, 64(%rsp) + movups %xmm7, 80(%rsp) + movups %xmm8, 96(%rsp) + movups %xmm9, 112(%rsp) + movups %xmm10, 128(%rsp) + movups %xmm11, 144(%rsp) + movups %xmm12, 160(%rsp) + movups %xmm13, 176(%rsp) + movups %xmm14, 192(%rsp) + movups %xmm15, 208(%rsp) + + movq ARG1, OLD_M + movq ARG2, OLD_N + movq ARG3, OLD_K + movq OLD_A, A + movq OLD_B, B + movq OLD_C, C + movq OLD_LDC, LDC +#ifdef TRMMKERNEL + vmovsd OLD_OFFSET, %xmm12 +#endif + vmovaps %xmm3, %xmm0 + +#else + movq STACKSIZE + 8(%rsp), LDC +#ifdef TRMMKERNEL + movsd STACKSIZE + 16(%rsp), %xmm12 +#endif + +#endif + + movq %rsp, SP # save old stack + subq $128 + L_BUFFER_SIZE, %rsp + andq $-4096, %rsp # align stack + + STACK_TOUCH + + cmpq $0, OLD_M + je .L999 + + cmpq $0, OLD_N + je .L999 + + cmpq $0, OLD_K + je .L999 + + movq OLD_M, M + movq OLD_N, N + movq OLD_K, K + + vmovss %xmm0, ALPHA + + salq $BASE_SHIFT, LDC + + movq N, %rax + xorq %rdx, %rdx + movq $12, %rdi + divq %rdi // N / 12 + movq %rax, Ndiv6 // N / 12 + movq %rdx, Nmod6 // N % 12 + + movq Ndiv6, J + cmpq $0, J + je .L4_00 + ALIGN_4 + + +/*******************************************************************************************/ + +.L6_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + salq $2, %rax // 4 values of B + leaq (B, %rax,4), BO2 + movq BO2, B // next offset of B + movq K, %rax + + ALIGN_4 + + 
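+ // each pass through .L6_02c packs 4 B values from BO1 and 2 from BO2 into one 6-wide panel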
+.L6_02c: + + vmovups (BO1), %xmm0 + vmovsd (BO2), %xmm1 + vmovups %xmm0, (BO) + vmovsd %xmm1, 4*SIZE(BO) + addq $ 4*SIZE,BO1 + addq $ 4*SIZE,BO2 + addq $ 6*SIZE,BO + decq %rax + jnz .L6_02c + + +.L6_10: + movq C, CO1 + leaq (C, LDC, 2), CO2 + leaq (CO2, LDC, 1), CO2 // co2 = c + 3 * ldc + leaq (C, LDC, 4), C + leaq (C, LDC, 2), C // c = c + 6 * ldc + + movq A, AO // aoffset = a + addq $ 16 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L6_20 + + ALIGN_4 + +.L6_11: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax // K = K - ( K % 8 ) + je .L6_16 + + ALIGN_4 + +.L6_12: + + KERNEL16x6_SUB4 + + KERNEL16x6_SUB4 + + je .L6_16 + + KERNEL16x6_SUB4 + + KERNEL16x6_SUB4 + + je .L6_16 + + jmp .L6_12 + ALIGN_4 + +.L6_16: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L6_19 + + ALIGN_4 + +.L6_17: + + KERNEL16x6_SUB + + jnz .L6_17 + ALIGN_4 + + +.L6_19: + + SAVE16x6 + + addq $16 * SIZE, CO1 # coffset += 16 + addq $16 * SIZE, CO2 # coffset += 16 + decq I # i -- + jg .L6_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L6_20: + // Test rest of M + + testq $15, M + jz .L6_60 // to next 6 lines of N + + testq $8, M + jz .L6_21pre + ALIGN_4 + +/**************************************************************************/ + +.L6_20_1: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L6_20_6 + + ALIGN_4 + +.L6_20_2: + + prefetcht0 A_PR1(AO) + KERNEL8x6_SUB + KERNEL8x6_SUB + prefetcht0 A_PR1(AO) + KERNEL8x6_SUB + KERNEL8x6_SUB + + prefetcht0 A_PR1(AO) + KERNEL8x6_SUB + KERNEL8x6_SUB + prefetcht0 A_PR1(AO) + KERNEL8x6_SUB + KERNEL8x6_SUB + + je .L6_20_6 + + prefetcht0 A_PR1(AO) + KERNEL8x6_SUB + KERNEL8x6_SUB + prefetcht0 A_PR1(AO) + KERNEL8x6_SUB + KERNEL8x6_SUB + + prefetcht0 A_PR1(AO) + KERNEL8x6_SUB + KERNEL8x6_SUB + prefetcht0 A_PR1(AO) + KERNEL8x6_SUB + KERNEL8x6_SUB + + je .L6_20_6 + + jmp .L6_20_2 + ALIGN_4 + +.L6_20_6: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L6_20_9 + + ALIGN_4 + +.L6_20_7: + + KERNEL8x6_SUB + + jnz .L6_20_7 + ALIGN_4 + + +.L6_20_9: + + SAVE8x6 + + addq $8 * SIZE, CO1 # coffset += 8 + addq $8 * SIZE, CO2 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L6_21pre: + + testq $4, M + jz .L6_30 + ALIGN_4 + +.L6_21: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L6_26 + + ALIGN_4 + +.L6_22: + + prefetcht0 A_PR1(AO) + KERNEL4x6_SUB + KERNEL4x6_SUB + KERNEL4x6_SUB + KERNEL4x6_SUB + + prefetcht0 A_PR1(AO) + KERNEL4x6_SUB + KERNEL4x6_SUB + KERNEL4x6_SUB + KERNEL4x6_SUB + + je .L6_26 + + prefetcht0 A_PR1(AO) + KERNEL4x6_SUB + KERNEL4x6_SUB + KERNEL4x6_SUB + KERNEL4x6_SUB + + prefetcht0 A_PR1(AO) + KERNEL4x6_SUB + KERNEL4x6_SUB + KERNEL4x6_SUB + KERNEL4x6_SUB + + je .L6_26 + + jmp .L6_22 + ALIGN_4 + +.L6_26: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L6_29 + + ALIGN_4 + +.L6_27: + + KERNEL4x6_SUB + + jnz .L6_27 + ALIGN_4 + + +.L6_29: + + SAVE4x6 + + addq $4 * SIZE, CO1 # coffset += 4 + addq $4 * SIZE, CO2 # coffset += 4 + ALIGN_4 + + +.L6_30: + testq $2, M + jz .L6_40 + + ALIGN_4 + +.L6_31: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L6_36 + + ALIGN_4 + +.L6_32: + + prefetcht0 A_PR1(AO) + 
KERNEL2x6_SUB + KERNEL2x6_SUB + KERNEL2x6_SUB + KERNEL2x6_SUB + + KERNEL2x6_SUB + KERNEL2x6_SUB + KERNEL2x6_SUB + KERNEL2x6_SUB + + je .L6_36 + + prefetcht0 A_PR1(AO) + KERNEL2x6_SUB + KERNEL2x6_SUB + KERNEL2x6_SUB + KERNEL2x6_SUB + + KERNEL2x6_SUB + KERNEL2x6_SUB + KERNEL2x6_SUB + KERNEL2x6_SUB + + je .L6_36 + + jmp .L6_32 + ALIGN_4 + +.L6_36: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L6_39 + + ALIGN_4 + +.L6_37: + + KERNEL2x6_SUB + + jnz .L6_37 + ALIGN_4 + + +.L6_39: + + SAVE2x6 + + addq $2 * SIZE, CO1 # coffset += 2 + addq $2 * SIZE, CO2 # coffset += 2 + ALIGN_4 + +.L6_40: + testq $1, M + jz .L6_60 // to next 4 lines of N + + ALIGN_4 + +.L6_41: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L6_46 + + ALIGN_4 + +.L6_42: + + prefetcht0 A_PR1(AO) + KERNEL1x6_SUB + KERNEL1x6_SUB + KERNEL1x6_SUB + KERNEL1x6_SUB + + KERNEL1x6_SUB + KERNEL1x6_SUB + KERNEL1x6_SUB + KERNEL1x6_SUB + + je .L6_46 + + KERNEL1x6_SUB + KERNEL1x6_SUB + KERNEL1x6_SUB + KERNEL1x6_SUB + + KERNEL1x6_SUB + KERNEL1x6_SUB + KERNEL1x6_SUB + KERNEL1x6_SUB + + je .L6_46 + + jmp .L6_42 + ALIGN_4 + +.L6_46: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L6_49 + + ALIGN_4 + +.L6_47: + + KERNEL1x6_SUB + + jnz .L6_47 + ALIGN_4 + + +.L6_49: + + SAVE1x6 + + addq $1 * SIZE, CO1 # coffset += 1 + addq $1 * SIZE, CO2 # coffset += 1 + ALIGN_4 + + + + + +.L6_60: + + +/*******************************************************************************************/ + + +.L7_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + salq $2, %rax // 4 values of B + leaq (B, %rax,4), BO2 + movq K, %rax + + ALIGN_4 + + +.L7_02c: + + vmovsd 2*SIZE(BO1), %xmm0 + vmovups (BO2), %xmm1 + vmovsd %xmm0, (BO) + vmovups %xmm1, 2*SIZE(BO) + addq $ 4*SIZE,BO1 + addq $ 4*SIZE,BO2 + addq $ 6*SIZE,BO + decq %rax + jnz .L7_02c + + movq BO2, B // next offset of B + +.L7_10: + movq C, CO1 + leaq (C, LDC, 2), CO2 + leaq (CO2, LDC, 1), CO2 // co2 = c + 3 * ldc + leaq (C, LDC, 4), C + leaq (C, LDC, 2), C // c = c + 6 * ldc + + movq A, AO // aoffset = a + addq $ 16 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L7_20 + + ALIGN_4 + +.L7_11: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax // K = K - ( K % 8 ) + je .L7_16 + + ALIGN_4 + +.L7_12: + + KERNEL16x6_SUB4 + + KERNEL16x6_SUB4 + + je .L7_16 + + KERNEL16x6_SUB4 + + KERNEL16x6_SUB4 + + je .L7_16 + + jmp .L7_12 + ALIGN_4 + +.L7_16: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L7_19 + + ALIGN_4 + +.L7_17: + + KERNEL16x6_SUB + + jnz .L7_17 + ALIGN_4 + + +.L7_19: + + SAVE16x6 + + addq $16 * SIZE, CO1 # coffset += 16 + addq $16 * SIZE, CO2 # coffset += 16 + decq I # i -- + jg .L7_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L7_20: + // Test rest of M + + testq $15, M + jz .L7_60 // to next 6 lines of N + + testq $8, M + jz .L7_21pre + ALIGN_4 + +/**************************************************************************/ + +.L7_20_1: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L7_20_6 + + ALIGN_4 + +.L7_20_2: + + prefetcht0 A_PR1(AO) + KERNEL8x6_SUB + KERNEL8x6_SUB + prefetcht0 A_PR1(AO) + KERNEL8x6_SUB + KERNEL8x6_SUB + + prefetcht0 A_PR1(AO) + KERNEL8x6_SUB + KERNEL8x6_SUB + prefetcht0 A_PR1(AO) + 
KERNEL8x6_SUB + KERNEL8x6_SUB + + je .L7_20_6 + + prefetcht0 A_PR1(AO) + KERNEL8x6_SUB + KERNEL8x6_SUB + prefetcht0 A_PR1(AO) + KERNEL8x6_SUB + KERNEL8x6_SUB + + prefetcht0 A_PR1(AO) + KERNEL8x6_SUB + KERNEL8x6_SUB + prefetcht0 A_PR1(AO) + KERNEL8x6_SUB + KERNEL8x6_SUB + + je .L7_20_6 + + jmp .L7_20_2 + ALIGN_4 + +.L7_20_6: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L7_20_9 + + ALIGN_4 + +.L7_20_7: + + KERNEL8x6_SUB + + jnz .L7_20_7 + ALIGN_4 + + +.L7_20_9: + + SAVE8x6 + + addq $8 * SIZE, CO1 # coffset += 8 + addq $8 * SIZE, CO2 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L7_21pre: + + testq $4, M + jz .L7_30 + ALIGN_4 + +.L7_21: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L7_26 + + ALIGN_4 + +.L7_22: + + prefetcht0 A_PR1(AO) + KERNEL4x6_SUB + KERNEL4x6_SUB + KERNEL4x6_SUB + KERNEL4x6_SUB + + prefetcht0 A_PR1(AO) + KERNEL4x6_SUB + KERNEL4x6_SUB + KERNEL4x6_SUB + KERNEL4x6_SUB + + je .L7_26 + + prefetcht0 A_PR1(AO) + KERNEL4x6_SUB + KERNEL4x6_SUB + KERNEL4x6_SUB + KERNEL4x6_SUB + + prefetcht0 A_PR1(AO) + KERNEL4x6_SUB + KERNEL4x6_SUB + KERNEL4x6_SUB + KERNEL4x6_SUB + + je .L7_26 + + jmp .L7_22 + ALIGN_4 + +.L7_26: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L7_29 + + ALIGN_4 + +.L7_27: + + KERNEL4x6_SUB + + jnz .L7_27 + ALIGN_4 + + +.L7_29: + + SAVE4x6 + + addq $4 * SIZE, CO1 # coffset += 4 + addq $4 * SIZE, CO2 # coffset += 4 + ALIGN_4 + + +.L7_30: + testq $2, M + jz .L7_40 + + ALIGN_4 + +.L7_31: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L7_36 + + ALIGN_4 + +.L7_32: + + prefetcht0 A_PR1(AO) + KERNEL2x6_SUB + KERNEL2x6_SUB + KERNEL2x6_SUB + KERNEL2x6_SUB + + KERNEL2x6_SUB + KERNEL2x6_SUB + KERNEL2x6_SUB + KERNEL2x6_SUB + + je .L7_36 + + prefetcht0 A_PR1(AO) + KERNEL2x6_SUB + KERNEL2x6_SUB + KERNEL2x6_SUB + KERNEL2x6_SUB + + KERNEL2x6_SUB + KERNEL2x6_SUB + KERNEL2x6_SUB + KERNEL2x6_SUB + + je .L7_36 + + jmp .L7_32 + ALIGN_4 + +.L7_36: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L7_39 + + ALIGN_4 + +.L7_37: + + KERNEL2x6_SUB + + jnz .L7_37 + ALIGN_4 + + +.L7_39: + + SAVE2x6 + + addq $2 * SIZE, CO1 # coffset += 2 + addq $2 * SIZE, CO2 # coffset += 2 + ALIGN_4 + +.L7_40: + testq $1, M + jz .L7_60 // to next 4 lines of N + + ALIGN_4 + +.L7_41: + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + + vzeroall + + movq K, %rax + + andq $-8, %rax + je .L7_46 + + ALIGN_4 + +.L7_42: + + prefetcht0 A_PR1(AO) + KERNEL1x6_SUB + KERNEL1x6_SUB + KERNEL1x6_SUB + KERNEL1x6_SUB + + KERNEL1x6_SUB + KERNEL1x6_SUB + KERNEL1x6_SUB + KERNEL1x6_SUB + + je .L7_46 + + KERNEL1x6_SUB + KERNEL1x6_SUB + KERNEL1x6_SUB + KERNEL1x6_SUB + + KERNEL1x6_SUB + KERNEL1x6_SUB + KERNEL1x6_SUB + KERNEL1x6_SUB + + je .L7_46 + + jmp .L7_42 + ALIGN_4 + +.L7_46: + movq K, %rax + + andq $7, %rax # if (k & 1) + je .L7_49 + + ALIGN_4 + +.L7_47: + + KERNEL1x6_SUB + + jnz .L7_47 + ALIGN_4 + + +.L7_49: + + SAVE1x6 + + addq $1 * SIZE, CO1 # coffset += 1 + addq $1 * SIZE, CO2 # coffset += 1 + ALIGN_4 + + + + + +.L7_60: + + decq J // j -- + jg .L6_01 // next 12 lines of N + + + + +/*******************************************************************************************/ +.L4_00: + + movq Nmod6, J + sarq $2, J // j = j / 4 + cmpq $ 0, J + je .L2_00 + ALIGN_4 + + +.L4_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + sarq $2, %rax // K / 4 + jz 
.L4_01b + ALIGN_4 + + +.L4_01a: + prefetcht0 512(BO1) + prefetchw 512(BO) + + vmovups (BO1), %xmm0 + vmovups 4*SIZE(BO1), %xmm1 + vmovups 8*SIZE(BO1), %xmm2 + vmovups 12*SIZE(BO1), %xmm3 + + vmovups %xmm0, (BO) + vmovups %xmm1, 4*SIZE(BO) + vmovups %xmm2, 8*SIZE(BO) + vmovups %xmm3,12*SIZE(BO) + + addq $ 16*SIZE,BO1 + addq $ 16*SIZE,BO + decq %rax + jnz .L4_01a + + +.L4_01b: + + movq K, %rax + andq $3, %rax // K % 4 + jz .L4_02d + ALIGN_4 + +.L4_02c: + + vmovups (BO1), %xmm0 + vmovups %xmm0, (BO) + addq $ 4*SIZE,BO1 + addq $ 4*SIZE,BO + decq %rax + jnz .L4_02c + +.L4_02d: + + movq BO1, B // next offset of B + +.L4_10: + movq C, CO1 + leaq (C, LDC, 2), CO2 + leaq (C, LDC, 4), C // c += 4 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $ 16 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L4_20 + + ALIGN_4 + +.L4_11: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $16, %rax // number of values in AO +#else + addq $4, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax // K = K - ( K % 8 ) + je .L4_16 + movq %rax, BI // Index for BO + leaq (,BI,4) , BI // BI = BI * 4 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_12: + + prefetcht0 A_PR1(AO, %rax, SIZE) + prefetcht0 B_PR1(BO, BI , SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + + prefetcht0 A_PR1(AO, %rax, SIZE) + prefetcht0 B_PR1(BO, BI , SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + + je .L4_16 + + prefetcht0 A_PR1(AO, %rax, SIZE) + prefetcht0 B_PR1(BO, BI , SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + + prefetcht0 A_PR1(AO, %rax, SIZE) + prefetcht0 B_PR1(BO, BI , SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + + je .L4_16 + + jmp .L4_12 + ALIGN_4 + +.L4_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L4_19 + + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_17: + + KERNEL16x4_SUB + + jl .L4_17 + ALIGN_4 + + +.L4_19: + + SAVE16x4 + +#if (defined(TRMMKERNEL) && 
defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $16, KK +#endif + + addq $16 * SIZE, CO1 # coffset += 16 + addq $16 * SIZE, CO2 # coffset += 16 + decq I # i -- + jg .L4_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L4_20: + // Test rest of M + + testq $15, M + jz .L4_60 // to next 3 lines of N + + testq $8, M + jz .L4_21pre + ALIGN_4 + +/**************************************************************************/ + +.L4_20_1: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $8, %rax // number of values in A +#else + addq $4, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L4_20_6 + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_20_2: + + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + + je .L4_20_6 + + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + + je .L4_20_6 + + jmp .L4_20_2 + ALIGN_4 + +.L4_20_6: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L4_20_9 + + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_20_7: + + KERNEL8x4_SUB + + jl .L4_20_7 + ALIGN_4 + + +.L4_20_9: + + SAVE8x4 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $8, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + addq $8 * SIZE, CO2 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L4_21pre: + + testq $4, M + jz .L4_30 + ALIGN_4 + +.L4_21: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + 
(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in A +#else + addq $4, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L4_26 + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_22: + + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + + je .L4_26 + + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + + je .L4_26 + + jmp .L4_22 + ALIGN_4 + +.L4_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L4_29 + + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_27: + + KERNEL4x4_SUB + + jl .L4_27 + ALIGN_4 + + +.L4_29: + + SAVE4x4 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $4, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + addq $4 * SIZE, CO2 # coffset += 4 + ALIGN_4 + + +.L4_30: + testq $2, M + jz .L4_40 + + ALIGN_4 + +.L4_31: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $4, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L4_36 + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_32: + + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + + KERNEL2x4_SUB + KERNEL2x4_SUB 
+ KERNEL2x4_SUB + KERNEL2x4_SUB + + je .L4_36 + + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + + je .L4_36 + + jmp .L4_32 + ALIGN_4 + +.L4_36: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L4_39 + + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_37: + + KERNEL2x4_SUB + + jl .L4_37 + ALIGN_4 + + +.L4_39: + + SAVE2x4 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + addq $2 * SIZE, CO2 # coffset += 2 + ALIGN_4 + +.L4_40: + testq $1, M + jz .L4_60 // to next 4 lines of N + + ALIGN_4 + +.L4_41: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $4, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax + je .L4_46 + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_42: + + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + + je .L4_46 + + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + + je .L4_46 + + jmp .L4_42 + ALIGN_4 + +.L4_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L4_49 + + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_47: + + KERNEL1x4_SUB + + jl .L4_47 + ALIGN_4 + + +.L4_49: + + SAVE1x4 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $1 * SIZE, CO1 # coffset += 1 + addq $1 * SIZE, CO2 # coffset += 1 + ALIGN_4 + + + + + +.L4_60: +#if defined(TRMMKERNEL) && !defined(LEFT) + addq $4, KK +#endif + + decq J // j -- + jg .L4_01 // next 4 lines of N + + + 
+/*******************************************************************************************/ +.L2_00: + + movq Nmod6, J + andq $3, J // j % 4 + je .L999 + + movq Nmod6, J + andq $2, J // j % 4 + je .L1_0 + +.L2_01: + + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + sarq $2, %rax // K / 4 + jz .L2_01b + ALIGN_4 + +.L2_01a: + + vmovsd (BO1), %xmm0 + vmovsd 2*SIZE(BO1), %xmm1 + vmovsd 4*SIZE(BO1), %xmm2 + vmovsd 6*SIZE(BO1), %xmm3 + + vmovsd %xmm0, (BO) + vmovsd %xmm1, 2*SIZE(BO) + vmovsd %xmm2, 4*SIZE(BO) + vmovsd %xmm3, 6*SIZE(BO) + + addq $8*SIZE,BO1 + addq $8*SIZE,BO + decq %rax + jnz .L2_01a + + +.L2_01b: + + movq K, %rax + andq $3, %rax // K % 4 + jz .L2_02d + ALIGN_4 + +.L2_02c: + + vmovsd (BO1), %xmm0 + vmovsd %xmm0, (BO) + addq $2*SIZE,BO1 + addq $2*SIZE,BO + decq %rax + jnz .L2_02c + +.L2_02d: + + movq BO1, B // next offset of B + +.L2_10: + movq C, CO1 + leaq (C, LDC, 2), C // c += 2 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $16 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L2_20 + + ALIGN_4 + +.L2_11: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $16, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax // K = K - ( K % 8 ) + je .L2_16 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_12: + + KERNEL16x2_SUB + KERNEL16x2_SUB + KERNEL16x2_SUB + KERNEL16x2_SUB + + KERNEL16x2_SUB + KERNEL16x2_SUB + KERNEL16x2_SUB + KERNEL16x2_SUB + + je .L2_16 + + KERNEL16x2_SUB + KERNEL16x2_SUB + KERNEL16x2_SUB + KERNEL16x2_SUB + + KERNEL16x2_SUB + KERNEL16x2_SUB + KERNEL16x2_SUB + KERNEL16x2_SUB + + je .L2_16 + + jmp .L2_12 + ALIGN_4 + +.L2_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_19 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_17: + + KERNEL16x2_SUB + + jl .L2_17 + ALIGN_4 + + +.L2_19: + + SAVE16x2 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $16, KK +#endif + + addq $16 * SIZE, CO1 # coffset 
+= 16 + decq I # i -- + jg .L2_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L2_20: + // Test rest of M + + testq $15, M + jz .L2_60 // to next 2 lines of N + + testq $8, M + jz .L2_21pre + ALIGN_4 + +/**************************************************************************/ + +.L2_20_1: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $8, %rax // number of values in A +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L2_20_6 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_20_2: + + + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + + je .L2_20_6 + + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + + je .L2_20_6 + + jmp .L2_20_2 + ALIGN_4 + +.L2_20_6: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_20_9 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_20_7: + + KERNEL8x2_SUB + + jl .L2_20_7 + ALIGN_4 + + +.L2_20_9: + + SAVE8x2 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $8, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L2_21pre: + + testq $4, M + jz .L2_30 + ALIGN_4 + +.L2_21: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || 
(!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in A +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L2_26 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 1 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_22: + + + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + + je .L2_26 + + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + + je .L2_26 + + jmp .L2_22 + ALIGN_4 + +.L2_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_29 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_27: + + KERNEL4x2_SUB + + jl .L2_27 + ALIGN_4 + + +.L2_29: + + SAVE4x2 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $4, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L2_30: + testq $2, M + jz .L2_40 + + ALIGN_4 + +.L2_31: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L2_36 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_32: + + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + + je .L2_36 + + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + + je .L2_36 + + jmp .L2_32 + ALIGN_4 + +.L2_36: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_39 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), 
BO + negq BI + negq %rax + ALIGN_4 + +.L2_37: + + KERNEL2x2_SUB + + jl .L2_37 + ALIGN_4 + + +.L2_39: + + SAVE2x2 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L2_40: + testq $1, M + jz .L2_60 // to next 2 lines of N + + ALIGN_4 + +.L2_41: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax + je .L2_46 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_42: + + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + + je .L2_46 + + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + + je .L2_46 + + jmp .L2_42 + ALIGN_4 + +.L2_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_49 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_47: + + KERNEL1x2_SUB + + jl .L2_47 + ALIGN_4 + + +.L2_49: + + SAVE1x2 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + + + + +.L2_60: +#if defined(TRMMKERNEL) && !defined(LEFT) + addq $2, KK +#endif + + + + +.L1_0: + +/************************************************************************************************ +* Loop for Nmod6 % 2 > 0 +*************************************************************************************************/ + + movq Nmod6, J + andq $1, J // j % 2 + je .L999 + ALIGN_4 + +.L1_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + ALIGN_4 + +.L1_02b: + + vmovss (BO1), %xmm0 + vmovss %xmm0, (BO) + addq $1*SIZE,BO1 + addq $1*SIZE,BO + decq %rax + jnz .L1_02b + +.L1_02c: + + movq BO1, B // next offset of B + +.L1_10: + movq C, CO1 + leaq (C, LDC, 1), C // c += 1 * ldc + 
+#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $16 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L1_20 + + ALIGN_4 + +.L1_11: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $16, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax // K = K - ( K % 8 ) + je .L1_16 + movq %rax, BI // Index for BO + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_12: + + KERNEL16x1_SUB + KERNEL16x1_SUB + KERNEL16x1_SUB + KERNEL16x1_SUB + + KERNEL16x1_SUB + KERNEL16x1_SUB + KERNEL16x1_SUB + KERNEL16x1_SUB + + je .L1_16 + + KERNEL16x1_SUB + KERNEL16x1_SUB + KERNEL16x1_SUB + KERNEL16x1_SUB + + KERNEL16x1_SUB + KERNEL16x1_SUB + KERNEL16x1_SUB + KERNEL16x1_SUB + + je .L1_16 + + jmp .L1_12 + ALIGN_4 + +.L1_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_19 + + movq %rax, BI // Index for BO + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_17: + + KERNEL16x1_SUB + + jl .L1_17 + ALIGN_4 + + +.L1_19: + + SAVE16x1 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $16, KK +#endif + + addq $16 * SIZE, CO1 # coffset += 16 + decq I # i -- + jg .L1_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L1_20: + // Test rest of M + + testq $15, M + jz .L999 + + testq $8, M + jz .L1_21pre + ALIGN_4 + +/**************************************************************************/ + +.L1_20_1: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $8, %rax // number of values in A +#else + addq $1, %rax // number of values in BO +#endif + movq 
%rax, KKK +#endif + + + andq $-8, %rax + je .L1_20_6 + movq %rax, BI // Index for BO + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_20_2: + + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + + je .L1_20_6 + + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + + je .L1_20_6 + + jmp .L1_20_2 + ALIGN_4 + +.L1_20_6: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_20_9 + + movq %rax, BI // Index for BO + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_20_7: + + KERNEL8x1_SUB + + jl .L1_20_7 + ALIGN_4 + + +.L1_20_9: + + SAVE8x1 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $8, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L1_21pre: + + testq $4, M + jz .L1_30 + ALIGN_4 + +.L1_21: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in A +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L1_26 + movq %rax, BI // Index for BO + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_22: + + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + + je .L1_26 + + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + + je .L1_26 + + jmp .L1_22 + ALIGN_4 + +.L1_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_29 + + movq %rax, BI // Index for BO + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_27: + + KERNEL4x1_SUB + + jl .L1_27 + ALIGN_4 + + +.L1_29: + + SAVE4x1 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) 
&& defined(LEFT) + addq $4, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L1_30: + testq $2, M + jz .L1_40 + + ALIGN_4 + +.L1_31: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L1_36 + movq %rax, BI // Index for BO + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_32: + + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + + je .L1_36 + + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + + je .L1_36 + + jmp .L1_32 + ALIGN_4 + +.L1_36: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_39 + + movq %rax, BI // Index for BO + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_37: + + KERNEL2x1_SUB + + jl .L1_37 + ALIGN_4 + + +.L1_39: + + SAVE2x1 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L1_40: + testq $1, M + jz .L999 + + ALIGN_4 + +.L1_41: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax + je .L1_46 + movq %rax, BI // Index for BO + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_42: + + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + + je .L1_46 + + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + + je .L1_46 + + jmp .L1_42 + ALIGN_4 + 
+.L1_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_49 + + movq %rax, BI // Index for BO + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_47: + + KERNEL1x1_SUB + + jl .L1_47 + ALIGN_4 + + +.L1_49: + + SAVE1x1 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + +.L999: + movq SP, %rsp + movq (%rsp), %rbx + movq 8(%rsp), %rbp + movq 16(%rsp), %r12 + movq 24(%rsp), %r13 + movq 32(%rsp), %r14 + movq 40(%rsp), %r15 + +#ifdef WINDOWS_ABI + movq 48(%rsp), %rdi + movq 56(%rsp), %rsi + movups 64(%rsp), %xmm6 + movups 80(%rsp), %xmm7 + movups 96(%rsp), %xmm8 + movups 112(%rsp), %xmm9 + movups 128(%rsp), %xmm10 + movups 144(%rsp), %xmm11 + movups 160(%rsp), %xmm12 + movups 176(%rsp), %xmm13 + movups 192(%rsp), %xmm14 + movups 208(%rsp), %xmm15 +#endif + + addq $STACKSIZE, %rsp + ret + + EPILOGUE + + + +#else + +/************************************************************************************* +* TRMM Kernel +*************************************************************************************/ + + + PROLOGUE + PROFCODE + + subq $STACKSIZE, %rsp + movq %rbx, (%rsp) + movq %rbp, 8(%rsp) + movq %r12, 16(%rsp) + movq %r13, 24(%rsp) + movq %r14, 32(%rsp) + movq %r15, 40(%rsp) + + vzeroupper + +#ifdef WINDOWS_ABI + movq %rdi, 48(%rsp) + movq %rsi, 56(%rsp) + movups %xmm6, 64(%rsp) + movups %xmm7, 80(%rsp) + movups %xmm8, 96(%rsp) + movups %xmm9, 112(%rsp) + movups %xmm10, 128(%rsp) + movups %xmm11, 144(%rsp) + movups %xmm12, 160(%rsp) + movups %xmm13, 176(%rsp) + movups %xmm14, 192(%rsp) + movups %xmm15, 208(%rsp) + + movq ARG1, OLD_M + movq ARG2, OLD_N + movq ARG3, OLD_K + movq OLD_A, A + movq OLD_B, B + movq OLD_C, C + movq OLD_LDC, LDC +#ifdef TRMMKERNEL + vmovsd OLD_OFFSET, %xmm12 +#endif + vmovaps %xmm3, %xmm0 + +#else + movq STACKSIZE + 8(%rsp), LDC +#ifdef TRMMKERNEL + movsd STACKSIZE + 16(%rsp), %xmm12 +#endif + +#endif + + movq %rsp, SP # save old stack + subq $128 + L_BUFFER_SIZE, %rsp + andq $-4096, %rsp # align stack + + STACK_TOUCH + + cmpq $0, OLD_M + je .L999 + + cmpq $0, OLD_N + je .L999 + + cmpq $0, OLD_K + je .L999 + + movq OLD_M, M + movq OLD_N, N + movq OLD_K, K + + vmovss %xmm0, ALPHA + + salq $BASE_SHIFT, LDC + + movq N, %rax + xorq %rdx, %rdx + movq $4, %rdi + divq %rdi // N / 4 + movq %rax, Ndiv6 // N / 4 + movq %rdx, Nmod6 // N % 4 + + + +#ifdef TRMMKERNEL + vmovsd %xmm12, OFFSET + vmovsd %xmm12, KK +#ifndef LEFT + negq KK +#endif +#endif + + movq Ndiv6, J + cmpq $0, J + je .L2_0 + ALIGN_4 + +/*******************************************************************************************/ + +.L4_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + sarq $2, %rax // K / 4 + jz .L4_01b + ALIGN_4 + + +.L4_01a: + prefetcht0 512(BO1) + prefetchw 512(BO) + + vmovups (BO1), %xmm0 + vmovups 4*SIZE(BO1), %xmm1 + vmovups 8*SIZE(BO1), %xmm2 + vmovups 12*SIZE(BO1), %xmm3 + + vmovups %xmm0, (BO) + vmovups %xmm1, 4*SIZE(BO) + vmovups %xmm2, 8*SIZE(BO) + vmovups %xmm3,12*SIZE(BO) + + addq $ 16*SIZE,BO1 + addq $ 16*SIZE,BO + decq %rax + jnz .L4_01a + + +.L4_01b: + + movq K, %rax + andq $3, %rax // K % 4 + jz .L4_02d + 
ALIGN_4 + +.L4_02c: + + vmovups (BO1), %xmm0 + vmovups %xmm0, (BO) + addq $ 4*SIZE,BO1 + addq $ 4*SIZE,BO + decq %rax + jnz .L4_02c + +.L4_02d: + + movq BO1, B // next offset of B + +.L4_10: + movq C, CO1 + leaq (C, LDC, 2), CO2 + leaq (C, LDC, 4), C // c += 4 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $ 16 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L4_20 + + ALIGN_4 + +.L4_11: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $16, %rax // number of values in AO +#else + addq $4, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax // K = K - ( K % 8 ) + je .L4_16 + movq %rax, BI // Index for BO + leaq (,BI,4) , BI // BI = BI * 4 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_12: + + prefetcht0 A_PR1(AO, %rax, SIZE) + prefetcht0 B_PR1(BO, BI , SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + + prefetcht0 A_PR1(AO, %rax, SIZE) + prefetcht0 B_PR1(BO, BI , SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + + je .L4_16 + + prefetcht0 A_PR1(AO, %rax, SIZE) + prefetcht0 B_PR1(BO, BI , SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + + prefetcht0 A_PR1(AO, %rax, SIZE) + prefetcht0 B_PR1(BO, BI , SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + prefetcht0 A_PR1(AO, %rax, SIZE) + KERNEL16x4_SUB + + je .L4_16 + + jmp .L4_12 + ALIGN_4 + +.L4_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L4_19 + + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_17: + + KERNEL16x4_SUB + + jl .L4_17 + ALIGN_4 + + +.L4_19: + + SAVE16x4 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $16, KK +#endif + + addq $16 * SIZE, CO1 # coffset += 
16 + addq $16 * SIZE, CO2 # coffset += 16 + decq I # i -- + jg .L4_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L4_20: + // Test rest of M + + testq $15, M + jz .L4_60 // to next 3 lines of N + + testq $8, M + jz .L4_21pre + ALIGN_4 + +/**************************************************************************/ + +.L4_20_1: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $8, %rax // number of values in A +#else + addq $4, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L4_20_6 + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_20_2: + + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + + je .L4_20_6 + + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + KERNEL8x4_SUB + + je .L4_20_6 + + jmp .L4_20_2 + ALIGN_4 + +.L4_20_6: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L4_20_9 + + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_20_7: + + KERNEL8x4_SUB + + jl .L4_20_7 + ALIGN_4 + + +.L4_20_9: + + SAVE8x4 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $8, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + addq $8 * SIZE, CO2 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L4_21pre: + + testq $4, M + jz .L4_30 + ALIGN_4 + +.L4_21: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + 
movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in A +#else + addq $4, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L4_26 + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_22: + + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + + je .L4_26 + + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + KERNEL4x4_SUB + + je .L4_26 + + jmp .L4_22 + ALIGN_4 + +.L4_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L4_29 + + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_27: + + KERNEL4x4_SUB + + jl .L4_27 + ALIGN_4 + + +.L4_29: + + SAVE4x4 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $4, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + addq $4 * SIZE, CO2 # coffset += 4 + ALIGN_4 + + +.L4_30: + testq $2, M + jz .L4_40 + + ALIGN_4 + +.L4_31: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $4, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L4_36 + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_32: + + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + + je .L4_36 + + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + KERNEL2x4_SUB + + je .L4_36 + + jmp .L4_32 + ALIGN_4 + +.L4_36: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L4_39 + + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + + salq $1, %rax 
// rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_37: + + KERNEL2x4_SUB + + jl .L4_37 + ALIGN_4 + + +.L4_39: + + SAVE2x4 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + addq $2 * SIZE, CO2 # coffset += 2 + ALIGN_4 + +.L4_40: + testq $1, M + jz .L4_60 // to next 4 lines of N + + ALIGN_4 + +.L4_41: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $4, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax + je .L4_46 + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_42: + + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + + je .L4_46 + + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + KERNEL1x4_SUB + + je .L4_46 + + jmp .L4_42 + ALIGN_4 + +.L4_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L4_49 + + movq %rax, BI // Index for BO + leaq (,BI,4), BI // BI = BI * 4 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L4_47: + + KERNEL1x4_SUB + + jl .L4_47 + ALIGN_4 + + +.L4_49: + + SAVE1x4 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (,BI, 4), BI // BI = BI * 4 ; number of values + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $1 * SIZE, CO1 # coffset += 1 + addq $1 * SIZE, CO2 # coffset += 1 + ALIGN_4 + + + + + +.L4_60: +#if defined(TRMMKERNEL) && !defined(LEFT) + addq $4, KK +#endif + + decq J // j -- + jg .L4_01 // next 4 lines of N + + + +/*******************************************************************************************/ +.L2_0: + + movq Nmod6, J + andq $3, J // j % 4 + je .L999 + + movq Nmod6, J + andq $2, J // j % 4 + je .L1_0 + +.L2_01: + + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + sarq $2, %rax // K / 4 + jz .L2_01b + ALIGN_4 + +.L2_01a: + + vmovsd (BO1), %xmm0 + vmovsd 2*SIZE(BO1), %xmm1 + vmovsd 
4*SIZE(BO1), %xmm2 + vmovsd 6*SIZE(BO1), %xmm3 + + vmovsd %xmm0, (BO) + vmovsd %xmm1, 2*SIZE(BO) + vmovsd %xmm2, 4*SIZE(BO) + vmovsd %xmm3, 6*SIZE(BO) + + addq $8*SIZE,BO1 + addq $8*SIZE,BO + decq %rax + jnz .L2_01a + + +.L2_01b: + + movq K, %rax + andq $3, %rax // K % 4 + jz .L2_02d + ALIGN_4 + +.L2_02c: + + vmovsd (BO1), %xmm0 + vmovsd %xmm0, (BO) + addq $2*SIZE,BO1 + addq $2*SIZE,BO + decq %rax + jnz .L2_02c + +.L2_02d: + + movq BO1, B // next offset of B + +.L2_10: + movq C, CO1 + leaq (C, LDC, 2), C // c += 2 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $16 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L2_20 + + ALIGN_4 + +.L2_11: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $16, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax // K = K - ( K % 8 ) + je .L2_16 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_12: + + KERNEL16x2_SUB + KERNEL16x2_SUB + KERNEL16x2_SUB + KERNEL16x2_SUB + + KERNEL16x2_SUB + KERNEL16x2_SUB + KERNEL16x2_SUB + KERNEL16x2_SUB + + je .L2_16 + + KERNEL16x2_SUB + KERNEL16x2_SUB + KERNEL16x2_SUB + KERNEL16x2_SUB + + KERNEL16x2_SUB + KERNEL16x2_SUB + KERNEL16x2_SUB + KERNEL16x2_SUB + + je .L2_16 + + jmp .L2_12 + ALIGN_4 + +.L2_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_19 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_17: + + KERNEL16x2_SUB + + jl .L2_17 + ALIGN_4 + + +.L2_19: + + SAVE16x2 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $16, KK +#endif + + addq $16 * SIZE, CO1 # coffset += 16 + decq I # i -- + jg .L2_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L2_20: + // Test rest of M + + testq $15, M + jz .L2_60 // to next 2 lines of N + + testq $8, M + jz .L2_21pre + ALIGN_4 + +/**************************************************************************/ + +.L2_20_1: +#if 
!defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $8, %rax // number of values in A +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L2_20_6 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_20_2: + + + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + + je .L2_20_6 + + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + KERNEL8x2_SUB + + je .L2_20_6 + + jmp .L2_20_2 + ALIGN_4 + +.L2_20_6: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_20_9 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_20_7: + + KERNEL8x2_SUB + + jl .L2_20_7 + ALIGN_4 + + +.L2_20_9: + + SAVE8x2 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $8, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L2_21pre: + + testq $4, M + jz .L2_30 + ALIGN_4 + +.L2_21: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in A +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L2_26 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 1 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), 
AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_22: + + + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + + je .L2_26 + + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + KERNEL4x2_SUB + + je .L2_26 + + jmp .L2_22 + ALIGN_4 + +.L2_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_29 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_27: + + KERNEL4x2_SUB + + jl .L2_27 + ALIGN_4 + + +.L2_29: + + SAVE4x2 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $4, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L2_30: + testq $2, M + jz .L2_40 + + ALIGN_4 + +.L2_31: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L2_36 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_32: + + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + + je .L2_36 + + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + KERNEL2x2_SUB + + je .L2_36 + + jmp .L2_32 + ALIGN_4 + +.L2_36: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_39 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_37: + + KERNEL2x2_SUB + + jl .L2_37 + ALIGN_4 + + +.L2_39: + + SAVE2x2 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values 
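+ // step AO past the 2*(K - KKK) A elements not consumed by this TRMM tile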
+ leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L2_40: + testq $1, M + jz .L2_60 // to next 2 lines of N + + ALIGN_4 + +.L2_41: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $2, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax + je .L2_46 + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_42: + + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + + je .L2_46 + + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + KERNEL1x2_SUB + + je .L2_46 + + jmp .L2_42 + ALIGN_4 + +.L2_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L2_49 + + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L2_47: + + KERNEL1x2_SUB + + jl .L2_47 + ALIGN_4 + + +.L2_49: + + SAVE1x2 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BI,BI,1), BI // BI = BI * 2 ; number of values + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + + + + +.L2_60: +#if defined(TRMMKERNEL) && !defined(LEFT) + addq $2, KK +#endif + + + + +.L1_0: + +/************************************************************************************************ +* Loop for Nmod6 % 2 > 0 +*************************************************************************************************/ + + movq Nmod6, J + andq $1, J // j % 2 + je .L999 + ALIGN_4 + +.L1_01: + // copy to sub buffer + movq B, BO1 + leaq BUFFER1, BO // first buffer to BO + movq K, %rax + ALIGN_4 + +.L1_02b: + + vmovss (BO1), %xmm0 + vmovss %xmm0, (BO) + addq $1*SIZE,BO1 + addq $1*SIZE,BO + decq %rax + jnz .L1_02b + +.L1_02c: + + movq BO1, B // next offset of B + +.L1_10: + movq C, CO1 + leaq (C, LDC, 1), C // c += 1 * ldc + +#if defined(TRMMKERNEL) && defined(LEFT) + movq OFFSET, %rax + movq %rax, KK +#endif + + movq A, AO // aoffset = a + addq $16 * SIZE, AO + + movq M, I + sarq $4, I // i = (m >> 4) + je .L1_20 + + ALIGN_4 + +.L1_11: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + 
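+ // TRMM path: position BO and AO KK iterations into their buffers (one B value and sixteen A values per k for this 16x1 block)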
movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $16, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax // K = K - ( K % 8 ) + je .L1_16 + movq %rax, BI // Index for BO + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_12: + + KERNEL16x1_SUB + KERNEL16x1_SUB + KERNEL16x1_SUB + KERNEL16x1_SUB + + KERNEL16x1_SUB + KERNEL16x1_SUB + KERNEL16x1_SUB + KERNEL16x1_SUB + + je .L1_16 + + KERNEL16x1_SUB + KERNEL16x1_SUB + KERNEL16x1_SUB + KERNEL16x1_SUB + + KERNEL16x1_SUB + KERNEL16x1_SUB + KERNEL16x1_SUB + KERNEL16x1_SUB + + je .L1_16 + + jmp .L1_12 + ALIGN_4 + +.L1_16: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_19 + + movq %rax, BI // Index for BO + + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_17: + + KERNEL16x1_SUB + + jl .L1_17 + ALIGN_4 + + +.L1_19: + + SAVE16x1 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $4, %rax // rax = rax * 16 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $16, KK +#endif + + addq $16 * SIZE, CO1 # coffset += 16 + decq I # i -- + jg .L1_11 + ALIGN_4 + +/************************************************************************** +* Rest of M +***************************************************************************/ +.L1_20: + // Test rest of M + + testq $15, M + jz .L999 + + testq $8, M + jz .L1_21pre + ALIGN_4 + +/**************************************************************************/ + +.L1_20_1: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $8, %rax // number of values in A +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L1_20_6 + movq %rax, BI // Index for BO + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_20_2: + + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + + je .L1_20_6 + + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + 
KERNEL8x1_SUB + + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + KERNEL8x1_SUB + + je .L1_20_6 + + jmp .L1_20_2 + ALIGN_4 + +.L1_20_6: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_20_9 + + movq %rax, BI // Index for BO + + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_20_7: + + KERNEL8x1_SUB + + jl .L1_20_7 + ALIGN_4 + + +.L1_20_9: + + SAVE8x1 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $3, %rax // rax = rax * 8 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $8, KK +#endif + + addq $8 * SIZE, CO1 # coffset += 8 + ALIGN_4 + + + +/**************************************************************************/ + +.L1_21pre: + + testq $4, M + jz .L1_30 + ALIGN_4 + +.L1_21: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $4, %rax // number of values in A +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L1_26 + movq %rax, BI // Index for BO + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_22: + + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + + je .L1_26 + + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + KERNEL4x1_SUB + + je .L1_26 + + jmp .L1_22 + ALIGN_4 + +.L1_26: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_29 + + movq %rax, BI // Index for BO + + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_27: + + KERNEL4x1_SUB + + jl .L1_27 + ALIGN_4 + + +.L1_29: + + SAVE4x1 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $2, %rax // rax = rax * 4 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $4, KK +#endif + + addq $4 * SIZE, CO1 # coffset += 4 + ALIGN_4 + + +.L1_30: + testq $2, M + jz .L1_40 + + ALIGN_4 + +.L1_31: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + 
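+ // same 4*SIZE bias into BUFFER1 as the non-TRMM branch above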
addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $2, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + + andq $-8, %rax + je .L1_36 + movq %rax, BI // Index for BO + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_32: + + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + + je .L1_36 + + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + KERNEL2x1_SUB + + je .L1_36 + + jmp .L1_32 + ALIGN_4 + +.L1_36: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_39 + + movq %rax, BI // Index for BO + + salq $1, %rax // rax = rax *2 ; number of values + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_37: + + KERNEL2x1_SUB + + jl .L1_37 + ALIGN_4 + + +.L1_39: + + SAVE2x1 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + salq $1, %rax // rax = rax * 2 ; number of values + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $2, KK +#endif + + addq $2 * SIZE, CO1 # coffset += 2 + ALIGN_4 + +.L1_40: + testq $1, M + jz .L999 + + ALIGN_4 + +.L1_41: +#if !defined(TRMMKERNEL) || \ + (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA)) + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO +#else + movq KK, %rax + leaq BUFFER1, BO // first buffer to BO + addq $4 * SIZE, BO + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + + vzeroall + +#ifndef TRMMKERNEL + movq K, %rax +#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA)) + movq K, %rax + subq KK, %rax + movq %rax, KKK +#else + movq KK, %rax +#ifdef LEFT + addq $1, %rax // number of values in AO +#else + addq $1, %rax // number of values in BO +#endif + movq %rax, KKK +#endif + + andq $-8, %rax + je .L1_46 + movq %rax, BI // Index for BO + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_42: + + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + + je .L1_46 + + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + KERNEL1x1_SUB + + je .L1_46 + + jmp .L1_42 + ALIGN_4 + +.L1_46: +#ifndef TRMMKERNEL + movq K, %rax +#else + movq KKK, %rax +#endif + + andq $7, %rax # if (k & 1) + je .L1_49 + + movq %rax, BI // Index for BO + + leaq (AO, %rax, SIZE), AO + leaq (BO, BI, SIZE), BO + negq BI + negq %rax + ALIGN_4 + +.L1_47: + + KERNEL1x1_SUB + + jl .L1_47 + ALIGN_4 + + +.L1_49: + + SAVE1x1 + +#if (defined(TRMMKERNEL) && defined(LEFT) && defined(TRANSA)) || \ + (defined(TRMMKERNEL) && !defined(LEFT) && 
!defined(TRANSA)) + movq K, %rax + subq KKK, %rax + movq %rax, BI // Index for BO + leaq (BO, BI, SIZE), BO + leaq (AO, %rax, SIZE), AO +#endif + + +#if defined(TRMMKERNEL) && defined(LEFT) + addq $1, KK +#endif + + addq $1 * SIZE, CO1 # coffset += 1 + ALIGN_4 + + +.L999: + movq SP, %rsp + movq (%rsp), %rbx + movq 8(%rsp), %rbp + movq 16(%rsp), %r12 + movq 24(%rsp), %r13 + movq 32(%rsp), %r14 + movq 40(%rsp), %r15 + +#ifdef WINDOWS_ABI + movq 48(%rsp), %rdi + movq 56(%rsp), %rsi + movups 64(%rsp), %xmm6 + movups 80(%rsp), %xmm7 + movups 96(%rsp), %xmm8 + movups 112(%rsp), %xmm9 + movups 128(%rsp), %xmm10 + movups 144(%rsp), %xmm11 + movups 160(%rsp), %xmm12 + movups 176(%rsp), %xmm13 + movups 192(%rsp), %xmm14 + movups 208(%rsp), %xmm15 +#endif + + addq $STACKSIZE, %rsp + ret + + EPILOGUE + + +#endif + diff --git a/kernel/x86_64/sgemm_kernel_16x4_skylakex.c b/kernel/x86_64/sgemm_kernel_16x4_skylakex.c new file mode 100644 index 000000000..3246e681f --- /dev/null +++ b/kernel/x86_64/sgemm_kernel_16x4_skylakex.c @@ -0,0 +1,1642 @@ +/********************************************************************************* +Copyright (c) 2013, The OpenBLAS Project +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the +distribution. +3. Neither the name of the OpenBLAS project nor the names of +its contributors may be used to endorse or promote products +derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+**********************************************************************************/ + + +/* The comment below is left for history; its data does not represent the implementation in this file. */ + +/********************************************************************* +* 2014/07/28 Saar +* BLASTEST : OK +* CTEST : OK +* TEST : OK +* +* 2013/10/28 Saar +* Parameter: +* SGEMM_DEFAULT_UNROLL_N 4 +* SGEMM_DEFAULT_UNROLL_M 16 +* SGEMM_DEFAULT_P 768 +* SGEMM_DEFAULT_Q 384 +* A_PR1 512 +* B_PR1 512 +* +* +* 2014/07/28 Saar +* Performance at 9216x9216x9216: +* 1 thread: 102 GFLOPS (SANDYBRIDGE: 59) (MKL: 83) +* 2 threads: 195 GFLOPS (SANDYBRIDGE: 116) (MKL: 155) +* 3 threads: 281 GFLOPS (SANDYBRIDGE: 165) (MKL: 230) +* 4 threads: 366 GFLOPS (SANDYBRIDGE: 223) (MKL: 267) +* +*********************************************************************/ + +#include "common.h" +#include <immintrin.h> + + + +/******************************************************************************************* +* 8 lines of N +*******************************************************************************************/ + + + + + + +/******************************************************************************************* +* 4 lines of N +*******************************************************************************************/ + +#define INIT64x4() \ + row0 = _mm512_setzero_ps(); \ + row1 = _mm512_setzero_ps(); \ + row2 = _mm512_setzero_ps(); \ + row3 = _mm512_setzero_ps(); \ + row0b = _mm512_setzero_ps(); \ + row1b = _mm512_setzero_ps(); \ + row2b = _mm512_setzero_ps(); \ + row3b = _mm512_setzero_ps(); \ + row0c = _mm512_setzero_ps(); \ + row1c = _mm512_setzero_ps(); \ + row2c = _mm512_setzero_ps(); \ + row3c = _mm512_setzero_ps(); \ + row0d = _mm512_setzero_ps(); \ + row1d = _mm512_setzero_ps(); \ + row2d = _mm512_setzero_ps(); \ + row3d = _mm512_setzero_ps(); \ + +#define KERNEL64x4_SUB() \ + zmm0 = _mm512_loadu_ps(AO); \ + zmm1 = _mm512_loadu_ps(A1); \ + zmm5 = _mm512_loadu_ps(A2); \ + zmm7 = _mm512_loadu_ps(A3); \ + zmm2 = _mm512_broadcastss_ps(_mm_load_ss(BO)); \ + zmm3 = _mm512_broadcastss_ps(_mm_load_ss(BO+1)); \ + row0 += zmm0 * zmm2; \ + row1 += zmm0 * zmm3; \ + row0b += zmm1 * zmm2; \ + row1b += zmm1 * zmm3; \ + row0c += zmm5 * zmm2; \ + row1c += zmm5 * zmm3; \ + row0d += zmm7 * zmm2; \ + row1d += zmm7 * zmm3; \ + zmm2 = _mm512_broadcastss_ps(_mm_load_ss(BO+2)); \ + zmm3 = _mm512_broadcastss_ps(_mm_load_ss(BO+3)); \ + row2 += zmm0 * zmm2; \ + row3 += zmm0 * zmm3; \ + row2b += zmm1 * zmm2; \ + row3b += zmm1 * zmm3; \ + row2c += zmm5 * zmm2; \ + row3c += zmm5 * zmm3; \ + row2d += zmm7 * zmm2; \ + row3d += zmm7 * zmm3; \ + BO += 4; \ + AO += 16; \ + A1 += 16; \ + A2 += 16; \ + A3 += 16; \ + + +#define SAVE64x4(ALPHA) \ + zmm0 = _mm512_set1_ps(ALPHA); \ + row0 *= zmm0; \ + row1 *= zmm0; \ + row2 *= zmm0; \ + row3 *= zmm0; \ + row0b *= zmm0; \ + row1b *= zmm0; \ + row2b *= zmm0; \ + row3b *= zmm0; \ + row0c *= zmm0; \ + row1c *= zmm0; \ + row2c *= zmm0; \ + row3c *= zmm0; \ + row0d *= zmm0; \ + row1d *= zmm0; \ + row2d *= zmm0; \ + row3d *= zmm0; \ + row0 += _mm512_loadu_ps(CO1 + 0*ldc); \ + row1 += _mm512_loadu_ps(CO1 + 1*ldc); \ + row2 += _mm512_loadu_ps(CO1 + 2*ldc); \ + row3 += _mm512_loadu_ps(CO1 + 3*ldc); \ + _mm512_storeu_ps(CO1 + 0*ldc, row0); \ + _mm512_storeu_ps(CO1 + 1*ldc, row1); \ + _mm512_storeu_ps(CO1 + 2*ldc, row2); \ + _mm512_storeu_ps(CO1 + 3*ldc, row3); \ + row0b += _mm512_loadu_ps(CO1 + 0*ldc + 16); \ + row1b += _mm512_loadu_ps(CO1 + 1*ldc + 16); \ + row2b += _mm512_loadu_ps(CO1 + 2*ldc + 16); \ + row3b +=
_mm512_loadu_ps(CO1 + 3*ldc + 16); \ + _mm512_storeu_ps(CO1 + 0*ldc + 16, row0b); \ + _mm512_storeu_ps(CO1 + 1*ldc + 16, row1b); \ + _mm512_storeu_ps(CO1 + 2*ldc + 16, row2b); \ + _mm512_storeu_ps(CO1 + 3*ldc + 16, row3b); \ + row0c += _mm512_loadu_ps(CO1 + 0*ldc + 32); \ + row1c += _mm512_loadu_ps(CO1 + 1*ldc + 32); \ + row2c += _mm512_loadu_ps(CO1 + 2*ldc + 32); \ + row3c += _mm512_loadu_ps(CO1 + 3*ldc + 32); \ + _mm512_storeu_ps(CO1 + 0*ldc + 32, row0c); \ + _mm512_storeu_ps(CO1 + 1*ldc + 32, row1c); \ + _mm512_storeu_ps(CO1 + 2*ldc + 32, row2c); \ + _mm512_storeu_ps(CO1 + 3*ldc + 32, row3c); \ + row0d += _mm512_loadu_ps(CO1 + 0*ldc + 48); \ + row1d += _mm512_loadu_ps(CO1 + 1*ldc + 48); \ + row2d += _mm512_loadu_ps(CO1 + 2*ldc + 48); \ + row3d += _mm512_loadu_ps(CO1 + 3*ldc + 48); \ + _mm512_storeu_ps(CO1 + 0*ldc + 48, row0d); \ + _mm512_storeu_ps(CO1 + 1*ldc + 48, row1d); \ + _mm512_storeu_ps(CO1 + 2*ldc + 48, row2d); \ + _mm512_storeu_ps(CO1 + 3*ldc + 48, row3d); + + +#define INIT48x4() \ + row0 = _mm512_setzero_ps(); \ + row1 = _mm512_setzero_ps(); \ + row2 = _mm512_setzero_ps(); \ + row3 = _mm512_setzero_ps(); \ + row0b = _mm512_setzero_ps(); \ + row1b = _mm512_setzero_ps(); \ + row2b = _mm512_setzero_ps(); \ + row3b = _mm512_setzero_ps(); \ + row0c = _mm512_setzero_ps(); \ + row1c = _mm512_setzero_ps(); \ + row2c = _mm512_setzero_ps(); \ + row3c = _mm512_setzero_ps(); \ + +#define KERNEL48x4_SUB() \ + zmm0 = _mm512_loadu_ps(AO); \ + zmm1 = _mm512_loadu_ps(A1); \ + zmm5 = _mm512_loadu_ps(A2); \ + zmm2 = _mm512_broadcastss_ps(_mm_load_ss(BO)); \ + zmm3 = _mm512_broadcastss_ps(_mm_load_ss(BO+1)); \ + row0 += zmm0 * zmm2; \ + row1 += zmm0 * zmm3; \ + row0b += zmm1 * zmm2; \ + row1b += zmm1 * zmm3; \ + row0c += zmm5 * zmm2; \ + row1c += zmm5 * zmm3; \ + zmm2 = _mm512_broadcastss_ps(_mm_load_ss(BO+2)); \ + zmm3 = _mm512_broadcastss_ps(_mm_load_ss(BO+3)); \ + row2 += zmm0 * zmm2; \ + row3 += zmm0 * zmm3; \ + row2b += zmm1 * zmm2; \ + row3b += zmm1 * zmm3; \ + row2c += zmm5 * zmm2; \ + row3c += zmm5 * zmm3; \ + BO += 4; \ + AO += 16; \ + A1 += 16; \ + A2 += 16; + + +#define SAVE48x4(ALPHA) \ + zmm0 = _mm512_set1_ps(ALPHA); \ + row0 *= zmm0; \ + row1 *= zmm0; \ + row2 *= zmm0; \ + row3 *= zmm0; \ + row0b *= zmm0; \ + row1b *= zmm0; \ + row2b *= zmm0; \ + row3b *= zmm0; \ + row0c *= zmm0; \ + row1c *= zmm0; \ + row2c *= zmm0; \ + row3c *= zmm0; \ + row0 += _mm512_loadu_ps(CO1 + 0*ldc); \ + row1 += _mm512_loadu_ps(CO1 + 1*ldc); \ + row2 += _mm512_loadu_ps(CO1 + 2*ldc); \ + row3 += _mm512_loadu_ps(CO1 + 3*ldc); \ + _mm512_storeu_ps(CO1 + 0*ldc, row0); \ + _mm512_storeu_ps(CO1 + 1*ldc, row1); \ + _mm512_storeu_ps(CO1 + 2*ldc, row2); \ + _mm512_storeu_ps(CO1 + 3*ldc, row3); \ + row0b += _mm512_loadu_ps(CO1 + 0*ldc + 16); \ + row1b += _mm512_loadu_ps(CO1 + 1*ldc + 16); \ + row2b += _mm512_loadu_ps(CO1 + 2*ldc + 16); \ + row3b += _mm512_loadu_ps(CO1 + 3*ldc + 16); \ + _mm512_storeu_ps(CO1 + 0*ldc + 16, row0b); \ + _mm512_storeu_ps(CO1 + 1*ldc + 16, row1b); \ + _mm512_storeu_ps(CO1 + 2*ldc + 16, row2b); \ + _mm512_storeu_ps(CO1 + 3*ldc + 16, row3b); \ + row0c += _mm512_loadu_ps(CO1 + 0*ldc + 32); \ + row1c += _mm512_loadu_ps(CO1 + 1*ldc + 32); \ + row2c += _mm512_loadu_ps(CO1 + 2*ldc + 32); \ + row3c += _mm512_loadu_ps(CO1 + 3*ldc + 32); \ + _mm512_storeu_ps(CO1 + 0*ldc + 32, row0c); \ + _mm512_storeu_ps(CO1 + 1*ldc + 32, row1c); \ + _mm512_storeu_ps(CO1 + 2*ldc + 32, row2c); \ + _mm512_storeu_ps(CO1 + 3*ldc + 32, row3c); + + +#define INIT32x4() \ + row0 = _mm512_setzero_ps(); \ + row1 = 
_mm512_setzero_ps(); \ + row2 = _mm512_setzero_ps(); \ + row3 = _mm512_setzero_ps(); \ + row0b = _mm512_setzero_ps(); \ + row1b = _mm512_setzero_ps(); \ + row2b = _mm512_setzero_ps(); \ + row3b = _mm512_setzero_ps(); \ + +#define KERNEL32x4_SUB() \ + zmm0 = _mm512_loadu_ps(AO); \ + zmm1 = _mm512_loadu_ps(A1); \ + zmm2 = _mm512_broadcastss_ps(_mm_load_ss(BO)); \ + zmm3 = _mm512_broadcastss_ps(_mm_load_ss(BO+1)); \ + row0 += zmm0 * zmm2; \ + row1 += zmm0 * zmm3; \ + row0b += zmm1 * zmm2; \ + row1b += zmm1 * zmm3; \ + zmm2 = _mm512_broadcastss_ps(_mm_load_ss(BO+2)); \ + zmm3 = _mm512_broadcastss_ps(_mm_load_ss(BO+3)); \ + row2 += zmm0 * zmm2; \ + row3 += zmm0 * zmm3; \ + row2b += zmm1 * zmm2; \ + row3b += zmm1 * zmm3; \ + BO += 4; \ + AO += 16; \ + A1 += 16; + + +#define SAVE32x4(ALPHA) \ + zmm0 = _mm512_set1_ps(ALPHA); \ + row0 *= zmm0; \ + row1 *= zmm0; \ + row2 *= zmm0; \ + row3 *= zmm0; \ + row0b *= zmm0; \ + row1b *= zmm0; \ + row2b *= zmm0; \ + row3b *= zmm0; \ + row0 += _mm512_loadu_ps(CO1 + 0*ldc); \ + row1 += _mm512_loadu_ps(CO1 + 1*ldc); \ + row2 += _mm512_loadu_ps(CO1 + 2*ldc); \ + row3 += _mm512_loadu_ps(CO1 + 3*ldc); \ + _mm512_storeu_ps(CO1 + 0*ldc, row0); \ + _mm512_storeu_ps(CO1 + 1*ldc, row1); \ + _mm512_storeu_ps(CO1 + 2*ldc, row2); \ + _mm512_storeu_ps(CO1 + 3*ldc, row3); \ + row0b += _mm512_loadu_ps(CO1 + 0*ldc + 16); \ + row1b += _mm512_loadu_ps(CO1 + 1*ldc + 16); \ + row2b += _mm512_loadu_ps(CO1 + 2*ldc + 16); \ + row3b += _mm512_loadu_ps(CO1 + 3*ldc + 16); \ + _mm512_storeu_ps(CO1 + 0*ldc + 16, row0b); \ + _mm512_storeu_ps(CO1 + 1*ldc + 16, row1b); \ + _mm512_storeu_ps(CO1 + 2*ldc + 16, row2b); \ + _mm512_storeu_ps(CO1 + 3*ldc + 16, row3b); + + + +#define INIT16x4() \ + row0 = _mm512_setzero_ps(); \ + row1 = _mm512_setzero_ps(); \ + row2 = _mm512_setzero_ps(); \ + row3 = _mm512_setzero_ps(); \ + +#define KERNEL16x4_SUB() \ + zmm0 = _mm512_loadu_ps(AO); \ + zmm2 = _mm512_broadcastss_ps(_mm_load_ss(BO)); \ + zmm3 = _mm512_broadcastss_ps(_mm_load_ss(BO+1)); \ + row0 += zmm0 * zmm2; \ + row1 += zmm0 * zmm3; \ + zmm2 = _mm512_broadcastss_ps(_mm_load_ss(BO+2)); \ + zmm3 = _mm512_broadcastss_ps(_mm_load_ss(BO+3)); \ + row2 += zmm0 * zmm2; \ + row3 += zmm0 * zmm3; \ + BO += 4; \ + AO += 16; + + +#define SAVE16x4(ALPHA) \ + zmm0 = _mm512_set1_ps(ALPHA); \ + row0 *= zmm0; \ + row1 *= zmm0; \ + row2 *= zmm0; \ + row3 *= zmm0; \ + row0 += _mm512_loadu_ps(CO1 + 0 * ldc); \ + row1 += _mm512_loadu_ps(CO1 + 1 * ldc); \ + row2 += _mm512_loadu_ps(CO1 + 2 * ldc); \ + row3 += _mm512_loadu_ps(CO1 + 3 * ldc); \ + _mm512_storeu_ps(CO1 + 0 * ldc, row0); \ + _mm512_storeu_ps(CO1 + 1 * ldc, row1); \ + _mm512_storeu_ps(CO1 + 2 * ldc, row2); \ + _mm512_storeu_ps(CO1 + 3 * ldc, row3); + + + +/*******************************************************************************************/ + +#define INIT8x4() \ + ymm4 = _mm256_setzero_ps(); \ + ymm6 = _mm256_setzero_ps(); \ + ymm8 = _mm256_setzero_ps(); \ + ymm10 = _mm256_setzero_ps(); \ + +#define KERNEL8x4_SUB() \ + ymm0 = _mm256_loadu_ps(AO); \ + ymm2 = _mm256_broadcastss_ps(_mm_load_ss(BO + 0)); \ + ymm3 = _mm256_broadcastss_ps(_mm_load_ss(BO + 1)); \ + ymm4 += ymm0 * ymm2; \ + ymm6 += ymm0 * ymm3; \ + ymm2 = _mm256_broadcastss_ps(_mm_load_ss(BO + 2)); \ + ymm3 = _mm256_broadcastss_ps(_mm_load_ss(BO + 3)); \ + ymm8 += ymm0 * ymm2; \ + ymm10 += ymm0 * ymm3; \ + BO += 4; \ + AO += 8; + + +#define SAVE8x4(ALPHA) \ + ymm0 = _mm256_set1_ps(ALPHA); \ + ymm4 *= ymm0; \ + ymm6 *= ymm0; \ + ymm8 *= ymm0; \ + ymm10 *= ymm0; \ + ymm4 += _mm256_loadu_ps(CO1 + 0 
* ldc); \ + ymm6 += _mm256_loadu_ps(CO1 + 1 * ldc); \ + ymm8 += _mm256_loadu_ps(CO1 + 2 * ldc); \ + ymm10 += _mm256_loadu_ps(CO1 + 3 * ldc); \ + _mm256_storeu_ps(CO1 + 0 * ldc, ymm4); \ + _mm256_storeu_ps(CO1 + 1 * ldc, ymm6); \ + _mm256_storeu_ps(CO1 + 2 * ldc, ymm8); \ + _mm256_storeu_ps(CO1 + 3 * ldc, ymm10); \ + + + +/*******************************************************************************************/ + +#define INIT4x4() \ + row0 = _mm_setzero_ps(); \ + row1 = _mm_setzero_ps(); \ + row2 = _mm_setzero_ps(); \ + row3 = _mm_setzero_ps(); \ + + +#define KERNEL4x4_SUB() \ + xmm0 = _mm_loadu_ps(AO); \ + xmm2 = _mm_broadcastss_ps(_mm_load_ss(BO + 0)); \ + xmm3 = _mm_broadcastss_ps(_mm_load_ss(BO + 1)); \ + row0 += xmm0 * xmm2; \ + row1 += xmm0 * xmm3; \ + xmm2 = _mm_broadcastss_ps(_mm_load_ss(BO + 2)); \ + xmm3 = _mm_broadcastss_ps(_mm_load_ss(BO + 3)); \ + row2 += xmm0 * xmm2; \ + row3 += xmm0 * xmm3; \ + BO += 4; \ + AO += 4; + + +#define SAVE4x4(ALPHA) \ + xmm0 = _mm_set1_ps(ALPHA); \ + row0 *= xmm0; \ + row1 *= xmm0; \ + row2 *= xmm0; \ + row3 *= xmm0; \ + row0 += _mm_loadu_ps(CO1 + 0 * ldc); \ + row1 += _mm_loadu_ps(CO1 + 1 * ldc); \ + row2 += _mm_loadu_ps(CO1 + 2 * ldc); \ + row3 += _mm_loadu_ps(CO1 + 3 * ldc); \ + _mm_storeu_ps(CO1 + 0 * ldc, row0); \ + _mm_storeu_ps(CO1 + 1 * ldc, row1); \ + _mm_storeu_ps(CO1 + 2 * ldc, row2); \ + _mm_storeu_ps(CO1 + 3 * ldc, row3); \ + + +/*******************************************************************************************/ + +#define INIT2x4() \ + row0 = 0; row0b = 0; row1 = 0; row1b = 0; \ + row2 = 0; row2b = 0; row3 = 0; row3b = 0; + +#define KERNEL2x4_SUB() \ + xmm0 = *(AO); \ + xmm1 = *(AO + 1); \ + xmm2 = *(BO + 0); \ + xmm3 = *(BO + 1); \ + row0 += xmm0 * xmm2; \ + row0b += xmm1 * xmm2; \ + row1 += xmm0 * xmm3; \ + row1b += xmm1 * xmm3; \ + xmm2 = *(BO + 2); \ + xmm3 = *(BO + 3); \ + row2 += xmm0 * xmm2; \ + row2b += xmm1 * xmm2; \ + row3 += xmm0 * xmm3; \ + row3b += xmm1 * xmm3; \ + BO += 4; \ + AO += 2; + + +#define SAVE2x4(ALPHA) \ + xmm0 = ALPHA; \ + row0 *= xmm0; \ + row0b *= xmm0; \ + row1 *= xmm0; \ + row1b *= xmm0; \ + row2 *= xmm0; \ + row2b *= xmm0; \ + row3 *= xmm0; \ + row3b *= xmm0; \ + *(CO1 + 0 * ldc + 0) += row0; \ + *(CO1 + 0 * ldc + 1) += row0b; \ + *(CO1 + 1 * ldc + 0) += row1; \ + *(CO1 + 1 * ldc + 1) += row1b; \ + *(CO1 + 2 * ldc + 0) += row2; \ + *(CO1 + 2 * ldc + 1) += row2b; \ + *(CO1 + 3 * ldc + 0) += row3; \ + *(CO1 + 3 * ldc + 1) += row3b; \ + + + +/*******************************************************************************************/ + +#define INIT1x4() \ + row0 = 0; row1 = 0; row2 = 0; row3 = 0; +#define KERNEL1x4_SUB() \ + xmm0 = *(AO ); \ + xmm2 = *(BO + 0); \ + xmm3 = *(BO + 1); \ + row0 += xmm0 * xmm2; \ + row1 += xmm0 * xmm3; \ + xmm2 = *(BO + 2); \ + xmm3 = *(BO + 3); \ + row2 += xmm0 * xmm2; \ + row3 += xmm0 * xmm3; \ + BO += 4; \ + AO += 1; + + +#define SAVE1x4(ALPHA) \ + xmm0 = ALPHA; \ + row0 *= xmm0; \ + row1 *= xmm0; \ + row2 *= xmm0; \ + row3 *= xmm0; \ + *(CO1 + 0 * ldc) += row0; \ + *(CO1 + 1 * ldc) += row1; \ + *(CO1 + 2 * ldc) += row2; \ + *(CO1 + 3 * ldc) += row3; \ + + + +/*******************************************************************************************/ + +/******************************************************************************************* +* 2 lines of N +*******************************************************************************************/ + +#define INIT16x2() \ + row0 = _mm512_setzero_ps(); \ + row1 = _mm512_setzero_ps(); \ + + +#define 
KERNEL16x2_SUB() \ + zmm0 = _mm512_loadu_ps(AO); \ + zmm2 = _mm512_broadcastss_ps(_mm_load_ss(BO)); \ + zmm3 = _mm512_broadcastss_ps(_mm_load_ss(BO + 1)); \ + row0 += zmm0 * zmm2; \ + row1 += zmm0 * zmm3; \ + BO += 2; \ + AO += 16; + + +#define SAVE16x2(ALPHA) \ + zmm0 = _mm512_set1_ps(ALPHA); \ + row0 *= zmm0; \ + row1 *= zmm0; \ + row0 += _mm512_loadu_ps(CO1); \ + row1 += _mm512_loadu_ps(CO1 + ldc); \ + _mm512_storeu_ps(CO1 , row0); \ + _mm512_storeu_ps(CO1 + ldc, row1); \ + + + + +/*******************************************************************************************/ + +#define INIT8x2() \ + ymm4 = _mm256_setzero_ps(); \ + ymm6 = _mm256_setzero_ps(); \ + +#define KERNEL8x2_SUB() \ + ymm0 = _mm256_loadu_ps(AO); \ + ymm2 = _mm256_broadcastss_ps(_mm_load_ss(BO)); \ + ymm3 = _mm256_broadcastss_ps(_mm_load_ss(BO + 1)); \ + ymm4 += ymm0 * ymm2; \ + ymm6 += ymm0 * ymm3; \ + BO += 2; \ + AO += 8; + + +#define SAVE8x2(ALPHA) \ + ymm0 = _mm256_set1_ps(ALPHA); \ + ymm4 *= ymm0; \ + ymm6 *= ymm0; \ + ymm4 += _mm256_loadu_ps(CO1); \ + ymm6 += _mm256_loadu_ps(CO1 + ldc); \ + _mm256_storeu_ps(CO1 , ymm4); \ + _mm256_storeu_ps(CO1 + ldc, ymm6); \ + + + +/*******************************************************************************************/ + +#define INIT4x2() \ + row0 = _mm_setzero_ps(); \ + row1 = _mm_setzero_ps(); \ + +#define KERNEL4x2_SUB() \ + xmm0 = _mm_loadu_ps(AO); \ + xmm2 = _mm_broadcastss_ps(_mm_load_ss(BO)); \ + xmm3 = _mm_broadcastss_ps(_mm_load_ss(BO + 1)); \ + row0 += xmm0 * xmm2; \ + row1 += xmm0 * xmm3; \ + BO += 2; \ + AO += 4; + + +#define SAVE4x2(ALPHA) \ + xmm0 = _mm_set1_ps(ALPHA); \ + row0 *= xmm0; \ + row1 *= xmm0; \ + row0 += _mm_loadu_ps(CO1); \ + row1 += _mm_loadu_ps(CO1 + ldc); \ + _mm_storeu_ps(CO1 , row0); \ + _mm_storeu_ps(CO1 + ldc, row1); \ + + + +/*******************************************************************************************/ + + +#define INIT2x2() \ + row0 = 0; row0b = 0; row1 = 0; row1b = 0; \ + +#define KERNEL2x2_SUB() \ + xmm0 = *(AO + 0); \ + xmm1 = *(AO + 1); \ + xmm2 = *(BO + 0); \ + xmm3 = *(BO + 1); \ + row0 += xmm0 * xmm2; \ + row0b += xmm1 * xmm2; \ + row1 += xmm0 * xmm3; \ + row1b += xmm1 * xmm3; \ + BO += 2; \ + AO += 2; \ + + +#define SAVE2x2(ALPHA) \ + xmm0 = ALPHA; \ + row0 *= xmm0; \ + row0b *= xmm0; \ + row1 *= xmm0; \ + row1b *= xmm0; \ + *(CO1 ) += row0; \ + *(CO1 +1 ) += row0b; \ + *(CO1 + ldc ) += row1; \ + *(CO1 + ldc +1) += row1b; \ + + +/*******************************************************************************************/ + +#define INIT1x2() \ + row0 = 0; row1 = 0; + +#define KERNEL1x2_SUB() \ + xmm0 = *(AO); \ + xmm2 = *(BO + 0); \ + xmm3 = *(BO + 1); \ + row0 += xmm0 * xmm2; \ + row1 += xmm0 * xmm3; \ + BO += 2; \ + AO += 1; + + +#define SAVE1x2(ALPHA) \ + xmm0 = ALPHA; \ + row0 *= xmm0; \ + row1 *= xmm0; \ + *(CO1 ) += row0; \ + *(CO1 + ldc ) += row1; \ + + +/*******************************************************************************************/ + +/******************************************************************************************* +* 1 line of N +*******************************************************************************************/ + +#define INIT16x1() \ + row0 = _mm512_setzero_ps(); \ + +#define KERNEL16x1_SUB() \ + zmm0 = _mm512_loadu_ps(AO); \ + zmm2 = _mm512_broadcastss_ps(_mm_load_ss(BO)); \ + row0 += zmm0 * zmm2; \ + BO += 1; \ + AO += 16; + + +#define SAVE16x1(ALPHA) \ + zmm0 = _mm512_set1_ps(ALPHA); \ + row0 *= zmm0; \ + row0 += _mm512_loadu_ps(CO1); \ + _mm512_storeu_ps(CO1 , 
row0); \ + + +/*******************************************************************************************/ + +#define INIT8x1() \ + ymm4 = _mm256_setzero_ps(); + +#define KERNEL8x1_SUB() \ + ymm0 = _mm256_loadu_ps(AO); \ + ymm2 = _mm256_broadcastss_ps(_mm_load_ss(BO)); \ + ymm4 += ymm0 * ymm2; \ + BO += 1; \ + AO += 8; + + +#define SAVE8x1(ALPHA) \ + ymm0 = _mm256_set1_ps(ALPHA); \ + ymm4 *= ymm0; \ + ymm4 += _mm256_loadu_ps(CO1); \ + _mm256_storeu_ps(CO1 , ymm4); \ + + +/*******************************************************************************************/ + +#define INIT4x1() \ + row0 = _mm_setzero_ps(); \ + +#define KERNEL4x1_SUB() \ + xmm0 = _mm_loadu_ps(AO); \ + xmm2 = _mm_broadcastss_ps(_mm_load_ss(BO)); \ + row0 += xmm0 * xmm2; \ + BO += 1; \ + AO += 4; + + +#define SAVE4x1(ALPHA) \ + xmm0 = _mm_set1_ps(ALPHA); \ + row0 *= xmm0; \ + row0 += _mm_loadu_ps(CO1); \ + _mm_storeu_ps(CO1 , row0); \ + + + +/*******************************************************************************************/ + +#define INIT2x1() \ + row0 = 0; row0b = 0; + +#define KERNEL2x1_SUB() \ + xmm0 = *(AO + 0); \ + xmm1 = *(AO + 1); \ + xmm2 = *(BO); \ + row0 += xmm0 * xmm2; \ + row0b += xmm1 * xmm2; \ + BO += 1; \ + AO += 2; + + +#define SAVE2x1(ALPHA) \ + xmm0 = ALPHA; \ + row0 *= xmm0; \ + row0b *= xmm0; \ + *(CO1 ) += row0; \ + *(CO1 +1 ) += row0b; \ + + +/*******************************************************************************************/ + +#define INIT1x1() \ + row0 = 0; + +#define KERNEL1x1_SUB() \ + xmm0 = *(AO); \ + xmm2 = *(BO); \ + row0 += xmm0 * xmm2; \ + BO += 1; \ + AO += 1; + + +#define SAVE1x1(ALPHA) \ + xmm0 = ALPHA; \ + row0 *= xmm0; \ + *(CO1 ) += row0; \ + + +/*******************************************************************************************/ + + +/************************************************************************************* +* GEMM Kernel +*************************************************************************************/ + +int __attribute__ ((noinline)) +CNAME(BLASLONG m, BLASLONG n, BLASLONG k, float alpha, float * __restrict A, float * __restrict B, float * __restrict C, BLASLONG ldc) +{ + unsigned long M = m, N = n, K = k; + if (M == 0) + return 0; + if (N == 0) + return 0; + if (K == 0) + return 0; + + + while (N >= 4) { + float *CO1; + float *AO; + int i; + // L8_10 + CO1 = C; + C += 4 * ldc; + + AO = A; + + i = m; + while (i >= 64) { + float *BO; + float *A1, *A2, *A3; + // L8_11 + __m512 zmm0, zmm1, zmm2, zmm3, row0, zmm5, row1, zmm7, row2, row3, row0b, row1b, row2b, row3b, row0c, row1c, row2c, row3c, row0d, row1d, row2d, row3d; + BO = B; + int kloop = K; + + A1 = AO + 16 * K; + A2 = A1 + 16 * K; + A3 = A2 + 16 * K; + + INIT64x4() + + while (kloop > 0) { + // L12_17 + KERNEL64x4_SUB() + kloop--; + } + // L8_19 + SAVE64x4(alpha) + CO1 += 64; + AO += 48 * K; + + i -= 64; + } + while (i >= 32) { + float *BO; + float *A1; + // L8_11 + __m512 zmm0, zmm1, zmm2, zmm3, row0, row1, row2, row3, row0b, row1b, row2b, row3b; + BO = B; + int kloop = K; + + A1 = AO + 16 * K; + + INIT32x4() + + while (kloop > 0) { + // L12_17 + KERNEL32x4_SUB() + kloop--; + } + // L8_19 + SAVE32x4(alpha) + CO1 += 32; + AO += 16 * K; + + i -= 32; + } + while (i >= 16) { + float *BO; + // L8_11 + __m512 zmm0, zmm2, zmm3, row0, row1, row2, row3; + BO = B; + int kloop = K; + + INIT16x4() + + while (kloop > 0) { + // L12_17 + KERNEL16x4_SUB() + kloop--; + } + // L8_19 + SAVE16x4(alpha) + CO1 += 16; + + i -= 16; + } + while (i >= 8) { + float *BO; + // L8_11 + __m256 ymm0, ymm2, 
ymm3, ymm4, ymm6,ymm8,ymm10; + BO = B; + int kloop = K; + + INIT8x4() + + while (kloop > 0) { + // L12_17 + KERNEL8x4_SUB() + kloop--; + } + // L8_19 + SAVE8x4(alpha) + CO1 += 8; + + i -= 8; + } + while (i >= 4) { + // L8_11 + float *BO; + __m128 xmm0, xmm2, xmm3, row0, row1, row2, row3; + BO = B; + int kloop = K; + + INIT4x4() + // L8_16 + while (kloop > 0) { + // L12_17 + KERNEL4x4_SUB() + kloop--; + } + // L8_19 + SAVE4x4(alpha) + CO1 += 4; + + i -= 4; + } + +/************************************************************************** +* Rest of M +***************************************************************************/ + + while (i >= 2) { + float *BO; + float xmm0, xmm1, xmm2, xmm3, row0, row0b, row1, row1b, row2, row2b, row3, row3b; + BO = B; + + INIT2x4() + int kloop = K; + + while (kloop > 0) { + KERNEL2x4_SUB() + kloop--; + } + SAVE2x4(alpha) + CO1 += 2; + i -= 2; + } + // L13_40 + while (i >= 1) { + float *BO; + float xmm0, xmm2, xmm3, row0, row1, row2, row3; + int kloop = K; + BO = B; + INIT1x4() + + while (kloop > 0) { + KERNEL1x4_SUB() + kloop--; + } + SAVE1x4(alpha) + CO1 += 1; + i -= 1; + } + + B += K * 4; + N -= 4; + } + +/**************************************************************************************************/ + + // L8_0 + while (N >= 2) { + float *CO1; + float *AO; + int i; + // L8_10 + CO1 = C; + C += 2 * ldc; + + AO = A; + + i = m; + while (i >= 16) { + float *BO; + + // L8_11 + __m512 zmm0, zmm2, zmm3, row0, row1; + BO = B; + int kloop = K; + + INIT16x2() + + while (kloop > 0) { + // L12_17 + KERNEL16x2_SUB() + kloop--; + } + // L8_19 + SAVE16x2(alpha) + CO1 += 16; + + i -= 16; + } + while (i >= 8) { + float *BO; + __m256 ymm0, ymm2, ymm3, ymm4, ymm6; + // L8_11 + BO = B; + int kloop = K; + + INIT8x2() + + // L8_16 + while (kloop > 0) { + // L12_17 + KERNEL8x2_SUB() + kloop--; + } + // L8_19 + SAVE8x2(alpha) + CO1 += 8; + + i-=8; + } + + while (i >= 4) { + float *BO; + __m128 xmm0, xmm2, xmm3, row0, row1; + // L8_11 + BO = B; + int kloop = K; + + INIT4x2() + + // L8_16 + while (kloop > 0) { + // L12_17 + KERNEL4x2_SUB() + kloop--; + } + // L8_19 + SAVE4x2(alpha) + CO1 += 4; + + i-=4; + } + +/************************************************************************** +* Rest of M +***************************************************************************/ + + while (i >= 2) { + float *BO; + float xmm0, xmm1, xmm2, xmm3, row0, row0b, row1, row1b; + int kloop = K; + BO = B; + + INIT2x2() + + while (kloop > 0) { + KERNEL2x2_SUB() + kloop--; + } + SAVE2x2(alpha) + CO1 += 2; + i -= 2; + } + // L13_40 + while (i >= 1) { + float *BO; + float xmm0, xmm2, xmm3, row0, row1; + int kloop = K; + BO = B; + + INIT1x2() + + while (kloop > 0) { + KERNEL1x2_SUB() + kloop--; + } + SAVE1x2(alpha) + CO1 += 1; + i -= 1; + } + + B += K * 2; + N -= 2; + } + + // L8_0 + while (N >= 1) { + // L8_10 + float *CO1; + float *AO; + int i; + + CO1 = C; + C += ldc; + + AO = A; + + i = m; + while (i >= 16) { + float *BO; + __m512 zmm0, zmm2, row0; + // L8_11 + BO = B; + int kloop = K; + + INIT16x1() + // L8_16 + while (kloop > 0) { + // L12_17 + KERNEL16x1_SUB() + kloop--; + } + // L8_19 + SAVE16x1(alpha) + CO1 += 16; + + i-= 16; + } + while (i >= 8) { + float *BO; + __m256 ymm0, ymm2, ymm4; + // L8_11 + BO = B; + int kloop = K; + + INIT8x1() + // L8_16 + while (kloop > 0) { + // L12_17 + KERNEL8x1_SUB() + kloop--; + } + // L8_19 + SAVE8x1(alpha) + CO1 += 8; + + i-= 8; + } + while (i >= 4) { + float *BO; + __m128 xmm0, xmm2, row0; + // L8_11 + BO = B; + int kloop = K; + + INIT4x1() + // 
L8_16 + while (kloop > 0) { + // L12_17 + KERNEL4x1_SUB() + kloop--; + } + // L8_19 + SAVE4x1(alpha) + CO1 += 4; + + i-= 4; + } + +/************************************************************************** +* Rest of M +***************************************************************************/ + + while (i >= 2) { + float *BO; + float xmm0, xmm1, xmm2, row0, row0b; + int kloop = K; + BO = B; + + INIT2x1() + + while (kloop > 0) { + KERNEL2x1_SUB() + kloop--; + } + SAVE2x1(alpha) + CO1 += 2; + i -= 2; + } + // L13_40 + while (i >= 1) { + float *BO; + float xmm0, xmm2, row0; + int kloop = K; + + BO = B; + INIT1x1() + + + while (kloop > 0) { + KERNEL1x1_SUB() + kloop--; + } + SAVE1x1(alpha) + CO1 += 1; + i -= 1; + } + + B += K * 1; + N -= 1; + } + + + return 0; +} + + +/* + * "Direct sgemm" code. This code operates directly on the inputs and outputs + * of the sgemm call, avoiding the copies, memory realignments and threading, + * and only supports alpha = 1 and beta = 0. + * This is a common case and provides value for relatively small matrices. + * For larger matrices the "regular" sgemm code is superior; there the cost of + * copying/shuffling the B matrix really pays off. + */ + + + +#define DECLARE_RESULT_512(N,M) __m512 result##N##M = _mm512_setzero_ps() +#define BROADCAST_LOAD_A_512(N,M) __m512 Aval##M = _mm512_broadcastss_ps(_mm_load_ss(&A[k + strideA * (i+M)])) +#define LOAD_B_512(N,M) __m512 Bval##N = _mm512_loadu_ps(&B[strideB * k + j + (N*16)]) +#define MATMUL_512(N,M) result##N##M = _mm512_fmadd_ps(Aval##M, Bval##N , result##N##M) +#define STORE_512(N,M) _mm512_storeu_ps(&R[(i+M) * strideR + j+(N*16)], result##N##M) + + +#define DECLARE_RESULT_256(N,M) __m256 result##N##M = _mm256_setzero_ps() +#define BROADCAST_LOAD_A_256(N,M) __m256 Aval##M = _mm256_broadcastss_ps(_mm_load_ss(&A[k + strideA * (i+M)])) +#define LOAD_B_256(N,M) __m256 Bval##N = _mm256_loadu_ps(&B[strideB * k + j + (N*8)]) +#define MATMUL_256(N,M) result##N##M = _mm256_fmadd_ps(Aval##M, Bval##N , result##N##M) +#define STORE_256(N,M) _mm256_storeu_ps(&R[(i+M) * strideR + j+(N*8)], result##N##M) + +#define DECLARE_RESULT_128(N,M) __m128 result##N##M = _mm_setzero_ps() +#define BROADCAST_LOAD_A_128(N,M) __m128 Aval##M = _mm_broadcastss_ps(_mm_load_ss(&A[k + strideA * (i+M)])) +#define LOAD_B_128(N,M) __m128 Bval##N = _mm_loadu_ps(&B[strideB * k + j + (N*4)]) +#define MATMUL_128(N,M) result##N##M = _mm_fmadd_ps(Aval##M, Bval##N , result##N##M) +#define STORE_128(N,M) _mm_storeu_ps(&R[(i+M) * strideR + j+(N*4)], result##N##M) + +#define DECLARE_RESULT_SCALAR(N,M) float result##N##M = 0; +#define BROADCAST_LOAD_A_SCALAR(N,M) float Aval##M = A[k + strideA * (i + M)]; +#define LOAD_B_SCALAR(N,M) float Bval##N = B[k * strideB + j + N]; +#define MATMUL_SCALAR(N,M) result##N##M += Aval##M * Bval##N; +#define STORE_SCALAR(N,M) R[(i+M) * strideR + j + N] = result##N##M; + +int sgemm_kernel_direct_performant(BLASLONG M, BLASLONG N, BLASLONG K) +{ + int mnk = M * N * K; + /* large matrices -> not performant */ + if (mnk >= 28 * 512 * 512) + return 0; + + /* + * if the B matrix is not a nice multiple of 4 we get many unaligned accesses, + * and the regular sgemm copy/realignment of data pays off much quicker + */ + if ((N & 3) != 0 && (mnk >= 8 * 512 * 512)) + return 0; + +#ifdef SMP + /* if we can run multithreaded, the threading changes the break-even threshold */ + if (mnk > 2 * 350 * 512 && num_cpu_avail(3) > 1) + return 0; +#endif + + return 1; +} + + + +void sgemm_kernel_direct (BLASLONG M, BLASLONG N, BLASLONG K, float *
__restrict A, BLASLONG strideA, float * __restrict B, BLASLONG strideB , float * __restrict R, BLASLONG strideR) +{ + int i, j, k; + + int m4 = M & ~3; + int m2 = M & ~1; + + int n64 = N & ~63; + int n32 = N & ~31; + int n16 = N & ~15; + int n8 = N & ~7; + int n4 = N & ~3; + int n2 = N & ~1; + + i = 0; + + for (i = 0; i < m4; i+=4) { + + for (j = 0; j < n64; j+= 64) { + k = 0; + DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); DECLARE_RESULT_512(2, 0); DECLARE_RESULT_512(3, 0); + DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1); DECLARE_RESULT_512(2, 1); DECLARE_RESULT_512(3, 1); + DECLARE_RESULT_512(0, 2); DECLARE_RESULT_512(1, 2); DECLARE_RESULT_512(2, 2); DECLARE_RESULT_512(3, 2); + DECLARE_RESULT_512(0, 3); DECLARE_RESULT_512(1, 3); DECLARE_RESULT_512(2, 3); DECLARE_RESULT_512(3, 3); + + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + BROADCAST_LOAD_A_512(x, 1); + BROADCAST_LOAD_A_512(x, 2); + BROADCAST_LOAD_A_512(x, 3); + + LOAD_B_512(0, x); LOAD_B_512(1, x); LOAD_B_512(2, x); LOAD_B_512(3, x); + + MATMUL_512(0, 0); MATMUL_512(1, 0); MATMUL_512(2, 0); MATMUL_512(3, 0); + MATMUL_512(0, 1); MATMUL_512(1, 1); MATMUL_512(2, 1); MATMUL_512(3, 1); + MATMUL_512(0, 2); MATMUL_512(1, 2); MATMUL_512(2, 2); MATMUL_512(3, 2); + MATMUL_512(0, 3); MATMUL_512(1, 3); MATMUL_512(2, 3); MATMUL_512(3, 3); + } + STORE_512(0, 0); STORE_512(1, 0); STORE_512(2, 0); STORE_512(3, 0); + STORE_512(0, 1); STORE_512(1, 1); STORE_512(2, 1); STORE_512(3, 1); + STORE_512(0, 2); STORE_512(1, 2); STORE_512(2, 2); STORE_512(3, 2); + STORE_512(0, 3); STORE_512(1, 3); STORE_512(2, 3); STORE_512(3, 3); + } + + for (; j < n32; j+= 32) { + DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); + DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1); + DECLARE_RESULT_512(0, 2); DECLARE_RESULT_512(1, 2); + DECLARE_RESULT_512(0, 3); DECLARE_RESULT_512(1, 3); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + BROADCAST_LOAD_A_512(x, 1); + BROADCAST_LOAD_A_512(x, 2); + BROADCAST_LOAD_A_512(x, 3); + + LOAD_B_512(0, x); LOAD_B_512(1, x); + + MATMUL_512(0, 0); MATMUL_512(1, 0); + MATMUL_512(0, 1); MATMUL_512(1, 1); + MATMUL_512(0, 2); MATMUL_512(1, 2); + MATMUL_512(0, 3); MATMUL_512(1, 3); + } + STORE_512(0, 0); STORE_512(1, 0); + STORE_512(0, 1); STORE_512(1, 1); + STORE_512(0, 2); STORE_512(1, 2); + STORE_512(0, 3); STORE_512(1, 3); + } + + for (; j < n16; j+= 16) { + DECLARE_RESULT_512(0, 0); + DECLARE_RESULT_512(0, 1); + DECLARE_RESULT_512(0, 2); + DECLARE_RESULT_512(0, 3); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + BROADCAST_LOAD_A_512(x, 1); + BROADCAST_LOAD_A_512(x, 2); + BROADCAST_LOAD_A_512(x, 3); + + LOAD_B_512(0, x); + + MATMUL_512(0, 0); + MATMUL_512(0, 1); + MATMUL_512(0, 2); + MATMUL_512(0, 3); + } + STORE_512(0, 0); + STORE_512(0, 1); + STORE_512(0, 2); + STORE_512(0, 3); + } + + for (; j < n8; j+= 8) { + DECLARE_RESULT_256(0, 0); + DECLARE_RESULT_256(0, 1); + DECLARE_RESULT_256(0, 2); + DECLARE_RESULT_256(0, 3); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_256(x, 0); + BROADCAST_LOAD_A_256(x, 1); + BROADCAST_LOAD_A_256(x, 2); + BROADCAST_LOAD_A_256(x, 3); + + LOAD_B_256(0, x); + + MATMUL_256(0, 0); + MATMUL_256(0, 1); + MATMUL_256(0, 2); + MATMUL_256(0, 3); + } + STORE_256(0, 0); + STORE_256(0, 1); + STORE_256(0, 2); + STORE_256(0, 3); + } + + for (; j < n4; j+= 4) { + DECLARE_RESULT_128(0, 0); + DECLARE_RESULT_128(0, 1); + DECLARE_RESULT_128(0, 2); + DECLARE_RESULT_128(0, 3); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_128(x, 0); + BROADCAST_LOAD_A_128(x, 1); + 
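+ /* one scalar of A is broadcast per row of the tile; a single 4-wide B vector then feeds the FMA accumulator of every row */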
BROADCAST_LOAD_A_128(x, 2); + BROADCAST_LOAD_A_128(x, 3); + + LOAD_B_128(0, x); + + MATMUL_128(0, 0); + MATMUL_128(0, 1); + MATMUL_128(0, 2); + MATMUL_128(0, 3); + } + STORE_128(0, 0); + STORE_128(0, 1); + STORE_128(0, 2); + STORE_128(0, 3); + } + + for (; j < n2; j+= 2) { + DECLARE_RESULT_SCALAR(0, 0); DECLARE_RESULT_SCALAR(1, 0); + DECLARE_RESULT_SCALAR(0, 1); DECLARE_RESULT_SCALAR(1, 1); + DECLARE_RESULT_SCALAR(0, 2); DECLARE_RESULT_SCALAR(1, 2); + DECLARE_RESULT_SCALAR(0, 3); DECLARE_RESULT_SCALAR(1, 3); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_SCALAR(x, 0); + BROADCAST_LOAD_A_SCALAR(x, 1); + BROADCAST_LOAD_A_SCALAR(x, 2); + BROADCAST_LOAD_A_SCALAR(x, 3); + + LOAD_B_SCALAR(0, x); LOAD_B_SCALAR(1, x); + + MATMUL_SCALAR(0, 0); MATMUL_SCALAR(1, 0); + MATMUL_SCALAR(0, 1); MATMUL_SCALAR(1, 1); + MATMUL_SCALAR(0, 2); MATMUL_SCALAR(1, 2); + MATMUL_SCALAR(0, 3); MATMUL_SCALAR(1, 3); + } + STORE_SCALAR(0, 0); STORE_SCALAR(1, 0); + STORE_SCALAR(0, 1); STORE_SCALAR(1, 1); + STORE_SCALAR(0, 2); STORE_SCALAR(1, 2); + STORE_SCALAR(0, 3); STORE_SCALAR(1, 3); + } + + for (; j < N; j++) { + DECLARE_RESULT_SCALAR(0, 0) + DECLARE_RESULT_SCALAR(0, 1) + DECLARE_RESULT_SCALAR(0, 2) + DECLARE_RESULT_SCALAR(0, 3) + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_SCALAR(0, 0); + BROADCAST_LOAD_A_SCALAR(0, 1); + BROADCAST_LOAD_A_SCALAR(0, 2); + BROADCAST_LOAD_A_SCALAR(0, 3); + + LOAD_B_SCALAR(0, 0); + + MATMUL_SCALAR(0, 0); + MATMUL_SCALAR(0, 1); + MATMUL_SCALAR(0, 2); + MATMUL_SCALAR(0, 3); + } + STORE_SCALAR(0, 0); + STORE_SCALAR(0, 1); + STORE_SCALAR(0, 2); + STORE_SCALAR(0, 3); + } + } + + for (; i < m2; i+=2) { + j = 0; + + for (; j < n64; j+= 64) { + DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); DECLARE_RESULT_512(2, 0); DECLARE_RESULT_512(3, 0); + DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1); DECLARE_RESULT_512(2, 1); DECLARE_RESULT_512(3, 1); + + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + BROADCAST_LOAD_A_512(x, 1); + + LOAD_B_512(0, x); LOAD_B_512(1, x); LOAD_B_512(2, x); LOAD_B_512(3, x); + + MATMUL_512(0, 0); MATMUL_512(1, 0); MATMUL_512(2, 0); MATMUL_512(3, 0); + MATMUL_512(0, 1); MATMUL_512(1, 1); MATMUL_512(2, 1); MATMUL_512(3, 1); + } + STORE_512(0, 0); STORE_512(1, 0); STORE_512(2, 0); STORE_512(3, 0); + STORE_512(0, 1); STORE_512(1, 1); STORE_512(2, 1); STORE_512(3, 1); + } + + for (; j < n32; j+= 32) { + DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); + DECLARE_RESULT_512(0, 1); DECLARE_RESULT_512(1, 1); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + BROADCAST_LOAD_A_512(x, 1); + + LOAD_B_512(0, x); LOAD_B_512(1, x); + + MATMUL_512(0, 0); MATMUL_512(1, 0); + MATMUL_512(0, 1); MATMUL_512(1, 1); + } + STORE_512(0, 0); STORE_512(1, 0); + STORE_512(0, 1); STORE_512(1, 1); + } + + + for (; j < n16; j+= 16) { + DECLARE_RESULT_512(0, 0); + DECLARE_RESULT_512(0, 1); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + BROADCAST_LOAD_A_512(x, 1); + + LOAD_B_512(0, x); + + MATMUL_512(0, 0); + MATMUL_512(0, 1); + } + STORE_512(0, 0); + STORE_512(0, 1); + } + + for (; j < n8; j+= 8) { + DECLARE_RESULT_256(0, 0); + DECLARE_RESULT_256(0, 1); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_256(x, 0); + BROADCAST_LOAD_A_256(x, 1); + + LOAD_B_256(0, x); + + MATMUL_256(0, 0); + MATMUL_256(0, 1); + } + STORE_256(0, 0); + STORE_256(0, 1); + } + + for (; j < n4; j+= 4) { + DECLARE_RESULT_128(0, 0); + DECLARE_RESULT_128(0, 1); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_128(x, 0); + BROADCAST_LOAD_A_128(x, 1); + + LOAD_B_128(0, x); + + 
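+ /* each MATMUL_128 is an _mm_fmadd_ps accumulate: resultNM += AvalM * BvalN */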
MATMUL_128(0, 0); + MATMUL_128(0, 1); + } + STORE_128(0, 0); + STORE_128(0, 1); + } + for (; j < n2; j+= 2) { + DECLARE_RESULT_SCALAR(0, 0); DECLARE_RESULT_SCALAR(1, 0); + DECLARE_RESULT_SCALAR(0, 1); DECLARE_RESULT_SCALAR(1, 1); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_SCALAR(x, 0); + BROADCAST_LOAD_A_SCALAR(x, 1); + + LOAD_B_SCALAR(0, x); LOAD_B_SCALAR(1, x); + + MATMUL_SCALAR(0, 0); MATMUL_SCALAR(1, 0); + MATMUL_SCALAR(0, 1); MATMUL_SCALAR(1, 1); + } + STORE_SCALAR(0, 0); STORE_SCALAR(1, 0); + STORE_SCALAR(0, 1); STORE_SCALAR(1, 1); + } + + for (; j < N; j++) { + DECLARE_RESULT_SCALAR(0, 0); + DECLARE_RESULT_SCALAR(0, 1); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_SCALAR(0, 0); + BROADCAST_LOAD_A_SCALAR(0, 1); + + LOAD_B_SCALAR(0, 0); + + MATMUL_SCALAR(0, 0); + MATMUL_SCALAR(0, 1); + } + STORE_SCALAR(0, 0); + STORE_SCALAR(0, 1); + } + } + + for (; i < M; i+=1) { + j = 0; + for (; j < n64; j+= 64) { + DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); DECLARE_RESULT_512(2, 0); DECLARE_RESULT_512(3, 0); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + LOAD_B_512(0, x); LOAD_B_512(1, x); LOAD_B_512(2, x); LOAD_B_512(3, x); + MATMUL_512(0, 0); MATMUL_512(1, 0); MATMUL_512(2, 0); MATMUL_512(3, 0); + } + STORE_512(0, 0); STORE_512(1, 0); STORE_512(2, 0); STORE_512(3, 0); + } + for (; j < n32; j+= 32) { + DECLARE_RESULT_512(0, 0); DECLARE_RESULT_512(1, 0); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + LOAD_B_512(0, x); LOAD_B_512(1, x); + MATMUL_512(0, 0); MATMUL_512(1, 0); + } + STORE_512(0, 0); STORE_512(1, 0); + } + + + for (; j < n16; j+= 16) { + DECLARE_RESULT_512(0, 0); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_512(x, 0); + + LOAD_B_512(0, x); + + MATMUL_512(0, 0); + } + STORE_512(0, 0); + } + + for (; j < n8; j+= 8) { + DECLARE_RESULT_256(0, 0); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_256(x, 0); + LOAD_B_256(0, x); + MATMUL_256(0, 0); + } + STORE_256(0, 0); + } + + for (; j < n4; j+= 4) { + DECLARE_RESULT_128(0, 0); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_128(x, 0); + LOAD_B_128(0, x); + MATMUL_128(0, 0); + } + STORE_128(0, 0); + } + + for (; j < n2; j+= 2) { + DECLARE_RESULT_SCALAR(0, 0); DECLARE_RESULT_SCALAR(1, 0); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_SCALAR(x, 0); + LOAD_B_SCALAR(0, 0); LOAD_B_SCALAR(1, 0); + MATMUL_SCALAR(0, 0); MATMUL_SCALAR(1, 0); + } + STORE_SCALAR(0, 0); STORE_SCALAR(1, 0); + } + + for (; j < N; j++) { + DECLARE_RESULT_SCALAR(0, 0); + + for (k = 0; k < K; k++) { + BROADCAST_LOAD_A_SCALAR(0, 0); + LOAD_B_SCALAR(0, 0); + MATMUL_SCALAR(0, 0); + } + STORE_SCALAR(0, 0); + } + } +} \ No newline at end of file diff --git a/kernel/x86_64/sgemm_ncopy_4_skylakex.c b/kernel/x86_64/sgemm_ncopy_4_skylakex.c new file mode 100644 index 000000000..6b2b0f5b1 --- /dev/null +++ b/kernel/x86_64/sgemm_ncopy_4_skylakex.c @@ -0,0 +1,206 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. 
Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include <stdio.h> +#include "common.h" + +#include <immintrin.h> + + +int CNAME(BLASLONG m, BLASLONG n, FLOAT * __restrict a, BLASLONG lda, FLOAT * __restrict b){ + BLASLONG i, j; + + FLOAT *a_offset, *a_offset1, *a_offset2, *a_offset3, *a_offset4; + FLOAT *b_offset; + FLOAT ctemp1, ctemp2, ctemp3, ctemp4; + FLOAT ctemp5, ctemp6, ctemp7, ctemp8; + FLOAT ctemp9, ctemp13; + + a_offset = a; + b_offset = b; + + j = (n >> 2); + if (j > 0){ + do{ + a_offset1 = a_offset; + a_offset2 = a_offset1 + lda; + a_offset3 = a_offset2 + lda; + a_offset4 = a_offset3 + lda; + a_offset += 4 * lda; + + i = (m >> 2); + if (i > 0){ + do{ + __m128 row0, row1, row2, row3; + + row0 = _mm_loadu_ps(a_offset1); + row1 = _mm_loadu_ps(a_offset2); + row2 = _mm_loadu_ps(a_offset3); + row3 = _mm_loadu_ps(a_offset4); + + _MM_TRANSPOSE4_PS(row0, row1, row2, row3); + + _mm_storeu_ps(b_offset + 0, row0); + _mm_storeu_ps(b_offset + 4, row1); + _mm_storeu_ps(b_offset + 8, row2); + _mm_storeu_ps(b_offset + 12, row3); + + a_offset1 += 4; + a_offset2 += 4; + a_offset3 += 4; + a_offset4 += 4; + + b_offset += 16; + i --; + }while(i > 0); + } + + i = (m & 3); + if (i > 0){ + do{ + ctemp1 = *(a_offset1 + 0); + ctemp5 = *(a_offset2 + 0); + ctemp9 = *(a_offset3 + 0); + ctemp13 = *(a_offset4 + 0); + + *(b_offset + 0) = ctemp1; + *(b_offset + 1) = ctemp5; + *(b_offset + 2) = ctemp9; + *(b_offset + 3) = ctemp13; + + a_offset1 ++; + a_offset2 ++; + a_offset3 ++; + a_offset4 ++; + + b_offset += 4; + i --; + }while(i > 0); + } + j--; + }while(j > 0); + } /* end of if(j > 0) */ + + if (n & 2){ + a_offset1 = a_offset; + a_offset2 = a_offset1 + lda; + a_offset += 2 * lda; + + i = (m >> 2); + if (i > 0){ + do{ + ctemp1 = *(a_offset1 + 0); + ctemp2 = *(a_offset1 + 1); + ctemp3 = *(a_offset1 + 2); + ctemp4 = *(a_offset1 + 3); + + ctemp5 = *(a_offset2 + 0); + ctemp6 = *(a_offset2 + 1); + ctemp7 = *(a_offset2 + 2); + ctemp8 = *(a_offset2 + 3); + + *(b_offset + 0) = ctemp1; + *(b_offset + 1) = ctemp5; + *(b_offset + 2) = ctemp2; + *(b_offset + 3) = ctemp6; + + *(b_offset + 4) = ctemp3; + *(b_offset + 5) = ctemp7; + *(b_offset + 6) = ctemp4; + *(b_offset + 7) =
ctemp8; + + a_offset1 += 4; + a_offset2 += 4; + b_offset += 8; + i --; + }while(i > 0); + } + + i = (m & 3); + if (i > 0){ + do{ + ctemp1 = *(a_offset1 + 0); + ctemp5 = *(a_offset2 + 0); + + *(b_offset + 0) = ctemp1; + *(b_offset + 1) = ctemp5; + + a_offset1 ++; + a_offset2 ++; + b_offset += 2; + i --; + }while(i > 0); + } + } /* end of if(j > 0) */ + + if (n & 1){ + a_offset1 = a_offset; + + i = (m >> 2); + if (i > 0){ + do{ + ctemp1 = *(a_offset1 + 0); + ctemp2 = *(a_offset1 + 1); + ctemp3 = *(a_offset1 + 2); + ctemp4 = *(a_offset1 + 3); + + *(b_offset + 0) = ctemp1; + *(b_offset + 1) = ctemp2; + *(b_offset + 2) = ctemp3; + *(b_offset + 3) = ctemp4; + + a_offset1 += 4; + b_offset += 4; + i --; + }while(i > 0); + } + + i = (m & 3); + if (i > 0){ + do{ + ctemp1 = *(a_offset1 + 0); + *(b_offset + 0) = ctemp1; + a_offset1 ++; + b_offset += 1; + i --; + }while(i > 0); + } + } /* end of if(j > 0) */ + + return 0; +} diff --git a/kernel/x86_64/sgemm_tcopy_16_skylakex.c b/kernel/x86_64/sgemm_tcopy_16_skylakex.c new file mode 100644 index 000000000..dbacc5081 --- /dev/null +++ b/kernel/x86_64/sgemm_tcopy_16_skylakex.c @@ -0,0 +1,387 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. 
*/ +/*********************************************************************/ + +#include <stdio.h> +#include "common.h" + +int CNAME(BLASLONG m, BLASLONG n, FLOAT * __restrict a, BLASLONG lda, FLOAT * __restrict b){ + + BLASLONG i, j; + + FLOAT *aoffset; + FLOAT *aoffset1, *aoffset2; + FLOAT *boffset; + + FLOAT ctemp01, ctemp02, ctemp03, ctemp04; + FLOAT ctemp05, ctemp06, ctemp07, ctemp08; + FLOAT ctemp09, ctemp10, ctemp11, ctemp12; + FLOAT ctemp13, ctemp14, ctemp15, ctemp16; + FLOAT ctemp17, ctemp18, ctemp19, ctemp20; + FLOAT ctemp21, ctemp22, ctemp23, ctemp24; + FLOAT ctemp25, ctemp26, ctemp27, ctemp28; + FLOAT ctemp29, ctemp30, ctemp31, ctemp32; + + aoffset = a; + boffset = b; + +#if 0 + fprintf(stderr, "m = %d n = %d\n", m, n); +#endif + + j = (n >> 4); + if (j > 0){ + do{ + aoffset1 = aoffset; + aoffset2 = aoffset + lda; + aoffset += 16; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + ctemp05 = *(aoffset1 + 4); + ctemp06 = *(aoffset1 + 5); + ctemp07 = *(aoffset1 + 6); + ctemp08 = *(aoffset1 + 7); + ctemp09 = *(aoffset1 + 8); + ctemp10 = *(aoffset1 + 9); + ctemp11 = *(aoffset1 + 10); + ctemp12 = *(aoffset1 + 11); + ctemp13 = *(aoffset1 + 12); + ctemp14 = *(aoffset1 + 13); + ctemp15 = *(aoffset1 + 14); + ctemp16 = *(aoffset1 + 15); + + ctemp17 = *(aoffset2 + 0); + ctemp18 = *(aoffset2 + 1); + ctemp19 = *(aoffset2 + 2); + ctemp20 = *(aoffset2 + 3); + ctemp21 = *(aoffset2 + 4); + ctemp22 = *(aoffset2 + 5); + ctemp23 = *(aoffset2 + 6); + ctemp24 = *(aoffset2 + 7); + ctemp25 = *(aoffset2 + 8); + ctemp26 = *(aoffset2 + 9); + ctemp27 = *(aoffset2 + 10); + ctemp28 = *(aoffset2 + 11); + ctemp29 = *(aoffset2 + 12); + ctemp30 = *(aoffset2 + 13); + ctemp31 = *(aoffset2 + 14); + ctemp32 = *(aoffset2 + 15); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + *(boffset + 2) = ctemp03; + *(boffset + 3) = ctemp04; + *(boffset + 4) = ctemp05; + *(boffset + 5) = ctemp06; + *(boffset + 6) = ctemp07; + *(boffset + 7) = ctemp08; + + *(boffset + 8) = ctemp09; + *(boffset + 9) = ctemp10; + *(boffset + 10) = ctemp11; + *(boffset + 11) = ctemp12; + *(boffset + 12) = ctemp13; + *(boffset + 13) = ctemp14; + *(boffset + 14) = ctemp15; + *(boffset + 15) = ctemp16; + + *(boffset + 16) = ctemp17; + *(boffset + 17) = ctemp18; + *(boffset + 18) = ctemp19; + *(boffset + 19) = ctemp20; + *(boffset + 20) = ctemp21; + *(boffset + 21) = ctemp22; + *(boffset + 22) = ctemp23; + *(boffset + 23) = ctemp24; + + *(boffset + 24) = ctemp25; + *(boffset + 25) = ctemp26; + *(boffset + 26) = ctemp27; + *(boffset + 27) = ctemp28; + *(boffset + 28) = ctemp29; + *(boffset + 29) = ctemp30; + *(boffset + 30) = ctemp31; + *(boffset + 31) = ctemp32; + + aoffset1 += 2 * lda; + aoffset2 += 2 * lda; + boffset += 32; + + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + ctemp05 = *(aoffset1 + 4); + ctemp06 = *(aoffset1 + 5); + ctemp07 = *(aoffset1 + 6); + ctemp08 = *(aoffset1 + 7); + ctemp09 = *(aoffset1 + 8); + ctemp10 = *(aoffset1 + 9); + ctemp11 = *(aoffset1 + 10); + ctemp12 = *(aoffset1 + 11); + ctemp13 = *(aoffset1 + 12); + ctemp14 = *(aoffset1 + 13); + ctemp15 = *(aoffset1 + 14); + ctemp16 = *(aoffset1 + 15); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + *(boffset + 2) = ctemp03; + *(boffset + 3) = ctemp04; + *(boffset + 4) = ctemp05; + *(boffset + 5) = ctemp06; + *(boffset + 6) = ctemp07; + *(boffset + 7) =
ctemp08; + + *(boffset + 8) = ctemp09; + *(boffset + 9) = ctemp10; + *(boffset + 10) = ctemp11; + *(boffset + 11) = ctemp12; + *(boffset + 12) = ctemp13; + *(boffset + 13) = ctemp14; + *(boffset + 14) = ctemp15; + *(boffset + 15) = ctemp16; + + boffset += 16; + } + + j--; + }while(j > 0); + } /* end of if(j > 0) */ + + if (n & 8){ + aoffset1 = aoffset; + aoffset2 = aoffset + lda; + aoffset += 8; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + ctemp05 = *(aoffset1 + 4); + ctemp06 = *(aoffset1 + 5); + ctemp07 = *(aoffset1 + 6); + ctemp08 = *(aoffset1 + 7); + + ctemp09 = *(aoffset2 + 0); + ctemp10 = *(aoffset2 + 1); + ctemp11 = *(aoffset2 + 2); + ctemp12 = *(aoffset2 + 3); + ctemp13 = *(aoffset2 + 4); + ctemp14 = *(aoffset2 + 5); + ctemp15 = *(aoffset2 + 6); + ctemp16 = *(aoffset2 + 7); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + *(boffset + 2) = ctemp03; + *(boffset + 3) = ctemp04; + *(boffset + 4) = ctemp05; + *(boffset + 5) = ctemp06; + *(boffset + 6) = ctemp07; + *(boffset + 7) = ctemp08; + + *(boffset + 8) = ctemp09; + *(boffset + 9) = ctemp10; + *(boffset + 10) = ctemp11; + *(boffset + 11) = ctemp12; + *(boffset + 12) = ctemp13; + *(boffset + 13) = ctemp14; + *(boffset + 14) = ctemp15; + *(boffset + 15) = ctemp16; + + aoffset1 += 2 * lda; + aoffset2 += 2 * lda; + boffset += 16; + + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + ctemp05 = *(aoffset1 + 4); + ctemp06 = *(aoffset1 + 5); + ctemp07 = *(aoffset1 + 6); + ctemp08 = *(aoffset1 + 7); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + *(boffset + 2) = ctemp03; + *(boffset + 3) = ctemp04; + *(boffset + 4) = ctemp05; + *(boffset + 5) = ctemp06; + *(boffset + 6) = ctemp07; + *(boffset + 7) = ctemp08; + + boffset += 8; + } + } + + if (n & 4){ + aoffset1 = aoffset; + aoffset2 = aoffset + lda; + aoffset += 4; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + + ctemp05 = *(aoffset2 + 0); + ctemp06 = *(aoffset2 + 1); + ctemp07 = *(aoffset2 + 2); + ctemp08 = *(aoffset2 + 3); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + *(boffset + 2) = ctemp03; + *(boffset + 3) = ctemp04; + *(boffset + 4) = ctemp05; + *(boffset + 5) = ctemp06; + *(boffset + 6) = ctemp07; + *(boffset + 7) = ctemp08; + + aoffset1 += 2 * lda; + aoffset2 += 2 * lda; + boffset += 8; + + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset1 + 2); + ctemp04 = *(aoffset1 + 3); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + *(boffset + 2) = ctemp03; + *(boffset + 3) = ctemp04; + + boffset += 4; + } + } + + if (n & 2){ + aoffset1 = aoffset; + aoffset2 = aoffset + lda; + aoffset += 2; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + ctemp03 = *(aoffset2 + 0); + ctemp04 = *(aoffset2 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + *(boffset + 2) = ctemp03; + *(boffset + 3) = ctemp04; + + aoffset1 += 2 * lda; + aoffset2 += 2 * lda; + boffset += 4; + + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset1 + 1); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + boffset += 2; + } + } + + if (n & 1){ + aoffset1 = aoffset; + aoffset2 
= aoffset + lda; + + i = (m >> 1); + if (i > 0){ + do{ + ctemp01 = *(aoffset1 + 0); + ctemp02 = *(aoffset2 + 0); + + *(boffset + 0) = ctemp01; + *(boffset + 1) = ctemp02; + + aoffset1 += 2 * lda; + aoffset2 += 2 * lda; + boffset += 2; + + i --; + }while(i > 0); + } + + if (m & 1){ + ctemp01 = *(aoffset1 + 0); + *(boffset + 0) = ctemp01; + // boffset += 1; + } + } + + return 0; +} diff --git a/kernel/x86_64/sgemv_n_4.c b/kernel/x86_64/sgemv_n_4.c index fd028964b..65305ac59 100644 --- a/kernel/x86_64/sgemv_n_4.c +++ b/kernel/x86_64/sgemv_n_4.c @@ -35,7 +35,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "sgemv_n_microk_nehalem-4.c" #elif defined(SANDYBRIDGE) #include "sgemv_n_microk_sandy-4.c" -#elif defined(HASWELL) || defined(ZEN) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #include "sgemv_n_microk_haswell-4.c" #endif diff --git a/kernel/x86_64/sgemv_n_microk_nehalem-4.c b/kernel/x86_64/sgemv_n_microk_nehalem-4.c index 36dfb14ee..11a3e943b 100644 --- a/kernel/x86_64/sgemv_n_microk_nehalem-4.c +++ b/kernel/x86_64/sgemv_n_microk_nehalem-4.c @@ -59,18 +59,18 @@ static void sgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLO "shufps $0, %%xmm6 , %%xmm6 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "xorps %%xmm4 , %%xmm4 \n\t" "xorps %%xmm5 , %%xmm5 \n\t" "movups (%3,%0,4), %%xmm7 \n\t" // 4 * y - ".align 2 \n\t" + ".p2align 1 \n\t" "movups (%4,%0,4), %%xmm8 \n\t" "movups (%5,%0,4), %%xmm9 \n\t" "movups (%6,%0,4), %%xmm10 \n\t" "movups (%7,%0,4), %%xmm11 \n\t" - ".align 2 \n\t" + ".p2align 1 \n\t" "mulps %%xmm12, %%xmm8 \n\t" "mulps %%xmm13, %%xmm9 \n\t" "mulps %%xmm14, %%xmm10 \n\t" @@ -84,7 +84,7 @@ static void sgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLO "movups (%5,%8,4), %%xmm9 \n\t" "movups (%6,%8,4), %%xmm10 \n\t" "movups (%7,%8,4), %%xmm11 \n\t" - ".align 2 \n\t" + ".p2align 1 \n\t" "mulps %%xmm0 , %%xmm8 \n\t" "mulps %%xmm1 , %%xmm9 \n\t" "mulps %%xmm2 , %%xmm10 \n\t" @@ -154,7 +154,7 @@ static void sgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT "movss (%8), %%xmm6 \n\t" // alpha "shufps $0, %%xmm6 , %%xmm6 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "xorps %%xmm4 , %%xmm4 \n\t" "movups (%3,%0,4), %%xmm7 \n\t" // 4 * y diff --git a/kernel/x86_64/sgemv_n_microk_sandy-4.c b/kernel/x86_64/sgemv_n_microk_sandy-4.c index f617ccd5a..b35daa35b 100644 --- a/kernel/x86_64/sgemv_n_microk_sandy-4.c +++ b/kernel/x86_64/sgemv_n_microk_sandy-4.c @@ -129,7 +129,7 @@ static void sgemv_kernel_4x8( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, BLASLO "je 4f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vxorps %%ymm4 , %%ymm4 , %%ymm4 \n\t" "vxorps %%ymm5 , %%ymm5 , %%ymm5 \n\t" @@ -299,7 +299,7 @@ static void sgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT "je 4f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vxorps %%ymm4 , %%ymm4 , %%ymm4 \n\t" "vxorps %%ymm5 , %%ymm5 , %%ymm5 \n\t" diff --git a/kernel/x86_64/sgemv_t_4.c b/kernel/x86_64/sgemv_t_4.c index f04d461f7..065e5b385 100644 --- a/kernel/x86_64/sgemv_t_4.c +++ b/kernel/x86_64/sgemv_t_4.c @@ -34,7 +34,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
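The new sgemm_ncopy_4_skylakex.c above replaces the generic scalar 4x4 copy with an SSE transpose: four rows of A are loaded unaligned, transposed in registers with _MM_TRANSPOSE4_PS, and stored contiguously into the packed buffer. A minimal standalone sketch of that idiom (self-contained demo, not OpenBLAS code; the array contents are illustrative):

#include <stdio.h>
#include <xmmintrin.h>   /* _mm_loadu_ps, _mm_storeu_ps, _MM_TRANSPOSE4_PS */

int main(void)
{
    /* 4x4 row-major input, values chosen so the transpose is easy to see */
    float a[16] = { 11, 12, 13, 14,
                    21, 22, 23, 24,
                    31, 32, 33, 34,
                    41, 42, 43, 44 };
    float b[16];

    __m128 r0 = _mm_loadu_ps(a + 0);   /* row 0 */
    __m128 r1 = _mm_loadu_ps(a + 4);   /* row 1 */
    __m128 r2 = _mm_loadu_ps(a + 8);   /* row 2 */
    __m128 r3 = _mm_loadu_ps(a + 12);  /* row 3 */

    _MM_TRANSPOSE4_PS(r0, r1, r2, r3); /* in-register 4x4 transpose */

    _mm_storeu_ps(b + 0,  r0);         /* column 0 of a, now contiguous */
    _mm_storeu_ps(b + 4,  r1);
    _mm_storeu_ps(b + 8,  r2);
    _mm_storeu_ps(b + 12, r3);

    for (int i = 0; i < 16; i++)
        printf("%g%c", b[i], (i % 4 == 3) ? '\n' : ' ');
    return 0;
}

One transpose per 4x4 tile replaces sixteen scalar load/store pairs, which is why the copy routine only falls back to scalars for the m & 3 and n & 3 remainders.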
#include "sgemv_t_microk_bulldozer-4.c" #elif defined(SANDYBRIDGE) #include "sgemv_t_microk_sandy-4.c" -#elif defined(HASWELL) || defined(ZEN) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #include "sgemv_t_microk_haswell-4.c" #endif diff --git a/kernel/x86_64/sgemv_t_microk_nehalem-4.c b/kernel/x86_64/sgemv_t_microk_nehalem-4.c index b3c07126c..8955c8431 100644 --- a/kernel/x86_64/sgemv_t_microk_nehalem-4.c +++ b/kernel/x86_64/sgemv_t_microk_nehalem-4.c @@ -40,7 +40,7 @@ static void sgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) "xorps %%xmm6 , %%xmm6 \n\t" "xorps %%xmm7 , %%xmm7 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "movups (%2,%0,4), %%xmm12 \n\t" // 4 * x diff --git a/kernel/x86_64/sgemv_t_microk_sandy-4.c b/kernel/x86_64/sgemv_t_microk_sandy-4.c index ca49fe7e6..34ffec944 100644 --- a/kernel/x86_64/sgemv_t_microk_sandy-4.c +++ b/kernel/x86_64/sgemv_t_microk_sandy-4.c @@ -85,7 +85,7 @@ static void sgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) "je 4f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "prefetcht0 384(%2,%0,4) \n\t" "vmovups (%2,%0,4), %%ymm12 \n\t" // 8 * x diff --git a/kernel/x86_64/sger_microk_sandy-2.c b/kernel/x86_64/sger_microk_sandy-2.c index 51c3bef3e..79180b991 100644 --- a/kernel/x86_64/sger_microk_sandy-2.c +++ b/kernel/x86_64/sger_microk_sandy-2.c @@ -53,7 +53,7 @@ static void sger_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "subq $16, %1 \n\t" "jz 2f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmulps %%xmm4, %%xmm0, %%xmm4 \n\t" diff --git a/kernel/x86_64/ssymv_L.c b/kernel/x86_64/ssymv_L.c index 199d8a517..73ae001ea 100644 --- a/kernel/x86_64/ssymv_L.c +++ b/kernel/x86_64/ssymv_L.c @@ -32,7 +32,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "ssymv_L_microk_bulldozer-2.c" #elif defined(NEHALEM) #include "ssymv_L_microk_nehalem-2.c" -#elif defined(HASWELL) || defined(ZEN) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #include "ssymv_L_microk_haswell-2.c" #elif defined(SANDYBRIDGE) #include "ssymv_L_microk_sandy-2.c" diff --git a/kernel/x86_64/ssymv_L_microk_haswell-2.c b/kernel/x86_64/ssymv_L_microk_haswell-2.c index 516524528..69db008b6 100644 --- a/kernel/x86_64/ssymv_L_microk_haswell-2.c +++ b/kernel/x86_64/ssymv_L_microk_haswell-2.c @@ -43,7 +43,7 @@ static void ssymv_kernel_4x4(BLASLONG from, BLASLONG to, FLOAT **a, FLOAT *x, FL "vbroadcastss 8(%8), %%xmm6 \n\t" // temp1[1] "vbroadcastss 12(%8), %%xmm7 \n\t" // temp1[1] - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%3,%0,4), %%xmm9 \n\t" // 2 * y diff --git a/kernel/x86_64/ssymv_L_microk_nehalem-2.c b/kernel/x86_64/ssymv_L_microk_nehalem-2.c index fb5337946..c0fe5d640 100644 --- a/kernel/x86_64/ssymv_L_microk_nehalem-2.c +++ b/kernel/x86_64/ssymv_L_microk_nehalem-2.c @@ -47,7 +47,7 @@ static void ssymv_kernel_4x4(BLASLONG from, BLASLONG to, FLOAT **a, FLOAT *x, F "shufps $0, %%xmm6, %%xmm6 \n\t" "shufps $0, %%xmm7, %%xmm7 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "movups (%2,%0,4), %%xmm8 \n\t" // 4 * x "movups (%3,%0,4), %%xmm9 \n\t" // 4 * y diff --git a/kernel/x86_64/ssymv_L_microk_sandy-2.c b/kernel/x86_64/ssymv_L_microk_sandy-2.c index 07293a964..093ca8073 100644 --- a/kernel/x86_64/ssymv_L_microk_sandy-2.c +++ b/kernel/x86_64/ssymv_L_microk_sandy-2.c @@ -45,7 +45,7 @@ static void ssymv_kernel_4x4(BLASLONG from, BLASLONG to, FLOAT **a, FLOAT *x, FL "vbroadcastss 8(%8), %%xmm6 \n\t" // temp1[1] "vbroadcastss 12(%8), %%xmm7 \n\t" // temp1[1] - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%3,%0,4), %%xmm9 \n\t" // 2 * y @@ -143,7 +143,7 @@ static void ssymv_kernel_4x4(BLASLONG from, BLASLONG to, FLOAT **a, FLOAT *x, FL "vbroadcastss 8(%8), %%ymm6 \n\t" // temp1[1] "vbroadcastss 12(%8), %%ymm7 \n\t" // temp1[1] - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%3,%0,4), %%ymm9 \n\t" // 2 * y diff --git a/kernel/x86_64/ssymv_U.c b/kernel/x86_64/ssymv_U.c index 691a071f7..f37c251a1 100644 --- a/kernel/x86_64/ssymv_U.c +++ b/kernel/x86_64/ssymv_U.c @@ -33,7 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "ssymv_U_microk_bulldozer-2.c" #elif defined(NEHALEM) #include "ssymv_U_microk_nehalem-2.c" -#elif defined(HASWELL) || defined(ZEN) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #include "ssymv_U_microk_haswell-2.c" #elif defined(SANDYBRIDGE) #include "ssymv_U_microk_sandy-2.c" diff --git a/kernel/x86_64/ssymv_U_microk_haswell-2.c b/kernel/x86_64/ssymv_U_microk_haswell-2.c index 42f801c96..a32e59b44 100644 --- a/kernel/x86_64/ssymv_U_microk_haswell-2.c +++ b/kernel/x86_64/ssymv_U_microk_haswell-2.c @@ -46,7 +46,7 @@ static void ssymv_kernel_4x4(BLASLONG n, FLOAT *a0, FLOAT *a1, FLOAT *a2, FLOAT "vbroadcastss 12(%8), %%ymm7 \n\t" // temp1[1] "xorq %0,%0 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%3,%0,4), %%ymm9 \n\t" // 2 * y diff --git a/kernel/x86_64/ssymv_U_microk_nehalem-2.c b/kernel/x86_64/ssymv_U_microk_nehalem-2.c index 2fb8f4494..b8e6ee732 100644 --- a/kernel/x86_64/ssymv_U_microk_nehalem-2.c +++ b/kernel/x86_64/ssymv_U_microk_nehalem-2.c @@ -50,7 +50,7 @@ static void ssymv_kernel_4x4(BLASLONG n, FLOAT *a0, FLOAT *a1, FLOAT *a2, FLOAT "xorq %0,%0 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "movups (%2,%0,4), %%xmm8 \n\t" // 4 * x "movups (%3,%0,4), %%xmm9 \n\t" // 4 * y diff --git a/kernel/x86_64/ssymv_U_microk_sandy-2.c b/kernel/x86_64/ssymv_U_microk_sandy-2.c index 4b699af50..e8650650c 100644 --- a/kernel/x86_64/ssymv_U_microk_sandy-2.c +++ b/kernel/x86_64/ssymv_U_microk_sandy-2.c @@ -46,7 +46,7 @@ static void ssymv_kernel_4x4(BLASLONG n, FLOAT *a0, FLOAT *a1, FLOAT *a2, FLOAT "vbroadcastss 12(%8), %%ymm7 \n\t" // temp1[1] "xorq %0,%0 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%3,%0,4), %%ymm9 \n\t" // 2 * y diff --git a/kernel/x86_64/symv_L_sse.S b/kernel/x86_64/symv_L_sse.S index 8cae3fc1b..8a5c44c9b 100644 --- a/kernel/x86_64/symv_L_sse.S +++ b/kernel/x86_64/symv_L_sse.S @@ -57,7 +57,7 @@ #define PREFETCHSIZE (16 * 12) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #define PREFETCH prefetcht0 #define PREFETCHW prefetcht0 #define PREFETCHSIZE (16 * 12) diff --git a/kernel/x86_64/symv_L_sse2.S b/kernel/x86_64/symv_L_sse2.S index d7091624d..0c40a3435 100644 --- a/kernel/x86_64/symv_L_sse2.S +++ b/kernel/x86_64/symv_L_sse2.S @@ -57,7 +57,7 @@ #define PREFETCHSIZE (16 * 12) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #define PREFETCH prefetcht0 #define PREFETCHW prefetcht0 #define PREFETCHSIZE (16 * 12) diff --git a/kernel/x86_64/symv_U_sse.S b/kernel/x86_64/symv_U_sse.S index 3549b9863..7a2eeace5 100644 --- a/kernel/x86_64/symv_U_sse.S +++ b/kernel/x86_64/symv_U_sse.S @@ -57,7 +57,7 @@ #define PREFETCHSIZE (16 * 12) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #define PREFETCH prefetcht0 #define PREFETCHW prefetcht0 #define PREFETCHSIZE (16 * 12) diff --git a/kernel/x86_64/symv_U_sse2.S b/kernel/x86_64/symv_U_sse2.S index 882b035a9..0408b577c 100644 --- a/kernel/x86_64/symv_U_sse2.S +++ b/kernel/x86_64/symv_U_sse2.S @@ -57,7 +57,7 @@ #define PREFETCHSIZE (16 * 12) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) +#if 
defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #define PREFETCH prefetcht0 #define PREFETCHW prefetcht0 #define PREFETCHSIZE (16 * 24) diff --git a/kernel/x86_64/zaxpy.c b/kernel/x86_64/zaxpy.c index 8cb1d532f..53866cf95 100644 --- a/kernel/x86_64/zaxpy.c +++ b/kernel/x86_64/zaxpy.c @@ -33,7 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "zaxpy_microk_bulldozer-2.c" #elif defined(PILEDRIVER) || defined(STEAMROLLER) || defined(EXCAVATOR) #include "zaxpy_microk_steamroller-2.c" -#elif defined(HASWELL) || defined(ZEN) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #include "zaxpy_microk_haswell-2.c" #elif defined(SANDYBRIDGE) #include "zaxpy_microk_sandy-2.c" diff --git a/kernel/x86_64/zaxpy_microk_haswell-2.c b/kernel/x86_64/zaxpy_microk_haswell-2.c index e7e559502..30e8b1955 100644 --- a/kernel/x86_64/zaxpy_microk_haswell-2.c +++ b/kernel/x86_64/zaxpy_microk_haswell-2.c @@ -50,11 +50,11 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "vmulpd (%5), %%ymm0 , %%ymm0 \n\t" #endif - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%2,%0,8), %%ymm5 \n\t" // 2 complex values from x - ".align 2 \n\t" + ".p2align 1 \n\t" "vmovups 32(%2,%0,8), %%ymm7 \n\t" // 2 complex values from x "vmovups 64(%2,%0,8), %%ymm9 \n\t" // 2 complex values from x "vmovups 96(%2,%0,8), %%ymm11 \n\t" // 2 complex values from x @@ -70,7 +70,7 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "vpermilpd $0x5 , %%ymm11, %%ymm10 \n\t" // exchange real and imag part "vfmadd213pd (%3,%0,8), %%ymm0 , %%ymm5 \n\t" - ".align 2 \n\t" + ".p2align 1 \n\t" "vfmadd213pd 32(%3,%0,8), %%ymm0 , %%ymm7 \n\t" "vfmadd213pd 64(%3,%0,8), %%ymm0 , %%ymm9 \n\t" "vfmadd213pd 96(%3,%0,8), %%ymm0 , %%ymm11 \n\t" @@ -96,7 +96,7 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "vfmadd231pd %%ymm1 , %%ymm10, %%ymm15 \n\t" "vmovups %%ymm5 , (%3,%0,8) \n\t" - ".align 2 \n\t" + ".p2align 1 \n\t" "vmovups %%ymm7 , 32(%3,%0,8) \n\t" "vmovups %%ymm9 , 64(%3,%0,8) \n\t" "vmovups %%ymm11, 96(%3,%0,8) \n\t" diff --git a/kernel/x86_64/zaxpy_microk_sandy-2.c b/kernel/x86_64/zaxpy_microk_sandy-2.c index 8b0a7ed05..233af143a 100644 --- a/kernel/x86_64/zaxpy_microk_sandy-2.c +++ b/kernel/x86_64/zaxpy_microk_sandy-2.c @@ -54,11 +54,11 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "vmulpd (%5), %%ymm0 , %%ymm0 \n\t" #endif - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%2,%0,8), %%ymm5 \n\t" // 4 complex values from x - ".align 2 \n\t" + ".p2align 1 \n\t" "vmovups 32(%2,%0,8), %%ymm7 \n\t" // 4 complex values from x "vmovups 64(%2,%0,8), %%ymm9 \n\t" // 4 complex values from x "vmovups 96(%2,%0,8), %%ymm11 \n\t" // 4 complex values from x @@ -89,7 +89,7 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "vaddpd %%ymm10, %%ymm11, %%ymm11 \n\t" "vmovups %%ymm5 , (%3,%0,8) \n\t" - ".align 2 \n\t" + ".p2align 1 \n\t" "vmovups %%ymm7 , 32(%3,%0,8) \n\t" "vmovups %%ymm9 , 64(%3,%0,8) \n\t" "vmovups %%ymm11, 96(%3,%0,8) \n\t" @@ -127,13 +127,13 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "vmulpd (%5), %%ymm0 , %%ymm0 \n\t" #endif - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "prefetcht0 512(%2,%0,8) \n\t" "prefetcht0 576(%2,%0,8) \n\t" "vmovups (%2,%0,8), %%ymm5 \n\t" // 4 complex values from x - ".align 2 \n\t" + ".p2align 1 \n\t" "vmovups 32(%2,%0,8), %%ymm7 \n\t" // 4 
complex values from x "vmovups 64(%2,%0,8), %%ymm9 \n\t" // 4 complex values from x "vmovups 96(%2,%0,8), %%ymm11 \n\t" // 4 complex values from x @@ -166,7 +166,7 @@ static void zaxpy_kernel_4( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha) "vaddpd %%ymm10, %%ymm11, %%ymm11 \n\t" "vmovups %%ymm5 , (%3,%0,8) \n\t" - ".align 2 \n\t" + ".p2align 1 \n\t" "vmovups %%ymm7 , 32(%3,%0,8) \n\t" "vmovups %%ymm9 , 64(%3,%0,8) \n\t" "vmovups %%ymm11, 96(%3,%0,8) \n\t" diff --git a/kernel/x86_64/zdot.c b/kernel/x86_64/zdot.c index d11c76647..ef12569c8 100644 --- a/kernel/x86_64/zdot.c +++ b/kernel/x86_64/zdot.c @@ -33,7 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "zdot_microk_bulldozer-2.c" #elif defined(STEAMROLLER) || defined(PILEDRIVER) || defined(EXCAVATOR) #include "zdot_microk_steamroller-2.c" -#elif defined(HASWELL) || defined(ZEN) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #include "zdot_microk_haswell-2.c" #elif defined(SANDYBRIDGE) #include "zdot_microk_sandy-2.c" diff --git a/kernel/x86_64/zdot_microk_haswell-2.c b/kernel/x86_64/zdot_microk_haswell-2.c index 810cb4439..11056a3c1 100644 --- a/kernel/x86_64/zdot_microk_haswell-2.c +++ b/kernel/x86_64/zdot_microk_haswell-2.c @@ -50,7 +50,7 @@ static void zdot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) "vxorpd %%ymm6, %%ymm6, %%ymm6 \n\t" "vxorpd %%ymm7, %%ymm7, %%ymm7 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%2,%0,8), %%ymm8 \n\t" // 2 * x "vmovups 32(%2,%0,8), %%ymm9 \n\t" // 2 * x @@ -131,7 +131,7 @@ static void zdot_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot) "vxorpd %%ymm6, %%ymm6, %%ymm6 \n\t" "vxorpd %%ymm7, %%ymm7, %%ymm7 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "prefetcht0 512(%2,%0,8) \n\t" "vmovups (%2,%0,8), %%ymm8 \n\t" // 2 * x diff --git a/kernel/x86_64/zdot_microk_sandy-2.c b/kernel/x86_64/zdot_microk_sandy-2.c index fd06612e6..87c5b0340 100644 --- a/kernel/x86_64/zdot_microk_sandy-2.c +++ b/kernel/x86_64/zdot_microk_sandy-2.c @@ -49,7 +49,7 @@ if ( n < 1280 ) "vxorpd %%ymm6, %%ymm6, %%ymm6 \n\t" "vxorpd %%ymm7, %%ymm7, %%ymm7 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "vmovups (%2,%0,8), %%ymm8 \n\t" // 2 * x "vmovups 32(%2,%0,8), %%ymm9 \n\t" // 2 * x @@ -137,7 +137,7 @@ if ( n < 1280 ) "vxorpd %%ymm6, %%ymm6, %%ymm6 \n\t" "vxorpd %%ymm7, %%ymm7, %%ymm7 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "prefetcht0 512(%2,%0,8) \n\t" "vmovups (%2,%0,8), %%ymm8 \n\t" // 2 * x diff --git a/kernel/x86_64/zgemv_n_4.c b/kernel/x86_64/zgemv_n_4.c index f6f88155c..0fedc496b 100644 --- a/kernel/x86_64/zgemv_n_4.c +++ b/kernel/x86_64/zgemv_n_4.c @@ -30,7 +30,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
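The zaxpy Haswell microkernel a few hunks above computes y += alpha*x for double-complex data with a permute/FMA pair: vpermilpd $0x5 swaps the real and imaginary lanes of x, one FMA accumulates alpha_r*x, and a second FMA accumulates alpha_i against the swapped lanes with the sign handled by a premultiplied splat. An intrinsics rendering of that arithmetic for two complex elements (a sketch assuming AVX2/FMA3 hardware, not the kernel itself; compile with -mfma):

#include <stdio.h>
#include <immintrin.h>

/* y += (ar + i*ai) * x for two double-complex values packed (re,im,re,im). */
static void zaxpy_2(const double *x, double *y, double ar, double ai)
{
    __m256d alpha_r = _mm256_set1_pd(ar);               /* ( ar,  ar,  ar,  ar) */
    __m256d alpha_i = _mm256_setr_pd(-ai, ai, -ai, ai); /* sign folded in here  */
    __m256d vx   = _mm256_loadu_pd(x);                  /* (xr0, xi0, xr1, xi1) */
    __m256d vxsw = _mm256_permute_pd(vx, 0x5);          /* (xi0, xr0, xi1, xr1) */
    __m256d vy   = _mm256_loadu_pd(y);
    vy = _mm256_fmadd_pd(alpha_r, vx,   vy); /* += ( ar*xr,  ar*xi, ...) */
    vy = _mm256_fmadd_pd(alpha_i, vxsw, vy); /* += (-ai*xi,  ai*xr, ...) */
    _mm256_storeu_pd(y, vy);
}

int main(void)
{
    double x[4] = { 1, 2, 3, 4 };   /* 1+2i, 3+4i */
    double y[4] = { 0, 0, 0, 0 };
    zaxpy_2(x, y, 0.0, 1.0);        /* multiply by i: expect -2+1i, -4+3i */
    printf("%g %g %g %g\n", y[0], y[1], y[2], y[3]);
    return 0;
}

The conjugated variants in the real kernel only flip which lane of the alpha_i splat carries the minus sign, which is why the asm loads its sign mask from memory under #if !defined(CONJ).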
#include "common.h" -#if defined(HASWELL) || defined(ZEN) +#if defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #include "zgemv_n_microk_haswell-4.c" #elif defined(SANDYBRIDGE) #include "zgemv_n_microk_sandy-4.c" diff --git a/kernel/x86_64/zgemv_n_microk_sandy-4.c b/kernel/x86_64/zgemv_n_microk_sandy-4.c index 245f45d05..696ed0eee 100644 --- a/kernel/x86_64/zgemv_n_microk_sandy-4.c +++ b/kernel/x86_64/zgemv_n_microk_sandy-4.c @@ -47,7 +47,7 @@ static void zgemv_kernel_4x4( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) "vbroadcastsd 56(%2), %%ymm7 \n\t" // imag part x3 - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" //"prefetcht0 256(%4,%0,8) \n\t" @@ -164,7 +164,7 @@ static void zgemv_kernel_4x2( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y) "vbroadcastsd 16(%2), %%ymm2 \n\t" // real part x1 "vbroadcastsd 24(%2), %%ymm3 \n\t" // imag part x1 - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" // "prefetcht0 256(%4,%0,8) \n\t" @@ -253,7 +253,7 @@ static void zgemv_kernel_4x1( BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y) "vbroadcastsd (%2), %%ymm0 \n\t" // real part x0 "vbroadcastsd 8(%2), %%ymm1 \n\t" // imag part x0 - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" // "prefetcht0 256(%4,%0,8) \n\t" @@ -355,7 +355,7 @@ static void add_y(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_dest,FLOAT a "vbroadcastsd (%4), %%ymm0 \n\t" // alpha_r "vbroadcastsd (%5), %%ymm1 \n\t" // alpha_i - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" // "prefetcht0 192(%2,%0,8) \n\t" "vmovups (%2,%0,8), %%ymm8 \n\t" // 2 complex values from src diff --git a/kernel/x86_64/zgemv_t_4.c b/kernel/x86_64/zgemv_t_4.c index 3e4b7d5df..2ab7a671b 100644 --- a/kernel/x86_64/zgemv_t_4.c +++ b/kernel/x86_64/zgemv_t_4.c @@ -31,7 +31,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #if defined(BULLDOZER) || defined(PILEDRIVER) || defined(STEAMROLLER) || defined(EXCAVATOR) #include "zgemv_t_microk_bulldozer-4.c" -#elif defined(HASWELL) || defined(ZEN) +#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #include "zgemv_t_microk_haswell-4.c" #endif diff --git a/kernel/x86_64/zscal.c b/kernel/x86_64/zscal.c index aa5d8fac0..2a6d0e4c7 100644 --- a/kernel/x86_64/zscal.c +++ b/kernel/x86_64/zscal.c @@ -28,7 +28,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "common.h" -#if defined(HASWELL) || defined(ZEN) +#if defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #include "zscal_microk_haswell-2.c" #elif defined(BULLDOZER) || defined(PILEDRIVER) #include "zscal_microk_bulldozer-2.c" diff --git a/kernel/x86_64/zscal_microk_haswell-2.c b/kernel/x86_64/zscal_microk_haswell-2.c index b1a34c1dd..d9253c1ed 100644 --- a/kernel/x86_64/zscal_microk_haswell-2.c +++ b/kernel/x86_64/zscal_microk_haswell-2.c @@ -54,7 +54,7 @@ static void zscal_kernel_8( BLASLONG n, FLOAT *alpha, FLOAT *x) "subq $8 , %0 \n\t" "jz 2f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" //"prefetcht0 128(%1) \n\t" @@ -156,7 +156,7 @@ static void zscal_kernel_8_zero_r( BLASLONG n, FLOAT *alpha, FLOAT *x) "subq $8 , %0 \n\t" "jz 2f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" //"prefetcht0 128(%1) \n\t" @@ -245,7 +245,7 @@ static void zscal_kernel_8_zero_i( BLASLONG n, FLOAT *alpha, FLOAT *x) "subq $8 , %0 \n\t" "jz 2f \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" //"prefetcht0 128(%1) \n\t" @@ -312,7 +312,7 @@ static void zscal_kernel_8_zero( BLASLONG n, FLOAT *alpha, FLOAT *x) "addq $128, %1 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" //"prefetcht0 128(%1) \n\t" diff --git a/kernel/x86_64/zsymv_L_sse.S b/kernel/x86_64/zsymv_L_sse.S index dd95eea17..e44bd7550 100644 --- a/kernel/x86_64/zsymv_L_sse.S +++ b/kernel/x86_64/zsymv_L_sse.S @@ -57,7 +57,7 @@ #define PREFETCHSIZE (16 * 24) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #define PREFETCH prefetcht0 #define PREFETCHW prefetcht0 #define PREFETCHSIZE (16 * 24) diff --git a/kernel/x86_64/zsymv_L_sse2.S b/kernel/x86_64/zsymv_L_sse2.S index 75124cf3e..e9f330c36 100644 --- a/kernel/x86_64/zsymv_L_sse2.S +++ b/kernel/x86_64/zsymv_L_sse2.S @@ -57,7 +57,7 @@ #define PREFETCHSIZE (16 * 24) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #define PREFETCH prefetcht0 #define PREFETCHW prefetcht0 #define PREFETCHSIZE (16 * 24) diff --git a/kernel/x86_64/zsymv_U_sse.S b/kernel/x86_64/zsymv_U_sse.S index db1a4ff5f..9f0dead18 100644 --- a/kernel/x86_64/zsymv_U_sse.S +++ b/kernel/x86_64/zsymv_U_sse.S @@ -57,7 +57,7 @@ #define PREFETCHSIZE (16 * 24) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #define PREFETCH prefetcht0 #define PREFETCHW prefetcht0 #define PREFETCHSIZE (16 * 24) diff --git a/kernel/x86_64/zsymv_U_sse2.S b/kernel/x86_64/zsymv_U_sse2.S index 599765a6d..b6106a37d 100644 --- a/kernel/x86_64/zsymv_U_sse2.S +++ b/kernel/x86_64/zsymv_U_sse2.S @@ -57,7 +57,7 @@ #define PREFETCHSIZE (16 * 24) #endif -#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) +#if defined(NEHALEM) || defined(SANDYBRIDGE) || defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX) #define PREFETCH prefetcht0 #define PREFETCHW prefetcht0 #define PREFETCHSIZE (16 * 24) diff --git a/kernel/zarch/KERNEL.Z13 b/kernel/zarch/KERNEL.Z13 index 8e5c3706a..add628bfe 100644 --- a/kernel/zarch/KERNEL.Z13 +++ b/kernel/zarch/KERNEL.Z13 @@ -49,6 +49,7 @@ SDOTKERNEL = ../arm/dot.c DDOTKERNEL = ddot.c CDOTKERNEL = ../arm/zdot.c ZDOTKERNEL = zdot.c +DSDOTKERNEL = 
../generic/dot.c SNRM2KERNEL = ../arm/nrm2.c DNRM2KERNEL = ../arm/nrm2.c diff --git a/kernel/zarch/KERNEL.ZARCH_GENERIC b/kernel/zarch/KERNEL.ZARCH_GENERIC index d80f84e71..848ee9b54 100644 --- a/kernel/zarch/KERNEL.ZARCH_GENERIC +++ b/kernel/zarch/KERNEL.ZARCH_GENERIC @@ -49,6 +49,7 @@ SDOTKERNEL = ../arm/dot.c DDOTKERNEL = ../arm/dot.c CDOTKERNEL = ../arm/zdot.c ZDOTKERNEL = ../arm/zdot.c +DSDOTKERNEL = ../generic/dot.c SNRM2KERNEL = ../arm/nrm2.c DNRM2KERNEL = ../arm/nrm2.c diff --git a/lapack-netlib/LAPACKE/include/lapacke_config.h b/lapack-netlib/LAPACKE/include/lapacke_config.h index 1e2509bf0..8262c3488 100644 --- a/lapack-netlib/LAPACKE/include/lapacke_config.h +++ b/lapack-netlib/LAPACKE/include/lapacke_config.h @@ -34,6 +34,13 @@ #ifndef _LAPACKE_CONFIG_H_ #define _LAPACKE_CONFIG_H_ +// For Android prior to API 21 (no <complex> include) +#if defined(__ANDROID__) +#if __ANDROID_API__ < 21 +#define LAPACK_COMPLEX_STRUCTURE +#endif +#endif + #ifdef __cplusplus #if defined(LAPACK_COMPLEX_CPP) #include <complex> diff --git a/lapack-netlib/LAPACKE/src/Makefile b/lapack-netlib/LAPACKE/src/Makefile index 44884d4a5..7672f9f73 100644 --- a/lapack-netlib/LAPACKE/src/Makefile +++ b/lapack-netlib/LAPACKE/src/Makefile @@ -2454,6 +2454,8 @@ endif all: ../../$(LAPACKELIB) +.PHONY: ../../$(LAPACKELIB) + ../../$(LAPACKELIB): $(OBJ_A) $(OBJ_B) $(DEPRECATED) $(EXTENDED) $(MATGEN) $(ARCH) $(ARCHFLAGS) $@ $(OBJ_A) $(ARCH) $(ARCHFLAGS) $@ $(OBJ_B) diff --git a/lapack-netlib/LAPACKE/src/lapacke_chetrf_aa_work.c b/lapack-netlib/LAPACKE/src/lapacke_chetrf_aa_work.c index b4a7595d8..e4d538779 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_chetrf_aa_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_chetrf_aa_work.c @@ -41,7 +41,7 @@ lapack_int LAPACKE_chetrf_aa_work( int matrix_layout, char uplo, lapack_int n, lapack_int info = 0; if( matrix_layout == LAPACK_COL_MAJOR ) { /* Call LAPACK function and adjust info */ - LAPACK_chetrf( &uplo, &n, a, &lda, ipiv, work, &lwork, &info ); + LAPACK_chetrf_aa( &uplo, &n, a, &lda, ipiv, work, &lwork, &info ); if( info < 0 ) { info = info - 1; } @@ -56,7 +56,7 @@ lapack_int LAPACKE_chetrf_aa_work( int matrix_layout, char uplo, lapack_int n, } /* Query optimal working array(s) size if requested */ if( lwork == -1 ) { - LAPACK_chetrf( &uplo, &n, a, &lda_t, ipiv, work, &lwork, &info ); + LAPACK_chetrf_aa( &uplo, &n, a, &lda_t, ipiv, work, &lwork, &info ); return (info < 0) ?
(info - 1) : info; } /* Allocate memory for temporary array(s) */ @@ -69,7 +69,7 @@ lapack_int LAPACKE_chetrf_aa_work( int matrix_layout, char uplo, lapack_int n, /* Transpose input matrices */ LAPACKE_che_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ - LAPACK_chetrf( &uplo, &n, a_t, &lda_t, ipiv, work, &lwork, &info ); + LAPACK_chetrf_aa( &uplo, &n, a_t, &lda_t, ipiv, work, &lwork, &info ); if( info < 0 ) { info = info - 1; } diff --git a/lapack-netlib/LAPACKE/src/lapacke_clacgv.c b/lapack-netlib/LAPACKE/src/lapacke_clacgv.c index 0014906ed..9a77c8ec0 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_clacgv.c +++ b/lapack-netlib/LAPACKE/src/lapacke_clacgv.c @@ -39,7 +39,7 @@ lapack_int LAPACKE_clacgv( lapack_int n, lapack_complex_float* x, #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - if( LAPACKE_c_nancheck( 1+(n-1)*ABS(incx), x, incx ) ) { + if( LAPACKE_c_nancheck( n, x, incx ) ) { return -2; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_clarfb.c b/lapack-netlib/LAPACKE/src/lapacke_clarfb.c index 18e24509d..3aeb0d7e4 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_clarfb.c +++ b/lapack-netlib/LAPACKE/src/lapacke_clarfb.c @@ -51,16 +51,21 @@ lapack_int LAPACKE_clarfb( int matrix_layout, char side, char trans, char direct #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - ncols_v = LAPACKE_lsame( storev, 'c' ) ? k : - ( ( LAPACKE_lsame( storev, 'r' ) && - LAPACKE_lsame( side, 'l' ) ) ? m : - ( ( LAPACKE_lsame( storev, 'r' ) && - LAPACKE_lsame( side, 'r' ) ) ? n : 1) ); - nrows_v = ( LAPACKE_lsame( storev, 'c' ) && - LAPACKE_lsame( side, 'l' ) ) ? m : - ( ( LAPACKE_lsame( storev, 'c' ) && - LAPACKE_lsame( side, 'r' ) ) ? n : - ( LAPACKE_lsame( storev, 'r' ) ? k : 1) ); + lapack_int lrv, lcv; /* row, column stride */ + if( matrix_layout == LAPACK_COL_MAJOR ) { + lrv = 1; + lcv = ldv; + } else { + lrv = ldv; + lcv = 1; + } + ncols_v = LAPACKE_lsame( storev, 'c' ) ? k : + ( ( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( side, 'l' ) ) ? m : + ( ( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( side, 'r' ) ) ? n : 1) ); + + nrows_v = ( LAPACKE_lsame( storev, 'c' ) && LAPACKE_lsame( side, 'l' ) ) ? m : + ( ( LAPACKE_lsame( storev, 'c' ) && LAPACKE_lsame( side, 'r' ) ) ? n : + ( LAPACKE_lsame( storev, 'r' ) ? 
k : 1) ); if( LAPACKE_cge_nancheck( matrix_layout, m, n, c, ldc ) ) { return -13; } @@ -70,8 +75,8 @@ lapack_int LAPACKE_clarfb( int matrix_layout, char side, char trans, char direct if( LAPACKE_lsame( storev, 'c' ) && LAPACKE_lsame( direct, 'f' ) ) { if( LAPACKE_ctr_nancheck( matrix_layout, 'l', 'u', k, v, ldv ) ) return -9; - if( LAPACKE_cge_nancheck( matrix_layout, nrows_v-k, ncols_v, &v[k*ldv], - ldv ) ) + if( LAPACKE_cge_nancheck( matrix_layout, nrows_v-k, ncols_v, + &v[k*lrv], ldv ) ) return -9; } else if( LAPACKE_lsame( storev, 'c' ) && LAPACKE_lsame( direct, 'b' ) ) { if( k > nrows_v ) { @@ -79,23 +84,23 @@ lapack_int LAPACKE_clarfb( int matrix_layout, char side, char trans, char direct return -8; } if( LAPACKE_ctr_nancheck( matrix_layout, 'u', 'u', k, - &v[(nrows_v-k)*ldv], ldv ) ) + &v[(nrows_v-k)*lrv], ldv ) ) return -9; if( LAPACKE_cge_nancheck( matrix_layout, nrows_v-k, ncols_v, v, ldv ) ) return -9; } else if( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( direct, 'f' ) ) { if( LAPACKE_ctr_nancheck( matrix_layout, 'u', 'u', k, v, ldv ) ) return -9; - if( LAPACKE_cge_nancheck( matrix_layout, nrows_v, ncols_v-k, &v[k], - ldv ) ) + if( LAPACKE_cge_nancheck( matrix_layout, nrows_v, ncols_v-k, + &v[k*lrv], ldv ) ) return -9; - } else if( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( direct, 'f' ) ) { + } else if( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( direct, 'b' ) ) { if( k > ncols_v ) { LAPACKE_xerbla( "LAPACKE_clarfb", -8 ); return -8; } - if( LAPACKE_ctr_nancheck( matrix_layout, 'l', 'u', k, &v[ncols_v-k], - ldv ) ) + if( LAPACKE_ctr_nancheck( matrix_layout, 'l', 'u', k, + &v[(ncols_v-k)*lcv], ldv ) ) return -9; if( LAPACKE_cge_nancheck( matrix_layout, nrows_v, ncols_v-k, v, ldv ) ) return -9; diff --git a/lapack-netlib/LAPACKE/src/lapacke_clarfg.c b/lapack-netlib/LAPACKE/src/lapacke_clarfg.c index 0381a42bc..9e852a406 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_clarfg.c +++ b/lapack-netlib/LAPACKE/src/lapacke_clarfg.c @@ -43,7 +43,7 @@ lapack_int LAPACKE_clarfg( lapack_int n, lapack_complex_float* alpha, if( LAPACKE_c_nancheck( 1, alpha, 1 ) ) { return -2; } - if( LAPACKE_c_nancheck( 1+(n-2)*ABS(incx), x, incx ) ) { + if( LAPACKE_c_nancheck( n-1, x, incx ) ) { return -3; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_clarfx.c b/lapack-netlib/LAPACKE/src/lapacke_clarfx.c index 977e283e1..786c21412 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_clarfx.c +++ b/lapack-netlib/LAPACKE/src/lapacke_clarfx.c @@ -38,6 +38,7 @@ lapack_int LAPACKE_clarfx( int matrix_layout, char side, lapack_int m, lapack_complex_float tau, lapack_complex_float* c, lapack_int ldc, lapack_complex_float* work ) { + lapack_int lv; if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) { LAPACKE_xerbla( "LAPACKE_clarfx", -1 ); return -1; @@ -51,7 +52,8 @@ lapack_int LAPACKE_clarfx( int matrix_layout, char side, lapack_int m, if( LAPACKE_c_nancheck( 1, &tau, 1 ) ) { return -6; } - if( LAPACKE_c_nancheck( m, v, 1 ) ) { + lv = (LAPACKE_lsame( side, 'l' ) ? 
m : n); + if( LAPACKE_c_nancheck( lv, v, 1 ) ) { return -5; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_classq.c b/lapack-netlib/LAPACKE/src/lapacke_classq.c index b8f231dbb..e4d746c5a 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_classq.c +++ b/lapack-netlib/LAPACKE/src/lapacke_classq.c @@ -39,7 +39,7 @@ lapack_int LAPACKE_classq( lapack_int n, lapack_complex_float* x, #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input vector `x` and in/out scalars `scale` and `sumsq` for NaNs */ - if( LAPACKE_c_nancheck( 1+(n-2)*ABS(incx), x, incx ) ) { + if( LAPACKE_c_nancheck( n, x, incx ) ) { return -2; } if( LAPACKE_s_nancheck( 1, scale, 1 ) ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_csytrf_aa_work.c b/lapack-netlib/LAPACKE/src/lapacke_csytrf_aa_work.c index d4f24142b..f6661c85c 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_csytrf_aa_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_csytrf_aa_work.c @@ -41,7 +41,7 @@ lapack_int LAPACKE_csytrf_aa_work( int matrix_layout, char uplo, lapack_int n, lapack_int info = 0; if( matrix_layout == LAPACK_COL_MAJOR ) { /* Call LAPACK function and adjust info */ - LAPACK_csytrf( &uplo, &n, a, &lda, ipiv, work, &lwork, &info ); + LAPACK_csytrf_aa( &uplo, &n, a, &lda, ipiv, work, &lwork, &info ); if( info < 0 ) { info = info - 1; } @@ -56,7 +56,7 @@ lapack_int LAPACKE_csytrf_aa_work( int matrix_layout, char uplo, lapack_int n, } /* Query optimal working array(s) size if requested */ if( lwork == -1 ) { - LAPACK_csytrf( &uplo, &n, a, &lda_t, ipiv, work, &lwork, &info ); + LAPACK_csytrf_aa( &uplo, &n, a, &lda_t, ipiv, work, &lwork, &info ); return (info < 0) ? (info - 1) : info; } /* Allocate memory for temporary array(s) */ @@ -69,7 +69,7 @@ lapack_int LAPACKE_csytrf_aa_work( int matrix_layout, char uplo, lapack_int n, /* Transpose input matrices */ LAPACKE_csy_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ - LAPACK_csytrf( &uplo, &n, a_t, &lda_t, ipiv, work, &lwork, &info ); + LAPACK_csytrf_aa( &uplo, &n, a_t, &lda_t, ipiv, work, &lwork, &info ); if( info < 0 ) { info = info - 1; } diff --git a/lapack-netlib/LAPACKE/src/lapacke_cunmtr.c b/lapack-netlib/LAPACKE/src/lapacke_cunmtr.c index 1864c4121..d9fb2dca0 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cunmtr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cunmtr.c @@ -58,7 +58,7 @@ lapack_int LAPACKE_cunmtr( int matrix_layout, char side, char uplo, char trans, if( LAPACKE_cge_nancheck( matrix_layout, m, n, c, ldc ) ) { return -10; } - if( LAPACKE_c_nancheck( m-1, tau, 1 ) ) { + if( LAPACKE_c_nancheck( r-1, tau, 1 ) ) { return -9; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_cupmtr.c b/lapack-netlib/LAPACKE/src/lapacke_cupmtr.c index 51f6d8276..ba026ae68 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_cupmtr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_cupmtr.c @@ -58,7 +58,7 @@ lapack_int LAPACKE_cupmtr( int matrix_layout, char side, char uplo, char trans, if( LAPACKE_cge_nancheck( matrix_layout, m, n, c, ldc ) ) { return -9; } - if( LAPACKE_c_nancheck( m-1, tau, 1 ) ) { + if( LAPACKE_c_nancheck( r-1, tau, 1 ) ) { return -8; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_dlarfb.c b/lapack-netlib/LAPACKE/src/lapacke_dlarfb.c index 55c26f4b6..a1f49dde1 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dlarfb.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dlarfb.c @@ -50,16 +50,21 @@ lapack_int LAPACKE_dlarfb( int matrix_layout, char side, char trans, char direct #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) 
{ /* Optionally check input matrices for NaNs */ - ncols_v = LAPACKE_lsame( storev, 'c' ) ? k : - ( ( LAPACKE_lsame( storev, 'r' ) && - LAPACKE_lsame( side, 'l' ) ) ? m : - ( ( LAPACKE_lsame( storev, 'r' ) && - LAPACKE_lsame( side, 'r' ) ) ? n : 1) ); - nrows_v = ( LAPACKE_lsame( storev, 'c' ) && - LAPACKE_lsame( side, 'l' ) ) ? m : - ( ( LAPACKE_lsame( storev, 'c' ) && - LAPACKE_lsame( side, 'r' ) ) ? n : - ( LAPACKE_lsame( storev, 'r' ) ? k : 1) ); + lapack_int lrv, lcv; /* row, column stride */ + if( matrix_layout == LAPACK_COL_MAJOR ) { + lrv = 1; + lcv = ldv; + } else { + lrv = ldv; + lcv = 1; + } + ncols_v = LAPACKE_lsame( storev, 'c' ) ? k : + ( ( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( side, 'l' ) ) ? m : + ( ( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( side, 'r' ) ) ? n : 1) ); + + nrows_v = ( LAPACKE_lsame( storev, 'c' ) && LAPACKE_lsame( side, 'l' ) ) ? m : + ( ( LAPACKE_lsame( storev, 'c' ) && LAPACKE_lsame( side, 'r' ) ) ? n : + ( LAPACKE_lsame( storev, 'r' ) ? k : 1) ); if( LAPACKE_dge_nancheck( matrix_layout, m, n, c, ldc ) ) { return -13; } @@ -69,8 +74,8 @@ lapack_int LAPACKE_dlarfb( int matrix_layout, char side, char trans, char direct if( LAPACKE_lsame( storev, 'c' ) && LAPACKE_lsame( direct, 'f' ) ) { if( LAPACKE_dtr_nancheck( matrix_layout, 'l', 'u', k, v, ldv ) ) return -9; - if( LAPACKE_dge_nancheck( matrix_layout, nrows_v-k, ncols_v, &v[k*ldv], - ldv ) ) + if( LAPACKE_dge_nancheck( matrix_layout, nrows_v-k, ncols_v, + &v[k*lrv], ldv ) ) return -9; } else if( LAPACKE_lsame( storev, 'c' ) && LAPACKE_lsame( direct, 'b' ) ) { if( k > nrows_v ) { @@ -78,23 +83,23 @@ lapack_int LAPACKE_dlarfb( int matrix_layout, char side, char trans, char direct return -8; } if( LAPACKE_dtr_nancheck( matrix_layout, 'u', 'u', k, - &v[(nrows_v-k)*ldv], ldv ) ) + &v[(nrows_v-k)*lrv], ldv ) ) return -9; if( LAPACKE_dge_nancheck( matrix_layout, nrows_v-k, ncols_v, v, ldv ) ) return -9; } else if( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( direct, 'f' ) ) { if( LAPACKE_dtr_nancheck( matrix_layout, 'u', 'u', k, v, ldv ) ) return -9; - if( LAPACKE_dge_nancheck( matrix_layout, nrows_v, ncols_v-k, &v[k], - ldv ) ) + if( LAPACKE_dge_nancheck( matrix_layout, nrows_v, ncols_v-k, + &v[k*lrv], ldv ) ) return -9; - } else if( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( direct, 'f' ) ) { + } else if( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( direct, 'b' ) ) { if( k > ncols_v ) { LAPACKE_xerbla( "LAPACKE_dlarfb", -8 ); return -8; } - if( LAPACKE_dtr_nancheck( matrix_layout, 'l', 'u', k, &v[ncols_v-k], - ldv ) ) + if( LAPACKE_dtr_nancheck( matrix_layout, 'l', 'u', k, + &v[(ncols_v-k)*lcv], ldv ) ) return -9; if( LAPACKE_dge_nancheck( matrix_layout, nrows_v, ncols_v-k, v, ldv ) ) return -9; diff --git a/lapack-netlib/LAPACKE/src/lapacke_dlarfg.c b/lapack-netlib/LAPACKE/src/lapacke_dlarfg.c index 0f627b323..df401c41d 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dlarfg.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dlarfg.c @@ -42,7 +42,7 @@ lapack_int LAPACKE_dlarfg( lapack_int n, double* alpha, double* x, if( LAPACKE_d_nancheck( 1, alpha, 1 ) ) { return -2; } - if( LAPACKE_d_nancheck( 1+(n-2)*ABS(incx), x, incx ) ) { + if( LAPACKE_d_nancheck( n-1, x, incx ) ) { return -3; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_dlarfx.c b/lapack-netlib/LAPACKE/src/lapacke_dlarfx.c index ab4a58e76..7b7b7201e 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dlarfx.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dlarfx.c @@ -37,6 +37,7 @@ lapack_int LAPACKE_dlarfx( int matrix_layout, char side, 
lapack_int m, lapack_int n, const double* v, double tau, double* c, lapack_int ldc, double* work ) { + lapack_int lv; if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) { LAPACKE_xerbla( "LAPACKE_dlarfx", -1 ); return -1; @@ -50,7 +51,8 @@ lapack_int LAPACKE_dlarfx( int matrix_layout, char side, lapack_int m, if( LAPACKE_d_nancheck( 1, &tau, 1 ) ) { return -6; } - if( LAPACKE_d_nancheck( m, v, 1 ) ) { + lv = (LAPACKE_lsame( side, 'l' ) ? m : n); + if( LAPACKE_d_nancheck( lv, v, 1 ) ) { return -5; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_dlassq.c b/lapack-netlib/LAPACKE/src/lapacke_dlassq.c index a564240d4..0e096b6d4 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dlassq.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dlassq.c @@ -38,7 +38,7 @@ lapack_int LAPACKE_dlassq( lapack_int n, double* x, lapack_int incx, double* sca #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input vector `x` and in/out scalars `scale` and `sumsq` for NaNs */ - if( LAPACKE_d_nancheck( 1+(n-2)*ABS(incx), x, incx ) ) { + if( LAPACKE_d_nancheck( n, x, incx ) ) { return -2; } if( LAPACKE_d_nancheck( 1, scale, 1 ) ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_dopmtr.c b/lapack-netlib/LAPACKE/src/lapacke_dopmtr.c index 93d3d3d30..7fbfb11fd 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dopmtr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dopmtr.c @@ -56,7 +56,7 @@ lapack_int LAPACKE_dopmtr( int matrix_layout, char side, char uplo, char trans, if( LAPACKE_dge_nancheck( matrix_layout, m, n, c, ldc ) ) { return -9; } - if( LAPACKE_d_nancheck( m-1, tau, 1 ) ) { + if( LAPACKE_d_nancheck( r-1, tau, 1 ) ) { return -8; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_dormtr.c b/lapack-netlib/LAPACKE/src/lapacke_dormtr.c index 05e4c57c8..db75a6609 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dormtr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dormtr.c @@ -57,7 +57,7 @@ lapack_int LAPACKE_dormtr( int matrix_layout, char side, char uplo, char trans, if( LAPACKE_dge_nancheck( matrix_layout, m, n, c, ldc ) ) { return -10; } - if( LAPACKE_d_nancheck( m-1, tau, 1 ) ) { + if( LAPACKE_d_nancheck( r-1, tau, 1 ) ) { return -9; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_dsytrf_aa_2stage_work.c b/lapack-netlib/LAPACKE/src/lapacke_dsytrf_aa_2stage_work.c index 2cc7b9ad2..dbd6e9049 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dsytrf_aa_2stage_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dsytrf_aa_2stage_work.c @@ -50,7 +50,6 @@ lapack_int LAPACKE_dsytrf_aa_2stage_work( int matrix_layout, char uplo, lapack_i } } else if( matrix_layout == LAPACK_ROW_MAJOR ) { lapack_int lda_t = MAX(1,n); - lapack_int ldb_t = MAX(1,n); double* a_t = NULL; double* tb_t = NULL; /* Check leading dimension(s) */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_dsytrf_aa_work.c b/lapack-netlib/LAPACKE/src/lapacke_dsytrf_aa_work.c index cbf97b632..e72bfa6de 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_dsytrf_aa_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_dsytrf_aa_work.c @@ -40,7 +40,7 @@ lapack_int LAPACKE_dsytrf_aa_work( int matrix_layout, char uplo, lapack_int n, lapack_int info = 0; if( matrix_layout == LAPACK_COL_MAJOR ) { /* Call LAPACK function and adjust info */ - LAPACK_dsytrf( &uplo, &n, a, &lda, ipiv, work, &lwork, &info ); + LAPACK_dsytrf_aa( &uplo, &n, a, &lda, ipiv, work, &lwork, &info ); if( info < 0 ) { info = info - 1; } @@ -55,7 +55,7 @@ lapack_int LAPACKE_dsytrf_aa_work( int matrix_layout, char uplo, lapack_int n, } /* Query optimal working array(s) size if requested 
*/ if( lwork == -1 ) { - LAPACK_dsytrf( &uplo, &n, a, &lda_t, ipiv, work, &lwork, &info ); + LAPACK_dsytrf_aa( &uplo, &n, a, &lda_t, ipiv, work, &lwork, &info ); return (info < 0) ? (info - 1) : info; } /* Allocate memory for temporary array(s) */ @@ -67,7 +67,7 @@ lapack_int LAPACKE_dsytrf_aa_work( int matrix_layout, char uplo, lapack_int n, /* Transpose input matrices */ LAPACKE_dsy_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ - LAPACK_dsytrf( &uplo, &n, a_t, &lda_t, ipiv, work, &lwork, &info ); + LAPACK_dsytrf_aa( &uplo, &n, a_t, &lda_t, ipiv, work, &lwork, &info ); if( info < 0 ) { info = info - 1; } diff --git a/lapack-netlib/LAPACKE/src/lapacke_slarfb.c b/lapack-netlib/LAPACKE/src/lapacke_slarfb.c index 72fa75ef1..0ebdc931a 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_slarfb.c +++ b/lapack-netlib/LAPACKE/src/lapacke_slarfb.c @@ -50,16 +50,21 @@ lapack_int LAPACKE_slarfb( int matrix_layout, char side, char trans, char direct #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - ncols_v = LAPACKE_lsame( storev, 'c' ) ? k : - ( ( LAPACKE_lsame( storev, 'r' ) && - LAPACKE_lsame( side, 'l' ) ) ? m : - ( ( LAPACKE_lsame( storev, 'r' ) && - LAPACKE_lsame( side, 'r' ) ) ? n : 1) ); - nrows_v = ( LAPACKE_lsame( storev, 'c' ) && - LAPACKE_lsame( side, 'l' ) ) ? m : - ( ( LAPACKE_lsame( storev, 'c' ) && - LAPACKE_lsame( side, 'r' ) ) ? n : - ( LAPACKE_lsame( storev, 'r' ) ? k : 1) ); + lapack_int lrv, lcv; /* row, column stride */ + if( matrix_layout == LAPACK_COL_MAJOR ) { + lrv = 1; + lcv = ldv; + } else { + lrv = ldv; + lcv = 1; + } + ncols_v = LAPACKE_lsame( storev, 'c' ) ? k : + ( ( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( side, 'l' ) ) ? m : + ( ( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( side, 'r' ) ) ? n : 1) ); + + nrows_v = ( LAPACKE_lsame( storev, 'c' ) && LAPACKE_lsame( side, 'l' ) ) ? m : + ( ( LAPACKE_lsame( storev, 'c' ) && LAPACKE_lsame( side, 'r' ) ) ? n : + ( LAPACKE_lsame( storev, 'r' ) ? 
k : 1) ); if( LAPACKE_sge_nancheck( matrix_layout, m, n, c, ldc ) ) { return -13; } @@ -69,8 +74,8 @@ lapack_int LAPACKE_slarfb( int matrix_layout, char side, char trans, char direct if( LAPACKE_lsame( storev, 'c' ) && LAPACKE_lsame( direct, 'f' ) ) { if( LAPACKE_str_nancheck( matrix_layout, 'l', 'u', k, v, ldv ) ) return -9; - if( LAPACKE_sge_nancheck( matrix_layout, nrows_v-k, ncols_v, &v[k*ldv], - ldv ) ) + if( LAPACKE_sge_nancheck( matrix_layout, nrows_v-k, ncols_v, + &v[k*lrv], ldv ) ) return -9; } else if( LAPACKE_lsame( storev, 'c' ) && LAPACKE_lsame( direct, 'b' ) ) { if( k > nrows_v ) { @@ -78,23 +83,23 @@ lapack_int LAPACKE_slarfb( int matrix_layout, char side, char trans, char direct return -8; } if( LAPACKE_str_nancheck( matrix_layout, 'u', 'u', k, - &v[(nrows_v-k)*ldv], ldv ) ) + &v[(nrows_v-k)*lrv], ldv ) ) return -9; if( LAPACKE_sge_nancheck( matrix_layout, nrows_v-k, ncols_v, v, ldv ) ) return -9; } else if( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( direct, 'f' ) ) { if( LAPACKE_str_nancheck( matrix_layout, 'u', 'u', k, v, ldv ) ) return -9; - if( LAPACKE_sge_nancheck( matrix_layout, nrows_v, ncols_v-k, &v[k], - ldv ) ) + if( LAPACKE_sge_nancheck( matrix_layout, nrows_v, ncols_v-k, + &v[k*lrv], ldv ) ) return -9; - } else if( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( direct, 'f' ) ) { + } else if( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( direct, 'b' ) ) { if( k > ncols_v ) { LAPACKE_xerbla( "LAPACKE_slarfb", -8 ); return -8; } - if( LAPACKE_str_nancheck( matrix_layout, 'l', 'u', k, &v[ncols_v-k], - ldv ) ) + if( LAPACKE_str_nancheck( matrix_layout, 'l', 'u', k, + &v[(ncols_v-k)*lcv], ldv ) ) return -9; if( LAPACKE_sge_nancheck( matrix_layout, nrows_v, ncols_v-k, v, ldv ) ) return -9; diff --git a/lapack-netlib/LAPACKE/src/lapacke_slarfg.c b/lapack-netlib/LAPACKE/src/lapacke_slarfg.c index 295277387..ea9a83575 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_slarfg.c +++ b/lapack-netlib/LAPACKE/src/lapacke_slarfg.c @@ -42,7 +42,7 @@ lapack_int LAPACKE_slarfg( lapack_int n, float* alpha, float* x, if( LAPACKE_s_nancheck( 1, alpha, 1 ) ) { return -2; } - if( LAPACKE_s_nancheck( 1+(n-2)*ABS(incx), x, incx ) ) { + if( LAPACKE_s_nancheck( n-1, x, incx ) ) { return -3; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_slarfx.c b/lapack-netlib/LAPACKE/src/lapacke_slarfx.c index 426137815..c2b797a98 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_slarfx.c +++ b/lapack-netlib/LAPACKE/src/lapacke_slarfx.c @@ -37,6 +37,7 @@ lapack_int LAPACKE_slarfx( int matrix_layout, char side, lapack_int m, lapack_int n, const float* v, float tau, float* c, lapack_int ldc, float* work ) { + lapack_int lv; if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) { LAPACKE_xerbla( "LAPACKE_slarfx", -1 ); return -1; @@ -50,7 +51,8 @@ lapack_int LAPACKE_slarfx( int matrix_layout, char side, lapack_int m, if( LAPACKE_s_nancheck( 1, &tau, 1 ) ) { return -6; } - if( LAPACKE_s_nancheck( m, v, 1 ) ) { + lv = (LAPACKE_lsame( side, 'l' ) ? 
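The slarfb hunks above replace raw ldv offsets with the layout-aware strides lrv and lcv, because "skip the first k rows of V" is an offset of k elements in column-major storage but k*ldv elements in row-major storage. A self-contained sketch, assuming the usual LAPACKE layout codes (LAPACK_ROW_MAJOR = 101, LAPACK_COL_MAJOR = 102):

#include <stdio.h>

enum { ROW_MAJOR = 101, COL_MAJOR = 102 };   /* LAPACKE layout codes */

/* Flat offset of logical element (i, j) of a matrix with leading
 * dimension ld: the row stride is 1 in column-major storage and ld in
 * row-major storage, and vice versa for the column stride. */
static size_t offset(int layout, size_t i, size_t j, size_t ld) {
    size_t lrv = (layout == COL_MAJOR) ? 1 : ld;  /* row stride    */
    size_t lcv = (layout == COL_MAJOR) ? ld : 1;  /* column stride */
    return i * lrv + j * lcv;
}

int main(void) {
    size_t k = 2, ldv = 5;
    /* the sub-block that starts k rows down begins at &v[k*lrv],
     * which only coincides with &v[k*ldv] in the row-major case */
    printf("col-major: &v[%zu]\n", offset(COL_MAJOR, k, 0, ldv)); /* 2  */
    printf("row-major: &v[%zu]\n", offset(ROW_MAJOR, k, 0, ldv)); /* 10 */
    return 0;
}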
m : n); + if( LAPACKE_s_nancheck( lv, v, 1 ) ) { return -5; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_slassq.c b/lapack-netlib/LAPACKE/src/lapacke_slassq.c index 668289e18..3e265e359 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_slassq.c +++ b/lapack-netlib/LAPACKE/src/lapacke_slassq.c @@ -38,7 +38,7 @@ lapack_int LAPACKE_slassq( lapack_int n, float* x, lapack_int incx, float* scale #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input vector `x` and in/out scalars `scale` and `sumsq` for NaNs */ - if( LAPACKE_s_nancheck( 1+(n-2)*ABS(incx), x, incx ) ) { + if( LAPACKE_s_nancheck( n, x, incx ) ) { return -2; } if( LAPACKE_s_nancheck( 1, scale, 1 ) ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_sopmtr.c b/lapack-netlib/LAPACKE/src/lapacke_sopmtr.c index 333789837..bf8eed4f9 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_sopmtr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_sopmtr.c @@ -56,7 +56,7 @@ lapack_int LAPACKE_sopmtr( int matrix_layout, char side, char uplo, char trans, if( LAPACKE_sge_nancheck( matrix_layout, m, n, c, ldc ) ) { return -9; } - if( LAPACKE_s_nancheck( m-1, tau, 1 ) ) { + if( LAPACKE_s_nancheck( r-1, tau, 1 ) ) { return -8; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_sormtr.c b/lapack-netlib/LAPACKE/src/lapacke_sormtr.c index 5a9d44138..9f0e9fddf 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_sormtr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_sormtr.c @@ -57,7 +57,7 @@ lapack_int LAPACKE_sormtr( int matrix_layout, char side, char uplo, char trans, if( LAPACKE_sge_nancheck( matrix_layout, m, n, c, ldc ) ) { return -10; } - if( LAPACKE_s_nancheck( m-1, tau, 1 ) ) { + if( LAPACKE_s_nancheck( r-1, tau, 1 ) ) { return -9; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_ssytrf_aa_work.c b/lapack-netlib/LAPACKE/src/lapacke_ssytrf_aa_work.c index d68cb17c1..182946a45 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_ssytrf_aa_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_ssytrf_aa_work.c @@ -40,7 +40,7 @@ lapack_int LAPACKE_ssytrf_aa_work( int matrix_layout, char uplo, lapack_int n, lapack_int info = 0; if( matrix_layout == LAPACK_COL_MAJOR ) { /* Call LAPACK function and adjust info */ - LAPACK_ssytrf( &uplo, &n, a, &lda, ipiv, work, &lwork, &info ); + LAPACK_ssytrf_aa( &uplo, &n, a, &lda, ipiv, work, &lwork, &info ); if( info < 0 ) { info = info - 1; } @@ -55,7 +55,7 @@ lapack_int LAPACKE_ssytrf_aa_work( int matrix_layout, char uplo, lapack_int n, } /* Query optimal working array(s) size if requested */ if( lwork == -1 ) { - LAPACK_ssytrf( &uplo, &n, a, &lda_t, ipiv, work, &lwork, &info ); + LAPACK_ssytrf_aa( &uplo, &n, a, &lda_t, ipiv, work, &lwork, &info ); return (info < 0) ? 
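The ?lassq and ?larfg hunks all make the same correction: LAPACKE_?_nancheck takes an element count plus a separate stride, so the caller must pass n (or n-1), not the memory span 1+(n-2)*|incx|. Passing the span makes the checker walk span*|incx| slots and read past the end of the vector. A self-contained sketch, where nancheck is a hypothetical stand-in for the LAPACKE helper:

#include <math.h>
#include <stdio.h>

/* Stand-in for LAPACKE_?_nancheck: scans COUNT elements spaced incx
 * apart, i.e. it already multiplies by the stride internally. */
static int nancheck(int n, const double *x, int incx) {
    for (int i = 0; i < n; i++)
        if (isnan(x[i * incx])) return 1;
    return 0;
}

int main(void) {
    /* n = 4 logical elements with stride 2 occupy 1+(n-1)*2 = 7 slots */
    double x[7] = {1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 4.0};
    int n = 4, incx = 2;
    printf("NaN found: %d\n", nancheck(n, x, incx));   /* correct: 0 */
    /* the old calls passed the span 1+(n-2)*incx = 5 as the count,
     * so the scan would have touched x[4*2] = x[8], past this array */
    return 0;
}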
(info - 1) : info; } /* Allocate memory for temporary array(s) */ @@ -67,7 +67,7 @@ lapack_int LAPACKE_ssytrf_aa_work( int matrix_layout, char uplo, lapack_int n, /* Transpose input matrices */ LAPACKE_ssy_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ - LAPACK_ssytrf( &uplo, &n, a_t, &lda_t, ipiv, work, &lwork, &info ); + LAPACK_ssytrf_aa( &uplo, &n, a_t, &lda_t, ipiv, work, &lwork, &info ); if( info < 0 ) { info = info - 1; } diff --git a/lapack-netlib/LAPACKE/src/lapacke_zhetrf_aa_2stage_work.c b/lapack-netlib/LAPACKE/src/lapacke_zhetrf_aa_2stage_work.c index 5b8010d9e..b9ba0fb56 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zhetrf_aa_2stage_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zhetrf_aa_2stage_work.c @@ -50,7 +50,6 @@ lapack_int LAPACKE_zhetrf_aa_2stage_work( int matrix_layout, char uplo, lapack_i } } else if( matrix_layout == LAPACK_ROW_MAJOR ) { lapack_int lda_t = MAX(1,n); - lapack_int ldb_t = MAX(1,n); lapack_complex_double* a_t = NULL; lapack_complex_double* tb_t = NULL; /* Check leading dimension(s) */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_zhetrf_aa_work.c b/lapack-netlib/LAPACKE/src/lapacke_zhetrf_aa_work.c index 5214217fb..dbad2d81e 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zhetrf_aa_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zhetrf_aa_work.c @@ -41,7 +41,7 @@ lapack_int LAPACKE_zhetrf_aa_work( int matrix_layout, char uplo, lapack_int n, lapack_int info = 0; if( matrix_layout == LAPACK_COL_MAJOR ) { /* Call LAPACK function and adjust info */ - LAPACK_zhetrf( &uplo, &n, a, &lda, ipiv, work, &lwork, &info ); + LAPACK_zhetrf_aa( &uplo, &n, a, &lda, ipiv, work, &lwork, &info ); if( info < 0 ) { info = info - 1; } @@ -56,7 +56,7 @@ lapack_int LAPACKE_zhetrf_aa_work( int matrix_layout, char uplo, lapack_int n, } /* Query optimal working array(s) size if requested */ if( lwork == -1 ) { - LAPACK_zhetrf( &uplo, &n, a, &lda_t, ipiv, work, &lwork, &info ); + LAPACK_zhetrf_aa( &uplo, &n, a, &lda_t, ipiv, work, &lwork, &info ); return (info < 0) ? 
(info - 1) : info; } /* Allocate memory for temporary array(s) */ @@ -69,7 +69,7 @@ lapack_int LAPACKE_zhetrf_aa_work( int matrix_layout, char uplo, lapack_int n, /* Transpose input matrices */ LAPACKE_zhe_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ - LAPACK_zhetrf( &uplo, &n, a_t, &lda_t, ipiv, work, &lwork, &info ); + LAPACK_zhetrf_aa( &uplo, &n, a_t, &lda_t, ipiv, work, &lwork, &info ); if( info < 0 ) { info = info - 1; } diff --git a/lapack-netlib/LAPACKE/src/lapacke_zlacgv.c b/lapack-netlib/LAPACKE/src/lapacke_zlacgv.c index 3b1130ba5..cd412dc24 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zlacgv.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zlacgv.c @@ -39,7 +39,7 @@ lapack_int LAPACKE_zlacgv( lapack_int n, lapack_complex_double* x, #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - if( LAPACKE_z_nancheck( 1+(n-1)*ABS(incx), x, incx ) ) { + if( LAPACKE_z_nancheck( n, x, incx ) ) { return -2; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_zlarfb.c b/lapack-netlib/LAPACKE/src/lapacke_zlarfb.c index 6ea4960f3..4fc2eb0ab 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zlarfb.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zlarfb.c @@ -51,16 +51,21 @@ lapack_int LAPACKE_zlarfb( int matrix_layout, char side, char trans, char direct #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input matrices for NaNs */ - ncols_v = LAPACKE_lsame( storev, 'c' ) ? k : - ( ( LAPACKE_lsame( storev, 'r' ) && - LAPACKE_lsame( side, 'l' ) ) ? m : - ( ( LAPACKE_lsame( storev, 'r' ) && - LAPACKE_lsame( side, 'r' ) ) ? n : 1) ); - nrows_v = ( LAPACKE_lsame( storev, 'c' ) && - LAPACKE_lsame( side, 'l' ) ) ? m : - ( ( LAPACKE_lsame( storev, 'c' ) && - LAPACKE_lsame( side, 'r' ) ) ? n : - ( LAPACKE_lsame( storev, 'r' ) ? k : 1) ); + lapack_int lrv, lcv; /* row, column stride */ + if( matrix_layout == LAPACK_COL_MAJOR ) { + lrv = 1; + lcv = ldv; + } else { + lrv = ldv; + lcv = 1; + } + ncols_v = LAPACKE_lsame( storev, 'c' ) ? k : + ( ( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( side, 'l' ) ) ? m : + ( ( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( side, 'r' ) ) ? n : 1) ); + + nrows_v = ( LAPACKE_lsame( storev, 'c' ) && LAPACKE_lsame( side, 'l' ) ) ? m : + ( ( LAPACKE_lsame( storev, 'c' ) && LAPACKE_lsame( side, 'r' ) ) ? n : + ( LAPACKE_lsame( storev, 'r' ) ? 
k : 1) ); if( LAPACKE_zge_nancheck( matrix_layout, m, n, c, ldc ) ) { return -13; } @@ -70,8 +75,8 @@ lapack_int LAPACKE_zlarfb( int matrix_layout, char side, char trans, char direct if( LAPACKE_lsame( storev, 'c' ) && LAPACKE_lsame( direct, 'f' ) ) { if( LAPACKE_ztr_nancheck( matrix_layout, 'l', 'u', k, v, ldv ) ) return -9; - if( LAPACKE_zge_nancheck( matrix_layout, nrows_v-k, ncols_v, &v[k*ldv], - ldv ) ) + if( LAPACKE_zge_nancheck( matrix_layout, nrows_v-k, ncols_v, + &v[k*lrv], ldv ) ) return -9; } else if( LAPACKE_lsame( storev, 'c' ) && LAPACKE_lsame( direct, 'b' ) ) { if( k > nrows_v ) { @@ -79,23 +84,23 @@ lapack_int LAPACKE_zlarfb( int matrix_layout, char side, char trans, char direct return -8; } if( LAPACKE_ztr_nancheck( matrix_layout, 'u', 'u', k, - &v[(nrows_v-k)*ldv], ldv ) ) + &v[(nrows_v-k)*lrv], ldv ) ) return -9; if( LAPACKE_zge_nancheck( matrix_layout, nrows_v-k, ncols_v, v, ldv ) ) return -9; } else if( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( direct, 'f' ) ) { if( LAPACKE_ztr_nancheck( matrix_layout, 'u', 'u', k, v, ldv ) ) return -9; - if( LAPACKE_zge_nancheck( matrix_layout, nrows_v, ncols_v-k, &v[k], - ldv ) ) + if( LAPACKE_zge_nancheck( matrix_layout, nrows_v, ncols_v-k, + &v[k*lrv], ldv ) ) return -9; - } else if( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( direct, 'f' ) ) { + } else if( LAPACKE_lsame( storev, 'r' ) && LAPACKE_lsame( direct, 'b' ) ) { if( k > ncols_v ) { LAPACKE_xerbla( "LAPACKE_zlarfb", -8 ); return -8; } - if( LAPACKE_ztr_nancheck( matrix_layout, 'l', 'u', k, &v[ncols_v-k], - ldv ) ) + if( LAPACKE_ztr_nancheck( matrix_layout, 'l', 'u', k, + &v[(ncols_v-k)*lcv], ldv ) ) return -9; if( LAPACKE_zge_nancheck( matrix_layout, nrows_v, ncols_v-k, v, ldv ) ) return -9; diff --git a/lapack-netlib/LAPACKE/src/lapacke_zlarfg.c b/lapack-netlib/LAPACKE/src/lapacke_zlarfg.c index 14e587fcc..a566a08cb 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zlarfg.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zlarfg.c @@ -43,7 +43,7 @@ lapack_int LAPACKE_zlarfg( lapack_int n, lapack_complex_double* alpha, if( LAPACKE_z_nancheck( 1, alpha, 1 ) ) { return -2; } - if( LAPACKE_z_nancheck( 1+(n-2)*ABS(incx), x, incx ) ) { + if( LAPACKE_z_nancheck( n-1, x, incx ) ) { return -3; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_zlarfx.c b/lapack-netlib/LAPACKE/src/lapacke_zlarfx.c index 1dd1f5204..b4ebf727e 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zlarfx.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zlarfx.c @@ -38,6 +38,7 @@ lapack_int LAPACKE_zlarfx( int matrix_layout, char side, lapack_int m, lapack_complex_double tau, lapack_complex_double* c, lapack_int ldc, lapack_complex_double* work ) { + lapack_int lv; if( matrix_layout != LAPACK_COL_MAJOR && matrix_layout != LAPACK_ROW_MAJOR ) { LAPACKE_xerbla( "LAPACKE_zlarfx", -1 ); return -1; @@ -51,7 +52,8 @@ lapack_int LAPACKE_zlarfx( int matrix_layout, char side, lapack_int m, if( LAPACKE_z_nancheck( 1, &tau, 1 ) ) { return -6; } - if( LAPACKE_z_nancheck( m, v, 1 ) ) { + lv = (LAPACKE_lsame( side, 'l' ) ? 
m : n); + if( LAPACKE_z_nancheck( lv, v, 1 ) ) { return -5; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_zlassq.c b/lapack-netlib/LAPACKE/src/lapacke_zlassq.c index a218c9b62..b8972b974 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zlassq.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zlassq.c @@ -39,7 +39,7 @@ lapack_int LAPACKE_zlassq( lapack_int n, lapack_complex_double* x, #ifndef LAPACK_DISABLE_NAN_CHECK if( LAPACKE_get_nancheck() ) { /* Optionally check input vector `x` and in/out scalars `scale` and `sumsq` for NaNs */ - if( LAPACKE_z_nancheck( 1+(n-2)*ABS(incx), x, incx ) ) { + if( LAPACKE_z_nancheck( n, x, incx ) ) { return -2; } if( LAPACKE_d_nancheck( 1, scale, 1 ) ) { diff --git a/lapack-netlib/LAPACKE/src/lapacke_zsytrf_aa_2stage_work.c b/lapack-netlib/LAPACKE/src/lapacke_zsytrf_aa_2stage_work.c index f91c42257..db27e2873 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zsytrf_aa_2stage_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zsytrf_aa_2stage_work.c @@ -50,7 +50,6 @@ lapack_int LAPACKE_zsytrf_aa_2stage_work( int matrix_layout, char uplo, lapack_i } } else if( matrix_layout == LAPACK_ROW_MAJOR ) { lapack_int lda_t = MAX(1,n); - lapack_int ldb_t = MAX(1,n); lapack_complex_double* a_t = NULL; lapack_complex_double* tb_t = NULL; /* Check leading dimension(s) */ diff --git a/lapack-netlib/LAPACKE/src/lapacke_zsytrf_aa_work.c b/lapack-netlib/LAPACKE/src/lapacke_zsytrf_aa_work.c index 29d75319e..03726c63e 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zsytrf_aa_work.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zsytrf_aa_work.c @@ -41,7 +41,7 @@ lapack_int LAPACKE_zsytrf_aa_work( int matrix_layout, char uplo, lapack_int n, lapack_int info = 0; if( matrix_layout == LAPACK_COL_MAJOR ) { /* Call LAPACK function and adjust info */ - LAPACK_zsytrf( &uplo, &n, a, &lda, ipiv, work, &lwork, &info ); + LAPACK_zsytrf_aa( &uplo, &n, a, &lda, ipiv, work, &lwork, &info ); if( info < 0 ) { info = info - 1; } @@ -56,7 +56,7 @@ lapack_int LAPACKE_zsytrf_aa_work( int matrix_layout, char uplo, lapack_int n, } /* Query optimal working array(s) size if requested */ if( lwork == -1 ) { - LAPACK_zsytrf( &uplo, &n, a, &lda_t, ipiv, work, &lwork, &info ); + LAPACK_zsytrf_aa( &uplo, &n, a, &lda_t, ipiv, work, &lwork, &info ); return (info < 0) ? 
(info - 1) : info; } /* Allocate memory for temporary array(s) */ @@ -69,7 +69,7 @@ lapack_int LAPACKE_zsytrf_aa_work( int matrix_layout, char uplo, lapack_int n, /* Transpose input matrices */ LAPACKE_zsy_trans( matrix_layout, uplo, n, a, lda, a_t, lda_t ); /* Call LAPACK function and adjust info */ - LAPACK_zsytrf( &uplo, &n, a_t, &lda_t, ipiv, work, &lwork, &info ); + LAPACK_zsytrf_aa( &uplo, &n, a_t, &lda_t, ipiv, work, &lwork, &info ); if( info < 0 ) { info = info - 1; } diff --git a/lapack-netlib/LAPACKE/src/lapacke_zunmtr.c b/lapack-netlib/LAPACKE/src/lapacke_zunmtr.c index f8936cd5a..433385440 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zunmtr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zunmtr.c @@ -58,7 +58,7 @@ lapack_int LAPACKE_zunmtr( int matrix_layout, char side, char uplo, char trans, if( LAPACKE_zge_nancheck( matrix_layout, m, n, c, ldc ) ) { return -10; } - if( LAPACKE_z_nancheck( m-1, tau, 1 ) ) { + if( LAPACKE_z_nancheck( r-1, tau, 1 ) ) { return -9; } } diff --git a/lapack-netlib/LAPACKE/src/lapacke_zupmtr.c b/lapack-netlib/LAPACKE/src/lapacke_zupmtr.c index d735c5561..80bbd9529 100644 --- a/lapack-netlib/LAPACKE/src/lapacke_zupmtr.c +++ b/lapack-netlib/LAPACKE/src/lapacke_zupmtr.c @@ -58,7 +58,7 @@ lapack_int LAPACKE_zupmtr( int matrix_layout, char side, char uplo, char trans, if( LAPACKE_zge_nancheck( matrix_layout, m, n, c, ldc ) ) { return -9; } - if( LAPACKE_z_nancheck( m-1, tau, 1 ) ) { + if( LAPACKE_z_nancheck( r-1, tau, 1 ) ) { return -8; } } diff --git a/lapack-netlib/SRC/Makefile b/lapack-netlib/SRC/Makefile index 9cc2ea51b..87a8f51e4 100644 --- a/lapack-netlib/SRC/Makefile +++ b/lapack-netlib/SRC/Makefile @@ -533,7 +533,9 @@ ZLASRC = $(filter-out $(ZLAPACKOBJS),$(ZLASRC_O)) DSLASRC = $(filter-out $(SLAPACKOBJS),$(DSLASRC_O)) ZCLASRC = $(filter-out $(CLAPACKOBJS),$(ZCLASRC_O)) -OPTS1 = $(filter-out -fopenmp, $(OPTS)) +#from commit 1046, supposedly related to mingw but breaks thread safety +#in insidious ways on all platforms when used in place of OPTS below +#OPTS1 = $(filter-out -fopenmp, $(OPTS)) #end filter out @@ -550,6 +552,8 @@ endif all: ../$(LAPACKLIB) +.PHONY: ../$(LAPACKLIB) + ../$(LAPACKLIB): $(ALLOBJ) $(ALLXOBJ) $(DEPRECATED) $(ARCH) $(ARCHFLAGS) $@ $(ALLOBJ) $(ALLXOBJ) $(DEPRECATED) $(RANLIB) $@ @@ -597,10 +601,10 @@ clean: rm -f *.o DEPRECATED/*.o .f.o: - $(FORTRAN) $(OPTS1) -c -o $@ $< + $(FORTRAN) $(OPTS) -c -o $@ $< .F.o: - $(FORTRAN) $(OPTS1) -c $< -o $@ + $(FORTRAN) $(OPTS) -c $< -o $@ slaruv.o: slaruv.f ; $(FORTRAN) $(NOOPT) -c -o $@ $< dlaruv.o: dlaruv.f ; $(FORTRAN) $(NOOPT) -c -o $@ $< diff --git a/lapack-netlib/SRC/cgejsv.f b/lapack-netlib/SRC/cgejsv.f index 8eb43cf50..a7b1c451c 100644 --- a/lapack-netlib/SRC/cgejsv.f +++ b/lapack-netlib/SRC/cgejsv.f @@ -701,7 +701,7 @@ LWSVDJ = MAX( 2 * N, 1 ) LWSVDJV = MAX( 2 * N, 1 ) * .. minimal REAL workspace length for CGEQP3, CPOCON, CGESVJ - LRWQP3 = N + LRWQP3 = 2 * N LRWCON = N LRWSVDJ = N IF ( LQUERY ) THEN @@ -939,7 +939,7 @@ END IF END IF MINWRK = MAX( 2, MINWRK ) - OPTWRK = MAX( 2, OPTWRK ) + OPTWRK = MAX( OPTWRK, MINWRK ) IF ( LWORK .LT. MINWRK .AND. (.NOT.LQUERY) ) INFO = - 17 IF ( LRWORK .LT. MINRWRK .AND. (.NOT.LQUERY) ) INFO = - 19 END IF diff --git a/lapack-netlib/SRC/chesv_aa.f b/lapack-netlib/SRC/chesv_aa.f index 0bf636b48..470f910bc 100644 --- a/lapack-netlib/SRC/chesv_aa.f +++ b/lapack-netlib/SRC/chesv_aa.f @@ -209,6 +209,8 @@ INFO = -5 ELSE IF( LDB.LT.MAX( 1, N ) ) THEN INFO = -8 + ELSE IF( LWORK.LT.MAX( 2*N, 3*N-2 ) .AND.
.NOT.LQUERY ) THEN + INFO = -10 END IF * IF( INFO.EQ.0 ) THEN @@ -219,9 +221,6 @@ LWKOPT_HETRS = INT( WORK(1) ) LWKOPT = MAX( LWKOPT_HETRF, LWKOPT_HETRS ) WORK( 1 ) = LWKOPT - IF( LWORK.LT.LWKOPT .AND. .NOT.LQUERY ) THEN - INFO = -10 - END IF END IF * IF( INFO.NE.0 ) THEN diff --git a/lapack-netlib/SRC/chesv_aa_2stage.f b/lapack-netlib/SRC/chesv_aa_2stage.f index 057d9c57a..05f6b7bb7 100644 --- a/lapack-netlib/SRC/chesv_aa_2stage.f +++ b/lapack-netlib/SRC/chesv_aa_2stage.f @@ -105,6 +105,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N, internally *> used to select NB such that LTB >= (3*NB+1)*N. *> @@ -124,7 +125,7 @@ *> *> \param[out] IPIV2 *> \verbatim -*> IPIV is INTEGER array, dimension (N) +*> IPIV2 is INTEGER array, dimension (N) *> On exit, it contains the details of the interchanges, i.e., *> the row and column k of T were interchanged with the *> row and column IPIV(k). @@ -150,6 +151,7 @@ *> *> \param[in] LWORK *> \verbatim +*> LWORK is INTEGER *> The size of WORK. LWORK >= N, internally used to select NB *> such that LWORK >= N*NB. *> @@ -233,19 +235,18 @@ INFO = -3 ELSE IF( LDA.LT.MAX( 1, N ) ) THEN INFO = -5 + ELSE IF( LTB.LT.( 4*N ) .AND. .NOT.TQUERY ) THEN + INFO = -7 ELSE IF( LDB.LT.MAX( 1, N ) ) THEN INFO = -11 + ELSE IF( LWORK.LT.N .AND. .NOT.WQUERY ) THEN + INFO = -13 END IF * IF( INFO.EQ.0 ) THEN CALL CHETRF_AA_2STAGE( UPLO, N, A, LDA, TB, -1, IPIV, $ IPIV2, WORK, -1, INFO ) LWKOPT = INT( WORK(1) ) - IF( LTB.LT.INT( TB(1) ) .AND. .NOT.TQUERY ) THEN - INFO = -7 - ELSE IF( LWORK.LT.LWKOPT .AND. .NOT.WQUERY ) THEN - INFO = -13 - END IF END IF * IF( INFO.NE.0 ) THEN @@ -270,6 +271,8 @@ END IF * WORK( 1 ) = LWKOPT +* + RETURN * * End of CHESV_AA_2STAGE * diff --git a/lapack-netlib/SRC/chetrd_hb2st.F b/lapack-netlib/SRC/chetrd_hb2st.F index b96e5db95..43da45640 100644 --- a/lapack-netlib/SRC/chetrd_hb2st.F +++ b/lapack-netlib/SRC/chetrd_hb2st.F @@ -280,8 +280,8 @@ * .. * .. External Functions .. LOGICAL LSAME - INTEGER ILAENV - EXTERNAL LSAME, ILAENV + INTEGER ILAENV2STAGE + EXTERNAL LSAME, ILAENV2STAGE * .. * .. Executable Statements .. * @@ -297,9 +297,9 @@ * * Determine the block size, the workspace size and the hous size. * - IB = ILAENV( 18, 'CHETRD_HB2ST', VECT, N, KD, -1, -1 ) - LHMIN = ILAENV( 19, 'CHETRD_HB2ST', VECT, N, KD, IB, -1 ) - LWMIN = ILAENV( 20, 'CHETRD_HB2ST', VECT, N, KD, IB, -1 ) + IB = ILAENV2STAGE( 2, 'CHETRD_HB2ST', VECT, N, KD, -1, -1 ) + LHMIN = ILAENV2STAGE( 3, 'CHETRD_HB2ST', VECT, N, KD, IB, -1 ) + LWMIN = ILAENV2STAGE( 4, 'CHETRD_HB2ST', VECT, N, KD, IB, -1 ) * IF( .NOT.AFTERS1 .AND. .NOT.LSAME( STAGE1, 'N' ) ) THEN INFO = -1 @@ -512,7 +512,7 @@ C END IF * * Call the kernel * -#if defined(_OPENMP) +#if defined(_OPENMP) && _OPENMP >= 201307 IF( TTYPE.NE.1 ) THEN !$OMP TASK DEPEND(in:WORK(MYID+SHIFT-1)) !$OMP$ DEPEND(in:WORK(MYID-1)) diff --git a/lapack-netlib/SRC/chetrd_he2hb.f b/lapack-netlib/SRC/chetrd_he2hb.f index fd8c3fbe0..e334532fe 100644 --- a/lapack-netlib/SRC/chetrd_he2hb.f +++ b/lapack-netlib/SRC/chetrd_he2hb.f @@ -285,8 +285,8 @@ * .. * .. External Functions .. LOGICAL LSAME - INTEGER ILAENV - EXTERNAL LSAME, ILAENV + INTEGER ILAENV2STAGE + EXTERNAL LSAME, ILAENV2STAGE * .. * .. Executable Statements .. * @@ -296,7 +296,7 @@ INFO = 0 UPPER = LSAME( UPLO, 'U' ) LQUERY = ( LWORK.EQ.-1 ) - LWMIN = ILAENV( 20, 'CHETRD_HE2HB', '', N, KD, -1, -1 ) + LWMIN = ILAENV2STAGE( 4, 'CHETRD_HE2HB', '', N, KD, -1, -1 ) IF( .NOT.UPPER .AND. 
.NOT.LSAME( UPLO, 'L' ) ) THEN INFO = -1 diff --git a/lapack-netlib/SRC/chetrf_aa_2stage.f b/lapack-netlib/SRC/chetrf_aa_2stage.f index 0fa2ae3a0..ce34d73cc 100644 --- a/lapack-netlib/SRC/chetrf_aa_2stage.f +++ b/lapack-netlib/SRC/chetrf_aa_2stage.f @@ -93,6 +93,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N, internally *> used to select NB such that LTB >= (3*NB+1)*N. *> @@ -112,7 +113,7 @@ *> *> \param[out] IPIV2 *> \verbatim -*> IPIV is INTEGER array, dimension (N) +*> IPIV2 is INTEGER array, dimension (N) *> On exit, it contains the details of the interchanges, i.e., *> the row and column k of T were interchanged with the *> row and column IPIV(k). @@ -125,6 +126,7 @@ *> *> \param[in] LWORK *> \verbatim +*> LWORK is INTEGER *> The size of WORK. LWORK >= N, internally used to select NB *> such that LWORK >= N*NB. *> @@ -658,6 +660,8 @@ c $ (J+1)*NB+1, (J+1)*NB+KB, IPIV, 1 ) * * Factor the band matrix CALL CGBTRF( N, N, NB, NB, TB, LDTB, IPIV2, INFO ) +* + RETURN * * End of CHETRF_AA_2STAGE * diff --git a/lapack-netlib/SRC/chetrs_aa_2stage.f b/lapack-netlib/SRC/chetrs_aa_2stage.f index 3f8576673..05d09275b 100644 --- a/lapack-netlib/SRC/chetrs_aa_2stage.f +++ b/lapack-netlib/SRC/chetrs_aa_2stage.f @@ -87,6 +87,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N. *> \endverbatim *> diff --git a/lapack-netlib/SRC/cla_syamv.f b/lapack-netlib/SRC/cla_syamv.f index e1d3df960..695b5e478 100644 --- a/lapack-netlib/SRC/cla_syamv.f +++ b/lapack-netlib/SRC/cla_syamv.f @@ -241,7 +241,7 @@ INFO = 10 END IF IF( INFO.NE.0 )THEN - CALL XERBLA( 'SSYMV ', INFO ) + CALL XERBLA( 'CLA_SYAMV', INFO ) RETURN END IF * diff --git a/lapack-netlib/SRC/claqr1.f b/lapack-netlib/SRC/claqr1.f index b76bedf60..977947196 100644 --- a/lapack-netlib/SRC/claqr1.f +++ b/lapack-netlib/SRC/claqr1.f @@ -142,6 +142,13 @@ CABS1( CDUM ) = ABS( REAL( CDUM ) ) + ABS( AIMAG( CDUM ) ) * .. * .. Executable Statements .. +* +* Quick return if possible +* + IF( N.NE.2 .AND. N.NE.3 ) THEN + RETURN + END IF +* IF( N.EQ.2 ) THEN S = CABS1( H( 1, 1 )-S2 ) + CABS1( H( 2, 1 ) ) IF( S.EQ.RZERO ) THEN diff --git a/lapack-netlib/SRC/csysv_aa.f b/lapack-netlib/SRC/csysv_aa.f index 9cd669d33..87be734cc 100644 --- a/lapack-netlib/SRC/csysv_aa.f +++ b/lapack-netlib/SRC/csysv_aa.f @@ -221,9 +221,6 @@ LWKOPT_SYTRS = INT( WORK(1) ) LWKOPT = MAX( LWKOPT_SYTRF, LWKOPT_SYTRS ) WORK( 1 ) = LWKOPT - IF( LWORK.LT.LWKOPT .AND. .NOT.LQUERY ) THEN - INFO = -10 - END IF END IF * IF( INFO.NE.0 ) THEN diff --git a/lapack-netlib/SRC/csysv_aa_2stage.f b/lapack-netlib/SRC/csysv_aa_2stage.f index cba57fc3e..a13349824 100644 --- a/lapack-netlib/SRC/csysv_aa_2stage.f +++ b/lapack-netlib/SRC/csysv_aa_2stage.f @@ -105,6 +105,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N, internally *> used to select NB such that LTB >= (3*NB+1)*N. *> @@ -124,7 +125,7 @@ *> *> \param[out] IPIV2 *> \verbatim -*> IPIV is INTEGER array, dimension (N) +*> IPIV2 is INTEGER array, dimension (N) *> On exit, it contains the details of the interchanges, i.e., *> the row and column k of T were interchanged with the *> row and column IPIV(k). @@ -150,6 +151,7 @@ *> *> \param[in] LWORK *> \verbatim +*> LWORK is INTEGER *> The size of WORK. LWORK >= N, internally used to select NB *> such that LWORK >= N*NB. *> @@ -233,19 +235,18 @@ INFO = -3 ELSE IF( LDA.LT.MAX( 1, N ) ) THEN INFO = -5 + ELSE IF( LTB.LT.( 4*N ) .AND. 
.NOT.TQUERY ) THEN + INFO = -7 ELSE IF( LDB.LT.MAX( 1, N ) ) THEN INFO = -11 + ELSE IF( LWORK.LT.N .AND. .NOT.WQUERY ) THEN + INFO = -13 END IF * IF( INFO.EQ.0 ) THEN CALL CSYTRF_AA_2STAGE( UPLO, N, A, LDA, TB, -1, IPIV, $ IPIV2, WORK, -1, INFO ) LWKOPT = INT( WORK(1) ) - IF( LTB.LT.INT( TB(1) ) .AND. .NOT.TQUERY ) THEN - INFO = -7 - ELSE IF( LWORK.LT.LWKOPT .AND. .NOT.WQUERY ) THEN - INFO = -13 - END IF END IF * IF( INFO.NE.0 ) THEN @@ -270,6 +271,8 @@ END IF * WORK( 1 ) = LWKOPT +* + RETURN * * End of CSYSV_AA_2STAGE * diff --git a/lapack-netlib/SRC/csytrf_aa_2stage.f b/lapack-netlib/SRC/csytrf_aa_2stage.f index 0a6bfbe31..0d0bd156c 100644 --- a/lapack-netlib/SRC/csytrf_aa_2stage.f +++ b/lapack-netlib/SRC/csytrf_aa_2stage.f @@ -93,6 +93,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N, internally *> used to select NB such that LTB >= (3*NB+1)*N. *> @@ -112,7 +113,7 @@ *> *> \param[out] IPIV2 *> \verbatim -*> IPIV is INTEGER array, dimension (N) +*> IPIV2 is INTEGER array, dimension (N) *> On exit, it contains the details of the interchanges, i.e., *> the row and column k of T were interchanged with the *> row and column IPIV(k). @@ -125,6 +126,7 @@ *> *> \param[in] LWORK *> \verbatim +*> LWORK is INTEGER *> The size of WORK. LWORK >= N, internally used to select NB *> such that LWORK >= N*NB. *> @@ -662,6 +664,8 @@ c $ (J+1)*NB+1, (J+1)*NB+KB, IPIV, 1 ) * * Factor the band matrix CALL CGBTRF( N, N, NB, NB, TB, LDTB, IPIV2, INFO ) +* + RETURN * * End of CSYTRF_AA_2STAGE * diff --git a/lapack-netlib/SRC/csytri2.f b/lapack-netlib/SRC/csytri2.f index 4c6baaa3e..4bd8e4f99 100644 --- a/lapack-netlib/SRC/csytri2.f +++ b/lapack-netlib/SRC/csytri2.f @@ -96,11 +96,11 @@ *> LWORK is INTEGER *> The dimension of the array WORK. *> WORK is size >= (N+NB+1)*(NB+3) -*> If LDWORK = -1, then a workspace query is assumed; the routine +*> If LWORK = -1, then a workspace query is assumed; the routine *> calculates: *> - the optimal size of the WORK array, returns *> this value as the first entry of the WORK array, -*> - and no error message related to LDWORK is issued by XERBLA. +*> - and no error message related to LWORK is issued by XERBLA. *> \endverbatim *> *> \param[out] INFO @@ -163,7 +163,7 @@ UPPER = LSAME( UPLO, 'U' ) LQUERY = ( LWORK.EQ.-1 ) * Get blocksize - NBMAX = ILAENV( 1, 'CSYTRF', UPLO, N, -1, -1, -1 ) + NBMAX = ILAENV( 1, 'CSYTRI2', UPLO, N, -1, -1, -1 ) IF ( NBMAX .GE. N ) THEN MINSIZE = N ELSE diff --git a/lapack-netlib/SRC/csytrs_aa_2stage.f b/lapack-netlib/SRC/csytrs_aa_2stage.f index 03bccda82..d025c08fe 100644 --- a/lapack-netlib/SRC/csytrs_aa_2stage.f +++ b/lapack-netlib/SRC/csytrs_aa_2stage.f @@ -85,6 +85,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N. *> \endverbatim *> diff --git a/lapack-netlib/SRC/ctrevc3.f b/lapack-netlib/SRC/ctrevc3.f index c06b40477..a134c1a50 100644 --- a/lapack-netlib/SRC/ctrevc3.f +++ b/lapack-netlib/SRC/ctrevc3.f @@ -27,8 +27,8 @@ * .. * .. Array Arguments .. * LOGICAL SELECT( * ) -* REAL RWORK( * ) -* COMPLEX T( LDT, * ), VL( LDVL, * ), VR( LDVR, * ), +* REAL RWORK( * ) +* COMPLEX T( LDT, * ), VL( LDVL, * ), VR( LDVR, * ), * $ WORK( * ) * .. * @@ -258,17 +258,17 @@ * .. * .. Array Arguments .. LOGICAL SELECT( * ) - REAL RWORK( * ) - COMPLEX T( LDT, * ), VL( LDVL, * ), VR( LDVR, * ), + REAL RWORK( * ) + COMPLEX T( LDT, * ), VL( LDVL, * ), VR( LDVR, * ), $ WORK( * ) * .. * * ===================================================================== * * .. 
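The ?sysv_aa_2stage validation rework above follows the standard LAPACK workspace-query protocol: LTB = -1 or LWORK = -1 means "report the optimal size and return", so the fixed lower bounds (LTB >= 4*N, LWORK >= N) are now rejected up front, before the query call, instead of being compared against the queried optimum afterwards. A compact C sketch of that protocol (solver and its sizes are illustrative, not a LAPACK routine):

#include <stdio.h>

/* Illustration of the workspace-query protocol the patch preserves:
 * lwork == -1 asks for the optimal size in work[0] and does no work. */
static int solver(int n, double *work, int lwork, int *info) {
    int lquery = (lwork == -1);
    *info = 0;
    if (n < 0)
        *info = -1;
    else if (lwork < n && !lquery)      /* fixed minimum, checked early */
        *info = -3;
    if (*info != 0)
        return *info;                   /* caller would XERBLA here */
    work[0] = (double)(2 * n);          /* report the "optimal" size */
    if (lquery)
        return 0;                       /* query: report and return  */
    /* ... the actual factorization would run here ... */
    return 0;
}

int main(void) {
    double wk[1];
    int info;
    solver(100, wk, -1, &info);         /* workspace query */
    printf("optimal lwork = %d, info = %d\n", (int)wk[0], info);
    return 0;
}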
Parameters .. - REAL ZERO, ONE + REAL ZERO, ONE PARAMETER ( ZERO = 0.0E+0, ONE = 1.0E+0 ) - COMPLEX CZERO, CONE + COMPLEX CZERO, CONE PARAMETER ( CZERO = ( 0.0E+0, 0.0E+0 ), $ CONE = ( 1.0E+0, 0.0E+0 ) ) INTEGER NBMIN, NBMAX @@ -277,13 +277,13 @@ * .. Local Scalars .. LOGICAL ALLV, BOTHV, LEFTV, LQUERY, OVER, RIGHTV, SOMEV INTEGER I, II, IS, J, K, KI, IV, MAXWRK, NB - REAL OVFL, REMAX, SCALE, SMIN, SMLNUM, ULP, UNFL - COMPLEX CDUM + REAL OVFL, REMAX, SCALE, SMIN, SMLNUM, ULP, UNFL + COMPLEX CDUM * .. * .. External Functions .. LOGICAL LSAME INTEGER ILAENV, ICAMAX - REAL SLAMCH, SCASUM + REAL SLAMCH, SCASUM EXTERNAL LSAME, ILAENV, ICAMAX, SLAMCH, SCASUM * .. * .. External Subroutines .. diff --git a/lapack-netlib/SRC/dgelqt.f b/lapack-netlib/SRC/dgelqt.f index 2124f3dc3..5b4ee65b5 100644 --- a/lapack-netlib/SRC/dgelqt.f +++ b/lapack-netlib/SRC/dgelqt.f @@ -158,7 +158,7 @@ INTEGER I, IB, IINFO, K * .. * .. External Subroutines .. - EXTERNAL DGEQRT2, DGELQT3, DGEQRT3, DLARFB, XERBLA + EXTERNAL DGELQT3, DLARFB, XERBLA * .. * .. Executable Statements .. * diff --git a/lapack-netlib/SRC/dla_syamv.f b/lapack-netlib/SRC/dla_syamv.f index 29566a6e9..bb6dbe288 100644 --- a/lapack-netlib/SRC/dla_syamv.f +++ b/lapack-netlib/SRC/dla_syamv.f @@ -230,7 +230,7 @@ INFO = 10 END IF IF( INFO.NE.0 )THEN - CALL XERBLA( 'DSYMV ', INFO ) + CALL XERBLA( 'DLA_SYAMV', INFO ) RETURN END IF * diff --git a/lapack-netlib/SRC/dlaqr1.f b/lapack-netlib/SRC/dlaqr1.f index 81a462fb3..795b072ab 100644 --- a/lapack-netlib/SRC/dlaqr1.f +++ b/lapack-netlib/SRC/dlaqr1.f @@ -147,6 +147,13 @@ INTRINSIC ABS * .. * .. Executable Statements .. +* +* Quick return if possible +* + IF( N.NE.2 .AND. N.NE.3 ) THEN + RETURN + END IF +* IF( N.EQ.2 ) THEN S = ABS( H( 1, 1 )-SR2 ) + ABS( SI2 ) + ABS( H( 2, 1 ) ) IF( S.EQ.ZERO ) THEN diff --git a/lapack-netlib/SRC/dsysv_aa.f b/lapack-netlib/SRC/dsysv_aa.f index cbccd5e65..7192928c6 100644 --- a/lapack-netlib/SRC/dsysv_aa.f +++ b/lapack-netlib/SRC/dsysv_aa.f @@ -221,9 +221,6 @@ LWKOPT_SYTRS = INT( WORK(1) ) LWKOPT = MAX( LWKOPT_SYTRF, LWKOPT_SYTRS ) WORK( 1 ) = LWKOPT - IF( LWORK.LT.LWKOPT .AND. .NOT.LQUERY ) THEN - INFO = -10 - END IF END IF * IF( INFO.NE.0 ) THEN diff --git a/lapack-netlib/SRC/dsysv_aa_2stage.f b/lapack-netlib/SRC/dsysv_aa_2stage.f index ac3c77d76..05e538f0b 100644 --- a/lapack-netlib/SRC/dsysv_aa_2stage.f +++ b/lapack-netlib/SRC/dsysv_aa_2stage.f @@ -107,6 +107,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N, internally *> used to select NB such that LTB >= (3*NB+1)*N. *> @@ -126,7 +127,7 @@ *> *> \param[out] IPIV2 *> \verbatim -*> IPIV is INTEGER array, dimension (N) +*> IPIV2 is INTEGER array, dimension (N) *> On exit, it contains the details of the interchanges, i.e., *> the row and column k of T were interchanged with the *> row and column IPIV(k). @@ -152,6 +153,7 @@ *> *> \param[in] LWORK *> \verbatim +*> LWORK is INTEGER *> The size of WORK. LWORK >= N, internally used to select NB *> such that LWORK >= N*NB. *> @@ -235,19 +237,18 @@ INFO = -3 ELSE IF( LDA.LT.MAX( 1, N ) ) THEN INFO = -5 + ELSE IF( LTB.LT.( 4*N ) .AND. .NOT.TQUERY ) THEN + INFO = -7 ELSE IF( LDB.LT.MAX( 1, N ) ) THEN INFO = -11 + ELSE IF( LWORK.LT.N .AND. .NOT.WQUERY ) THEN + INFO = -13 END IF * IF( INFO.EQ.0 ) THEN CALL DSYTRF_AA_2STAGE( UPLO, N, A, LDA, TB, -1, IPIV, $ IPIV2, WORK, -1, INFO ) LWKOPT = INT( WORK(1) ) - IF( LTB.LT.INT( TB(1) ) .AND. .NOT.TQUERY ) THEN - INFO = -7 - ELSE IF( LWORK.LT.LWKOPT .AND. 
.NOT.WQUERY ) THEN - INFO = -13 - END IF END IF * IF( INFO.NE.0 ) THEN diff --git a/lapack-netlib/SRC/dsytrd_sb2st.F b/lapack-netlib/SRC/dsytrd_sb2st.F index 1e860004e..4d81fe226 100644 --- a/lapack-netlib/SRC/dsytrd_sb2st.F +++ b/lapack-netlib/SRC/dsytrd_sb2st.F @@ -277,8 +277,8 @@ * .. * .. External Functions .. LOGICAL LSAME - INTEGER ILAENV - EXTERNAL LSAME, ILAENV + INTEGER ILAENV2STAGE + EXTERNAL LSAME, ILAENV2STAGE * .. * .. Executable Statements .. * @@ -294,9 +294,9 @@ * * Determine the block size, the workspace size and the hous size. * - IB = ILAENV( 18, 'DSYTRD_SB2ST', VECT, N, KD, -1, -1 ) - LHMIN = ILAENV( 19, 'DSYTRD_SB2ST', VECT, N, KD, IB, -1 ) - LWMIN = ILAENV( 20, 'DSYTRD_SB2ST', VECT, N, KD, IB, -1 ) + IB = ILAENV2STAGE( 2, 'DSYTRD_SB2ST', VECT, N, KD, -1, -1 ) + LHMIN = ILAENV2STAGE( 3, 'DSYTRD_SB2ST', VECT, N, KD, IB, -1 ) + LWMIN = ILAENV2STAGE( 4, 'DSYTRD_SB2ST', VECT, N, KD, IB, -1 ) * IF( .NOT.AFTERS1 .AND. .NOT.LSAME( STAGE1, 'N' ) ) THEN INFO = -1 @@ -481,7 +481,7 @@ * * Call the kernel * -#if defined(_OPENMP) +#if defined(_OPENMP) && _OPENMP >= 201307 IF( TTYPE.NE.1 ) THEN !$OMP TASK DEPEND(in:WORK(MYID+SHIFT-1)) !$OMP$ DEPEND(in:WORK(MYID-1)) diff --git a/lapack-netlib/SRC/dsytrd_sy2sb.f b/lapack-netlib/SRC/dsytrd_sy2sb.f index 85337f792..e0a5debc5 100644 --- a/lapack-netlib/SRC/dsytrd_sy2sb.f +++ b/lapack-netlib/SRC/dsytrd_sy2sb.f @@ -285,8 +285,8 @@ * .. * .. External Functions .. LOGICAL LSAME - INTEGER ILAENV - EXTERNAL LSAME, ILAENV + INTEGER ILAENV2STAGE + EXTERNAL LSAME, ILAENV2STAGE * .. * .. Executable Statements .. * @@ -296,7 +296,7 @@ INFO = 0 UPPER = LSAME( UPLO, 'U' ) LQUERY = ( LWORK.EQ.-1 ) - LWMIN = ILAENV( 20, 'DSYTRD_SY2SB', '', N, KD, -1, -1 ) + LWMIN = ILAENV2STAGE( 4, 'DSYTRD_SY2SB', '', N, KD, -1, -1 ) IF( .NOT.UPPER .AND. .NOT.LSAME( UPLO, 'L' ) ) THEN INFO = -1 diff --git a/lapack-netlib/SRC/dsytrf_aa_2stage.f b/lapack-netlib/SRC/dsytrf_aa_2stage.f index 299130564..25fc1a2eb 100644 --- a/lapack-netlib/SRC/dsytrf_aa_2stage.f +++ b/lapack-netlib/SRC/dsytrf_aa_2stage.f @@ -93,6 +93,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N, internally *> used to select NB such that LTB >= (3*NB+1)*N. *> @@ -109,6 +110,7 @@ *> *> \param[in] LWORK *> \verbatim +*> LWORK is INTEGER *> The size of WORK. LWORK >= N, internally used to select NB *> such that LWORK >= N*NB. *> @@ -128,10 +130,10 @@ *> *> \param[out] IPIV2 *> \verbatim -*> IPIV is INTEGER array, dimension (N) +*> IPIV2 is INTEGER array, dimension (N) *> On exit, it contains the details of the interchanges, i.e., *> the row and column k of T were interchanged with the -*> row and column IPIV(k). +*> row and column IPIV2(k). *> \endverbatim *> *> \param[out] INFO @@ -191,7 +193,7 @@ EXTERNAL LSAME, ILAENV * .. * .. External Subroutines .. - EXTERNAL XERBLA, DCOPY, DLACGV, DLACPY, + EXTERNAL XERBLA, DCOPY, DLACPY, $ DLASET, DGBTRF, DGEMM, DGETRF, $ DSYGST, DSWAP, DTRSM * .. @@ -641,6 +643,8 @@ c $ (J+1)*NB+1, (J+1)*NB+KB, IPIV, 1 ) * * Factor the band matrix CALL DGBTRF( N, N, NB, NB, TB, LDTB, IPIV2, INFO ) +* + RETURN * * End of DSYTRF_AA_2STAGE * diff --git a/lapack-netlib/SRC/dsytri2.f b/lapack-netlib/SRC/dsytri2.f index 9aa21a854..23f8b9fa2 100644 --- a/lapack-netlib/SRC/dsytri2.f +++ b/lapack-netlib/SRC/dsytri2.f @@ -96,11 +96,11 @@ *> LWORK is INTEGER *> The dimension of the array WORK. 
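In the ?sytrd_sb2st/?sytrd_sy2sb hunks above, the 2-stage tuning parameters move from overloaded ILAENV specs 18-20 to the dedicated ILAENV2STAGE inquiry with its own spec numbering (2 = block size IB, 3 = LHMIN, 4 = LWMIN). A toy C analogue of such a dispatcher; the returned sizes are made-up placeholders, not LAPACK's actual tuning formulas:

#include <stdio.h>

/* Toy analogue of an ILAENV2STAGE-style inquiry: one entry point,
 * dispatching on ispec. All returned sizes are placeholders. */
static int tuning(int ispec, int n, int kd, int ib) {
    (void)kd;   /* a real implementation would also use the bandwidth */
    switch (ispec) {
    case 2:  return 64;                  /* IB: kernel block size    */
    case 3:  return 4 * n;               /* LHMIN: hous array length */
    case 4:  return (2 * ib + 1) * n;    /* LWMIN: workspace length  */
    default: return -1;                  /* unknown spec             */
    }
}

int main(void) {
    int n = 1000, kd = 8;
    int ib = tuning(2, n, kd, -1);
    printf("ib=%d lhmin=%d lwmin=%d\n",
           ib, tuning(3, n, kd, ib), tuning(4, n, kd, ib));
    return 0;
}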
*> WORK is size >= (N+NB+1)*(NB+3) -*> If LDWORK = -1, then a workspace query is assumed; the routine +*> If LWORK = -1, then a workspace query is assumed; the routine *> calculates: *> - the optimal size of the WORK array, returns *> this value as the first entry of the WORK array, -*> - and no error message related to LDWORK is issued by XERBLA. +*> - and no error message related to LWORK is issued by XERBLA. *> \endverbatim *> *> \param[out] INFO @@ -163,7 +163,7 @@ UPPER = LSAME( UPLO, 'U' ) LQUERY = ( LWORK.EQ.-1 ) * Get blocksize - NBMAX = ILAENV( 1, 'DSYTRF', UPLO, N, -1, -1, -1 ) + NBMAX = ILAENV( 1, 'DSYTRI2', UPLO, N, -1, -1, -1 ) IF ( NBMAX .GE. N ) THEN MINSIZE = N ELSE diff --git a/lapack-netlib/SRC/dsytrs_aa_2stage.f b/lapack-netlib/SRC/dsytrs_aa_2stage.f index caff5d4ad..bb283cb95 100644 --- a/lapack-netlib/SRC/dsytrs_aa_2stage.f +++ b/lapack-netlib/SRC/dsytrs_aa_2stage.f @@ -85,6 +85,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N. *> \endverbatim *> diff --git a/lapack-netlib/SRC/dtrevc3.f b/lapack-netlib/SRC/dtrevc3.f index 745f636d0..957baf4f0 100644 --- a/lapack-netlib/SRC/dtrevc3.f +++ b/lapack-netlib/SRC/dtrevc3.f @@ -45,9 +45,9 @@ *> The right eigenvector x and the left eigenvector y of T corresponding *> to an eigenvalue w are defined by: *> -*> T*x = w*x, (y**H)*T = w*(y**H) +*> T*x = w*x, (y**T)*T = w*(y**T) *> -*> where y**H denotes the conjugate transpose of y. +*> where y**T denotes the transpose of the vector y. *> The eigenvalues are not input to this routine, but are read directly *> from the diagonal blocks of T. *> diff --git a/lapack-netlib/SRC/iparmq.f b/lapack-netlib/SRC/iparmq.f index e576e0db0..a9212b3e0 100644 --- a/lapack-netlib/SRC/iparmq.f +++ b/lapack-netlib/SRC/iparmq.f @@ -104,13 +104,13 @@ *> *> \param[in] NAME *> \verbatim -*> NAME is character string +*> NAME is CHARACTER string *> Name of the calling subroutine *> \endverbatim *> *> \param[in] OPTS *> \verbatim -*> OPTS is character string +*> OPTS is CHARACTER string *> This is a concatenation of the string arguments to *> TTQRE. *> \endverbatim diff --git a/lapack-netlib/SRC/sgelss.f b/lapack-netlib/SRC/sgelss.f index 29380d4dc..84a882d2e 100644 --- a/lapack-netlib/SRC/sgelss.f +++ b/lapack-netlib/SRC/sgelss.f @@ -407,7 +407,7 @@ * Matrix all zero. Return zero solution. * CALL SLASET( 'F', MAX( M, N ), NRHS, ZERO, ZERO, B, LDB ) - CALL SLASET( 'F', MINMN, 1, ZERO, ZERO, S, 1 ) + CALL SLASET( 'F', MINMN, 1, ZERO, ZERO, S, MINMN ) RANK = 0 GO TO 70 END IF diff --git a/lapack-netlib/SRC/sla_syamv.f b/lapack-netlib/SRC/sla_syamv.f index d40e7bd95..4459f4d8b 100644 --- a/lapack-netlib/SRC/sla_syamv.f +++ b/lapack-netlib/SRC/sla_syamv.f @@ -230,7 +230,7 @@ INFO = 10 END IF IF( INFO.NE.0 )THEN - CALL XERBLA( 'SSYMV ', INFO ) + CALL XERBLA( 'SLA_SYAMV', INFO ) RETURN END IF * diff --git a/lapack-netlib/SRC/slaqr1.f b/lapack-netlib/SRC/slaqr1.f index 7d7d851ee..2de33849d 100644 --- a/lapack-netlib/SRC/slaqr1.f +++ b/lapack-netlib/SRC/slaqr1.f @@ -147,6 +147,13 @@ INTRINSIC ABS * .. * .. Executable Statements .. +* +* Quick return if possible +* + IF( N.NE.2 .AND. 
N.NE.3 ) THEN + RETURN + END IF +* IF( N.EQ.2 ) THEN S = ABS( H( 1, 1 )-SR2 ) + ABS( SI2 ) + ABS( H( 2, 1 ) ) IF( S.EQ.ZERO ) THEN diff --git a/lapack-netlib/SRC/ssysv_aa.f b/lapack-netlib/SRC/ssysv_aa.f index abf52b143..e470f5883 100644 --- a/lapack-netlib/SRC/ssysv_aa.f +++ b/lapack-netlib/SRC/ssysv_aa.f @@ -220,9 +220,6 @@ LWKOPT_SYTRS = INT( WORK(1) ) LWKOPT = MAX( LWKOPT_SYTRF, LWKOPT_SYTRS ) WORK( 1 ) = LWKOPT - IF( LWORK.LT.LWKOPT .AND. .NOT.LQUERY ) THEN - INFO = -10 - END IF END IF * IF( INFO.NE.0 ) THEN diff --git a/lapack-netlib/SRC/ssysv_aa_2stage.f b/lapack-netlib/SRC/ssysv_aa_2stage.f index a738c7415..43d937141 100644 --- a/lapack-netlib/SRC/ssysv_aa_2stage.f +++ b/lapack-netlib/SRC/ssysv_aa_2stage.f @@ -106,6 +106,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N, internally *> used to select NB such that LTB >= (3*NB+1)*N. *> @@ -125,7 +126,7 @@ *> *> \param[out] IPIV2 *> \verbatim -*> IPIV is INTEGER array, dimension (N) +*> IPIV2 is INTEGER array, dimension (N) *> On exit, it contains the details of the interchanges, i.e., *> the row and column k of T were interchanged with the *> row and column IPIV(k). @@ -151,6 +152,7 @@ *> *> \param[in] LWORK *> \verbatim +*> LWORK is INTEGER *> The size of WORK. LWORK >= N, internally used to select NB *> such that LWORK >= N*NB. *> @@ -234,19 +236,18 @@ INFO = -3 ELSE IF( LDA.LT.MAX( 1, N ) ) THEN INFO = -5 + ELSE IF( LTB.LT.( 4*N ) .AND. .NOT.TQUERY ) THEN + INFO = -7 ELSE IF( LDB.LT.MAX( 1, N ) ) THEN INFO = -11 + ELSE IF( LWORK.LT.N .AND. .NOT.WQUERY ) THEN + INFO = -13 END IF * IF( INFO.EQ.0 ) THEN CALL SSYTRF_AA_2STAGE( UPLO, N, A, LDA, TB, -1, IPIV, $ IPIV2, WORK, -1, INFO ) LWKOPT = INT( WORK(1) ) - IF( LTB.LT.INT( TB(1) ) .AND. .NOT.TQUERY ) THEN - INFO = -7 - ELSE IF( LWORK.LT.LWKOPT .AND. .NOT.WQUERY ) THEN - INFO = -13 - END IF END IF * IF( INFO.NE.0 ) THEN diff --git a/lapack-netlib/SRC/ssytrd_sb2st.F b/lapack-netlib/SRC/ssytrd_sb2st.F index 891ec9b53..0df1173e4 100644 --- a/lapack-netlib/SRC/ssytrd_sb2st.F +++ b/lapack-netlib/SRC/ssytrd_sb2st.F @@ -277,8 +277,8 @@ * .. * .. External Functions .. LOGICAL LSAME - INTEGER ILAENV - EXTERNAL LSAME, ILAENV + INTEGER ILAENV2STAGE + EXTERNAL LSAME, ILAENV2STAGE * .. * .. Executable Statements .. * @@ -294,9 +294,9 @@ * * Determine the block size, the workspace size and the hous size. * - IB = ILAENV( 18, 'SSYTRD_SB2ST', VECT, N, KD, -1, -1 ) - LHMIN = ILAENV( 19, 'SSYTRD_SB2ST', VECT, N, KD, IB, -1 ) - LWMIN = ILAENV( 20, 'SSYTRD_SB2ST', VECT, N, KD, IB, -1 ) + IB = ILAENV2STAGE( 2, 'SSYTRD_SB2ST', VECT, N, KD, -1, -1 ) + LHMIN = ILAENV2STAGE( 3, 'SSYTRD_SB2ST', VECT, N, KD, IB, -1 ) + LWMIN = ILAENV2STAGE( 4, 'SSYTRD_SB2ST', VECT, N, KD, IB, -1 ) * IF( .NOT.AFTERS1 .AND. .NOT.LSAME( STAGE1, 'N' ) ) THEN INFO = -1 @@ -481,7 +481,7 @@ * * Call the kernel * -#if defined(_OPENMP) +#if defined(_OPENMP) && _OPENMP >= 201307 IF( TTYPE.NE.1 ) THEN !$OMP TASK DEPEND(in:WORK(MYID+SHIFT-1)) !$OMP$ DEPEND(in:WORK(MYID-1)) diff --git a/lapack-netlib/SRC/ssytrd_sy2sb.f b/lapack-netlib/SRC/ssytrd_sy2sb.f index c01fe3598..272876700 100644 --- a/lapack-netlib/SRC/ssytrd_sy2sb.f +++ b/lapack-netlib/SRC/ssytrd_sy2sb.f @@ -285,8 +285,8 @@ * .. * .. External Functions .. LOGICAL LSAME - INTEGER ILAENV - EXTERNAL LSAME, ILAENV + INTEGER ILAENV2STAGE + EXTERNAL LSAME, ILAENV2STAGE * .. * .. Executable Statements .. 
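The ?laqr1 hunks add the same quick-return guard in all four precisions: the routine is specified only for n = 2 or n = 3, so any other n now returns immediately instead of falling through both branches with v left untouched. The guard pattern in a runnable C miniature (laqr1_guard is illustrative, not the LAPACK routine):

#include <stdio.h>

/* Miniature of the added guard: only n == 2 and n == 3 are defined. */
static int laqr1_guard(int n, double *v) {
    if (n != 2 && n != 3)        /* quick return if possible */
        return 0;                /* v deliberately untouched */
    if (n == 2) { v[0] = 1.0; v[1] = 2.0; }
    else        { v[0] = 1.0; v[1] = 2.0; v[2] = 3.0; }
    return 1;
}

int main(void) {
    double v[3] = {0};
    printf("n=5 handled: %d\n", laqr1_guard(5, v)); /* 0: early return */
    printf("n=2 handled: %d\n", laqr1_guard(2, v)); /* 1 */
    return 0;
}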
* @@ -296,7 +296,7 @@ INFO = 0 UPPER = LSAME( UPLO, 'U' ) LQUERY = ( LWORK.EQ.-1 ) - LWMIN = ILAENV( 20, 'SSYTRD_SY2SB', '', N, KD, -1, -1 ) + LWMIN = ILAENV2STAGE( 4, 'SSYTRD_SY2SB', '', N, KD, -1, -1 ) IF( .NOT.UPPER .AND. .NOT.LSAME( UPLO, 'L' ) ) THEN INFO = -1 diff --git a/lapack-netlib/SRC/ssytrf_aa_2stage.f b/lapack-netlib/SRC/ssytrf_aa_2stage.f index be6809dfa..0e0f6edb7 100644 --- a/lapack-netlib/SRC/ssytrf_aa_2stage.f +++ b/lapack-netlib/SRC/ssytrf_aa_2stage.f @@ -93,6 +93,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N, internally *> used to select NB such that LTB >= (3*NB+1)*N. *> @@ -112,7 +113,7 @@ *> *> \param[out] IPIV2 *> \verbatim -*> IPIV is INTEGER array, dimension (N) +*> IPIV2 is INTEGER array, dimension (N) *> On exit, it contains the details of the interchanges, i.e., *> the row and column k of T were interchanged with the *> row and column IPIV(k). @@ -125,6 +126,7 @@ *> *> \param[in] LWORK *> \verbatim +*> LWORK is INTEGER *> The size of WORK. LWORK >= N, internally used to select NB *> such that LWORK >= N*NB. *> @@ -191,7 +193,7 @@ EXTERNAL LSAME, ILAENV * .. * .. External Subroutines .. - EXTERNAL XERBLA, SCOPY, SLACGV, SLACPY, + EXTERNAL XERBLA, SCOPY, SLACPY, $ SLASET, SGBTRF, SGEMM, SGETRF, $ SSYGST, SSWAP, STRSM * .. @@ -641,6 +643,8 @@ c $ (J+1)*NB+1, (J+1)*NB+KB, IPIV, 1 ) * * Factor the band matrix CALL SGBTRF( N, N, NB, NB, TB, LDTB, IPIV2, INFO ) +* + RETURN * * End of SSYTRF_AA_2STAGE * diff --git a/lapack-netlib/SRC/ssytri2.f b/lapack-netlib/SRC/ssytri2.f index 97b539005..4b9ea4e7b 100644 --- a/lapack-netlib/SRC/ssytri2.f +++ b/lapack-netlib/SRC/ssytri2.f @@ -96,11 +96,11 @@ *> LWORK is INTEGER *> The dimension of the array WORK. *> WORK is size >= (N+NB+1)*(NB+3) -*> If LDWORK = -1, then a workspace query is assumed; the routine +*> If LWORK = -1, then a workspace query is assumed; the routine *> calculates: *> - the optimal size of the WORK array, returns *> this value as the first entry of the WORK array, -*> - and no error message related to LDWORK is issued by XERBLA. +*> - and no error message related to LWORK is issued by XERBLA. *> \endverbatim *> *> \param[out] INFO diff --git a/lapack-netlib/SRC/ssytrs_aa_2stage.f b/lapack-netlib/SRC/ssytrs_aa_2stage.f index c9c7181f2..d271b9481 100644 --- a/lapack-netlib/SRC/ssytrs_aa_2stage.f +++ b/lapack-netlib/SRC/ssytrs_aa_2stage.f @@ -85,6 +85,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N. *> \endverbatim *> diff --git a/lapack-netlib/SRC/strevc3.f b/lapack-netlib/SRC/strevc3.f index 0df1189f0..525978071 100644 --- a/lapack-netlib/SRC/strevc3.f +++ b/lapack-netlib/SRC/strevc3.f @@ -27,7 +27,7 @@ * .. * .. Array Arguments .. * LOGICAL SELECT( * ) -* REAL T( LDT, * ), VL( LDVL, * ), VR( LDVR, * ), +* REAL T( LDT, * ), VL( LDVL, * ), VR( LDVR, * ), * $ WORK( * ) * .. * @@ -45,9 +45,9 @@ *> The right eigenvector x and the left eigenvector y of T corresponding *> to an eigenvalue w are defined by: *> -*> T*x = w*x, (y**H)*T = w*(y**H) +*> T*x = w*x, (y**T)*T = w*(y**T) *> -*> where y**H denotes the conjugate transpose of y. +*> where y**T denotes the transpose of the vector y. *> The eigenvalues are not input to this routine, but are read directly *> from the diagonal blocks of T. *> @@ -251,14 +251,14 @@ * .. * .. Array Arguments .. LOGICAL SELECT( * ) - REAL T( LDT, * ), VL( LDVL, * ), VR( LDVR, * ), + REAL T( LDT, * ), VL( LDVL, * ), VR( LDVR, * ), $ WORK( * ) * .. 
* * ===================================================================== * * .. Parameters .. - REAL ZERO, ONE + REAL ZERO, ONE PARAMETER ( ZERO = 0.0E+0, ONE = 1.0E+0 ) INTEGER NBMIN, NBMAX PARAMETER ( NBMIN = 8, NBMAX = 128 ) @@ -268,7 +268,7 @@ $ RIGHTV, SOMEV INTEGER I, IERR, II, IP, IS, J, J1, J2, JNXT, K, KI, $ IV, MAXWRK, NB, KI2 - REAL BETA, BIGNUM, EMAX, OVFL, REC, REMAX, SCALE, + REAL BETA, BIGNUM, EMAX, OVFL, REC, REMAX, SCALE, $ SMIN, SMLNUM, ULP, UNFL, VCRIT, VMAX, WI, WR, $ XNORM * .. diff --git a/lapack-netlib/SRC/zgejsv.f b/lapack-netlib/SRC/zgejsv.f index e8418c680..d553da90b 100644 --- a/lapack-netlib/SRC/zgejsv.f +++ b/lapack-netlib/SRC/zgejsv.f @@ -704,7 +704,7 @@ LWSVDJ = MAX( 2 * N, 1 ) LWSVDJV = MAX( 2 * N, 1 ) * .. minimal REAL workspace length for ZGEQP3, ZPOCON, ZGESVJ - LRWQP3 = N + LRWQP3 = 2 * N LRWCON = N LRWSVDJ = N IF ( LQUERY ) THEN @@ -942,7 +942,7 @@ END IF END IF MINWRK = MAX( 2, MINWRK ) - OPTWRK = MAX( 2, OPTWRK ) + OPTWRK = MAX( MINWRK, OPTWRK ) IF ( LWORK .LT. MINWRK .AND. (.NOT.LQUERY) ) INFO = - 17 IF ( LRWORK .LT. MINRWRK .AND. (.NOT.LQUERY) ) INFO = - 19 END IF diff --git a/lapack-netlib/SRC/zhesv_aa.f b/lapack-netlib/SRC/zhesv_aa.f index bbd0fdff4..8511f0e7d 100644 --- a/lapack-netlib/SRC/zhesv_aa.f +++ b/lapack-netlib/SRC/zhesv_aa.f @@ -209,6 +209,8 @@ INFO = -5 ELSE IF( LDB.LT.MAX( 1, N ) ) THEN INFO = -8 + ELSE IF( LWORK.LT.MAX(2*N, 3*N-2) .AND. .NOT.LQUERY ) THEN + INFO = -10 END IF * IF( INFO.EQ.0 ) THEN @@ -219,9 +221,6 @@ LWKOPT_HETRS = INT( WORK(1) ) LWKOPT = MAX( LWKOPT_HETRF, LWKOPT_HETRS ) WORK( 1 ) = LWKOPT - IF( LWORK.LT.LWKOPT .AND. .NOT.LQUERY ) THEN - INFO = -10 - END IF END IF * IF( INFO.NE.0 ) THEN diff --git a/lapack-netlib/SRC/zhesv_aa_2stage.f b/lapack-netlib/SRC/zhesv_aa_2stage.f index a34440029..ed221dc69 100644 --- a/lapack-netlib/SRC/zhesv_aa_2stage.f +++ b/lapack-netlib/SRC/zhesv_aa_2stage.f @@ -106,6 +106,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N, internally *> used to select NB such that LTB >= (3*NB+1)*N. *> @@ -125,7 +126,7 @@ *> *> \param[out] IPIV2 *> \verbatim -*> IPIV is INTEGER array, dimension (N) +*> IPIV2 is INTEGER array, dimension (N) *> On exit, it contains the details of the interchanges, i.e., *> the row and column k of T were interchanged with the *> row and column IPIV(k). @@ -151,6 +152,7 @@ *> *> \param[in] LWORK *> \verbatim +*> LWORK is INTEGER *> The size of WORK. LWORK >= N, internally used to select NB *> such that LWORK >= N*NB. *> @@ -240,19 +242,18 @@ INFO = -3 ELSE IF( LDA.LT.MAX( 1, N ) ) THEN INFO = -5 + ELSE IF( LTB.LT.( 4*N ) .AND. .NOT.TQUERY ) THEN + INFO = -7 ELSE IF( LDB.LT.MAX( 1, N ) ) THEN INFO = -11 + ELSE IF( LWORK.LT.N .AND. .NOT.WQUERY ) THEN + INFO = -13 END IF * IF( INFO.EQ.0 ) THEN CALL ZHETRF_AA_2STAGE( UPLO, N, A, LDA, TB, -1, IPIV, $ IPIV2, WORK, -1, INFO ) LWKOPT = INT( WORK(1) ) - IF( LTB.LT.INT( TB(1) ) .AND. .NOT.TQUERY ) THEN - INFO = -7 - ELSE IF( LWORK.LT.LWKOPT .AND. .NOT.WQUERY ) THEN - INFO = -13 - END IF END IF * IF( INFO.NE.0 ) THEN diff --git a/lapack-netlib/SRC/zhetrd_hb2st.F b/lapack-netlib/SRC/zhetrd_hb2st.F index 07390623a..86122cccc 100644 --- a/lapack-netlib/SRC/zhetrd_hb2st.F +++ b/lapack-netlib/SRC/zhetrd_hb2st.F @@ -280,8 +280,8 @@ * .. * .. External Functions .. LOGICAL LSAME - INTEGER ILAENV - EXTERNAL LSAME, ILAENV + INTEGER ILAENV2STAGE + EXTERNAL LSAME, ILAENV2STAGE * .. * .. Executable Statements .. 
* @@ -297,9 +297,9 @@ * * Determine the block size, the workspace size and the hous size. * - IB = ILAENV( 18, 'ZHETRD_HB2ST', VECT, N, KD, -1, -1 ) - LHMIN = ILAENV( 19, 'ZHETRD_HB2ST', VECT, N, KD, IB, -1 ) - LWMIN = ILAENV( 20, 'ZHETRD_HB2ST', VECT, N, KD, IB, -1 ) + IB = ILAENV2STAGE( 2, 'ZHETRD_HB2ST', VECT, N, KD, -1, -1 ) + LHMIN = ILAENV2STAGE( 3, 'ZHETRD_HB2ST', VECT, N, KD, IB, -1 ) + LWMIN = ILAENV2STAGE( 4, 'ZHETRD_HB2ST', VECT, N, KD, IB, -1 ) * IF( .NOT.AFTERS1 .AND. .NOT.LSAME( STAGE1, 'N' ) ) THEN INFO = -1 @@ -512,7 +512,8 @@ C END IF * * Call the kernel * -#if defined(_OPENMP) +#if defined(_OPENMP) && _OPENMP >= 201307 + IF( TTYPE.NE.1 ) THEN !$OMP TASK DEPEND(in:WORK(MYID+SHIFT-1)) !$OMP$ DEPEND(in:WORK(MYID-1)) diff --git a/lapack-netlib/SRC/zhetrd_he2hb.f b/lapack-netlib/SRC/zhetrd_he2hb.f index e35578b42..e33bf4b2b 100644 --- a/lapack-netlib/SRC/zhetrd_he2hb.f +++ b/lapack-netlib/SRC/zhetrd_he2hb.f @@ -285,8 +285,8 @@ * .. * .. External Functions .. LOGICAL LSAME - INTEGER ILAENV - EXTERNAL LSAME, ILAENV + INTEGER ILAENV2STAGE + EXTERNAL LSAME, ILAENV2STAGE * .. * .. Executable Statements .. * @@ -296,7 +296,7 @@ INFO = 0 UPPER = LSAME( UPLO, 'U' ) LQUERY = ( LWORK.EQ.-1 ) - LWMIN = ILAENV( 20, 'ZHETRD_HE2HB', '', N, KD, -1, -1 ) + LWMIN = ILAENV2STAGE( 4, 'ZHETRD_HE2HB', '', N, KD, -1, -1 ) IF( .NOT.UPPER .AND. .NOT.LSAME( UPLO, 'L' ) ) THEN INFO = -1 diff --git a/lapack-netlib/SRC/zhetrf_aa_2stage.f b/lapack-netlib/SRC/zhetrf_aa_2stage.f index 4d62198d6..73c0ebe9a 100644 --- a/lapack-netlib/SRC/zhetrf_aa_2stage.f +++ b/lapack-netlib/SRC/zhetrf_aa_2stage.f @@ -93,6 +93,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N, internally *> used to select NB such that LTB >= (3*NB+1)*N. *> @@ -112,7 +113,7 @@ *> *> \param[out] IPIV2 *> \verbatim -*> IPIV is INTEGER array, dimension (N) +*> IPIV2 is INTEGER array, dimension (N) *> On exit, it contains the details of the interchanges, i.e., *> the row and column k of T were interchanged with the *> row and column IPIV(k). @@ -125,6 +126,7 @@ *> *> \param[in] LWORK *> \verbatim +*> LWORK is INTEGER *> The size of WORK. LWORK >= N, internally used to select NB *> such that LWORK >= N*NB. *> @@ -657,6 +659,8 @@ c $ (J+1)*NB+1, (J+1)*NB+KB, IPIV, 1 ) * * Factor the band matrix CALL ZGBTRF( N, N, NB, NB, TB, LDTB, IPIV2, INFO ) +* + RETURN * * End of ZHETRF_AA_2STAGE * diff --git a/lapack-netlib/SRC/zhetrs_aa_2stage.f b/lapack-netlib/SRC/zhetrs_aa_2stage.f index 02e17476f..7fcee1118 100644 --- a/lapack-netlib/SRC/zhetrs_aa_2stage.f +++ b/lapack-netlib/SRC/zhetrs_aa_2stage.f @@ -69,7 +69,7 @@ *> *> \param[in] A *> \verbatim -*> A is COMPLEX*16array, dimension (LDA,N) +*> A is COMPLEX*16 array, dimension (LDA,N) *> Details of factors computed by ZHETRF_AA_2STAGE. *> \endverbatim *> @@ -81,12 +81,13 @@ *> *> \param[out] TB *> \verbatim -*> TB is COMPLEX*16array, dimension (LTB) +*> TB is COMPLEX*16 array, dimension (LTB) *> Details of factors computed by ZHETRF_AA_2STAGE. *> \endverbatim *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N. *> \endverbatim *> @@ -106,7 +107,7 @@ *> *> \param[in,out] B *> \verbatim -*> B is COMPLEX*16array, dimension (LDB,NRHS) +*> B is COMPLEX*16 array, dimension (LDB,NRHS) *> On entry, the right hand side matrix B. *> On exit, the solution matrix X. 
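The #if defined(_OPENMP) && _OPENMP >= 201307 guards in the *_hb2st/*_sb2st files exist because !$OMP TASK DEPEND requires OpenMP 4.0, whose version macro is 201307; older compilers define _OPENMP but reject depend clauses. The same guard in a C program that compiles with or without OpenMP enabled:

#include <stdio.h>

int main(void) {
    int x = 0;
#if defined(_OPENMP) && _OPENMP >= 201307
    /* OpenMP >= 4.0: task dependences are available */
    #pragma omp parallel
    #pragma omp single
    {
        #pragma omp task depend(out: x)
        x = 1;
        #pragma omp task depend(in: x)     /* runs after the writer */
        printf("tasked x = %d\n", x);
    }
#else
    x = 1;                                 /* serial fallback */
    printf("serial x = %d\n", x);
#endif
    return 0;
}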
*> \endverbatim diff --git a/lapack-netlib/SRC/zla_syamv.f b/lapack-netlib/SRC/zla_syamv.f index 02958bef3..cfdb3cdc8 100644 --- a/lapack-netlib/SRC/zla_syamv.f +++ b/lapack-netlib/SRC/zla_syamv.f @@ -241,7 +241,7 @@ INFO = 10 END IF IF( INFO.NE.0 )THEN - CALL XERBLA( 'DSYMV ', INFO ) + CALL XERBLA( 'ZLA_SYAMV', INFO ) RETURN END IF * diff --git a/lapack-netlib/SRC/zlaqr1.f b/lapack-netlib/SRC/zlaqr1.f index 03afb87aa..34341cb10 100644 --- a/lapack-netlib/SRC/zlaqr1.f +++ b/lapack-netlib/SRC/zlaqr1.f @@ -142,6 +142,13 @@ CABS1( CDUM ) = ABS( DBLE( CDUM ) ) + ABS( DIMAG( CDUM ) ) * .. * .. Executable Statements .. +* +* Quick return if possible +* + IF( N.NE.2 .AND. N.NE.3 ) THEN + RETURN + END IF +* IF( N.EQ.2 ) THEN S = CABS1( H( 1, 1 )-S2 ) + CABS1( H( 2, 1 ) ) IF( S.EQ.RZERO ) THEN diff --git a/lapack-netlib/SRC/zsysv_aa.f b/lapack-netlib/SRC/zsysv_aa.f index 10693c731..325d07c54 100644 --- a/lapack-netlib/SRC/zsysv_aa.f +++ b/lapack-netlib/SRC/zsysv_aa.f @@ -221,9 +221,6 @@ LWKOPT_SYTRS = INT( WORK(1) ) LWKOPT = MAX( LWKOPT_SYTRF, LWKOPT_SYTRS ) WORK( 1 ) = LWKOPT - IF( LWORK.LT.LWKOPT .AND. .NOT.LQUERY ) THEN - INFO = -10 - END IF END IF * IF( INFO.NE.0 ) THEN diff --git a/lapack-netlib/SRC/zsysv_aa_2stage.f b/lapack-netlib/SRC/zsysv_aa_2stage.f index fcf9bc870..029ed587d 100644 --- a/lapack-netlib/SRC/zsysv_aa_2stage.f +++ b/lapack-netlib/SRC/zsysv_aa_2stage.f @@ -105,6 +105,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N, internally *> used to select NB such that LTB >= (3*NB+1)*N. *> @@ -124,7 +125,7 @@ *> *> \param[out] IPIV2 *> \verbatim -*> IPIV is INTEGER array, dimension (N) +*> IPIV2 is INTEGER array, dimension (N) *> On exit, it contains the details of the interchanges, i.e., *> the row and column k of T were interchanged with the *> row and column IPIV(k). @@ -150,6 +151,7 @@ *> *> \param[in] LWORK *> \verbatim +*> LWORK is INTEGER *> The size of WORK. LWORK >= N, internally used to select NB *> such that LWORK >= N*NB. *> @@ -233,19 +235,18 @@ INFO = -3 ELSE IF( LDA.LT.MAX( 1, N ) ) THEN INFO = -5 + ELSE IF( LTB.LT.( 4*N ) .AND. .NOT.TQUERY ) THEN + INFO = -7 ELSE IF( LDB.LT.MAX( 1, N ) ) THEN INFO = -11 + ELSE IF( LWORK.LT.N .AND. .NOT.WQUERY ) THEN + INFO = -13 END IF * IF( INFO.EQ.0 ) THEN CALL ZSYTRF_AA_2STAGE( UPLO, N, A, LDA, TB, -1, IPIV, $ IPIV2, WORK, -1, INFO ) LWKOPT = INT( WORK(1) ) - IF( LTB.LT.INT( TB(1) ) .AND. .NOT.TQUERY ) THEN - INFO = -7 - ELSE IF( LWORK.LT.LWKOPT .AND. .NOT.WQUERY ) THEN - INFO = -13 - END IF END IF * IF( INFO.NE.0 ) THEN diff --git a/lapack-netlib/SRC/zsytrf_aa_2stage.f b/lapack-netlib/SRC/zsytrf_aa_2stage.f index 1f916726e..d3486c1a7 100644 --- a/lapack-netlib/SRC/zsytrf_aa_2stage.f +++ b/lapack-netlib/SRC/zsytrf_aa_2stage.f @@ -93,6 +93,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N, internally *> used to select NB such that LTB >= (3*NB+1)*N. *> @@ -112,7 +113,7 @@ *> *> \param[out] IPIV2 *> \verbatim -*> IPIV is INTEGER array, dimension (N) +*> IPIV2 is INTEGER array, dimension (N) *> On exit, it contains the details of the interchanges, i.e., *> the row and column k of T were interchanged with the *> row and column IPIV(k). @@ -125,6 +126,7 @@ *> *> \param[in] LWORK *> \verbatim +*> LWORK is INTEGER *> The size of WORK. LWORK >= N, internally used to select NB *> such that LWORK >= N*NB. 
*> @@ -662,6 +664,8 @@ c $ (J+1)*NB+1, (J+1)*NB+KB, IPIV, 1 ) * * Factor the band matrix CALL ZGBTRF( N, N, NB, NB, TB, LDTB, IPIV2, INFO ) +* + RETURN * * End of ZSYTRF_AA_2STAGE * diff --git a/lapack-netlib/SRC/zsytri2.f b/lapack-netlib/SRC/zsytri2.f index d5aabd43a..e7303c90b 100644 --- a/lapack-netlib/SRC/zsytri2.f +++ b/lapack-netlib/SRC/zsytri2.f @@ -163,7 +163,7 @@ UPPER = LSAME( UPLO, 'U' ) LQUERY = ( LWORK.EQ.-1 ) * Get blocksize - NBMAX = ILAENV( 1, 'ZSYTRF', UPLO, N, -1, -1, -1 ) + NBMAX = ILAENV( 1, 'ZSYTRI2', UPLO, N, -1, -1, -1 ) IF ( NBMAX .GE. N ) THEN MINSIZE = N ELSE diff --git a/lapack-netlib/SRC/zsytrs_aa_2stage.f b/lapack-netlib/SRC/zsytrs_aa_2stage.f index c5d894753..fa15eee90 100644 --- a/lapack-netlib/SRC/zsytrs_aa_2stage.f +++ b/lapack-netlib/SRC/zsytrs_aa_2stage.f @@ -85,6 +85,7 @@ *> *> \param[in] LTB *> \verbatim +*> LTB is INTEGER *> The size of the array TB. LTB >= 4*N. *> \endverbatim *> diff --git a/lapack-netlib/TESTING/LIN/dchksy_aa_2stage.f b/lapack-netlib/TESTING/LIN/dchksy_aa_2stage.f index 5698bcf94..f6d990d1c 100644 --- a/lapack-netlib/TESTING/LIN/dchksy_aa_2stage.f +++ b/lapack-netlib/TESTING/LIN/dchksy_aa_2stage.f @@ -218,7 +218,7 @@ * .. * .. External Subroutines .. EXTERNAL ALAERH, ALAHD, ALASUM, DERRSY, DLACPY, DLARHS, - $ DLATB4, DLATMS, DPOT02, DSYTRF_AA_2STAGE + $ DLATB4, DLATMS, DPOT02, DSYTRF_AA_2STAGE, $ DSYTRS_AA_2STAGE, XLAENV * .. * .. Intrinsic Functions .. diff --git a/lapack-netlib/TESTING/LIN/ddrvsy_aa_2stage.f b/lapack-netlib/TESTING/LIN/ddrvsy_aa_2stage.f index 0be321eb0..898422654 100644 --- a/lapack-netlib/TESTING/LIN/ddrvsy_aa_2stage.f +++ b/lapack-netlib/TESTING/LIN/ddrvsy_aa_2stage.f @@ -204,7 +204,7 @@ * .. External Subroutines .. EXTERNAL ALADHD, ALAERH, ALASVM, XLAENV, DERRVX, $ DGET04, DLACPY, DLARHS, DLATB4, DLATMS, - $ DSYSV_AA_2STAGE, CHET01_AA, DPOT02, + $ DSYSV_AA_2STAGE, DPOT02, $ DSYTRF_AA_2STAGE * .. * .. Scalars in Common .. diff --git a/lapack-netlib/TESTING/LIN/sdrvsy_aa_2stage.f b/lapack-netlib/TESTING/LIN/sdrvsy_aa_2stage.f index d8d9dc0a9..70e8ff6b8 100644 --- a/lapack-netlib/TESTING/LIN/sdrvsy_aa_2stage.f +++ b/lapack-netlib/TESTING/LIN/sdrvsy_aa_2stage.f @@ -203,7 +203,7 @@ * .. * .. External Subroutines .. EXTERNAL ALADHD, ALAERH, ALASVM, XLAENV, SERRVX, - $ CGET04, SLACPY, SLARHS, SLATB4, SLATMS, + $ SLACPY, SLARHS, SLATB4, SLATMS, $ SSYSV_AA_2STAGE, SSYT01_AA, SPOT02, $ SSYTRF_AA_2STAGE * .. diff --git a/lapack-netlib/TESTING/LIN/zchksy_aa_2stage.f b/lapack-netlib/TESTING/LIN/zchksy_aa_2stage.f index d4d8c2939..87fc47f71 100644 --- a/lapack-netlib/TESTING/LIN/zchksy_aa_2stage.f +++ b/lapack-netlib/TESTING/LIN/zchksy_aa_2stage.f @@ -217,8 +217,8 @@ DOUBLE PRECISION RESULT( NTESTS ) * .. * .. External Subroutines .. - EXTERNAL ALAERH, ALAHD, ALASUM, CERRSY, ZLACPY, ZLARHS, - $ CLATB4, ZLATMS, ZSYT02, ZSYT01, + EXTERNAL ALAERH, ALAHD, ALASUM, ZERRSY, ZLACPY, ZLARHS, + $ ZLATB4, ZLATMS, ZSYT02, ZSYT01, $ ZSYTRF_AA_2STAGE, ZSYTRS_AA_2STAGE, $ XLAENV * .. 
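The getrf_parallel.c hunks further below are the most consequential change in this group: every busy-wait on a volatile progress flag is replaced by a read taken under LOCK_COMMAND/UNLOCK_COMMAND (a pthread mutex, spinlock, or plain word depending on build flags), and the flag storage itself is promoted to C11 _Atomic when __STDC_VERSION__ >= 201112L. A minimal sketch of the lock-guarded polling pattern, assuming plain pthreads — the names flag_lock, worker and the single flag variable are illustrative, not OpenBLAS symbols:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t flag_lock = PTHREAD_MUTEX_INITIALIZER;
static long flag = 1;               /* set by the master, cleared by the worker */

static void *worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&flag_lock); /* the write happens under the same lock... */
    flag = 0;
    pthread_mutex_unlock(&flag_lock);
    return NULL;
}

int main(void)
{
    pthread_t t;
    long f;
    pthread_create(&t, NULL, worker, NULL);
    do {                            /* ...as every polling read, so the update */
        pthread_mutex_lock(&flag_lock);      /* is guaranteed to become visible */
        f = flag;
        pthread_mutex_unlock(&flag_lock);
    } while (f != 0);
    pthread_join(t, NULL);
    puts("flag cleared");
    return 0;
}

Tagging the flag arrays _Atomic under C11, as the patch does, prevents the compiler from caching the value in a register and adds the ordering guarantees that volatile alone never provided; the lock-guarded loop is the portable fallback for pre-C11 compilers.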
diff --git a/lapack-netlib/TESTING/MATGEN/Makefile b/lapack-netlib/TESTING/MATGEN/Makefile index e20004c2f..a1d784fa5 100644 --- a/lapack-netlib/TESTING/MATGEN/Makefile +++ b/lapack-netlib/TESTING/MATGEN/Makefile @@ -57,6 +57,8 @@ all: ../../$(TMGLIB) ALLOBJ = $(SMATGEN) $(CMATGEN) $(SCATGEN) $(DMATGEN) $(ZMATGEN) \ $(DZATGEN) +.PHONY: ../../$(TMGLIB) + ../../$(TMGLIB): $(ALLOBJ) $(ARCH) $(ARCHFLAGS) $@ $^ $(RANLIB) $@ diff --git a/lapack-netlib/lapack_testing.py b/lapack-netlib/lapack_testing.py index 3c917482d..5d07e1e87 100755 --- a/lapack-netlib/lapack_testing.py +++ b/lapack-netlib/lapack_testing.py @@ -257,16 +257,16 @@ for dtype in range_prec: else: if dtest==16: # LIN TESTS - cmdbase="xlintst"+letter+" < "+dtests[0][dtest]+".in > "+dtests[2][dtest]+".out" + cmdbase="LIN/xlintst"+letter+" < "+dtests[0][dtest]+".in > "+dtests[2][dtest]+".out" elif dtest==17: # PROTO LIN TESTS - cmdbase="xlintst"+letter+dtypes[0][dtype-1]+" < "+dtests[0][dtest]+".in > "+dtests[2][dtest]+".out" + cmdbase="LIN/xlintst"+letter+dtypes[0][dtype-1]+" < "+dtests[0][dtest]+".in > "+dtests[2][dtest]+".out" elif dtest==18: # PROTO LIN TESTS - cmdbase="xlintstrf"+letter+" < "+dtests[0][dtest]+".in > "+dtests[2][dtest]+".out" + cmdbase="LIN/xlintstrf"+letter+" < "+dtests[0][dtest]+".in > "+dtests[2][dtest]+".out" else: # EIG TESTS - cmdbase="xeigtst"+letter+" < "+dtests[0][dtest]+".in > "+dtests[2][dtest]+".out" + cmdbase="EIG/xeigtst"+letter+" < "+dtests[0][dtest]+".in > "+dtests[2][dtest]+".out" if (not just_errors and not short_summary): print("Testing "+name+" "+dtests[1][dtest]+"-"+cmdbase, end=' ') # Run the process: either to read the file or run the LAPACK testing diff --git a/lapack/getrf/getrf_parallel.c b/lapack/getrf/getrf_parallel.c index db8c836e0..591ce4a99 100644 --- a/lapack/getrf/getrf_parallel.c +++ b/lapack/getrf/getrf_parallel.c @@ -67,6 +67,26 @@ double sqrt(double); #undef GETRF_FACTOR #define GETRF_FACTOR 1.00 + +#if defined(USE_PTHREAD_LOCK) +static pthread_mutex_t getrf_lock = PTHREAD_MUTEX_INITIALIZER; +#elif defined(USE_PTHREAD_SPINLOCK) +static pthread_spinlock_t getrf_lock = 0; +#else +static BLASULONG getrf_lock = 0UL; +#endif + +#if defined(USE_PTHREAD_LOCK) +static pthread_mutex_t getrf_flag_lock = PTHREAD_MUTEX_INITIALIZER; +#elif defined(USE_PTHREAD_SPINLOCK) +static pthread_spinlock_t getrf_flag_lock = 0; +#else +static BLASULONG getrf_flag_lock = 0UL; +#endif + + + + static __inline BLASLONG FORMULA1(BLASLONG M, BLASLONG N, BLASLONG IS, BLASLONG BK, BLASLONG T) { double m = (double)(M - IS - BK); @@ -99,7 +119,11 @@ static void inner_basic_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *ra FLOAT *d = (FLOAT *)args -> b + (k + k * lda) * COMPSIZE; FLOAT *sbb = sb; +#if __STDC_VERSION__ >= 201112L + _Atomic BLASLONG *flag = (_Atomic BLASLONG *)args -> d; +#else volatile BLASLONG *flag = (volatile BLASLONG *)args -> d; +#endif blasint *ipiv = (blasint *)args -> c; @@ -177,7 +201,12 @@ static void inner_basic_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *ra /* Non blocking implementation */ typedef struct { - volatile BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE]; +#if __STDC_VERSION__ >= 201112L + _Atomic +#else + volatile +#endif + BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE]; } job_t; #define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ITCOPY(M, N, (FLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER); @@ -216,9 +245,12 @@ static int inner_advanced_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG * FLOAT *sbb= 
sb; blasint *ipiv = (blasint *)args -> c; - + BLASLONG jw; +#if __STDC_VERSION__ >= 201112L + _Atomic BLASLONG *flag = (_Atomic BLASLONG *)args -> d; +#else volatile BLASLONG *flag = (volatile BLASLONG *)args -> d; - +#endif if (args -> a == NULL) { TRSM_ILTCOPY(k, k, (FLOAT *)args -> b, lda, 0, sb); sbb = (FLOAT *)((((BLASULONG)(sb + k * k * COMPSIZE) + GEMM_ALIGN) & ~GEMM_ALIGN) + GEMM_OFFSET_B); @@ -245,8 +277,20 @@ static int inner_advanced_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG * for (xxx = n_from, bufferside = 0; xxx < n_to; xxx += div_n, bufferside ++) { for (i = 0; i < args -> nthreads; i++) +#if 1 + { + LOCK_COMMAND(&getrf_lock); + jw = job[mypos].working[i][CACHE_LINE_SIZE * bufferside]; + UNLOCK_COMMAND(&getrf_lock); + do { + LOCK_COMMAND(&getrf_lock); + jw = job[mypos].working[i][CACHE_LINE_SIZE * bufferside]; + UNLOCK_COMMAND(&getrf_lock); + } while (jw); + } +#else while (job[mypos].working[i][CACHE_LINE_SIZE * bufferside]) {}; - +#endif for(jjs = xxx; jjs < MIN(n_to, xxx + div_n); jjs += min_jj){ min_jj = MIN(n_to, xxx + div_n) - jjs; if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N; @@ -283,18 +327,23 @@ static int inner_advanced_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG * b + (is + jjs * lda) * COMPSIZE, lda, is); } } - MB; - for (i = 0; i < args -> nthreads; i++) + for (i = 0; i < args -> nthreads; i++) { + LOCK_COMMAND(&getrf_lock); job[mypos].working[i][CACHE_LINE_SIZE * bufferside] = (BLASLONG)buffer[bufferside]; - + UNLOCK_COMMAND(&getrf_lock); + } } + LOCK_COMMAND(&getrf_flag_lock); flag[mypos * CACHE_LINE_SIZE] = 0; + UNLOCK_COMMAND(&getrf_flag_lock); if (m == 0) { for (xxx = 0; xxx < DIVIDE_RATE; xxx++) { + LOCK_COMMAND(&getrf_lock); job[mypos].working[mypos][CACHE_LINE_SIZE * xxx] = 0; + UNLOCK_COMMAND(&getrf_lock); } } @@ -318,7 +367,18 @@ static int inner_advanced_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG * for (xxx = range_n[current], bufferside = 0; xxx < range_n[current + 1]; xxx += div_n, bufferside ++) { if ((current != mypos) && (!is)) { +#if 1 + LOCK_COMMAND(&getrf_lock); + jw = job[current].working[mypos][CACHE_LINE_SIZE * bufferside]; + UNLOCK_COMMAND(&getrf_lock); + do { + LOCK_COMMAND(&getrf_lock); + jw = job[current].working[mypos][CACHE_LINE_SIZE * bufferside]; + UNLOCK_COMMAND(&getrf_lock); + } while (jw == 0); +#else while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {}; +#endif } KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - xxx, div_n), k, @@ -327,7 +387,9 @@ static int inner_advanced_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG * MB; if (is + min_i >= m) { + LOCK_COMMAND(&getrf_lock); job[current].working[mypos][CACHE_LINE_SIZE * bufferside] = 0; + UNLOCK_COMMAND(&getrf_lock); } } @@ -339,7 +401,18 @@ static int inner_advanced_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG * for (i = 0; i < args -> nthreads; i++) { for (xxx = 0; xxx < DIVIDE_RATE; xxx++) { +#if 1 + LOCK_COMMAND(&getrf_lock); + jw = job[mypos].working[i][CACHE_LINE_SIZE *xxx]; + UNLOCK_COMMAND(&getrf_lock); + do { + LOCK_COMMAND(&getrf_lock); + jw = job[mypos].working[i][CACHE_LINE_SIZE *xxx]; + UNLOCK_COMMAND(&getrf_lock); + } while(jw != 0); +#else while (job[mypos].working[i][CACHE_LINE_SIZE * xxx] ) {}; +#endif } } @@ -374,11 +447,17 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, BLASLONG i, j, k, is, bk; BLASLONG num_cpu; + BLASLONG f; #ifdef _MSC_VER BLASLONG flag[MAX_CPU_NUMBER * CACHE_LINE_SIZE]; #else - volatile BLASLONG flag[MAX_CPU_NUMBER * CACHE_LINE_SIZE] 
__attribute__((aligned(128))); +#if __STDC_VERSION__ >= 201112L + _Atomic +#else + volatile +#endif + BLASLONG flag[MAX_CPU_NUMBER * CACHE_LINE_SIZE] __attribute__((aligned(128))); #endif #ifndef COMPLEX @@ -501,11 +580,13 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, if (mm >= nn) { width = blas_quickdivide(nn + args -> nthreads - num_cpu, args -> nthreads - num_cpu - 1); + if (width == 0) width = nn; if (nn < width) width = nn; nn -= width; range_N[num_cpu + 1] = range_N[num_cpu] + width; width = blas_quickdivide(mm + args -> nthreads - num_cpu, args -> nthreads - num_cpu - 1); + if (width == 0) width = mm; if (mm < width) width = mm; if (nn <= 0) width = mm; mm -= width; @@ -514,11 +595,13 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, } else { width = blas_quickdivide(mm + args -> nthreads - num_cpu, args -> nthreads - num_cpu - 1); + if (width == 0) width = mm; if (mm < width) width = mm; mm -= width; range_M[num_cpu + 1] = range_M[num_cpu] + width; width = blas_quickdivide(nn + args -> nthreads - num_cpu, args -> nthreads - num_cpu - 1); + if (width == 0) width = nn; if (nn < width) width = nn; if (mm <= 0) width = nn; nn -= width; @@ -561,7 +644,6 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, range_n_new[1] = offset + is + bk; if (num_cpu > 0) { - queue[num_cpu - 1].next = NULL; exec_blas_async(0, &queue[0]); @@ -572,8 +654,20 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, if (iinfo && !info) info = iinfo + is; - for (i = 0; i < num_cpu; i ++) while (flag[i * CACHE_LINE_SIZE]) {}; - + for (i = 0; i < num_cpu; i ++) { +#if 1 + LOCK_COMMAND(&getrf_flag_lock); + f=flag[i*CACHE_LINE_SIZE]; + UNLOCK_COMMAND(&getrf_flag_lock); + while (f!=0) { + LOCK_COMMAND(&getrf_flag_lock); + f=flag[i*CACHE_LINE_SIZE]; + UNLOCK_COMMAND(&getrf_flag_lock); + }; +#else + while (flag[i*CACHE_LINE_SIZE]) {}; +#endif + } TRSM_ILTCOPY(bk, bk, a + (is + is * lda) * COMPSIZE, lda, 0, sb); } else { @@ -634,8 +728,12 @@ blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, BLASLONG range[MAX_CPU_NUMBER + 1]; BLASLONG width, nn, num_cpu; - - volatile BLASLONG flag[MAX_CPU_NUMBER * CACHE_LINE_SIZE] __attribute__((aligned(128))); +#if __STDC_VERSION__ >= 201112L + _Atomic +#else + volatile +#endif + BLASLONG flag[MAX_CPU_NUMBER * CACHE_LINE_SIZE] __attribute__((aligned(128))); #ifndef COMPLEX #ifdef XDOUBLE diff --git a/lapack/getrf/potrf_parallel.c b/lapack/getrf/potrf_parallel.c new file mode 100644 index 000000000..c2fee6bd1 --- /dev/null +++ b/lapack/getrf/potrf_parallel.c @@ -0,0 +1,664 @@ +/*********************************************************************/ +/* Copyright 2009, 2010 The University of Texas at Austin. */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* 1. Redistributions of source code must retain the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials */ +/* provided with the distribution. 
*/ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ +/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ +/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ +/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ +/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ +/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ +/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ +/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* */ +/* The views and conclusions contained in the software and */ +/* documentation are those of the authors and should not be */ +/* interpreted as representing official policies, either expressed */ +/* or implied, of The University of Texas at Austin. */ +/*********************************************************************/ + +#include +#include "common.h" + +#ifndef USE_SIMPLE_THREADED_LEVEL3 + +//The array of job_t may overflow the stack. +//Instead, use malloc to alloc job_t. +#if MAX_CPU_NUMBER > BLAS3_MEM_ALLOC_THRESHOLD +#define USE_ALLOC_HEAP +#endif + + +static FLOAT dm1 = -1.; + +#ifndef KERNEL_FUNC +#ifndef LOWER +#define KERNEL_FUNC SYRK_KERNEL_U +#else +#define KERNEL_FUNC SYRK_KERNEL_L +#endif +#endif + +#ifndef LOWER +#ifndef COMPLEX +#define TRSM_KERNEL TRSM_KERNEL_LT +#else +#define TRSM_KERNEL TRSM_KERNEL_LC +#endif +#else +#ifndef COMPLEX +#define TRSM_KERNEL TRSM_KERNEL_RN +#else +#define TRSM_KERNEL TRSM_KERNEL_RR +#endif +#endif + +#ifndef CACHE_LINE_SIZE +#define CACHE_LINE_SIZE 8 +#endif + +#ifndef DIVIDE_RATE +#define DIVIDE_RATE 2 +#endif + +#ifndef SWITCH_RATIO +#define SWITCH_RATIO 2 +#endif + +#ifndef LOWER +#define TRANS +#endif + +#ifndef SYRK_LOCAL +#if !defined(LOWER) && !defined(TRANS) +#define SYRK_LOCAL SYRK_UN +#elif !defined(LOWER) && defined(TRANS) +#define SYRK_LOCAL SYRK_UT +#elif defined(LOWER) && !defined(TRANS) +#define SYRK_LOCAL SYRK_LN +#else +#define SYRK_LOCAL SYRK_LT +#endif +#endif + +typedef struct { +#if __STDC_VERSION__ >= 201112L + _Atomic +#else + volatile +#endif + BLASLONG working[MAX_CPU_NUMBER][CACHE_LINE_SIZE * DIVIDE_RATE]; +} job_t; + + +#ifndef KERNEL_OPERATION +#ifndef COMPLEX +#define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) \ + KERNEL_FUNC(M, N, K, ALPHA[0], SA, SB, (FLOAT *)(C) + ((X) + (Y) * LDC) * COMPSIZE, LDC, (X) - (Y)) +#else +#define KERNEL_OPERATION(M, N, K, ALPHA, SA, SB, C, LDC, X, Y) \ + KERNEL_FUNC(M, N, K, ALPHA[0], ALPHA[1], SA, SB, (FLOAT *)(C) + ((X) + (Y) * LDC) * COMPSIZE, LDC, (X) - (Y)) +#endif +#endif + +#ifndef ICOPY_OPERATION +#ifndef TRANS +#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ITCOPY(M, N, (FLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, BUFFER); +#else +#define ICOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_INCOPY(M, N, (FLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER); +#endif +#endif + +#ifndef OCOPY_OPERATION +#ifdef TRANS +#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_ONCOPY(M, N, (FLOAT *)(A) + ((X) + (Y) * (LDA)) * COMPSIZE, LDA, BUFFER); +#else +#define OCOPY_OPERATION(M, N, A, LDA, X, Y, BUFFER) GEMM_OTCOPY(M, N, (FLOAT *)(A) + ((Y) + (X) * (LDA)) * COMPSIZE, LDA, 
BUFFER); +#endif +#endif + +#ifndef S +#define S args -> a +#endif +#ifndef A +#define A args -> b +#endif +#ifndef C +#define C args -> c +#endif +#ifndef LDA +#define LDA args -> lda +#endif +#ifndef N +#define N args -> m +#endif +#ifndef K +#define K args -> k +#endif + +static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG mypos){ + + FLOAT *buffer[DIVIDE_RATE]; + + BLASLONG k, lda; + BLASLONG m_from, m_to; + + FLOAT *alpha; + FLOAT *a, *c; + job_t *job = (job_t *)args -> common; + BLASLONG xxx, bufferside; + + BLASLONG jjs, min_jj; + BLASLONG is, min_i, div_n; + + BLASLONG i, current; + + k = K; + + a = (FLOAT *)A; + c = (FLOAT *)C; + + lda = LDA; + + alpha = (FLOAT *)args -> alpha; + + m_from = range_n[mypos + 0]; + m_to = range_n[mypos + 1]; + +#if 0 + fprintf(stderr, "Thread[%ld] m_from : %ld m_to : %ld\n", mypos, m_from, m_to); +#endif + + div_n = (((m_to - m_from + DIVIDE_RATE - 1) / DIVIDE_RATE + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN; + + buffer[0] = (FLOAT *)((((BLASULONG)(sb + k * k * COMPSIZE) + GEMM_ALIGN) & ~GEMM_ALIGN) + GEMM_OFFSET_B); + for (i = 1; i < DIVIDE_RATE; i++) { + buffer[i] = buffer[i - 1] + GEMM_Q * div_n * COMPSIZE; + } + +#ifndef LOWER + TRSM_IUNCOPY(k, k, (FLOAT *)S, lda, 0, sb); +#else + TRSM_OLTCOPY(k, k, (FLOAT *)S, lda, 0, sb); +#endif + + for (xxx = m_from, bufferside = 0; xxx < m_to; xxx += div_n, bufferside ++) { + + for(jjs = xxx; jjs < MIN(m_to, xxx + div_n); jjs += min_jj){ + + min_jj = MIN(m_to, xxx + div_n) - jjs; + +#ifndef LOWER + if (min_jj > GEMM_UNROLL_MN) min_jj = GEMM_UNROLL_MN; +#else + if (min_jj > GEMM_P) min_jj = GEMM_P; +#endif + +#ifndef LOWER + OCOPY_OPERATION (k, min_jj, a, lda, 0, jjs, buffer[bufferside] + k * (jjs - xxx) * COMPSIZE); + + TRSM_KERNEL (k, min_jj, k, dm1, +#ifdef COMPLEX + ZERO, +#endif + sb, + buffer[bufferside] + k * (jjs - xxx) * COMPSIZE, + a + jjs * lda * COMPSIZE, lda, 0); +#else + ICOPY_OPERATION (k, min_jj, a, lda, 0, jjs, buffer[bufferside] + k * (jjs - xxx) * COMPSIZE); + + TRSM_KERNEL (min_jj, k, k, dm1, +#ifdef COMPLEX + ZERO, +#endif + buffer[bufferside] + k * (jjs - xxx) * COMPSIZE, + sb, + a + jjs * COMPSIZE, lda, 0); +#endif + } + +#ifndef LOWER + for (i = 0; i <= mypos; i++) + job[mypos].working[i][CACHE_LINE_SIZE * bufferside] = (BLASLONG)buffer[bufferside]; +#else + for (i = mypos; i < args -> nthreads; i++) + job[mypos].working[i][CACHE_LINE_SIZE * bufferside] = (BLASLONG)buffer[bufferside]; +#endif + + WMB; + } + + min_i = m_to - m_from; + + if (min_i >= GEMM_P * 2) { + min_i = GEMM_P; + } else + if (min_i > GEMM_P) { + min_i = (((min_i + 1) / 2 + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN; + } + +#ifndef LOWER + ICOPY_OPERATION(k, min_i, a, lda, 0, m_from, sa); +#else + OCOPY_OPERATION(k, min_i, a, lda, 0, m_from, sa); +#endif + + current = mypos; + +#ifndef LOWER + while (current < args -> nthreads) +#else + while (current >= 0) +#endif + { + div_n = (((range_n[current + 1] - range_n[current] + DIVIDE_RATE - 1) / DIVIDE_RATE + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN; + + for (xxx = range_n[current], bufferside = 0; xxx < range_n[current + 1]; xxx += div_n, bufferside ++) { + + /* thread has to wait */ + if (current != mypos) while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;}; + + KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - xxx, div_n), k, alpha, + sa, (FLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside], + c, lda, m_from, xxx); + + if (m_from + 
min_i >= m_to) { + job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0; + WMB; + } + } + +#ifndef LOWER + current ++; +#else + current --; +#endif + } + + for(is = m_from + min_i; is < m_to; is += min_i){ + min_i = m_to - is; + + if (min_i >= GEMM_P * 2) { + min_i = GEMM_P; + } else + if (min_i > GEMM_P) { + min_i = (((min_i + 1) / 2 + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN; + } + +#ifndef LOWER + ICOPY_OPERATION(k, min_i, a, lda, 0, is, sa); +#else + OCOPY_OPERATION(k, min_i, a, lda, 0, is, sa); +#endif + + current = mypos; + +#ifndef LOWER + while (current < args -> nthreads) +#else + while (current >= 0) +#endif + { + div_n = (((range_n[current + 1] - range_n[current] + DIVIDE_RATE - 1) / DIVIDE_RATE + GEMM_UNROLL_MN - 1)/GEMM_UNROLL_MN) * GEMM_UNROLL_MN; + + for (xxx = range_n[current], bufferside = 0; xxx < range_n[current + 1]; xxx += div_n, bufferside ++) { + + KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - xxx, div_n), k, alpha, + sa, (FLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside], + c, lda, is, xxx); + + if (is + min_i >= m_to) { + job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0; + WMB; + } + } +#ifndef LOWER + current ++; +#else + current --; +#endif + } + } + + for (i = 0; i < args -> nthreads; i++) { + if (i != mypos) { + for (xxx = 0; xxx < DIVIDE_RATE; xxx++) { + while (job[mypos].working[i][CACHE_LINE_SIZE * xxx] ) {YIELDING;}; + } + } + } + + return 0; + } + +static int thread_driver(blas_arg_t *args, FLOAT *sa, FLOAT *sb){ + + blas_arg_t newarg; + +#ifndef USE_ALLOC_HEAP + job_t job[MAX_CPU_NUMBER]; +#else + job_t * job = NULL; +#endif + + blas_queue_t queue[MAX_CPU_NUMBER]; + + BLASLONG range[MAX_CPU_NUMBER + 100]; + + BLASLONG num_cpu; + + BLASLONG nthreads = args -> nthreads; + + BLASLONG width, i, j, k; + BLASLONG n, n_from, n_to; + int mode, mask; + double dnum; + +#ifndef COMPLEX +#ifdef XDOUBLE + mode = BLAS_XDOUBLE | BLAS_REAL; + mask = MAX(QGEMM_UNROLL_M, QGEMM_UNROLL_N) - 1; +#elif defined(DOUBLE) + mode = BLAS_DOUBLE | BLAS_REAL; + mask = MAX(DGEMM_UNROLL_M, DGEMM_UNROLL_N) - 1; +#else + mode = BLAS_SINGLE | BLAS_REAL; + mask = MAX(SGEMM_UNROLL_M, SGEMM_UNROLL_N) - 1; +#endif +#else +#ifdef XDOUBLE + mode = BLAS_XDOUBLE | BLAS_COMPLEX; + mask = MAX(XGEMM_UNROLL_M, XGEMM_UNROLL_N) - 1; +#elif defined(DOUBLE) + mode = BLAS_DOUBLE | BLAS_COMPLEX; + mask = MAX(ZGEMM_UNROLL_M, ZGEMM_UNROLL_N) - 1; +#else + mode = BLAS_SINGLE | BLAS_COMPLEX; + mask = MAX(CGEMM_UNROLL_M, CGEMM_UNROLL_N) - 1; +#endif +#endif + + newarg.m = args -> m; + newarg.k = args -> k; + newarg.a = args -> a; + newarg.b = args -> b; + newarg.c = args -> c; + newarg.lda = args -> lda; + newarg.alpha = args -> alpha; + +#ifdef USE_ALLOC_HEAP + job = (job_t*)malloc(MAX_CPU_NUMBER * sizeof(job_t)); + if(job==NULL){ + fprintf(stderr, "OpenBLAS: malloc failed in %s\n", __func__); + exit(1); + } +#endif + + newarg.common = (void *)job; + + n_from = 0; + n_to = args -> m; + +#ifndef LOWER + + range[MAX_CPU_NUMBER] = n_to - n_from; + range[0] = 0; + num_cpu = 0; + i = 0; + n = n_to - n_from; + + dnum = (double)n * (double)n /(double)nthreads; + + while (i < n){ + + if (nthreads - num_cpu > 1) { + + double di = (double)i; + + width = ((((BLASLONG)(sqrt(di * di + dnum) - di) + mask)/(mask+1)) * (mask+1)); + + if (num_cpu == 0) width = n - (((n - width)/(mask+1)) * (mask+1)); + + if ((width > n - i) || (width < mask)) width = n - i; + + } else { + width = n - i; + } + + range[MAX_CPU_NUMBER - num_cpu - 1] = range[MAX_CPU_NUMBER - num_cpu] - width; 
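+
+      /* Each width computed above solves (i + width)^2 - i^2 ~ dnum,   */
+      /* i.e. n*n/nthreads, so every thread covers roughly the same     */
+      /* area of the triangular matrix; the result is rounded up to a   */
+      /* multiple of the GEMM unroll granularity (mask + 1).            */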
+ + queue[num_cpu].mode = mode; + queue[num_cpu].routine = inner_thread; + queue[num_cpu].args = &newarg; + queue[num_cpu].range_m = NULL; + + queue[num_cpu].sa = NULL; + queue[num_cpu].sb = NULL; + queue[num_cpu].next = &queue[num_cpu + 1]; + + num_cpu ++; + i += width; + } + + for (i = 0; i < num_cpu; i ++) queue[i].range_n = &range[MAX_CPU_NUMBER - num_cpu]; + +#else + + range[0] = 0; + num_cpu = 0; + i = 0; + n = n_to - n_from; + + dnum = (double)n * (double)n /(double)nthreads; + + while (i < n){ + + if (nthreads - num_cpu > 1) { + + double di = (double)i; + + width = ((((BLASLONG)(sqrt(di * di + dnum) - di) + mask)/(mask+1)) * (mask+1)); + + if ((width > n - i) || (width < mask)) width = n - i; + + } else { + width = n - i; + } + + range[num_cpu + 1] = range[num_cpu] + width; + + queue[num_cpu].mode = mode; + queue[num_cpu].routine = inner_thread; + queue[num_cpu].args = &newarg; + queue[num_cpu].range_m = NULL; + queue[num_cpu].range_n = range; + queue[num_cpu].sa = NULL; + queue[num_cpu].sb = NULL; + queue[num_cpu].next = &queue[num_cpu + 1]; + + num_cpu ++; + i += width; + } + +#endif + + newarg.nthreads = num_cpu; + + if (num_cpu) { + + for (j = 0; j < num_cpu; j++) { + for (i = 0; i < num_cpu; i++) { + for (k = 0; k < DIVIDE_RATE; k++) { + job[j].working[i][CACHE_LINE_SIZE * k] = 0; + } + } + } + + queue[0].sa = sa; + queue[0].sb = sb; + queue[num_cpu - 1].next = NULL; + + exec_blas(num_cpu, queue); + } + +#ifdef USE_ALLOC_HEAP + free(job); +#endif + + return 0; +} + +#endif + +blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG myid) { + + BLASLONG n, bk, i, blocking, lda; + BLASLONG info; + int mode; + blas_arg_t newarg; + FLOAT *a; + FLOAT alpha[2] = { -ONE, ZERO}; + +#ifndef COMPLEX +#ifdef XDOUBLE + mode = BLAS_XDOUBLE | BLAS_REAL; +#elif defined(DOUBLE) + mode = BLAS_DOUBLE | BLAS_REAL; +#else + mode = BLAS_SINGLE | BLAS_REAL; +#endif +#else +#ifdef XDOUBLE + mode = BLAS_XDOUBLE | BLAS_COMPLEX; +#elif defined(DOUBLE) + mode = BLAS_DOUBLE | BLAS_COMPLEX; +#else + mode = BLAS_SINGLE | BLAS_COMPLEX; +#endif +#endif + + if (args -> nthreads == 1) { +#ifndef LOWER + info = POTRF_U_SINGLE(args, NULL, NULL, sa, sb, 0); +#else + info = POTRF_L_SINGLE(args, NULL, NULL, sa, sb, 0); +#endif + return info; + } + + n = args -> n; + a = (FLOAT *)args -> a; + lda = args -> lda; + + if (range_n) n = range_n[1] - range_n[0]; + + if (n <= GEMM_UNROLL_N * 2) { +#ifndef LOWER + info = POTRF_U_SINGLE(args, NULL, range_n, sa, sb, 0); +#else + info = POTRF_L_SINGLE(args, NULL, range_n, sa, sb, 0); +#endif + return info; + } + + newarg.lda = lda; + newarg.ldb = lda; + newarg.ldc = lda; + newarg.alpha = alpha; + newarg.beta = NULL; + newarg.nthreads = args -> nthreads; + + blocking = ((n / 2 + GEMM_UNROLL_N - 1)/GEMM_UNROLL_N) * GEMM_UNROLL_N; + if (blocking > GEMM_Q) blocking = GEMM_Q; + + for (i = 0; i < n; i += blocking) { + bk = n - i; + if (bk > blocking) bk = blocking; + + newarg.m = bk; + newarg.n = bk; + newarg.a = a + (i + i * lda) * COMPSIZE; + + info = CNAME(&newarg, NULL, NULL, sa, sb, 0); + if (info) return info + i; + + if (n - i - bk > 0) { +#ifndef USE_SIMPLE_THREADED_LEVEL3 + newarg.m = n - i - bk; + newarg.k = bk; +#ifndef LOWER + newarg.b = a + ( i + (i + bk) * lda) * COMPSIZE; +#else + newarg.b = a + ((i + bk) + i * lda) * COMPSIZE; +#endif + newarg.c = a + ((i + bk) + (i + bk) * lda) * COMPSIZE; + + thread_driver(&newarg, sa, sb); +#else + +#ifndef LOWER + newarg.m = bk; + newarg.n = n - i - bk; + newarg.a = a + (i + i * lda) * 
COMPSIZE; + newarg.b = a + (i + (i + bk) * lda) * COMPSIZE; + + gemm_thread_n(mode | BLAS_TRANSA_T, + &newarg, NULL, NULL, (void *)TRSM_LCUN, sa, sb, args -> nthreads); + + newarg.n = n - i - bk; + newarg.k = bk; + newarg.a = a + ( i + (i + bk) * lda) * COMPSIZE; + newarg.c = a + ((i + bk) + (i + bk) * lda) * COMPSIZE; + +#if 0 + HERK_THREAD_UC(&newarg, NULL, NULL, sa, sb, 0); +#else + syrk_thread(mode | BLAS_TRANSA_N | BLAS_TRANSB_T, + &newarg, NULL, NULL, (void *)HERK_UC, sa, sb, args -> nthreads); +#endif +#else + newarg.m = n - i - bk; + newarg.n = bk; + newarg.a = a + (i + i * lda) * COMPSIZE; + newarg.b = a + (i + bk + i * lda) * COMPSIZE; + + gemm_thread_m(mode | BLAS_RSIDE | BLAS_TRANSA_T | BLAS_UPLO, + &newarg, NULL, NULL, (void *)TRSM_RCLN, sa, sb, args -> nthreads); + + newarg.n = n - i - bk; + newarg.k = bk; + newarg.a = a + (i + bk + i * lda) * COMPSIZE; + newarg.c = a + (i + bk + (i + bk) * lda) * COMPSIZE; + +#if 0 + HERK_THREAD_LN(&newarg, NULL, NULL, sa, sb, 0); +#else + syrk_thread(mode | BLAS_TRANSA_N | BLAS_TRANSB_T | BLAS_UPLO, + &newarg, NULL, NULL, (void *)HERK_LN, sa, sb, args -> nthreads); +#endif +#endif + +#endif + } + } + return 0; +} diff --git a/param.h b/param.h index 189cdc4a0..fa6730208 100644 --- a/param.h +++ b/param.h @@ -1507,7 +1507,129 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define SYMV_P 8 -#define SWITCH_RATIO 4 +#define SWITCH_RATIO 32 +#define GEMM_PREFERED_SIZE 16 + +#ifdef ARCH_X86 + +#define SGEMM_DEFAULT_UNROLL_M 4 +#define DGEMM_DEFAULT_UNROLL_M 2 +#define QGEMM_DEFAULT_UNROLL_M 2 +#define CGEMM_DEFAULT_UNROLL_M 2 +#define ZGEMM_DEFAULT_UNROLL_M 1 +#define XGEMM_DEFAULT_UNROLL_M 1 + +#define SGEMM_DEFAULT_UNROLL_N 4 +#define DGEMM_DEFAULT_UNROLL_N 4 +#define QGEMM_DEFAULT_UNROLL_N 2 +#define CGEMM_DEFAULT_UNROLL_N 2 +#define ZGEMM_DEFAULT_UNROLL_N 2 +#define XGEMM_DEFAULT_UNROLL_N 1 + +#else + +#define SGEMM_DEFAULT_UNROLL_M 16 +#define DGEMM_DEFAULT_UNROLL_M 4 +#define QGEMM_DEFAULT_UNROLL_M 2 +#define CGEMM_DEFAULT_UNROLL_M 8 +#define ZGEMM_DEFAULT_UNROLL_M 4 +#define XGEMM_DEFAULT_UNROLL_M 1 + +#define SGEMM_DEFAULT_UNROLL_N 4 +#define DGEMM_DEFAULT_UNROLL_N 8 +#define QGEMM_DEFAULT_UNROLL_N 2 +#define CGEMM_DEFAULT_UNROLL_N 2 +#define ZGEMM_DEFAULT_UNROLL_N 2 +#define XGEMM_DEFAULT_UNROLL_N 1 + +#define SGEMM_DEFAULT_UNROLL_MN 32 +#define DGEMM_DEFAULT_UNROLL_MN 32 +#endif + +#ifdef ARCH_X86 + +#define SGEMM_DEFAULT_P 512 +#define SGEMM_DEFAULT_R sgemm_r +#define DGEMM_DEFAULT_P 512 +#define DGEMM_DEFAULT_R dgemm_r +#define QGEMM_DEFAULT_P 504 +#define QGEMM_DEFAULT_R qgemm_r +#define CGEMM_DEFAULT_P 128 +#define CGEMM_DEFAULT_R 1024 +#define ZGEMM_DEFAULT_P 512 +#define ZGEMM_DEFAULT_R zgemm_r +#define XGEMM_DEFAULT_P 252 +#define XGEMM_DEFAULT_R xgemm_r +#define SGEMM_DEFAULT_Q 256 +#define DGEMM_DEFAULT_Q 256 +#define QGEMM_DEFAULT_Q 128 +#define CGEMM_DEFAULT_Q 256 +#define ZGEMM_DEFAULT_Q 192 +#define XGEMM_DEFAULT_Q 128 + +#else + +#define SGEMM_DEFAULT_P 768 +#define DGEMM_DEFAULT_P 512 +#define CGEMM_DEFAULT_P 384 +#define ZGEMM_DEFAULT_P 256 + +#ifdef WINDOWS_ABI +#define SGEMM_DEFAULT_Q 320 +#define DGEMM_DEFAULT_Q 128 +#else +#define SGEMM_DEFAULT_Q 384 +#define DGEMM_DEFAULT_Q 256 +#endif +#define CGEMM_DEFAULT_Q 192 +#define ZGEMM_DEFAULT_Q 128 + +#define SGEMM_DEFAULT_R sgemm_r +#define DGEMM_DEFAULT_R 13824 +#define CGEMM_DEFAULT_R cgemm_r +#define ZGEMM_DEFAULT_R zgemm_r + +#define QGEMM_DEFAULT_Q 128 +#define QGEMM_DEFAULT_P 504 +#define QGEMM_DEFAULT_R qgemm_r +#define XGEMM_DEFAULT_P 
252 +#define XGEMM_DEFAULT_R xgemm_r +#define XGEMM_DEFAULT_Q 128 + +#define CGEMM3M_DEFAULT_UNROLL_N 8 +#define CGEMM3M_DEFAULT_UNROLL_M 4 +#define ZGEMM3M_DEFAULT_UNROLL_N 8 +#define ZGEMM3M_DEFAULT_UNROLL_M 2 + +#define CGEMM3M_DEFAULT_P 448 +#define ZGEMM3M_DEFAULT_P 224 +#define XGEMM3M_DEFAULT_P 112 +#define CGEMM3M_DEFAULT_Q 224 +#define ZGEMM3M_DEFAULT_Q 224 +#define XGEMM3M_DEFAULT_Q 224 +#define CGEMM3M_DEFAULT_R 12288 +#define ZGEMM3M_DEFAULT_R 12288 +#define XGEMM3M_DEFAULT_R 12288 + +#endif + + +#endif + +#ifdef SKYLAKEX + +#define SNUMOPT 16 +#define DNUMOPT 8 + +#define GEMM_DEFAULT_OFFSET_A 0 +#define GEMM_DEFAULT_OFFSET_B 0 +#define GEMM_DEFAULT_ALIGN 0x03fffUL + +#define SYMV_P 8 + +#define SWITCH_RATIO 32 +#define GEMM_PREFERED_SIZE 32 +#define USE_SGEMM_KERNEL_DIRECT 1 #ifdef ARCH_X86 @@ -2291,7 +2413,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define SYMV_P 16 #endif -#if defined(P5600) || defined(I6400) || defined(P6600) || defined(I6500) +#if defined(P5600) || defined(MIPS1004K) || defined(I6400) || defined(P6600) || defined(I6500) #define SNUMOPT 2 #define DNUMOPT 2 @@ -2423,8 +2545,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define SYMV_P 16 #endif +// Common ARMv8 parameters +#if defined(ARMV8) -#if defined(CORTEXA57) #define SNUMOPT 2 #define DNUMOPT 2 @@ -2432,6 +2555,44 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define GEMM_DEFAULT_OFFSET_B 0 #define GEMM_DEFAULT_ALIGN 0x03fffUL +#define SYMV_P 16 + +// Darwin / Cross +#if defined(OS_DARWIN) && defined(CROSS) + +#define SGEMM_DEFAULT_UNROLL_M 2 +#define SGEMM_DEFAULT_UNROLL_N 2 + +#define DGEMM_DEFAULT_UNROLL_M 2 +#define DGEMM_DEFAULT_UNROLL_N 2 + +#define CGEMM_DEFAULT_UNROLL_M 2 +#define CGEMM_DEFAULT_UNROLL_N 2 + +#define ZGEMM_DEFAULT_UNROLL_M 2 +#define ZGEMM_DEFAULT_UNROLL_N 2 + +#define SGEMM_DEFAULT_P 128 +#define DGEMM_DEFAULT_P 128 +#define CGEMM_DEFAULT_P 96 +#define ZGEMM_DEFAULT_P 64 + +#define SGEMM_DEFAULT_Q 240 +#define DGEMM_DEFAULT_Q 120 +#define CGEMM_DEFAULT_Q 120 +#define ZGEMM_DEFAULT_Q 120 + +#define SGEMM_DEFAULT_R 12288 +#define DGEMM_DEFAULT_R 8192 +#define CGEMM_DEFAULT_R 4096 +#define ZGEMM_DEFAULT_R 4096 + +#else // Linux / Native + +#if defined(CORTEXA53) || defined(CORTEXA57) || \ + defined(CORTEXA72) || defined(CORTEXA73) || \ + defined(FALKOR) + #define SGEMM_DEFAULT_UNROLL_M 16 #define SGEMM_DEFAULT_UNROLL_N 4 @@ -2459,17 +2620,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define CGEMM_DEFAULT_R 4096 #define ZGEMM_DEFAULT_R 2048 - -#define SYMV_P 16 -#endif - -#if defined(ARMV8) -#define SNUMOPT 2 -#define DNUMOPT 2 - -#define GEMM_DEFAULT_OFFSET_A 0 -#define GEMM_DEFAULT_OFFSET_B 0 -#define GEMM_DEFAULT_ALIGN 0x03fffUL +#elif defined(THUNDERX) #define SGEMM_DEFAULT_UNROLL_M 4 #define SGEMM_DEFAULT_UNROLL_N 4 @@ -2498,56 +2649,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#define CGEMM_DEFAULT_R 4096 #define ZGEMM_DEFAULT_R 4096 - -#define SYMV_P 16 -#endif - -#if defined(THUNDERX) -#define SNUMOPT 2 -#define DNUMOPT 2 - -#define GEMM_DEFAULT_OFFSET_A 0 -#define GEMM_DEFAULT_OFFSET_B 0 -#define GEMM_DEFAULT_ALIGN 0x03fffUL - -#define SGEMM_DEFAULT_UNROLL_M 4 -#define SGEMM_DEFAULT_UNROLL_N 4 - -#define DGEMM_DEFAULT_UNROLL_M 2 -#define DGEMM_DEFAULT_UNROLL_N 2 - -#define CGEMM_DEFAULT_UNROLL_M 2 -#define CGEMM_DEFAULT_UNROLL_N 2 - -#define ZGEMM_DEFAULT_UNROLL_M 2 -#define ZGEMM_DEFAULT_UNROLL_N 2 - -#define SGEMM_DEFAULT_P 128 -#define DGEMM_DEFAULT_P 128 -#define CGEMM_DEFAULT_P 96 -#define ZGEMM_DEFAULT_P 64 - -#define SGEMM_DEFAULT_Q 240 -#define DGEMM_DEFAULT_Q 120 -#define CGEMM_DEFAULT_Q 120 -#define ZGEMM_DEFAULT_Q 120 - -#define SGEMM_DEFAULT_R 12288 -#define DGEMM_DEFAULT_R 8192 -#define CGEMM_DEFAULT_R 4096 -#define ZGEMM_DEFAULT_R 4096 - - -#define SYMV_P 16 -#endif - -#if defined(THUNDERX2T99) || defined(VULCAN) -#define SNUMOPT 2 -#define DNUMOPT 2 - -#define GEMM_DEFAULT_OFFSET_A 0 -#define GEMM_DEFAULT_OFFSET_B 0 -#define GEMM_DEFAULT_ALIGN 0x03fffUL +#elif defined(THUNDERX2T99) #define SGEMM_DEFAULT_UNROLL_M 16 #define SGEMM_DEFAULT_UNROLL_N 4 @@ -2561,23 +2663,55 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define ZGEMM_DEFAULT_UNROLL_M 4 #define ZGEMM_DEFAULT_UNROLL_N 4 -#define SGEMM_DEFAULT_P sgemm_p -#define DGEMM_DEFAULT_P dgemm_p -#define CGEMM_DEFAULT_P cgemm_p -#define ZGEMM_DEFAULT_P zgemm_p +#define SGEMM_DEFAULT_P 128 +#define DGEMM_DEFAULT_P 160 +#define CGEMM_DEFAULT_P 128 +#define ZGEMM_DEFAULT_P 128 -#define SGEMM_DEFAULT_Q sgemm_q -#define DGEMM_DEFAULT_Q dgemm_q -#define CGEMM_DEFAULT_Q cgemm_q -#define ZGEMM_DEFAULT_Q zgemm_q +#define SGEMM_DEFAULT_Q 352 +#define DGEMM_DEFAULT_Q 128 +#define CGEMM_DEFAULT_Q 224 +#define ZGEMM_DEFAULT_Q 112 -#define SGEMM_DEFAULT_R sgemm_r -#define DGEMM_DEFAULT_R dgemm_r -#define CGEMM_DEFAULT_R cgemm_r -#define ZGEMM_DEFAULT_R zgemm_r +#define SGEMM_DEFAULT_R 4096 +#define DGEMM_DEFAULT_R 4096 +#define CGEMM_DEFAULT_R 4096 +#define ZGEMM_DEFAULT_R 4096 -#define SYMV_P 16 -#endif +#else // Other/undetected ARMv8 cores + +#define SGEMM_DEFAULT_UNROLL_M 16 +#define SGEMM_DEFAULT_UNROLL_N 4 + +#define DGEMM_DEFAULT_UNROLL_M 8 +#define DGEMM_DEFAULT_UNROLL_N 4 + +#define CGEMM_DEFAULT_UNROLL_M 8 +#define CGEMM_DEFAULT_UNROLL_N 4 + +#define ZGEMM_DEFAULT_UNROLL_M 4 +#define ZGEMM_DEFAULT_UNROLL_N 4 + +#define SGEMM_DEFAULT_P 128 +#define DGEMM_DEFAULT_P 160 +#define CGEMM_DEFAULT_P 128 +#define ZGEMM_DEFAULT_P 128 + +#define SGEMM_DEFAULT_Q 352 +#define DGEMM_DEFAULT_Q 128 +#define CGEMM_DEFAULT_Q 224 +#define ZGEMM_DEFAULT_Q 112 + +#define SGEMM_DEFAULT_R 4096 +#define DGEMM_DEFAULT_R 4096 +#define CGEMM_DEFAULT_R 4096 +#define ZGEMM_DEFAULT_R 4096 + +#endif // Cores + +#endif // Linux / Darwin + +#endif // ARMv8 #if defined(ARMV5) #define SNUMOPT 2 diff --git a/test/Makefile b/test/Makefile index 65fb6f438..074411b05 100644 --- a/test/Makefile +++ b/test/Makefile @@ -122,8 +122,13 @@ endif FLDFLAGS = $(FFLAGS:-fPIC=) $(LDFLAGS) -CEXTRALIB = - +ifeq ($(USE_OPENMP), 1) +ifeq ($(F_COMPILER), GFORTRAN) +ifeq ($(C_COMPILER), CLANG) +CEXTRALIB = -lomp +endif +endif +endif sblat1 : sblat1.$(SUFFIX) ../$(LIBNAME) $(FC) $(FLDFLAGS) -o sblat1 sblat1.$(SUFFIX) ../$(LIBNAME) $(EXTRALIB) $(CEXTRALIB) diff --git a/test/dblat1.f b/test/dblat1.f index 5a45d69f4..f3255fef4 100644 --- a/test/dblat1.f +++ b/test/dblat1.f @@ -1,12 +1,54 @@ +*> \brief \b DBLAT1 +* +* =========== 
DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM DBLAT1 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the DOUBLE PRECISION Level 1 BLAS. +*> +*> Based upon the original BLAS test routine together with: +*> F06EAF Example Program Text +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup double_blas_testing +* +* ===================================================================== PROGRAM DBLAT1 -* Test program for the DOUBLE PRECISION Level 1 BLAS. -* Based upon the original BLAS test routine together with: -* F06EAF Example Program Text +* +* -- Reference BLAS test routine (version 3.8.0) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 +* +* ===================================================================== +* * .. Parameters .. INTEGER NOUT PARAMETER (NOUT=6) * .. Scalars in Common .. - INTEGER ICASE, INCX, INCY, MODE, N + INTEGER ICASE, INCX, INCY, N LOGICAL PASS * .. Local Scalars .. DOUBLE PRECISION SFAC @@ -14,31 +56,30 @@ * .. External Subroutines .. EXTERNAL CHECK0, CHECK1, CHECK2, CHECK3, HEADER * .. Common blocks .. - COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS * .. Data statements .. DATA SFAC/9.765625D-4/ * .. Executable Statements .. WRITE (NOUT,99999) - DO 20 IC = 1, 10 + DO 20 IC = 1, 13 ICASE = IC CALL HEADER * -* .. Initialize PASS, INCX, INCY, and MODE for a new case. .. -* .. the value 9999 for INCX, INCY or MODE will appear in the .. +* .. Initialize PASS, INCX, and INCY for a new case. .. +* .. the value 9999 for INCX or INCY will appear in the .. * .. detailed output, if any, for cases that do not involve .. * .. these parameters .. * PASS = .TRUE. INCX = 9999 INCY = 9999 - MODE = 9999 - IF (ICASE.EQ.3) THEN + IF (ICASE.EQ.3 .OR. ICASE.EQ.11) THEN CALL CHECK0(SFAC) ELSE IF (ICASE.EQ.7 .OR. ICASE.EQ.8 .OR. ICASE.EQ.9 .OR. + ICASE.EQ.10) THEN CALL CHECK1(SFAC) ELSE IF (ICASE.EQ.1 .OR. ICASE.EQ.2 .OR. ICASE.EQ.5 .OR. - + ICASE.EQ.6) THEN + + ICASE.EQ.6 .OR. ICASE.EQ.12 .OR. ICASE.EQ.13) THEN CALL CHECK2(SFAC) ELSE IF (ICASE.EQ.4) THEN CALL CHECK3(SFAC) @@ -56,12 +97,12 @@ INTEGER NOUT PARAMETER (NOUT=6) * .. Scalars in Common .. - INTEGER ICASE, INCX, INCY, MODE, N + INTEGER ICASE, INCX, INCY, N LOGICAL PASS * .. Local Arrays .. - CHARACTER*6 L(10) + CHARACTER*6 L(13) * .. Common blocks .. - COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS * .. Data statements .. DATA L(1)/' DDOT '/ DATA L(2)/'DAXPY '/ @@ -73,6 +114,9 @@ DATA L(8)/'DASUM '/ DATA L(9)/'DSCAL '/ DATA L(10)/'IDAMAX'/ + DATA L(11)/'DROTMG'/ + DATA L(12)/'DROTM '/ + DATA L(13)/'DSDOT '/ * .. Executable Statements .. WRITE (NOUT,99999) ICASE, L(ICASE) RETURN @@ -86,18 +130,18 @@ * .. Scalar Arguments .. DOUBLE PRECISION SFAC * .. Scalars in Common .. - INTEGER ICASE, INCX, INCY, MODE, N + INTEGER ICASE, INCX, INCY, N LOGICAL PASS * .. Local Scalars .. - DOUBLE PRECISION D12, SA, SB, SC, SS - INTEGER K + DOUBLE PRECISION SA, SB, SC, SS, D12 + INTEGER I, K * .. Local Arrays .. 
DOUBLE PRECISION DA1(8), DATRUE(8), DB1(8), DBTRUE(8), DC1(8), - + DS1(8) + $ DS1(8), DAB(4,9), DTEMP(9), DTRUE(9,9) * .. External Subroutines .. - EXTERNAL DROTG, STEST1 + EXTERNAL DROTG, DROTMG, STEST, STEST1 * .. Common blocks .. - COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS * .. Data statements .. DATA DA1/0.3D0, 0.4D0, -0.3D0, -0.4D0, -0.3D0, 0.0D0, + 0.0D0, 1.0D0/ @@ -111,7 +155,52 @@ + 0.0D0, 1.0D0, 1.0D0/ DATA DBTRUE/0.0D0, 0.6D0, 0.0D0, -0.6D0, 0.0D0, + 0.0D0, 1.0D0, 0.0D0/ - DATA D12/4096.0D0/ +* INPUT FOR MODIFIED GIVENS + DATA DAB/ .1D0,.3D0,1.2D0,.2D0, + A .7D0, .2D0, .6D0, 4.2D0, + B 0.D0,0.D0,0.D0,0.D0, + C 4.D0, -1.D0, 2.D0, 4.D0, + D 6.D-10, 2.D-2, 1.D5, 10.D0, + E 4.D10, 2.D-2, 1.D-5, 10.D0, + F 2.D-10, 4.D-2, 1.D5, 10.D0, + G 2.D10, 4.D-2, 1.D-5, 10.D0, + H 4.D0, -2.D0, 8.D0, 4.D0 / +* TRUE RESULTS FOR MODIFIED GIVENS + DATA DTRUE/0.D0,0.D0, 1.3D0, .2D0, 0.D0,0.D0,0.D0, .5D0, 0.D0, + A 0.D0,0.D0, 4.5D0, 4.2D0, 1.D0, .5D0, 0.D0,0.D0,0.D0, + B 0.D0,0.D0,0.D0,0.D0, -2.D0, 0.D0,0.D0,0.D0,0.D0, + C 0.D0,0.D0,0.D0, 4.D0, -1.D0, 0.D0,0.D0,0.D0,0.D0, + D 0.D0, 15.D-3, 0.D0, 10.D0, -1.D0, 0.D0, -1.D-4, + E 0.D0, 1.D0, + F 0.D0,0.D0, 6144.D-5, 10.D0, -1.D0, 4096.D0, -1.D6, + G 0.D0, 1.D0, + H 0.D0,0.D0,15.D0,10.D0,-1.D0, 5.D-5, 0.D0,1.D0,0.D0, + I 0.D0,0.D0, 15.D0, 10.D0, -1. D0, 5.D5, -4096.D0, + J 1.D0, 4096.D-6, + K 0.D0,0.D0, 7.D0, 4.D0, 0.D0,0.D0, -.5D0, -.25D0, 0.D0/ +* 4096 = 2 ** 12 + DATA D12 /4096.D0/ + DTRUE(1,1) = 12.D0 / 130.D0 + DTRUE(2,1) = 36.D0 / 130.D0 + DTRUE(7,1) = -1.D0 / 6.D0 + DTRUE(1,2) = 14.D0 / 75.D0 + DTRUE(2,2) = 49.D0 / 75.D0 + DTRUE(9,2) = 1.D0 / 7.D0 + DTRUE(1,5) = 45.D-11 * (D12 * D12) + DTRUE(3,5) = 4.D5 / (3.D0 * D12) + DTRUE(6,5) = 1.D0 / D12 + DTRUE(8,5) = 1.D4 / (3.D0 * D12) + DTRUE(1,6) = 4.D10 / (1.5D0 * D12 * D12) + DTRUE(2,6) = 2.D-2 / 1.5D0 + DTRUE(8,6) = 5.D-7 * D12 + DTRUE(1,7) = 4.D0 / 150.D0 + DTRUE(2,7) = (2.D-10 / 1.5D0) * (D12 * D12) + DTRUE(7,7) = -DTRUE(6,5) + DTRUE(9,7) = 1.D4 / D12 + DTRUE(1,8) = DTRUE(1,7) + DTRUE(2,8) = 2.D10 / (1.5D0 * D12 * D12) + DTRUE(1,9) = 32.D0 / 7.D0 + DTRUE(2,9) = -16.D0 / 7.D0 * .. Executable Statements .. * * Compute true values which cannot be prestored @@ -134,6 +223,15 @@ CALL STEST1(SB,DBTRUE(K),DBTRUE(K),SFAC) CALL STEST1(SC,DC1(K),DC1(K),SFAC) CALL STEST1(SS,DS1(K),DS1(K),SFAC) + ELSEIF (ICASE.EQ.11) THEN +* .. DROTMG .. + DO I=1,4 + DTEMP(I)= DAB(I,K) + DTEMP(I+4) = 0.0 + END DO + DTEMP(9) = 0.0 + CALL DROTMG(DTEMP(1),DTEMP(2),DTEMP(3),DTEMP(4),DTEMP(5)) + CALL STEST(9,DTEMP,DTRUE(1,K),DTRUE(1,K),SFAC) ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK0' STOP @@ -148,7 +246,7 @@ * .. Scalar Arguments .. DOUBLE PRECISION SFAC * .. Scalars in Common .. - INTEGER ICASE, INCX, INCY, MODE, N + INTEGER ICASE, INCX, INCY, N LOGICAL PASS * .. Local Scalars .. INTEGER I, LEN, NP1 @@ -165,7 +263,7 @@ * .. Intrinsic Functions .. INTRINSIC MAX * .. Common blocks .. - COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS * .. Data statements .. DATA SA/0.3D0, -1.0D0, 0.0D0, 1.0D0, 0.3D0, 0.3D0, + 0.3D0, 0.3D0, 0.3D0, 0.3D0/ @@ -212,11 +310,11 @@ IF (ICASE.EQ.7) THEN * .. DNRM2 .. STEMP(1) = DTRUE1(NP1) - CALL STEST1(DNRM2(N,SX,INCX),STEMP,STEMP,SFAC) + CALL STEST1(DNRM2(N,SX,INCX),STEMP(1),STEMP,SFAC) ELSE IF (ICASE.EQ.8) THEN * .. DASUM .. STEMP(1) = DTRUE3(NP1) - CALL STEST1(DASUM(N,SX,INCX),STEMP,STEMP,SFAC) + CALL STEST1(DASUM(N,SX,INCX),STEMP(1),STEMP,SFAC) ELSE IF (ICASE.EQ.9) THEN * .. DSCAL .. 
CALL DSCAL(N,SA((INCX-1)*5+NP1),SX,INCX) @@ -242,27 +340,40 @@ * .. Scalar Arguments .. DOUBLE PRECISION SFAC * .. Scalars in Common .. - INTEGER ICASE, INCX, INCY, MODE, N + INTEGER ICASE, INCX, INCY, N LOGICAL PASS * .. Local Scalars .. - DOUBLE PRECISION SA, SC, SS - INTEGER I, J, KI, KN, KSIZE, LENX, LENY, MX, MY + DOUBLE PRECISION SA + INTEGER I, J, KI, KN, KNI, KPAR, KSIZE, LENX, LENY, + $ MX, MY * .. Local Arrays .. DOUBLE PRECISION DT10X(7,4,4), DT10Y(7,4,4), DT7(4,4), - + DT8(7,4,4), DT9X(7,4,4), DT9Y(7,4,4), DX1(7), - + DY1(7), SSIZE1(4), SSIZE2(14,2), STX(7), STY(7), - + SX(7), SY(7) + $ DT8(7,4,4), DX1(7), + $ DY1(7), SSIZE1(4), SSIZE2(14,2), SSIZE(7), + $ STX(7), STY(7), SX(7), SY(7), + $ DPAR(5,4), DT19X(7,4,16),DT19XA(7,4,4), + $ DT19XB(7,4,4), DT19XC(7,4,4),DT19XD(7,4,4), + $ DT19Y(7,4,16), DT19YA(7,4,4),DT19YB(7,4,4), + $ DT19YC(7,4,4), DT19YD(7,4,4), DTEMP(5) INTEGER INCXS(4), INCYS(4), LENS(4,2), NS(4) * .. External Functions .. - DOUBLE PRECISION DDOT - EXTERNAL DDOT + DOUBLE PRECISION DDOT, DSDOT + EXTERNAL DDOT, DSDOT * .. External Subroutines .. - EXTERNAL DAXPY, DCOPY, DSWAP, STEST, STEST1 + EXTERNAL DAXPY, DCOPY, DROTM, DSWAP, STEST, STEST1, + $ TESTDSDOT * .. Intrinsic Functions .. INTRINSIC ABS, MIN * .. Common blocks .. - COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS * .. Data statements .. + EQUIVALENCE (DT19X(1,1,1),DT19XA(1,1,1)),(DT19X(1,1,5), + A DT19XB(1,1,1)),(DT19X(1,1,9),DT19XC(1,1,1)), + B (DT19X(1,1,13),DT19XD(1,1,1)) + EQUIVALENCE (DT19Y(1,1,1),DT19YA(1,1,1)),(DT19Y(1,1,5), + A DT19YB(1,1,1)),(DT19Y(1,1,9),DT19YC(1,1,1)), + B (DT19Y(1,1,13),DT19YD(1,1,1)) + DATA SA/0.3D0/ DATA INCXS/1, 2, -2, -1/ DATA INCYS/1, -2, 1, -2/ @@ -272,7 +383,6 @@ + -0.4D0/ DATA DY1/0.5D0, -0.9D0, 0.3D0, 0.7D0, -0.6D0, 0.2D0, + 0.8D0/ - DATA SC, SS/0.8D0, 0.6D0/ DATA DT7/0.0D0, 0.30D0, 0.21D0, 0.62D0, 0.0D0, + 0.30D0, -0.07D0, 0.85D0, 0.0D0, 0.30D0, -0.79D0, + -0.74D0, 0.0D0, 0.30D0, 0.33D0, 1.27D0/ @@ -295,44 +405,6 @@ + 0.0D0, 0.68D0, -0.9D0, 0.33D0, 0.0D0, 0.0D0, + 0.0D0, 0.0D0, 0.68D0, -0.9D0, 0.33D0, 0.7D0, + -0.75D0, 0.2D0, 1.04D0/ - DATA DT9X/0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.78D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.78D0, -0.46D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.78D0, -0.46D0, -0.22D0, - + 1.06D0, 0.0D0, 0.0D0, 0.0D0, 0.6D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.78D0, - + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.66D0, 0.1D0, -0.1D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.96D0, 0.1D0, -0.76D0, 0.8D0, 0.90D0, - + -0.3D0, -0.02D0, 0.6D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.78D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.0D0, -0.06D0, 0.1D0, - + -0.1D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.90D0, - + 0.1D0, -0.22D0, 0.8D0, 0.18D0, -0.3D0, -0.02D0, - + 0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.78D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.78D0, 0.26D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.78D0, 0.26D0, -0.76D0, 1.12D0, - + 0.0D0, 0.0D0, 0.0D0/ - DATA DT9Y/0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.04D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.04D0, -0.78D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.04D0, -0.78D0, 0.54D0, - + 0.08D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.04D0, - + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.7D0, - + -0.9D0, -0.12D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.64D0, -0.9D0, -0.30D0, 0.7D0, -0.18D0, 0.2D0, - + 0.28D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 
0.0D0, 0.04D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.7D0, -1.08D0, 0.0D0, - + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.64D0, -1.26D0, - + 0.54D0, 0.20D0, 0.0D0, 0.0D0, 0.0D0, 0.5D0, - + 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.04D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, - + 0.0D0, 0.04D0, -0.9D0, 0.18D0, 0.0D0, 0.0D0, - + 0.0D0, 0.0D0, 0.04D0, -0.9D0, 0.18D0, 0.7D0, - + -0.18D0, 0.2D0, 0.16D0/ DATA DT10X/0.6D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + 0.0D0, 0.5D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, 0.0D0, + 0.0D0, 0.5D0, -0.9D0, 0.0D0, 0.0D0, 0.0D0, @@ -375,6 +447,150 @@ + 0.0D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, + 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, 1.17D0, + 1.17D0, 1.17D0, 1.17D0/ +* +* FOR DROTM +* + DATA DPAR/-2.D0, 0.D0,0.D0,0.D0,0.D0, + A -1.D0, 2.D0, -3.D0, -4.D0, 5.D0, + B 0.D0, 0.D0, 2.D0, -3.D0, 0.D0, + C 1.D0, 5.D0, 2.D0, 0.D0, -4.D0/ +* TRUE X RESULTS F0R ROTATIONS DROTM + DATA DT19XA/.6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + A .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + B .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + C .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + D .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + E -.8D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + F -.9D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + G 3.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + H .6D0, .1D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + I -.8D0, 3.8D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + J -.9D0, 2.8D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + K 3.5D0, -.4D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + L .6D0, .1D0, -.5D0, .8D0, 0.D0,0.D0,0.D0, + M -.8D0, 3.8D0, -2.2D0, -1.2D0, 0.D0,0.D0,0.D0, + N -.9D0, 2.8D0, -1.4D0, -1.3D0, 0.D0,0.D0,0.D0, + O 3.5D0, -.4D0, -2.2D0, 4.7D0, 0.D0,0.D0,0.D0/ +* + DATA DT19XB/.6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + A .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + B .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + C .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + D .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + E -.8D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + F -.9D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + G 3.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + H .6D0, .1D0, -.5D0, 0.D0,0.D0,0.D0,0.D0, + I 0.D0, .1D0, -3.0D0, 0.D0,0.D0,0.D0,0.D0, + J -.3D0, .1D0, -2.0D0, 0.D0,0.D0,0.D0,0.D0, + K 3.3D0, .1D0, -2.0D0, 0.D0,0.D0,0.D0,0.D0, + L .6D0, .1D0, -.5D0, .8D0, .9D0, -.3D0, -.4D0, + M -2.0D0, .1D0, 1.4D0, .8D0, .6D0, -.3D0, -2.8D0, + N -1.8D0, .1D0, 1.3D0, .8D0, 0.D0, -.3D0, -1.9D0, + O 3.8D0, .1D0, -3.1D0, .8D0, 4.8D0, -.3D0, -1.5D0 / +* + DATA DT19XC/.6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + A .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + B .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + C .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + D .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + E -.8D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + F -.9D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + G 3.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + H .6D0, .1D0, -.5D0, 0.D0,0.D0,0.D0,0.D0, + I 4.8D0, .1D0, -3.0D0, 0.D0,0.D0,0.D0,0.D0, + J 3.3D0, .1D0, -2.0D0, 0.D0,0.D0,0.D0,0.D0, + K 2.1D0, .1D0, -2.0D0, 0.D0,0.D0,0.D0,0.D0, + L .6D0, .1D0, -.5D0, .8D0, .9D0, -.3D0, -.4D0, + M -1.6D0, .1D0, -2.2D0, .8D0, 5.4D0, -.3D0, -2.8D0, + N -1.5D0, .1D0, -1.4D0, .8D0, 3.6D0, -.3D0, -1.9D0, + O 3.7D0, .1D0, -2.2D0, .8D0, 3.6D0, -.3D0, -1.5D0 / +* + DATA DT19XD/.6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + A .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + B .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + C .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + D .6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + E -.8D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + F -.9D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + G 3.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + H .6D0, .1D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + I -.8D0, -1.0D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + J -.9D0, -.8D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + 
K 3.5D0, .8D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + L .6D0, .1D0, -.5D0, .8D0, 0.D0,0.D0,0.D0, + M -.8D0, -1.0D0, 1.4D0, -1.6D0, 0.D0,0.D0,0.D0, + N -.9D0, -.8D0, 1.3D0, -1.6D0, 0.D0,0.D0,0.D0, + O 3.5D0, .8D0, -3.1D0, 4.8D0, 0.D0,0.D0,0.D0/ +* TRUE Y RESULTS FOR ROTATIONS DROTM + DATA DT19YA/.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + A .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + B .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + C .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + D .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + E .7D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + F 1.7D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + G -2.6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + H .5D0, -.9D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + I .7D0, -4.8D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + J 1.7D0, -.7D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + K -2.6D0, 3.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + L .5D0, -.9D0, .3D0, .7D0, 0.D0,0.D0,0.D0, + M .7D0, -4.8D0, 3.0D0, 1.1D0, 0.D0,0.D0,0.D0, + N 1.7D0, -.7D0, -.7D0, 2.3D0, 0.D0,0.D0,0.D0, + O -2.6D0, 3.5D0, -.7D0, -3.6D0, 0.D0,0.D0,0.D0/ +* + DATA DT19YB/.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + A .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + B .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + C .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + D .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + E .7D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + F 1.7D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + G -2.6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + H .5D0, -.9D0, .3D0, 0.D0,0.D0,0.D0,0.D0, + I 4.0D0, -.9D0, -.3D0, 0.D0,0.D0,0.D0,0.D0, + J -.5D0, -.9D0, 1.5D0, 0.D0,0.D0,0.D0,0.D0, + K -1.5D0, -.9D0, -1.8D0, 0.D0,0.D0,0.D0,0.D0, + L .5D0, -.9D0, .3D0, .7D0, -.6D0, .2D0, .8D0, + M 3.7D0, -.9D0, -1.2D0, .7D0, -1.5D0, .2D0, 2.2D0, + N -.3D0, -.9D0, 2.1D0, .7D0, -1.6D0, .2D0, 2.0D0, + O -1.6D0, -.9D0, -2.1D0, .7D0, 2.9D0, .2D0, -3.8D0 / +* + DATA DT19YC/.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + A .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + B .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + C .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + D .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + E .7D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + F 1.7D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + G -2.6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + H .5D0, -.9D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + I 4.0D0, -6.3D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + J -.5D0, .3D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + K -1.5D0, 3.0D0, 0.D0,0.D0,0.D0,0.D0,0.D0, + L .5D0, -.9D0, .3D0, .7D0, 0.D0,0.D0,0.D0, + M 3.7D0, -7.2D0, 3.0D0, 1.7D0, 0.D0,0.D0,0.D0, + N -.3D0, .9D0, -.7D0, 1.9D0, 0.D0,0.D0,0.D0, + O -1.6D0, 2.7D0, -.7D0, -3.4D0, 0.D0,0.D0,0.D0/ +* + DATA DT19YD/.5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + A .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + B .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + C .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + D .5D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + E .7D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + F 1.7D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + G -2.6D0, 0.D0,0.D0,0.D0,0.D0,0.D0,0.D0, + H .5D0, -.9D0, .3D0, 0.D0,0.D0,0.D0,0.D0, + I .7D0, -.9D0, 1.2D0, 0.D0,0.D0,0.D0,0.D0, + J 1.7D0, -.9D0, .5D0, 0.D0,0.D0,0.D0,0.D0, + K -2.6D0, -.9D0, -1.3D0, 0.D0,0.D0,0.D0,0.D0, + L .5D0, -.9D0, .3D0, .7D0, -.6D0, .2D0, .8D0, + M .7D0, -.9D0, 1.2D0, .7D0, -1.5D0, .2D0, 1.6D0, + N 1.7D0, -.9D0, .5D0, .7D0, -1.6D0, .2D0, 2.4D0, + O -2.6D0, -.9D0, -1.3D0, .7D0, 2.9D0, .2D0, -4.0D0 / +* * .. Executable Statements .. * DO 120 KI = 1, 4 @@ -421,6 +637,39 @@ 80 CONTINUE CALL STEST(LENX,SX,STX,SSIZE2(1,1),1.0D0) CALL STEST(LENY,SY,STY,SSIZE2(1,1),1.0D0) + ELSE IF (ICASE.EQ.12) THEN +* .. DROTM .. 
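+*     KNI picks one of the 16 stored result panels (4 values of N
+*     times 4 increment pairs); the four columns of DPAR exercise
+*     the DROTM flag values -2, -1, 0 and 1.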
+ KNI=KN+4*(KI-1) + DO KPAR=1,4 + DO I=1,7 + SX(I) = DX1(I) + SY(I) = DY1(I) + STX(I)= DT19X(I,KPAR,KNI) + STY(I)= DT19Y(I,KPAR,KNI) + END DO +* + DO I=1,5 + DTEMP(I) = DPAR(I,KPAR) + END DO +* + DO I=1,LENX + SSIZE(I)=STX(I) + END DO +* SEE REMARK ABOVE ABOUT DT11X(1,2,7) +* AND DT11X(5,3,8). + IF ((KPAR .EQ. 2) .AND. (KNI .EQ. 7)) + $ SSIZE(1) = 2.4D0 + IF ((KPAR .EQ. 3) .AND. (KNI .EQ. 8)) + $ SSIZE(5) = 1.8D0 +* + CALL DROTM(N,SX,INCX,SY,INCY,DTEMP) + CALL STEST(LENX,SX,STX,SSIZE,SFAC) + CALL STEST(LENY,SY,STY,STY,SFAC) + END DO + ELSE IF (ICASE.EQ.13) THEN +* .. DSDOT .. + CALL TESTDSDOT(REAL(DSDOT(N,REAL(SX),INCX,REAL(SY),INCY)), + $ REAL(DT7(KN,KI)),REAL(SSIZE1(KN)), .3125E-1) ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK2' STOP @@ -436,10 +685,10 @@ * .. Scalar Arguments .. DOUBLE PRECISION SFAC * .. Scalars in Common .. - INTEGER ICASE, INCX, INCY, MODE, N + INTEGER ICASE, INCX, INCY, N LOGICAL PASS * .. Local Scalars .. - DOUBLE PRECISION SA, SC, SS + DOUBLE PRECISION SC, SS INTEGER I, K, KI, KN, KSIZE, LENX, LENY, MX, MY * .. Local Arrays .. DOUBLE PRECISION COPYX(5), COPYY(5), DT9X(7,4,4), DT9Y(7,4,4), @@ -454,9 +703,8 @@ * .. Intrinsic Functions .. INTRINSIC ABS, MIN * .. Common blocks .. - COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS * .. Data statements .. - DATA SA/0.3D0/ DATA INCXS/1, 2, -2, -1/ DATA INCYS/1, -2, 1, -2/ DATA LENS/1, 1, 2, 4, 1, 1, 3, 7/ @@ -647,14 +895,15 @@ * * .. Parameters .. INTEGER NOUT - PARAMETER (NOUT=6) + DOUBLE PRECISION ZERO + PARAMETER (NOUT=6, ZERO=0.0D0) * .. Scalar Arguments .. DOUBLE PRECISION SFAC INTEGER LEN * .. Array Arguments .. DOUBLE PRECISION SCOMP(LEN), SSIZE(LEN), STRUE(LEN) * .. Scalars in Common .. - INTEGER ICASE, INCX, INCY, MODE, N + INTEGER ICASE, INCX, INCY, N LOGICAL PASS * .. Local Scalars .. DOUBLE PRECISION SD @@ -665,12 +914,12 @@ * .. Intrinsic Functions .. INTRINSIC ABS * .. Common blocks .. - COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS * .. Executable Statements .. * DO 40 I = 1, LEN SD = SCOMP(I) - STRUE(I) - IF (SDIFF(ABS(SSIZE(I))+ABS(SFAC*SD),ABS(SSIZE(I))).EQ.0.0D0) + IF (ABS(SFAC*SD) .LE. ABS(SSIZE(I))*EPSILON(ZERO)) + GO TO 40 * * HERE SCOMP(I) IS NOT CLOSE TO STRUE(I). @@ -680,16 +929,64 @@ PASS = .FALSE. WRITE (NOUT,99999) WRITE (NOUT,99998) - 20 WRITE (NOUT,99997) ICASE, N, INCX, INCY, MODE, I, SCOMP(I), + 20 WRITE (NOUT,99997) ICASE, N, INCX, INCY, I, SCOMP(I), + STRUE(I), SD, SSIZE(I) 40 CONTINUE RETURN * 99999 FORMAT (' FAIL') -99998 FORMAT (/' CASE N INCX INCY MODE I ', +99998 FORMAT (/' CASE N INCX INCY I ', + ' COMP(I) TRUE(I) DIFFERENCE', + ' SIZE(I)',/1X) -99997 FORMAT (1X,I4,I3,3I5,I3,2D36.8,2D12.4) +99997 FORMAT (1X,I4,I3,2I5,I3,2D36.8,2D12.4) + END + SUBROUTINE TESTDSDOT(SCOMP,STRUE,SSIZE,SFAC) +* ********************************* STEST ************************** +* +* THIS SUBR COMPARES ARRAYS SCOMP() AND STRUE() OF LENGTH LEN TO +* SEE IF THE TERM BY TERM DIFFERENCES, MULTIPLIED BY SFAC, ARE +* NEGLIGIBLE. +* +* C. L. LAWSON, JPL, 1974 DEC 10 +* +* .. Parameters .. + INTEGER NOUT + REAL ZERO + PARAMETER (NOUT=6, ZERO=0.0E0) +* .. Scalar Arguments .. + REAL SFAC, SCOMP, SSIZE, STRUE +* .. Scalars in Common .. + INTEGER ICASE, INCX, INCY, N + LOGICAL PASS +* .. Local Scalars .. + REAL SD +* .. Intrinsic Functions .. + INTRINSIC ABS +* .. Common blocks .. + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS +* .. Executable Statements .. +* + SD = SCOMP - STRUE + IF (ABS(SFAC*SD) .LE. 
ABS(SSIZE) * EPSILON(ZERO)) + + GO TO 40 +* +* HERE SCOMP(I) IS NOT CLOSE TO STRUE(I). +* + IF ( .NOT. PASS) GO TO 20 +* PRINT FAIL MESSAGE AND HEADER. + PASS = .FALSE. + WRITE (NOUT,99999) + WRITE (NOUT,99998) + 20 WRITE (NOUT,99997) ICASE, N, INCX, INCY, SCOMP, + + STRUE, SD, SSIZE + 40 CONTINUE + RETURN +* +99999 FORMAT (' FAIL') +99998 FORMAT (/' CASE N INCX INCY ', + + ' COMP(I) TRUE(I) DIFFERENCE', + + ' SIZE(I)',/1X) +99997 FORMAT (1X,I4,I3,1I5,I3,2E36.8,2E12.4) END SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC) * ************************* STEST1 ***************************** @@ -739,12 +1036,12 @@ * .. Scalar Arguments .. INTEGER ICOMP, ITRUE * .. Scalars in Common .. - INTEGER ICASE, INCX, INCY, MODE, N + INTEGER ICASE, INCX, INCY, N LOGICAL PASS * .. Local Scalars .. INTEGER ID * .. Common blocks .. - COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS * .. Executable Statements .. * IF (ICOMP.EQ.ITRUE) GO TO 40 @@ -757,13 +1054,13 @@ WRITE (NOUT,99999) WRITE (NOUT,99998) 20 ID = ICOMP - ITRUE - WRITE (NOUT,99997) ICASE, N, INCX, INCY, MODE, ICOMP, ITRUE, ID + WRITE (NOUT,99997) ICASE, N, INCX, INCY, ICOMP, ITRUE, ID 40 CONTINUE RETURN * 99999 FORMAT (' FAIL') -99998 FORMAT (/' CASE N INCX INCY MODE ', +99998 FORMAT (/' CASE N INCX INCY ', + ' COMP TRUE DIFFERENCE', + /1X) -99997 FORMAT (1X,I4,I3,3I5,2I36,I12) +99997 FORMAT (1X,I4,I3,2I5,2I36,I12) END diff --git a/test/sblat1.f b/test/sblat1.f index a982d1852..a5c1c6af6 100644 --- a/test/sblat1.f +++ b/test/sblat1.f @@ -1,12 +1,54 @@ +*> \brief \b SBLAT1 +* +* =========== DOCUMENTATION =========== +* +* Online html documentation available at +* http://www.netlib.org/lapack/explore-html/ +* +* Definition: +* =========== +* +* PROGRAM SBLAT1 +* +* +*> \par Purpose: +* ============= +*> +*> \verbatim +*> +*> Test program for the REAL Level 1 BLAS. +*> +*> Based upon the original BLAS test routine together with: +*> F06EAF Example Program Text +*> \endverbatim +* +* Authors: +* ======== +* +*> \author Univ. of Tennessee +*> \author Univ. of California Berkeley +*> \author Univ. of Colorado Denver +*> \author NAG Ltd. +* +*> \date April 2012 +* +*> \ingroup single_blas_testing +* +* ===================================================================== PROGRAM SBLAT1 -* Test program for the REAL Level 1 BLAS. -* Based upon the original BLAS test routine together with: -* F06EAF Example Program Text +* +* -- Reference BLAS test routine (version 3.8.0) -- +* -- Reference BLAS is a software package provided by Univ. of Tennessee, -- +* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- +* April 2012 +* +* ===================================================================== +* * .. Parameters .. INTEGER NOUT PARAMETER (NOUT=6) * .. Scalars in Common .. - INTEGER ICASE, INCX, INCY, MODE, N + INTEGER ICASE, INCX, INCY, N LOGICAL PASS * .. Local Scalars .. REAL SFAC @@ -14,31 +56,30 @@ * .. External Subroutines .. EXTERNAL CHECK0, CHECK1, CHECK2, CHECK3, HEADER * .. Common blocks .. - COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS * .. Data statements .. DATA SFAC/9.765625E-4/ * .. Executable Statements .. WRITE (NOUT,99999) - DO 20 IC = 1, 10 + DO 20 IC = 1, 13 ICASE = IC CALL HEADER * -* .. Initialize PASS, INCX, INCY, and MODE for a new case. .. -* .. the value 9999 for INCX, INCY or MODE will appear in the .. +* .. Initialize PASS, INCX, and INCY for a new case. .. +* .. the value 9999 for INCX or INCY will appear in the .. * .. 
detailed output, if any, for cases that do not involve .. * .. these parameters .. * PASS = .TRUE. INCX = 9999 INCY = 9999 - MODE = 9999 - IF (ICASE.EQ.3) THEN + IF (ICASE.EQ.3 .OR. ICASE.EQ.11) THEN CALL CHECK0(SFAC) ELSE IF (ICASE.EQ.7 .OR. ICASE.EQ.8 .OR. ICASE.EQ.9 .OR. + ICASE.EQ.10) THEN CALL CHECK1(SFAC) ELSE IF (ICASE.EQ.1 .OR. ICASE.EQ.2 .OR. ICASE.EQ.5 .OR. - + ICASE.EQ.6) THEN + + ICASE.EQ.6 .OR. ICASE.EQ.12 .OR. ICASE.EQ.13) THEN CALL CHECK2(SFAC) ELSE IF (ICASE.EQ.4) THEN CALL CHECK3(SFAC) @@ -56,12 +97,12 @@ INTEGER NOUT PARAMETER (NOUT=6) * .. Scalars in Common .. - INTEGER ICASE, INCX, INCY, MODE, N + INTEGER ICASE, INCX, INCY, N LOGICAL PASS * .. Local Arrays .. - CHARACTER*6 L(10) + CHARACTER*6 L(13) * .. Common blocks .. - COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS * .. Data statements .. DATA L(1)/' SDOT '/ DATA L(2)/'SAXPY '/ @@ -73,6 +114,9 @@ DATA L(8)/'SASUM '/ DATA L(9)/'SSCAL '/ DATA L(10)/'ISAMAX'/ + DATA L(11)/'SROTMG'/ + DATA L(12)/'SROTM '/ + DATA L(13)/'SDSDOT'/ * .. Executable Statements .. WRITE (NOUT,99999) ICASE, L(ICASE) RETURN @@ -86,18 +130,18 @@ * .. Scalar Arguments .. REAL SFAC * .. Scalars in Common .. - INTEGER ICASE, INCX, INCY, MODE, N + INTEGER ICASE, INCX, INCY, N LOGICAL PASS * .. Local Scalars .. REAL D12, SA, SB, SC, SS - INTEGER K + INTEGER I, K * .. Local Arrays .. REAL DA1(8), DATRUE(8), DB1(8), DBTRUE(8), DC1(8), - + DS1(8) + + DS1(8), DAB(4,9), DTEMP(9), DTRUE(9,9) * .. External Subroutines .. - EXTERNAL SROTG, STEST1 + EXTERNAL SROTG, SROTMG, STEST, STEST1 * .. Common blocks .. - COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS * .. Data statements .. DATA DA1/0.3E0, 0.4E0, -0.3E0, -0.4E0, -0.3E0, 0.0E0, + 0.0E0, 1.0E0/ @@ -111,7 +155,52 @@ + 0.0E0, 1.0E0, 1.0E0/ DATA DBTRUE/0.0E0, 0.6E0, 0.0E0, -0.6E0, 0.0E0, + 0.0E0, 1.0E0, 0.0E0/ - DATA D12/4096.0E0/ +* INPUT FOR MODIFIED GIVENS + DATA DAB/ .1E0,.3E0,1.2E0,.2E0, + A .7E0, .2E0, .6E0, 4.2E0, + B 0.E0,0.E0,0.E0,0.E0, + C 4.E0, -1.E0, 2.E0, 4.E0, + D 6.E-10, 2.E-2, 1.E5, 10.E0, + E 4.E10, 2.E-2, 1.E-5, 10.E0, + F 2.E-10, 4.E-2, 1.E5, 10.E0, + G 2.E10, 4.E-2, 1.E-5, 10.E0, + H 4.E0, -2.E0, 8.E0, 4.E0 / +* TRUE RESULTS FOR MODIFIED GIVENS + DATA DTRUE/0.E0,0.E0, 1.3E0, .2E0, 0.E0,0.E0,0.E0, .5E0, 0.E0, + A 0.E0,0.E0, 4.5E0, 4.2E0, 1.E0, .5E0, 0.E0,0.E0,0.E0, + B 0.E0,0.E0,0.E0,0.E0, -2.E0, 0.E0,0.E0,0.E0,0.E0, + C 0.E0,0.E0,0.E0, 4.E0, -1.E0, 0.E0,0.E0,0.E0,0.E0, + D 0.E0, 15.E-3, 0.E0, 10.E0, -1.E0, 0.E0, -1.E-4, + E 0.E0, 1.E0, + F 0.E0,0.E0, 6144.E-5, 10.E0, -1.E0, 4096.E0, -1.E6, + G 0.E0, 1.E0, + H 0.E0,0.E0,15.E0,10.E0,-1.E0, 5.E-5, 0.E0,1.E0,0.E0, + I 0.E0,0.E0, 15.E0, 10.E0, -1. E0, 5.E5, -4096.E0, + J 1.E0, 4096.E-6, + K 0.E0,0.E0, 7.E0, 4.E0, 0.E0,0.E0, -.5E0, -.25E0, 0.E0/ +* 4096 = 2 ** 12 + DATA D12 /4096.E0/ + DTRUE(1,1) = 12.E0 / 130.E0 + DTRUE(2,1) = 36.E0 / 130.E0 + DTRUE(7,1) = -1.E0 / 6.E0 + DTRUE(1,2) = 14.E0 / 75.E0 + DTRUE(2,2) = 49.E0 / 75.E0 + DTRUE(9,2) = 1.E0 / 7.E0 + DTRUE(1,5) = 45.E-11 * (D12 * D12) + DTRUE(3,5) = 4.E5 / (3.E0 * D12) + DTRUE(6,5) = 1.E0 / D12 + DTRUE(8,5) = 1.E4 / (3.E0 * D12) + DTRUE(1,6) = 4.E10 / (1.5E0 * D12 * D12) + DTRUE(2,6) = 2.E-2 / 1.5E0 + DTRUE(8,6) = 5.E-7 * D12 + DTRUE(1,7) = 4.E0 / 150.E0 + DTRUE(2,7) = (2.E-10 / 1.5E0) * (D12 * D12) + DTRUE(7,7) = -DTRUE(6,5) + DTRUE(9,7) = 1.E4 / D12 + DTRUE(1,8) = DTRUE(1,7) + DTRUE(2,8) = 2.E10 / (1.5E0 * D12 * D12) + DTRUE(1,9) = 32.E0 / 7.E0 + DTRUE(2,9) = -16.E0 / 7.E0 * .. 
Executable Statements .. * * Compute true values which cannot be prestored @@ -134,6 +223,15 @@ CALL STEST1(SB,DBTRUE(K),DBTRUE(K),SFAC) CALL STEST1(SC,DC1(K),DC1(K),SFAC) CALL STEST1(SS,DS1(K),DS1(K),SFAC) + ELSEIF (ICASE.EQ.11) THEN +* .. SROTMG .. + DO I=1,4 + DTEMP(I)= DAB(I,K) + DTEMP(I+4) = 0.0 + END DO + DTEMP(9) = 0.0 + CALL SROTMG(DTEMP(1),DTEMP(2),DTEMP(3),DTEMP(4),DTEMP(5)) + CALL STEST(9,DTEMP,DTRUE(1,K),DTRUE(1,K),SFAC) ELSE WRITE (NOUT,*) ' Shouldn''t be here in CHECK0' STOP @@ -148,7 +246,7 @@ * .. Scalar Arguments .. REAL SFAC * .. Scalars in Common .. - INTEGER ICASE, INCX, INCY, MODE, N + INTEGER ICASE, INCX, INCY, N LOGICAL PASS * .. Local Scalars .. INTEGER I, LEN, NP1 @@ -165,7 +263,7 @@ * .. Intrinsic Functions .. INTRINSIC MAX * .. Common blocks .. - COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS * .. Data statements .. DATA SA/0.3E0, -1.0E0, 0.0E0, 1.0E0, 0.3E0, 0.3E0, + 0.3E0, 0.3E0, 0.3E0, 0.3E0/ @@ -212,11 +310,11 @@ IF (ICASE.EQ.7) THEN * .. SNRM2 .. STEMP(1) = DTRUE1(NP1) - CALL STEST1(SNRM2(N,SX,INCX),STEMP,STEMP,SFAC) + CALL STEST1(SNRM2(N,SX,INCX),STEMP(1),STEMP,SFAC) ELSE IF (ICASE.EQ.8) THEN * .. SASUM .. STEMP(1) = DTRUE3(NP1) - CALL STEST1(SASUM(N,SX,INCX),STEMP,STEMP,SFAC) + CALL STEST1(SASUM(N,SX,INCX),STEMP(1),STEMP,SFAC) ELSE IF (ICASE.EQ.9) THEN * .. SSCAL .. CALL SSCAL(N,SA((INCX-1)*5+NP1),SX,INCX) @@ -242,27 +340,40 @@ * .. Scalar Arguments .. REAL SFAC * .. Scalars in Common .. - INTEGER ICASE, INCX, INCY, MODE, N + INTEGER ICASE, INCX, INCY, N LOGICAL PASS * .. Local Scalars .. - REAL SA, SC, SS - INTEGER I, J, KI, KN, KSIZE, LENX, LENY, MX, MY + REAL SA + INTEGER I, J, KI, KN, KNI, KPAR, KSIZE, LENX, LENY, + $ MX, MY * .. Local Arrays .. REAL DT10X(7,4,4), DT10Y(7,4,4), DT7(4,4), - + DT8(7,4,4), DT9X(7,4,4), DT9Y(7,4,4), DX1(7), - + DY1(7), SSIZE1(4), SSIZE2(14,2), STX(7), STY(7), - + SX(7), SY(7) + $ DT8(7,4,4), DX1(7), + $ DY1(7), SSIZE1(4), SSIZE2(14,2), SSIZE3(4), + $ SSIZE(7), STX(7), STY(7), SX(7), SY(7), + $ DPAR(5,4), DT19X(7,4,16),DT19XA(7,4,4), + $ DT19XB(7,4,4), DT19XC(7,4,4),DT19XD(7,4,4), + $ DT19Y(7,4,16), DT19YA(7,4,4),DT19YB(7,4,4), + $ DT19YC(7,4,4), DT19YD(7,4,4), DTEMP(5), + $ ST7B(4,4) INTEGER INCXS(4), INCYS(4), LENS(4,2), NS(4) * .. External Functions .. - REAL SDOT - EXTERNAL SDOT + REAL SDOT, SDSDOT + EXTERNAL SDOT, SDSDOT * .. External Subroutines .. - EXTERNAL SAXPY, SCOPY, SSWAP, STEST, STEST1 + EXTERNAL SAXPY, SCOPY, SROTM, SSWAP, STEST, STEST1 * .. Intrinsic Functions .. INTRINSIC ABS, MIN * .. Common blocks .. - COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS * .. Data statements .. 
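+*     The EQUIVALENCE statements below overlay the four 7x4x4 truth
+*     blocks DT19XA..DT19XD and DT19YA..DT19YD onto the single 7x4x16
+*     arrays DT19X and DT19Y, so the SROTM checks can address the
+*     whole truth table as DT19X(I,KPAR,KNI) with KNI = KN + 4*(KI-1).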
+      EQUIVALENCE (DT19X(1,1,1),DT19XA(1,1,1)),(DT19X(1,1,5),
+     A   DT19XB(1,1,1)),(DT19X(1,1,9),DT19XC(1,1,1)),
+     B   (DT19X(1,1,13),DT19XD(1,1,1))
+      EQUIVALENCE (DT19Y(1,1,1),DT19YA(1,1,1)),(DT19Y(1,1,5),
+     A   DT19YB(1,1,1)),(DT19Y(1,1,9),DT19YC(1,1,1)),
+     B   (DT19Y(1,1,13),DT19YD(1,1,1))
+
       DATA SA/0.3E0/
       DATA INCXS/1, 2, -2, -1/
       DATA INCYS/1, -2, 1, -2/
@@ -272,10 +383,11 @@
      +        -0.4E0/
       DATA DY1/0.5E0, -0.9E0, 0.3E0, 0.7E0, -0.6E0, 0.2E0,
      +        0.8E0/
-      DATA SC, SS/0.8E0, 0.6E0/
       DATA DT7/0.0E0, 0.30E0, 0.21E0, 0.62E0, 0.0E0,
      +        0.30E0, -0.07E0, 0.85E0, 0.0E0, 0.30E0, -0.79E0,
      +        -0.74E0, 0.0E0, 0.30E0, 0.33E0, 1.27E0/
+      DATA ST7B/ .1, .4, .31, .72, .1, .4, .03, .95,
+     +          .1, .4, -.69, -.64, .1, .4, .43, 1.37/
       DATA DT8/0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
      +        0.0E0, 0.68E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
      +        0.0E0, 0.0E0, 0.68E0, -0.87E0, 0.0E0, 0.0E0,
@@ -295,44 +407,6 @@
      +        0.0E0, 0.68E0, -0.9E0, 0.33E0, 0.0E0, 0.0E0,
      +        0.0E0, 0.0E0, 0.68E0, -0.9E0, 0.33E0, 0.7E0,
      +        -0.75E0, 0.2E0, 1.04E0/
-      DATA DT9X/0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +        0.0E0, 0.78E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +        0.0E0, 0.0E0, 0.78E0, -0.46E0, 0.0E0, 0.0E0,
-     +        0.0E0, 0.0E0, 0.0E0, 0.78E0, -0.46E0, -0.22E0,
-     +        1.06E0, 0.0E0, 0.0E0, 0.0E0, 0.6E0, 0.0E0,
-     +        0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.78E0,
-     +        0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +        0.66E0, 0.1E0, -0.1E0, 0.0E0, 0.0E0, 0.0E0,
-     +        0.0E0, 0.96E0, 0.1E0, -0.76E0, 0.8E0, 0.90E0,
-     +        -0.3E0, -0.02E0, 0.6E0, 0.0E0, 0.0E0, 0.0E0,
-     +        0.0E0, 0.0E0, 0.0E0, 0.78E0, 0.0E0, 0.0E0,
-     +        0.0E0, 0.0E0, 0.0E0, 0.0E0, -0.06E0, 0.1E0,
-     +        -0.1E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.90E0,
-     +        0.1E0, -0.22E0, 0.8E0, 0.18E0, -0.3E0, -0.02E0,
-     +        0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +        0.78E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +        0.0E0, 0.78E0, 0.26E0, 0.0E0, 0.0E0, 0.0E0,
-     +        0.0E0, 0.0E0, 0.78E0, 0.26E0, -0.76E0, 1.12E0,
-     +        0.0E0, 0.0E0, 0.0E0/
-      DATA DT9Y/0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +        0.0E0, 0.04E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +        0.0E0, 0.0E0, 0.04E0, -0.78E0, 0.0E0, 0.0E0,
-     +        0.0E0, 0.0E0, 0.0E0, 0.04E0, -0.78E0, 0.54E0,
-     +        0.08E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0, 0.0E0,
-     +        0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.04E0,
-     +        0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.7E0,
-     +        -0.9E0, -0.12E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +        0.64E0, -0.9E0, -0.30E0, 0.7E0, -0.18E0, 0.2E0,
-     +        0.28E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +        0.0E0, 0.0E0, 0.04E0, 0.0E0, 0.0E0, 0.0E0,
-     +        0.0E0, 0.0E0, 0.0E0, 0.7E0, -1.08E0, 0.0E0,
-     +        0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.64E0, -1.26E0,
-     +        0.54E0, 0.20E0, 0.0E0, 0.0E0, 0.0E0, 0.5E0,
-     +        0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +        0.04E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
-     +        0.0E0, 0.04E0, -0.9E0, 0.18E0, 0.0E0, 0.0E0,
-     +        0.0E0, 0.0E0, 0.04E0, -0.9E0, 0.18E0, 0.7E0,
-     +        -0.18E0, 0.2E0, 0.16E0/
       DATA DT10X/0.6E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
      +        0.0E0, 0.5E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0, 0.0E0,
      +        0.0E0, 0.5E0, -0.9E0, 0.0E0, 0.0E0, 0.0E0,
@@ -375,6 +449,151 @@
      +        0.0E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0,
      +        1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0, 1.17E0,
      +        1.17E0, 1.17E0, 1.17E0/
+      DATA SSIZE3/ .1, .4, 1.7, 3.3 /
+*
+*                         FOR DROTM
+*
+      DATA DPAR/-2.E0,  0.E0,0.E0,0.E0,0.E0,
+     A          -1.E0,  2.E0, -3.E0, -4.E0,  5.E0,
+     B           0.E0,  0.E0,  2.E0, -3.E0,  0.E0,
+     C           1.E0,  5.E0,  2.E0,  0.E0, -4.E0/
+*                        TRUE X RESULTS FOR ROTATIONS DROTM
+      DATA DT19XA/.6E0,  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     A            .6E0,  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     B            .6E0,  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     C            .6E0,  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     D            .6E0,  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     E
-.8E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + F -.9E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + G 3.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + H .6E0, .1E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + I -.8E0, 3.8E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + J -.9E0, 2.8E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + K 3.5E0, -.4E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + L .6E0, .1E0, -.5E0, .8E0, 0.E0,0.E0,0.E0, + M -.8E0, 3.8E0, -2.2E0, -1.2E0, 0.E0,0.E0,0.E0, + N -.9E0, 2.8E0, -1.4E0, -1.3E0, 0.E0,0.E0,0.E0, + O 3.5E0, -.4E0, -2.2E0, 4.7E0, 0.E0,0.E0,0.E0/ +* + DATA DT19XB/.6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + A .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + B .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + C .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + D .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + E -.8E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + F -.9E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + G 3.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + H .6E0, .1E0, -.5E0, 0.E0,0.E0,0.E0,0.E0, + I 0.E0, .1E0, -3.0E0, 0.E0,0.E0,0.E0,0.E0, + J -.3E0, .1E0, -2.0E0, 0.E0,0.E0,0.E0,0.E0, + K 3.3E0, .1E0, -2.0E0, 0.E0,0.E0,0.E0,0.E0, + L .6E0, .1E0, -.5E0, .8E0, .9E0, -.3E0, -.4E0, + M -2.0E0, .1E0, 1.4E0, .8E0, .6E0, -.3E0, -2.8E0, + N -1.8E0, .1E0, 1.3E0, .8E0, 0.E0, -.3E0, -1.9E0, + O 3.8E0, .1E0, -3.1E0, .8E0, 4.8E0, -.3E0, -1.5E0 / +* + DATA DT19XC/.6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + A .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + B .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + C .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + D .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + E -.8E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + F -.9E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + G 3.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + H .6E0, .1E0, -.5E0, 0.E0,0.E0,0.E0,0.E0, + I 4.8E0, .1E0, -3.0E0, 0.E0,0.E0,0.E0,0.E0, + J 3.3E0, .1E0, -2.0E0, 0.E0,0.E0,0.E0,0.E0, + K 2.1E0, .1E0, -2.0E0, 0.E0,0.E0,0.E0,0.E0, + L .6E0, .1E0, -.5E0, .8E0, .9E0, -.3E0, -.4E0, + M -1.6E0, .1E0, -2.2E0, .8E0, 5.4E0, -.3E0, -2.8E0, + N -1.5E0, .1E0, -1.4E0, .8E0, 3.6E0, -.3E0, -1.9E0, + O 3.7E0, .1E0, -2.2E0, .8E0, 3.6E0, -.3E0, -1.5E0 / +* + DATA DT19XD/.6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + A .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + B .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + C .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + D .6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + E -.8E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + F -.9E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + G 3.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + H .6E0, .1E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + I -.8E0, -1.0E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + J -.9E0, -.8E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + K 3.5E0, .8E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + L .6E0, .1E0, -.5E0, .8E0, 0.E0,0.E0,0.E0, + M -.8E0, -1.0E0, 1.4E0, -1.6E0, 0.E0,0.E0,0.E0, + N -.9E0, -.8E0, 1.3E0, -1.6E0, 0.E0,0.E0,0.E0, + O 3.5E0, .8E0, -3.1E0, 4.8E0, 0.E0,0.E0,0.E0/ +* TRUE Y RESULTS FOR ROTATIONS DROTM + DATA DT19YA/.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + A .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + B .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + C .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + D .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + E .7E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + F 1.7E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + G -2.6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + H .5E0, -.9E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + I .7E0, -4.8E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + J 1.7E0, -.7E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + K -2.6E0, 3.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0, + L .5E0, -.9E0, .3E0, .7E0, 0.E0,0.E0,0.E0, + M .7E0, -4.8E0, 3.0E0, 1.1E0, 0.E0,0.E0,0.E0, + N 1.7E0, -.7E0, -.7E0, 2.3E0, 0.E0,0.E0,0.E0, + O -2.6E0, 3.5E0, -.7E0, -3.6E0, 0.E0,0.E0,0.E0/ +* + DATA DT19YB/.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + A .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + B .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0, + C 
.5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     D             .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     E             .7E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     F            1.7E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     G           -2.6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     H             .5E0, -.9E0, .3E0, 0.E0,0.E0,0.E0,0.E0,
+     I            4.0E0, -.9E0, -.3E0, 0.E0,0.E0,0.E0,0.E0,
+     J            -.5E0, -.9E0, 1.5E0, 0.E0,0.E0,0.E0,0.E0,
+     K           -1.5E0, -.9E0, -1.8E0, 0.E0,0.E0,0.E0,0.E0,
+     L             .5E0, -.9E0, .3E0, .7E0, -.6E0, .2E0, .8E0,
+     M            3.7E0, -.9E0, -1.2E0, .7E0, -1.5E0, .2E0, 2.2E0,
+     N            -.3E0, -.9E0, 2.1E0, .7E0, -1.6E0, .2E0, 2.0E0,
+     O           -1.6E0, -.9E0, -2.1E0, .7E0, 2.9E0, .2E0, -3.8E0 /
+*
+      DATA DT19YC/.5E0,  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     A             .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     B             .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     C             .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     D             .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     E             .7E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     F            1.7E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     G           -2.6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     H             .5E0, -.9E0, 0.E0,0.E0,0.E0,0.E0,0.E0,
+     I            4.0E0, -6.3E0, 0.E0,0.E0,0.E0,0.E0,0.E0,
+     J            -.5E0, .3E0, 0.E0,0.E0,0.E0,0.E0,0.E0,
+     K           -1.5E0, 3.0E0, 0.E0,0.E0,0.E0,0.E0,0.E0,
+     L             .5E0, -.9E0, .3E0, .7E0, 0.E0,0.E0,0.E0,
+     M            3.7E0, -7.2E0, 3.0E0, 1.7E0, 0.E0,0.E0,0.E0,
+     N            -.3E0, .9E0, -.7E0, 1.9E0, 0.E0,0.E0,0.E0,
+     O           -1.6E0, 2.7E0, -.7E0, -3.4E0, 0.E0,0.E0,0.E0/
+*
+      DATA DT19YD/.5E0,  0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     A             .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     B             .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     C             .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     D             .5E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     E             .7E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     F            1.7E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     G           -2.6E0, 0.E0,0.E0,0.E0,0.E0,0.E0,0.E0,
+     H             .5E0, -.9E0, .3E0, 0.E0,0.E0,0.E0,0.E0,
+     I             .7E0, -.9E0, 1.2E0, 0.E0,0.E0,0.E0,0.E0,
+     J            1.7E0, -.9E0, .5E0, 0.E0,0.E0,0.E0,0.E0,
+     K           -2.6E0, -.9E0, -1.3E0, 0.E0,0.E0,0.E0,0.E0,
+     L             .5E0, -.9E0, .3E0, .7E0, -.6E0, .2E0, .8E0,
+     M             .7E0, -.9E0, 1.2E0, .7E0, -1.5E0, .2E0, 1.6E0,
+     N            1.7E0, -.9E0, .5E0, .7E0, -1.6E0, .2E0, 2.4E0,
+     O           -2.6E0, -.9E0, -1.3E0, .7E0, 2.9E0, .2E0, -4.0E0 /
+*
 *     .. Executable Statements ..
 *
       DO 120 KI = 1, 4
@@ -421,6 +640,39 @@
    80       CONTINUE
             CALL STEST(LENX,SX,STX,SSIZE2(1,1),1.0E0)
             CALL STEST(LENY,SY,STY,SSIZE2(1,1),1.0E0)
+         ELSEIF (ICASE.EQ.12) THEN
+*           .. SROTM ..
+            KNI=KN+4*(KI-1)
+            DO KPAR=1,4
+            DO I=1,7
+               SX(I) = DX1(I)
+               SY(I) = DY1(I)
+               STX(I)= DT19X(I,KPAR,KNI)
+               STY(I)= DT19Y(I,KPAR,KNI)
+            END DO
+*
+            DO I=1,5
+               DTEMP(I) = DPAR(I,KPAR)
+            END DO
+*
+            DO I=1,LENX
+               SSIZE(I)=STX(I)
+            END DO
+*           SEE REMARK ABOVE ABOUT DT11X(1,2,7)
+*           AND DT11X(5,3,8).
+            IF ((KPAR .EQ. 2) .AND. (KNI .EQ. 7))
+     $         SSIZE(1) = 2.4E0
+            IF ((KPAR .EQ. 3) .AND. (KNI .EQ. 8))
+     $         SSIZE(5) = 1.8E0
+*
+            CALL SROTM(N,SX,INCX,SY,INCY,DTEMP)
+            CALL STEST(LENX,SX,STX,SSIZE,SFAC)
+            CALL STEST(LENY,SY,STY,STY,SFAC)
+            END DO
+         ELSEIF (ICASE.EQ.13) THEN
+*           .. SDSDOT ..
+            CALL STEST1 (SDSDOT(N,.1,SX,INCX,SY,INCY),
+     $               ST7B(KN,KI),SSIZE3(KN),SFAC)
          ELSE
             WRITE (NOUT,*) ' Shouldn''t be here in CHECK2'
             STOP
@@ -436,10 +688,10 @@
 *     .. Scalar Arguments ..
       REAL              SFAC
 *     .. Scalars in Common ..
-      INTEGER           ICASE, INCX, INCY, MODE, N
+      INTEGER           ICASE, INCX, INCY, N
       LOGICAL           PASS
 *     .. Local Scalars ..
-      REAL              SA, SC, SS
+      REAL              SC, SS
       INTEGER           I, K, KI, KN, KSIZE, LENX, LENY, MX, MY
 *     .. Local Arrays ..
       REAL              COPYX(5), COPYY(5), DT9X(7,4,4), DT9Y(7,4,4),
@@ -454,9 +706,8 @@
 *     .. Intrinsic Functions ..
       INTRINSIC         ABS, MIN
 *     .. Common blocks ..
-      COMMON            /COMBLA/ICASE, N, INCX, INCY, MODE, PASS
+      COMMON            /COMBLA/ICASE, N, INCX, INCY, PASS
 *     .. Data statements ..
- DATA SA/0.3E0/ DATA INCXS/1, 2, -2, -1/ DATA INCYS/1, -2, 1, -2/ DATA LENS/1, 1, 2, 4, 1, 1, 3, 7/ @@ -647,14 +898,15 @@ * * .. Parameters .. INTEGER NOUT - PARAMETER (NOUT=6) + REAL ZERO + PARAMETER (NOUT=6, ZERO=0.0E0) * .. Scalar Arguments .. REAL SFAC INTEGER LEN * .. Array Arguments .. REAL SCOMP(LEN), SSIZE(LEN), STRUE(LEN) * .. Scalars in Common .. - INTEGER ICASE, INCX, INCY, MODE, N + INTEGER ICASE, INCX, INCY, N LOGICAL PASS * .. Local Scalars .. REAL SD @@ -665,12 +917,12 @@ * .. Intrinsic Functions .. INTRINSIC ABS * .. Common blocks .. - COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS * .. Executable Statements .. * DO 40 I = 1, LEN SD = SCOMP(I) - STRUE(I) - IF (SDIFF(ABS(SSIZE(I))+ABS(SFAC*SD),ABS(SSIZE(I))).EQ.0.0E0) + IF (ABS(SFAC*SD) .LE. ABS(SSIZE(I))*EPSILON(ZERO)) + GO TO 40 * * HERE SCOMP(I) IS NOT CLOSE TO STRUE(I). @@ -680,16 +932,16 @@ PASS = .FALSE. WRITE (NOUT,99999) WRITE (NOUT,99998) - 20 WRITE (NOUT,99997) ICASE, N, INCX, INCY, MODE, I, SCOMP(I), + 20 WRITE (NOUT,99997) ICASE, N, INCX, INCY, I, SCOMP(I), + STRUE(I), SD, SSIZE(I) 40 CONTINUE RETURN * 99999 FORMAT (' FAIL') -99998 FORMAT (/' CASE N INCX INCY MODE I ', +99998 FORMAT (/' CASE N INCX INCY I ', + ' COMP(I) TRUE(I) DIFFERENCE', + ' SIZE(I)',/1X) -99997 FORMAT (1X,I4,I3,3I5,I3,2E36.8,2E12.4) +99997 FORMAT (1X,I4,I3,2I5,I3,2E36.8,2E12.4) END SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC) * ************************* STEST1 ***************************** @@ -739,12 +991,12 @@ * .. Scalar Arguments .. INTEGER ICOMP, ITRUE * .. Scalars in Common .. - INTEGER ICASE, INCX, INCY, MODE, N + INTEGER ICASE, INCX, INCY, N LOGICAL PASS * .. Local Scalars .. INTEGER ID * .. Common blocks .. - COMMON /COMBLA/ICASE, N, INCX, INCY, MODE, PASS + COMMON /COMBLA/ICASE, N, INCX, INCY, PASS * .. Executable Statements .. 
* IF (ICOMP.EQ.ITRUE) GO TO 40 @@ -757,13 +1009,13 @@ WRITE (NOUT,99999) WRITE (NOUT,99998) 20 ID = ICOMP - ITRUE - WRITE (NOUT,99997) ICASE, N, INCX, INCY, MODE, ICOMP, ITRUE, ID + WRITE (NOUT,99997) ICASE, N, INCX, INCY, ICOMP, ITRUE, ID 40 CONTINUE RETURN * 99999 FORMAT (' FAIL') -99998 FORMAT (/' CASE N INCX INCY MODE ', +99998 FORMAT (/' CASE N INCX INCY ', + ' COMP TRUE DIFFERENCE', + /1X) -99997 FORMAT (1X,I4,I3,3I5,2I36,I12) +99997 FORMAT (1X,I4,I3,2I5,2I36,I12) END diff --git a/utest/CMakeLists.txt b/utest/CMakeLists.txt index a7f3871c3..1b426afe7 100644 --- a/utest/CMakeLists.txt +++ b/utest/CMakeLists.txt @@ -7,9 +7,33 @@ else () set(OpenBLAS_utest_src utest_main.c test_amax.c + test_rotmg.c + test_rot.c + test_axpy.c + test_dsdot.c + test_swap.c ) endif () +# crashing on travis cl with an error code suggesting resource not found +if (NOT MSVC) +set(OpenBLAS_utest_src + ${OpenBLAS_utest_src} + test_dotu.c + ) +endif () + +# known to hang with the native Windows and Android threads +# FIXME needs checking if this works on any of the other platforms +if (NOT USE_OPENMP) +if (OS_CYGWIN_NT OR OS_LINUX) +set(OpenBLAS_utest_src + ${OpenBLAS_utest_src} + test_fork.c + ) +endif() +endif() + if (NOT NO_LAPACK) set(OpenBLAS_utest_src ${OpenBLAS_utest_src} diff --git a/utest/Makefile b/utest/Makefile index ce809e3de..e40b3c6db 100644 --- a/utest/Makefile +++ b/utest/Makefile @@ -8,13 +8,21 @@ UTESTBIN=openblas_utest include $(TOPDIR)/Makefile.system -OBJS=utest_main.o test_amax.o -#test_rot.o test_swap.o test_axpy.o test_dotu.o test_rotmg.o test_dsdot.o test_fork.o +OBJS=utest_main.o test_amax.o test_rotmg.o test_axpy.o test_dotu.o test_dsdot.o test_swap.o test_rot.o +#test_rot.o test_swap.o test_axpy.o test_dotu.o test_dsdot.o test_fork.o ifneq ($(NO_LAPACK), 1) OBJS += test_potrs.o endif +#this does not work with OpenMP nor with native Windows or Android threads +# FIXME TBD if this works on OSX, SunOS, POWER and zarch +ifndef USE_OPENMP +ifeq ($(OSNAME), $(filter $(OSNAME),Linux CYGWIN_NT)) +OBJS += test_fork.o +endif +endif + all : run_test $(UTESTBIN): $(OBJS) diff --git a/utest/ctest.h b/utest/ctest.h index 1deea32f6..f297dafba 100644 --- a/utest/ctest.h +++ b/utest/ctest.h @@ -84,7 +84,7 @@ struct ctest { #endif #if _MSC_VER < 1900 -#define snprintf _snprintf_s +#define snprintf _snprintf #endif #ifndef __cplusplus diff --git a/utest/test_axpy.c b/utest/test_axpy.c index 696eb7a51..603043073 100644 --- a/utest/test_axpy.c +++ b/utest/test_axpy.c @@ -31,88 +31,81 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
**********************************************************************************/ -#include "common_utest.h" +#include "openblas_utest.h" -void test_daxpy_inc_0(void) +CTEST(axpy,daxpy_inc_0) { - int i; - int N=8,incX=0,incY=0; + blasint i; + blasint N=8,incX=0,incY=0; double a=0.25; double x1[]={1.0,3.0,5.0,7.0,1.0,3.0,5.0,7.0}; double y1[]={2.0,4.0,6.0,8.0,2.0,4.0,6.0,8.0}; + double x2[]={1.0,3.0,5.0,7.0,1.0,3.0,5.0,7.0}; - double y2[]={2.0,4.0,6.0,8.0,2.0,4.0,6.0,8.0}; + double y2[]={4.0,4.0,6.0,8.0,2.0,4.0,6.0,8.0}; //OpenBLAS BLASFUNC(daxpy)(&N,&a,x1,&incX,y1,&incY); - //reference - BLASFUNC_REF(daxpy)(&N,&a,x2,&incX,y2,&incY); for(i=0; i +#include "openblas_utest.h" -void test_zdotu_n_1(void) +CTEST( zdotu,zdotu_n_1) { - int N=1,incX=1,incY=1; + blasint N=1,incX=1,incY=1; double x1[]={1.0,1.0}; double y1[]={1.0,2.0}; - double x2[]={1.0,1.0}; - double y2[]={1.0,2.0}; - double _Complex result1=0.0; - double _Complex result2=0.0; - //OpenBLAS + + openblas_complex_double result1=openblas_make_complex_double(0.0,0.0); + openblas_complex_double result2=openblas_make_complex_double(-1.0000,3.0000); +#ifdef RETURN_BY_STACK + BLASFUNC(zdotu)(&result1,&N,x1,&incX,y1,&incY); +#else result1=BLASFUNC(zdotu)(&N,x1,&incX,y1,&incY); - //reference - result2=BLASFUNC_REF(zdotu)(&N,x2,&incX,y2,&incY); - - CU_ASSERT_DOUBLE_EQUAL(creal(result1), creal(result2), CHECK_EPS); - CU_ASSERT_DOUBLE_EQUAL(cimag(result1), cimag(result2), CHECK_EPS); -// printf("\%lf,%lf\n",creal(result1),cimag(result1)); - +#endif + +#ifdef OPENBLAS_COMPLEX_STRUCT + ASSERT_DBL_NEAR_TOL(result2.real, result1.real, DOUBLE_EPS); + ASSERT_DBL_NEAR_TOL(result2.imag, result1.imag, DOUBLE_EPS); +#else + ASSERT_DBL_NEAR_TOL(creal(result2), creal(result1), DOUBLE_EPS); + ASSERT_DBL_NEAR_TOL(cimag(result2), cimag(result1), DOUBLE_EPS); +#endif + } -void test_zdotu_offset_1(void) +CTEST(zdotu, zdotu_offset_1) { - int N=1,incX=1,incY=1; + blasint N=1,incX=1,incY=1; double x1[]={1.0,2.0,3.0,4.0}; double y1[]={5.0,6.0,7.0,8.0}; - double x2[]={1.0,2.0,3.0,4.0}; - double y2[]={5.0,6.0,7.0,8.0}; - double _Complex result1=0.0; - double _Complex result2=0.0; - //OpenBLAS + + openblas_complex_double result1=openblas_make_complex_double(0.0,0.0); + openblas_complex_double result2=openblas_make_complex_double(-9.0,32.0); +#ifdef RETURN_BY_STACK + BLASFUNC(zdotu)(&result1,&N,x1+1,&incX,y1+1,&incY); +#else result1=BLASFUNC(zdotu)(&N,x1+1,&incX,y1+1,&incY); - //reference - result2=BLASFUNC_REF(zdotu)(&N,x2+1,&incX,y2+1,&incY); - - CU_ASSERT_DOUBLE_EQUAL(creal(result1), creal(result2), CHECK_EPS); - CU_ASSERT_DOUBLE_EQUAL(cimag(result1), cimag(result2), CHECK_EPS); -// printf("\%lf,%lf\n",creal(result1),cimag(result1)); +#endif + +#ifdef OPENBLAS_COMPLEX_STRUCT + ASSERT_DBL_NEAR_TOL(result2.real, result1.real, DOUBLE_EPS); + ASSERT_DBL_NEAR_TOL(result2.imag, result1.imag, DOUBLE_EPS); +#else + ASSERT_DBL_NEAR_TOL(creal(result2), creal(result1), DOUBLE_EPS); + ASSERT_DBL_NEAR_TOL(cimag(result2), cimag(result1), DOUBLE_EPS); +#endif } - diff --git a/utest/test_dsdot.c b/utest/test_dsdot.c index 536153c7e..d58b398a8 100644 --- a/utest/test_dsdot.c +++ b/utest/test_dsdot.c @@ -31,21 +31,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
**********************************************************************************/
-#include "common_utest.h"
+#include "openblas_utest.h"
 
-void test_dsdot_n_1()
+CTEST(dsdot,dsdot_n_1)
 {
-	float x= 0.172555164;
-	float y= -0.0138700781;
-	int incx=1;
-	int incy=1;
-	int n=1;
+	float x= 0.172555164F;
+	float y= -0.0138700781F;
+	blasint incx=1;
+	blasint incy=1;
+	blasint n=1;
 
-	double res1=0.0f, res2=0.0f;
+	double res1=0.0f, res2=-0.00239335360107;
 
 	res1=BLASFUNC(dsdot)(&n, &x, &incx, &y, &incy);
-	res2=BLASFUNC_REF(dsdot)(&n, &x, &incx, &y, &incy);
-
-	CU_ASSERT_DOUBLE_EQUAL(res1, res2, CHECK_EPS);
+	ASSERT_DBL_NEAR_TOL(res2, res1, DOUBLE_EPS);
 }
diff --git a/utest/test_fork.c b/utest/test_fork.c
index e6603902e..0b90407b1 100644
--- a/utest/test_fork.c
+++ b/utest/test_fork.c
@@ -13,9 +13,9 @@ met:
       notice, this list of conditions and the following disclaimer in
       the documentation and/or other materials provided with the
       distribution.
-   3. Neither the name of the OpenBLAS project nor the names of 
-      its contributors may be used to endorse or promote products 
-      derived from this software without specific prior written 
+   3. Neither the name of the OpenBLAS project nor the names of
+      its contributors may be used to endorse or promote products
+      derived from this software without specific prior written
       permission.
 
 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
@@ -31,10 +31,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 **********************************************************************************/
 
-#ifndef OS_WINDOWS
-#include "common_utest.h"
+#include
 #include
 #include
+#include "openblas_utest.h"
 
 void* xmalloc(size_t n)
 {
@@ -48,19 +48,21 @@
 }
 
-void check_dgemm(double *a, double *b, double *result, double *expected, int n)
+void check_dgemm(double *a, double *b, double *result, double *expected, blasint n)
 {
+	char trans1 = 'T';
+	char trans2 = 'N';
+	double zerod = 0, oned = 1;
 	int i;
-	cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, n, n, n,
-	            1.0, a, n, b, n, 0.0, result, n);
+	BLASFUNC(dgemm)(&trans1, &trans2, &n, &n, &n, &oned, a, &n, b, &n, &zerod, result, &n);
 	for(i = 0; i < n * n; ++i) {
-		CU_ASSERT_DOUBLE_EQUAL(expected[i], result[i], CHECK_EPS);
+		ASSERT_DBL_NEAR_TOL(expected[i], result[i], DOUBLE_EPS);
 	}
 }
 
-void test_fork_safety(void)
+CTEST(fork, safety)
 {
-	int n = 1000;
+	blasint n = 1000;
 	int i;
 	double *a, *b, *c, *d;
@@ -84,12 +86,14 @@
 	// Compute a DGEMM product in the parent process prior to forking to
 	// ensure that the OpenBLAS thread pool is initialized.
-	cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, n, n, n,
-	            1.0, a, n, b, n, 0.0, c, n);
+	char trans1 = 'T';
+	char trans2 = 'N';
+	double zerod = 0, oned = 1;
+	BLASFUNC(dgemm)(&trans1, &trans2, &n, &n, &n, &oned, a, &n, b, &n, &zerod, c, &n);
 	fork_pid = fork();
 	if (fork_pid == -1) {
-		CU_FAIL("Failed to fork process.");
+		CTEST_ERR("Failed to fork process.");
 	} else if (fork_pid == 0) {
 		// Compute a DGEMM product in the child process to check that the
 		// thread pool has been properly reinitialized after the fork.
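The test_fork.c rework above keeps the same underlying scenario: run one GEMM in the parent so the OpenBLAS thread pool exists, fork, then prove the child can still run GEMM and exit cleanly. A minimal sketch of that pattern, assuming a CBLAS-style header and a POSIX system (the file name and matrix size are hypothetical; this is an illustration, not part of the patch):

/* fork_smoke.c -- hypothetical name; mirrors the pattern of the test above */
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>
#include <cblas.h>

int main(void)
{
    const int n = 128;
    double *a = calloc((size_t)n * n, sizeof(double));
    double *c = calloc((size_t)n * n, sizeof(double));
    if (!a || !c) return 1;

    /* One GEMM in the parent initializes the BLAS thread pool
       before the fork. */
    cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, n, n, n,
                1.0, a, n, a, n, 0.0, c, n);

    pid_t pid = fork();
    if (pid == 0) {
        /* A fork-safe BLAS must be usable again in the child; a stale
           thread pool inherited from the parent would hang here. */
        cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, n, n, n,
                    1.0, a, n, a, n, 0.0, c, n);
        _exit(0);
    }
    if (pid < 0) return 1;

    int status = 0;
    waitpid(pid, &status, 0);
    return WIFEXITED(status) ? WEXITSTATUS(status) : 1;
}

The real test goes further (it compares the child's result against the parent's and forks once more inside the child), but hang-versus-clean-exit is what the CI job is really probing.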
@@ -99,7 +103,7 @@ void test_fork_safety(void) // recursively fork_pid_nested = fork(); if (fork_pid_nested == -1) { - CU_FAIL("Failed to fork process."); + CTEST_ERR("Failed to fork process."); exit(1); } else if (fork_pid_nested == 0) { check_dgemm(a, b, d, c, n); @@ -108,8 +112,8 @@ void test_fork_safety(void) check_dgemm(a, b, d, c, n); int child_status = 0; pid_t wait_pid = wait(&child_status); - CU_ASSERT(wait_pid == fork_pid_nested); - CU_ASSERT(WEXITSTATUS (child_status) == 0); + ASSERT_EQUAL(wait_pid, fork_pid_nested); + ASSERT_EQUAL(0, WEXITSTATUS (child_status)); exit(0); } } else { @@ -117,8 +121,7 @@ void test_fork_safety(void) // Wait for the child to finish and check the exit code. int child_status = 0; pid_t wait_pid = wait(&child_status); - CU_ASSERT(wait_pid == fork_pid); - CU_ASSERT(WEXITSTATUS (child_status) == 0); + ASSERT_EQUAL(wait_pid, fork_pid); + ASSERT_EQUAL(0, WEXITSTATUS (child_status)); } } -#endif diff --git a/utest/test_rot.c b/utest/test_rot.c index b8f9f177a..cf72ad22d 100644 --- a/utest/test_rot.c +++ b/utest/test_rot.c @@ -31,88 +31,82 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **********************************************************************************/ -#include "common_utest.h" +#include "openblas_utest.h" -void test_drot_inc_0(void) +CTEST(rot,drot_inc_0) { - int i=0; - int N=4,incX=0,incY=0; + blasint i=0; + blasint N=4,incX=0,incY=0; double c=0.25,s=0.5; double x1[]={1.0,3.0,5.0,7.0}; double y1[]={2.0,4.0,6.0,8.0}; - double x2[]={1.0,3.0,5.0,7.0}; - double y2[]={2.0,4.0,6.0,8.0}; + double x2[]={-0.21484375000000,3.0,5.0,7.0}; + double y2[]={ 0.03906250000000,4.0,6.0,8.0}; + //OpenBLAS BLASFUNC(drot)(&N,x1,&incX,y1,&incY,&c,&s); - //reference - BLASFUNC_REF(drot)(&N,x2,&incX,y2,&incY,&c,&s); for(i=0; i +#include #define CTEST_MAIN #define CTEST_SEGFAULT @@ -49,13 +50,490 @@ CTEST(amax, samax){ ASSERT_DBL_NEAR_TOL((double)(tr_max), (double)(te_max), SINGLE_EPS); } +CTEST (drotmg,rotmg) +{ + double te_d1, tr_d1; + double te_d2, tr_d2; + double te_x1, tr_x1; + double te_y1, tr_y1; + double te_param[5]; + double tr_param[5]; + int i=0; + // original test case for libGoto bug fixed by feb2014 rewrite + te_d1= 0.21149573940783739; + te_d2= 0.046892057172954082; + te_x1= -0.42272687517106533; + te_y1= 0.42211309121921659; + + + for(i=0; i<5; i++){ + te_param[i]=tr_param[i]=0.0; + } + + //reference values as calculated by netlib blas + + tr_d1= 0.1732048; + tr_d2= 0.03840234; + tr_x1= -0.516180; + tr_y1= 0.422113; + tr_d1= 0.17320483687975; + tr_d2= 0.03840233915037; + tr_x1= -0.51618034832329; + tr_y1= 0.42211309121922; + + tr_param[0]= 0.0; + tr_param[1]= 0.0; + tr_param[2]= 0.99854803659786; + tr_param[3]= -0.22139439665872; + tr_param[4]= 0.0; + + BLASFUNC(drotmg)(&te_d1, &te_d2, &te_x1, &te_y1, te_param); + ASSERT_DBL_NEAR_TOL(tr_d1, te_d1, DOUBLE_EPS); + ASSERT_DBL_NEAR_TOL(tr_d2, te_d2, DOUBLE_EPS); + ASSERT_DBL_NEAR_TOL(tr_x1, te_x1, DOUBLE_EPS); + ASSERT_DBL_NEAR_TOL(tr_y1, te_y1, DOUBLE_EPS); + + for(i=0; i<5; i++){ + ASSERT_DBL_NEAR_TOL(tr_param[i], te_param[i], DOUBLE_EPS); + } +} + +CTEST (drotmg,rotmg_issue1452) +{ + double te_d1, tr_d1; + double te_d2, tr_d2; + double te_x1, tr_x1; + double te_y1, tr_y1; + double te_param[5]; + double tr_param[5]; + int i=0; + + // from issue #1452 + te_d1 = 5.9e-8; + te_d2 = 5.960464e-8; + te_x1 = 1.0; + te_y1 = 150.0; + + for(i=0; i<5; i++){ + te_param[i]=tr_param[i]=0.0; + } + te_param[3]=1./4096.; + //reference values as calculated by gonum blas with rotmg rewritten 
to Hopkins' algorithm + tr_d1= 0.99995592822897; + tr_d2= 0.98981219860583; + tr_x1= 0.03662270484346; + tr_y1= 150.000000000000; + + tr_param[0]= -1.0; + tr_param[1]= 0.00000161109346; + tr_param[2]= -0.00024414062500; + tr_param[3]= 0.00024414062500; + tr_param[4]= 0.00000162760417; + + //OpenBLAS + BLASFUNC(drotmg)(&te_d1, &te_d2, &te_x1, &te_y1, te_param); + + ASSERT_DBL_NEAR_TOL(tr_d1, te_d1, DOUBLE_EPS); + ASSERT_DBL_NEAR_TOL(tr_d2, te_d2, DOUBLE_EPS); + ASSERT_DBL_NEAR_TOL(tr_x1, te_x1, DOUBLE_EPS); + ASSERT_DBL_NEAR_TOL(tr_y1, te_y1, DOUBLE_EPS); + + for(i=0; i<5; i++){ + ASSERT_DBL_NEAR_TOL(tr_param[i], te_param[i], DOUBLE_EPS); + } + +} + +CTEST(drotmg, rotmg_D1eqD2_X1eqX2) +{ + double te_d1, tr_d1; + double te_d2, tr_d2; + double te_x1, tr_x1; + double te_y1, tr_y1; + double te_param[5]; + double tr_param[5]; + int i=0; + te_d1= tr_d1=2.; + te_d2= tr_d2=2.; + te_x1= tr_x1=8.; + te_y1= tr_y1=8.; + + for(i=0; i<5; i++){ + te_param[i]=tr_param[i]=0.0; + } + + //reference values as calculated by netlib blas + tr_d1= 1.0; + tr_d2= 1.0; + tr_x1= 16.0; + tr_y1= 8.0; + + tr_param[0]=1.0; + tr_param[1]=1.0; + tr_param[2]=0.0; + tr_param[3]=0.0; + tr_param[4]=1.0; + + //OpenBLAS + BLASFUNC(drotmg)(&te_d1, &te_d2, &te_x1, &te_y1, te_param); + + ASSERT_DBL_NEAR_TOL(tr_d1, te_d1, DOUBLE_EPS); + ASSERT_DBL_NEAR_TOL(tr_d2, te_d2, DOUBLE_EPS); + ASSERT_DBL_NEAR_TOL(tr_x1, te_x1, DOUBLE_EPS); + ASSERT_DBL_NEAR_TOL(tr_y1, te_y1, DOUBLE_EPS); + + for(i=0; i<5; i++){ + ASSERT_DBL_NEAR_TOL(tr_param[i], te_param[i], DOUBLE_EPS); + } +} + +CTEST(drotmg, drotmg_D1_big_D2_big_flag_zero) +{ + double te_d1, tr_d1; + double te_d2, tr_d2; + double te_x1, tr_x1; + double te_y1, tr_y1; + double te_param[5]={1.,4096.,-4096.,1.,4096.}; + double tr_param[5]={-1.,4096.,-3584.,1792.,4096.}; + int i=0; + te_d1= tr_d1=1600000000.; + te_d2= tr_d2=800000000.; + te_x1= tr_x1=8.; + te_y1= tr_y1=7.; + + + //reference values as calculated by gonum + tr_d1= 68.96627824858757; + tr_d2= 34.483139124293785; + tr_x1= 45312.; + tr_y1= 7.0; + + + //OpenBLAS + BLASFUNC(drotmg)(&te_d1, &te_d2, &te_x1, &te_y1, te_param); + + ASSERT_DBL_NEAR_TOL(tr_d1, te_d1, DOUBLE_EPS); + ASSERT_DBL_NEAR_TOL(tr_d2, te_d2, DOUBLE_EPS); + ASSERT_DBL_NEAR_TOL(tr_x1, te_x1, DOUBLE_EPS); + ASSERT_DBL_NEAR_TOL(tr_y1, te_y1, DOUBLE_EPS); + + for(i=0; i<5; i++){ + ASSERT_DBL_NEAR_TOL(tr_param[i], te_param[i], DOUBLE_EPS); + } +} + +CTEST(axpy,daxpy_inc_0) +{ + blasint i; + blasint N=8,incX=0,incY=0; + double a=0.25; + double x1[]={1.0,3.0,5.0,7.0,1.0,3.0,5.0,7.0}; + double y1[]={2.0,4.0,6.0,8.0,2.0,4.0,6.0,8.0}; + + double x2[]={1.0,3.0,5.0,7.0,1.0,3.0,5.0,7.0}; + double y2[]={4.0,4.0,6.0,8.0,2.0,4.0,6.0,8.0}; + + //OpenBLAS + BLASFUNC(daxpy)(&N,&a,x1,&incX,y1,&incY); + + for(i=0; i