Merge pull request #4854 from OpenMathLib/develop
merge develop in preparation of the 0.3.28 release
This commit is contained in:
commit
5ef8b19646
26
.cirrus.yml
26
.cirrus.yml
|
@ -41,7 +41,7 @@ macos_instance:
|
|||
# - make CC=gcc-11 FC=gfortran-11 USE_OPENMP=1
|
||||
|
||||
macos_instance:
|
||||
image: ghcr.io/cirruslabs/macos-monterey-xcode:latest
|
||||
image: ghcr.io/cirruslabs/macos-sonoma-xcode:latest
|
||||
task:
|
||||
name: AppleM1/LLVM x86_64 xbuild
|
||||
compile_script:
|
||||
|
@ -58,8 +58,8 @@ task:
|
|||
- export VALID_ARCHS="i386 x86_64"
|
||||
- xcrun --sdk macosx --show-sdk-path
|
||||
- xcodebuild -version
|
||||
- export CC=/Applications/Xcode-15.3.0.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
- export CFLAGS="-O2 -unwindlib=none -Wno-macro-redefined -isysroot /Applications/Xcode-15.3.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX14.4.sdk -arch x86_64"
|
||||
- export CC=/Applications/Xcode_15.4.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
- export CFLAGS="-O2 -unwindlib=none -Wno-macro-redefined -isysroot /Applications/Xcode_15.4.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX14.5.sdk -arch x86_64"
|
||||
- make TARGET=CORE2 DYNAMIC_ARCH=1 NUM_THREADS=32 HOSTCC=clang NOFORTRAN=1 RANLIB="ls -l"
|
||||
always:
|
||||
config_artifacts:
|
||||
|
@ -70,7 +70,7 @@ task:
|
|||
# type: application/octet-streamm
|
||||
|
||||
macos_instance:
|
||||
image: ghcr.io/cirruslabs/macos-monterey-xcode:latest
|
||||
image: ghcr.io/cirruslabs/macos-sonoma-xcode:latest
|
||||
task:
|
||||
name: AppleM1/LLVM armv8-ios xbuild
|
||||
compile_script:
|
||||
|
@ -78,8 +78,10 @@ task:
|
|||
- export #PATH=/opt/homebrew/opt/llvm/bin:$PATH
|
||||
- export #LDFLAGS="-L/opt/homebrew/opt/llvm/lib"
|
||||
- export #CPPFLAGS="-I/opt/homebrew/opt/llvm/include"
|
||||
- export CC=/Applications/Xcode-15.3.0.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
- export CFLAGS="-O2 -unwindlib=none -Wno-macro-redefined -isysroot /Applications/Xcode-15.3.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS17.4.sdk -arch arm64 -miphoneos-version-min=10.0"
|
||||
- export CC=/Applications/Xcode_15.4.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
- export CFLAGS="-O2 -unwindlib=none -Wno-macro-redefined -isysroot /Applications/Xcode_15.4.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS17.5.sdk -arch arm64 -miphoneos-version-min=10.0"
|
||||
- xcrun --sdk iphoneos --show-sdk-path
|
||||
- ls -l /Applications
|
||||
- make TARGET=ARMV8 NUM_THREADS=32 HOSTCC=clang NOFORTRAN=1 CROSS=1
|
||||
always:
|
||||
config_artifacts:
|
||||
|
@ -96,11 +98,11 @@ task:
|
|||
- export #LDFLAGS="-L/opt/homebrew/opt/llvm/lib"
|
||||
- export #CPPFLAGS="-I/opt/homebrew/opt/llvm/include"
|
||||
- ls /System/Volumes/Data/opt/homebrew
|
||||
- ls -l /System/Volumes/Data/opt/homebrew/Caskroom/
|
||||
- ls -l /System/Volumes/Data/opt/homebrew/Caskroom/android-ndk
|
||||
- find /System/Volumes/Data/opt/homebrew -name "armv7a-linux-androideabi*-ranlib"
|
||||
- #export CC=/Applications/Xcode-13.4.1.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
- #export CFLAGS="-O2 -unwindlib=none -Wno-macro-redefined -isysroot /Applications/Xcode-13.4.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS16.0.sdk -arch arm64 -miphoneos-version-min=10.0"
|
||||
- export CC=/System/Volumes/Data/opt/homebrew/Caskroom/android-ndk/26c/AndroidNDK*.app/Contents/NDK/toolchains/llvm/prebuilt/darwin-x86_64/bin/armv7a-linux-androideabi23-clang
|
||||
- export CC=/System/Volumes/Data/opt/homebrew/Caskroom/android-ndk/26d/AndroidNDK*.app/Contents/NDK/toolchains/llvm/prebuilt/darwin-x86_64/bin/armv7a-linux-androideabi23-clang
|
||||
- make TARGET=ARMV7 ARM_SOFTFP_ABI=1 NUM_THREADS=32 HOSTCC=clang NOFORTRAN=1 RANLIB="ls -l"
|
||||
always:
|
||||
config_artifacts:
|
||||
|
@ -132,7 +134,7 @@ task:
|
|||
FreeBSD_task:
|
||||
name: FreeBSD-gcc12
|
||||
freebsd_instance:
|
||||
image_family: freebsd-13-2
|
||||
image_family: freebsd-13-3
|
||||
install_script:
|
||||
- pkg update -f && pkg upgrade -y && pkg install -y gmake gcc
|
||||
compile_script:
|
||||
|
@ -143,7 +145,7 @@ FreeBSD_task:
|
|||
FreeBSD_task:
|
||||
name: freebsd-gcc12-ilp64
|
||||
freebsd_instance:
|
||||
image_family: freebsd-13-2
|
||||
image_family: freebsd-13-3
|
||||
install_script:
|
||||
- pkg update -f && pkg upgrade -y && pkg install -y gmake gcc
|
||||
compile_script:
|
||||
|
@ -153,10 +155,10 @@ FreeBSD_task:
|
|||
FreeBSD_task:
|
||||
name: FreeBSD-clang-openmp
|
||||
freebsd_instance:
|
||||
image_family: freebsd-13-2
|
||||
image_family: freebsd-13-3
|
||||
install_script:
|
||||
- pkg update -f && pkg upgrade -y && pkg install -y gmake gcc
|
||||
- ln -s /usr/local/lib/gcc12/libgfortran.so.5.0.0 /usr/lib/libgfortran.so
|
||||
- ln -s /usr/local/lib/gcc13/libgfortran.so.5.0.0 /usr/lib/libgfortran.so
|
||||
compile_script:
|
||||
- gmake CC=clang FC=gfortran USE_OPENMP=1 CPP_THREAD_SAFETY_TEST=1
|
||||
|
||||
|
|
|
@ -84,6 +84,7 @@ jobs:
|
|||
run: |
|
||||
export PATH=$GITHUB_WORKSPACE/qemu-install/bin/:$PATH
|
||||
qemu-riscv64 ./utest/openblas_utest
|
||||
qemu-riscv64 ./utest/openblas_utest_ext
|
||||
OPENBLAS_NUM_THREADS=2 qemu-riscv64 ./ctest/xscblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-riscv64 ./ctest/xdcblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-riscv64 ./ctest/xccblat1
|
||||
|
|
|
@ -0,0 +1,157 @@
|
|||
name: Run codspeed benchmarks
|
||||
|
||||
on: [push, pull_request]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read # to fetch code (actions/checkout)
|
||||
|
||||
jobs:
|
||||
benchmarks:
|
||||
if: "github.repository == 'OpenMathLib/OpenBLAS'"
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [ubuntu-latest]
|
||||
fortran: [gfortran]
|
||||
build: [make]
|
||||
pyver: ["3.12"]
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: ${{ matrix.pyver }}
|
||||
|
||||
- name: Print system information
|
||||
run: |
|
||||
if [ "$RUNNER_OS" == "Linux" ]; then
|
||||
cat /proc/cpuinfo
|
||||
fi
|
||||
|
||||
- name: Install Dependencies
|
||||
run: |
|
||||
if [ "$RUNNER_OS" == "Linux" ]; then
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y gfortran cmake ccache libtinfo5
|
||||
else
|
||||
echo "::error::$RUNNER_OS not supported"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Compilation cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/.ccache
|
||||
# We include the commit sha in the cache key, as new cache entries are
|
||||
# only created if there is no existing entry for the key yet.
|
||||
# GNU make and cmake call the compilers differently. It looks like
|
||||
# that causes the cache to mismatch. Keep the ccache for both build
|
||||
# tools separate to avoid polluting each other.
|
||||
key: ccache-${{ runner.os }}-${{ matrix.build }}-${{ matrix.fortran }}-${{ github.ref }}-${{ github.sha }}
|
||||
# Restore a matching ccache cache entry. Prefer same branch and same Fortran compiler.
|
||||
restore-keys: |
|
||||
ccache-${{ runner.os }}-${{ matrix.build }}-${{ matrix.fortran }}-${{ github.ref }}
|
||||
ccache-${{ runner.os }}-${{ matrix.build }}-${{ matrix.fortran }}
|
||||
ccache-${{ runner.os }}-${{ matrix.build }}
|
||||
|
||||
- name: Write out the .pc
|
||||
run: |
|
||||
cd benchmark/pybench
|
||||
cat > openblas.pc << EOF
|
||||
libdir=${{ github.workspace }}
|
||||
includedir= ${{ github.workspace }}
|
||||
openblas_config= OpenBLAS 0.3.27 DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=64
|
||||
version=0.0.99
|
||||
extralib=-lm -lpthread -lgfortran -lquadmath -L${{ github.workspace }} -lopenblas
|
||||
Name: openblas
|
||||
Description: OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version
|
||||
Version: ${version}
|
||||
URL: https://github.com/xianyi/OpenBLAS
|
||||
Libs: ${{ github.workspace }}/libopenblas.so -Wl,-rpath,${{ github.workspace }}
|
||||
Libs.private: -lm -lpthread -lgfortran -lquadmath -L${{ github.workspace }} -lopenblas
|
||||
Cflags: -I${{ github.workspace}}
|
||||
EOF
|
||||
cat openblas.pc
|
||||
|
||||
- name: Configure ccache
|
||||
run: |
|
||||
if [ "${{ matrix.build }}" = "make" ]; then
|
||||
# Add ccache to path
|
||||
if [ "$RUNNER_OS" = "Linux" ]; then
|
||||
echo "/usr/lib/ccache" >> $GITHUB_PATH
|
||||
elif [ "$RUNNER_OS" = "macOS" ]; then
|
||||
echo "$(brew --prefix)/opt/ccache/libexec" >> $GITHUB_PATH
|
||||
else
|
||||
echo "::error::$RUNNER_OS not supported"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
# Limit the maximum size and switch on compression to avoid exceeding the total disk or cache quota (5 GB).
|
||||
test -d ~/.ccache || mkdir -p ~/.ccache
|
||||
echo "max_size = 300M" > ~/.ccache/ccache.conf
|
||||
echo "compression = true" >> ~/.ccache/ccache.conf
|
||||
ccache -s
|
||||
|
||||
- name: Build OpenBLAS
|
||||
run: |
|
||||
case "${{ matrix.build }}" in
|
||||
"make")
|
||||
make -j$(nproc) DYNAMIC_ARCH=1 USE_OPENMP=0 FC="ccache ${{ matrix.fortran }}"
|
||||
;;
|
||||
"cmake")
|
||||
mkdir build && cd build
|
||||
cmake -DDYNAMIC_ARCH=1 \
|
||||
-DNOFORTRAN=0 \
|
||||
-DBUILD_WITHOUT_LAPACK=0 \
|
||||
-DCMAKE_VERBOSE_MAKEFILE=ON \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DCMAKE_Fortran_COMPILER=${{ matrix.fortran }} \
|
||||
-DCMAKE_C_COMPILER_LAUNCHER=ccache \
|
||||
-DCMAKE_Fortran_COMPILER_LAUNCHER=ccache \
|
||||
..
|
||||
cmake --build .
|
||||
;;
|
||||
*)
|
||||
echo "::error::Configuration not supported"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
- name: Show ccache status
|
||||
continue-on-error: true
|
||||
run: ccache -s
|
||||
|
||||
- name: Install benchmark dependencies
|
||||
run: pip install meson ninja numpy pytest pytest-codspeed --user
|
||||
|
||||
- name: Build the wrapper
|
||||
run: |
|
||||
cd benchmark/pybench
|
||||
export PKG_CONFIG_PATH=$PWD
|
||||
meson setup build --prefix=$PWD/build-install
|
||||
meson install -C build
|
||||
#
|
||||
# sanity check
|
||||
cd build/openblas_wrap
|
||||
python -c'import _flapack; print(dir(_flapack))'
|
||||
|
||||
- name: Run benchmarks under pytest-benchmark
|
||||
run: |
|
||||
cd benchmark/pybench
|
||||
pip install pytest-benchmark
|
||||
export PYTHONPATH=$PWD/build-install/lib/python${{matrix.pyver}}/site-packages/
|
||||
OPENBLAS_NUM_THREADS=1 pytest benchmarks/bench_blas.py -k 'gesdd'
|
||||
|
||||
- name: Run benchmarks
|
||||
uses: CodSpeedHQ/action@v2
|
||||
with:
|
||||
token: ${{ secrets.CODSPEED_TOKEN }}
|
||||
run: |
|
||||
cd benchmark/pybench
|
||||
export PYTHONPATH=$PWD/build-install/lib/python${{matrix.pyver}}/site-packages/
|
||||
OPENBLAS_NUM_THREADS=1 pytest benchmarks/bench_blas.py --codspeed
|
||||
|
|
@ -0,0 +1,40 @@
|
|||
name: Publish docs via GitHub Pages
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- develop
|
||||
pull_request:
|
||||
branches:
|
||||
- develop
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Deploy docs
|
||||
if: "github.repository == 'OpenMathLib/OpenBLAS'"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.10"
|
||||
|
||||
- name: Install MkDocs and doc theme packages
|
||||
run: pip install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin
|
||||
|
||||
- name: Build docs site
|
||||
run: mkdocs build
|
||||
|
||||
# mkdocs gh-deploy command only builds to the top-level, hence deploying
|
||||
# with this action instead.
|
||||
# Deploys to http://www.openmathlib.org/OpenBLAS/docs/
|
||||
- name: Deploy docs
|
||||
uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
|
||||
if: ${{ github.ref == 'refs/heads/develop' }}
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
publish_dir: ./site
|
||||
destination_dir: docs/
|
|
@ -33,10 +33,8 @@ jobs:
|
|||
|
||||
- name: Install APT deps
|
||||
run: |
|
||||
sudo add-apt-repository ppa:savoury1/virtualisation
|
||||
sudo apt-get update
|
||||
sudo apt-get install autoconf automake autotools-dev ninja-build make ccache \
|
||||
qemu-user-static
|
||||
sudo apt-get install autoconf automake autotools-dev ninja-build make ccache
|
||||
|
||||
- name: Download and install loongarch64-toolchain
|
||||
run: |
|
||||
|
@ -44,6 +42,20 @@ jobs:
|
|||
#wget https://github.com/loongson/build-tools/releases/download/2023.08.08/CLFS-loongarch64-8.1-x86_64-cross-tools-gcc-glibc.tar.xz
|
||||
tar -xf CLFS-loongarch64-8.1-x86_64-cross-tools-gcc-glibc.tar.xz -C /opt
|
||||
|
||||
- name: Checkout qemu
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: qemu/qemu
|
||||
path: qemu
|
||||
ref: master
|
||||
|
||||
- name: Install qemu
|
||||
run: |
|
||||
cd qemu
|
||||
./configure --prefix=$GITHUB_WORKSPACE/qemu-install --target-list=loongarch64-linux-user --disable-system --static
|
||||
make -j$(nproc)
|
||||
make install
|
||||
|
||||
- name: Set env
|
||||
run: |
|
||||
echo "LD_LIBRARY_PATH=/opt/cross-tools/target/usr/lib64:/opt/cross-tools/loongarch64-unknown-linux-gnu/lib64:$LD_LIBRARY_PATH" >> $GITHUB_ENV
|
||||
|
@ -76,44 +88,46 @@ jobs:
|
|||
|
||||
- name: Test
|
||||
run: |
|
||||
qemu-loongarch64-static ./utest/openblas_utest
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./ctest/xscblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./ctest/xdcblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./ctest/xccblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./ctest/xzcblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./ctest/xscblat2 < ./ctest/sin2
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./ctest/xdcblat2 < ./ctest/din2
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./ctest/xccblat2 < ./ctest/cin2
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./ctest/xzcblat2 < ./ctest/zin2
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./ctest/xscblat3 < ./ctest/sin3
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./ctest/xdcblat3 < ./ctest/din3
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./ctest/xccblat3 < ./ctest/cin3
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./ctest/xzcblat3 < ./ctest/zin3
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64-static ./test/sblat1
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64-static ./test/dblat1
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64-static ./test/cblat1
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64-static ./test/zblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./test/sblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./test/dblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./test/cblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./test/zblat1
|
||||
export PATH=$GITHUB_WORKSPACE/qemu-install/bin/:$PATH
|
||||
qemu-loongarch64 ./utest/openblas_utest
|
||||
qemu-loongarch64 ./utest/openblas_utest_ext
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xscblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xdcblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xccblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xzcblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xscblat2 < ./ctest/sin2
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xdcblat2 < ./ctest/din2
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xccblat2 < ./ctest/cin2
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xzcblat2 < ./ctest/zin2
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xscblat3 < ./ctest/sin3
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xdcblat3 < ./ctest/din3
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xccblat3 < ./ctest/cin3
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xzcblat3 < ./ctest/zin3
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/sblat1
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/dblat1
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/cblat1
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/zblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/sblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/dblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/cblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/zblat1
|
||||
rm -f ./test/?BLAT2.SUMM
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64-static ./test/sblat2 < ./test/sblat2.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64-static ./test/dblat2 < ./test/dblat2.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64-static ./test/cblat2 < ./test/cblat2.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64-static ./test/zblat2 < ./test/zblat2.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/sblat2 < ./test/sblat2.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/dblat2 < ./test/dblat2.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/cblat2 < ./test/cblat2.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/zblat2 < ./test/zblat2.dat
|
||||
rm -f ./test/?BLAT2.SUMM
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./test/sblat2 < ./test/sblat2.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./test/dblat2 < ./test/dblat2.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./test/cblat2 < ./test/cblat2.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./test/zblat2 < ./test/zblat2.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/sblat2 < ./test/sblat2.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/dblat2 < ./test/dblat2.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/cblat2 < ./test/cblat2.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/zblat2 < ./test/zblat2.dat
|
||||
rm -f ./test/?BLAT3.SUMM
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64-static ./test/sblat3 < ./test/sblat3.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64-static ./test/dblat3 < ./test/dblat3.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64-static ./test/cblat3 < ./test/cblat3.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64-static ./test/zblat3 < ./test/zblat3.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/sblat3 < ./test/sblat3.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/dblat3 < ./test/dblat3.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/cblat3 < ./test/cblat3.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/zblat3 < ./test/zblat3.dat
|
||||
rm -f ./test/?BLAT3.SUMM
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./test/sblat3 < ./test/sblat3.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./test/dblat3 < ./test/dblat3.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./test/cblat3 < ./test/cblat3.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64-static ./test/zblat3 < ./test/zblat3.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/sblat3 < ./test/sblat3.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/dblat3 < ./test/dblat3.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/cblat3 < ./test/cblat3.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/zblat3 < ./test/zblat3.dat
|
||||
|
|
|
@ -0,0 +1,135 @@
|
|||
name: loongarch64 clang qemu test
|
||||
|
||||
on: [push, pull_request]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
TEST:
|
||||
if: "github.repository == 'OpenMathLib/OpenBLAS'"
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- target: LOONGSONGENERIC
|
||||
opts: NO_SHARED=1 DYNAMIC_ARCH=1 TARGET=LOONGSONGENERIC
|
||||
- target: LOONGSON3R5
|
||||
opts: NO_SHARED=1 DYNAMIC_ARCH=1 TARGET=LOONGSON3R5
|
||||
- target: LOONGSON2K1000
|
||||
opts: NO_SHARED=1 DYNAMIC_ARCH=1 TARGET=LOONGSON2K1000
|
||||
- target: DYNAMIC_ARCH
|
||||
opts: NO_SHARED=1 DYNAMIC_ARCH=1 TARGET=GENERIC
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Install libffi6
|
||||
run: |
|
||||
wget http://ftp.ca.debian.org/debian/pool/main/libf/libffi/libffi6_3.2.1-9_amd64.deb
|
||||
sudo dpkg -i libffi6_3.2.1-9_amd64.deb
|
||||
|
||||
- name: Install APT deps
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install autoconf automake autotools-dev ninja-build make ccache
|
||||
|
||||
- name: Download and install loongarch64-toolchain
|
||||
run: |
|
||||
wget https://github.com/XiWeiGu/loongarch64_toolchain/releases/download/V0.1/clang+llvm_8.0.1-6_amd64-linux-gnu_debian-10.tar.gz
|
||||
wget https://github.com/XiWeiGu/loongarch64_toolchain/releases/download/V0.1/loongson-gnu-toolchain-8.3-x86_64-loongarch64-linux-gnu-rc1.3.tar.xz
|
||||
tar -xf clang+llvm_8.0.1-6_amd64-linux-gnu_debian-10.tar.gz -C /opt
|
||||
tar -xf loongson-gnu-toolchain-8.3-x86_64-loongarch64-linux-gnu-rc1.3.tar.xz -C /opt
|
||||
|
||||
- name: Checkout qemu
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
repository: qemu/qemu
|
||||
path: qemu
|
||||
ref: master
|
||||
|
||||
- name: Install qemu
|
||||
run: |
|
||||
cd qemu
|
||||
./configure --prefix=$GITHUB_WORKSPACE/qemu-install --target-list=loongarch64-linux-user --disable-system --static
|
||||
make -j$(nproc)
|
||||
make install
|
||||
|
||||
- name: Set env
|
||||
run: |
|
||||
echo "PATH=$GITHUB_WORKSPACE:/opt/clang+llvm_8.0.1-6_amd64-linux-gnu_debian-10/bin:/opt/loongson-gnu-toolchain-8.3-x86_64-loongarch64-linux-gnu-rc1.3/bin:$PATH" >> $GITHUB_ENV
|
||||
|
||||
- name: Compilation cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/.ccache
|
||||
key: ccache-${{ runner.os }}-${{ matrix.target }}-${{ github.ref }}-${{ github.sha }}
|
||||
restore-keys: |
|
||||
ccache-${{ runner.os }}-${{ matrix.target }}-${{ github.ref }}
|
||||
ccache-${{ runner.os }}-${{ matrix.target }}
|
||||
|
||||
- name: Configure ccache
|
||||
run: |
|
||||
test -d ~/.ccache || mkdir -p ~/.ccache
|
||||
echo "max_size = 300M" > ~/.ccache/ccache.conf
|
||||
echo "compression = true" >> ~/.ccache/ccache.conf
|
||||
ccache -s
|
||||
|
||||
- name: Disable utest dsdot:dsdot_n_1
|
||||
run: |
|
||||
echo -n > utest/test_dsdot.c
|
||||
echo "Due to the qemu versions 7.2 causing utest cases to fail,"
|
||||
echo "the utest dsdot:dsdot_n_1 have been temporarily disabled."
|
||||
|
||||
- name: Build OpenBLAS
|
||||
run: make CC='ccache clang --target=loongarch64-linux-gnu --sysroot=/opt/loongson-gnu-toolchain-8.3-x86_64-loongarch64-linux-gnu-rc1.3/loongarch64-linux-gnu/sysroot/ -static' FC='ccache loongarch64-linux-gnu-gfortran -static' HOSTCC='ccache clang' CROSS_SUFFIX=llvm- NO_SHARED=1 ${{ matrix.opts }} -j$(nproc)
|
||||
|
||||
- name: Test
|
||||
run: |
|
||||
export PATH=$GITHUB_WORKSPACE/qemu-install/bin/:$PATH
|
||||
qemu-loongarch64 ./utest/openblas_utest
|
||||
qemu-loongarch64 ./utest/openblas_utest_ext
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xscblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xdcblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xccblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xzcblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xscblat2 < ./ctest/sin2
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xdcblat2 < ./ctest/din2
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xccblat2 < ./ctest/cin2
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xzcblat2 < ./ctest/zin2
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xscblat3 < ./ctest/sin3
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xdcblat3 < ./ctest/din3
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xccblat3 < ./ctest/cin3
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./ctest/xzcblat3 < ./ctest/zin3
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/sblat1
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/dblat1
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/cblat1
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/zblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/sblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/dblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/cblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/zblat1
|
||||
rm -f ./test/?BLAT2.SUMM
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/sblat2 < ./test/sblat2.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/dblat2 < ./test/dblat2.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/cblat2 < ./test/cblat2.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/zblat2 < ./test/zblat2.dat
|
||||
rm -f ./test/?BLAT2.SUMM
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/sblat2 < ./test/sblat2.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/dblat2 < ./test/dblat2.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/cblat2 < ./test/cblat2.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/zblat2 < ./test/zblat2.dat
|
||||
rm -f ./test/?BLAT3.SUMM
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/sblat3 < ./test/sblat3.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/dblat3 < ./test/dblat3.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/cblat3 < ./test/cblat3.dat
|
||||
OPENBLAS_NUM_THREADS=1 OMP_NUM_THREADS=1 qemu-loongarch64 ./test/zblat3 < ./test/zblat3.dat
|
||||
rm -f ./test/?BLAT3.SUMM
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/sblat3 < ./test/sblat3.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/dblat3 < ./test/dblat3.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/cblat3 < ./test/cblat3.dat
|
||||
OPENBLAS_NUM_THREADS=2 qemu-loongarch64 ./test/zblat3 < ./test/zblat3.dat
|
||||
|
|
@ -80,6 +80,7 @@ jobs:
|
|||
run: |
|
||||
export PATH=$GITHUB_WORKSPACE/qemu-install/bin/:$PATH
|
||||
qemu-mips64el ./utest/openblas_utest
|
||||
qemu-mips64el ./utest/openblas_utest_ext
|
||||
OPENBLAS_NUM_THREADS=2 qemu-mips64el ./ctest/xscblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-mips64el ./ctest/xdcblat1
|
||||
OPENBLAS_NUM_THREADS=2 qemu-mips64el ./ctest/xccblat1
|
||||
|
|
|
@ -28,6 +28,9 @@ jobs:
|
|||
- target: RISCV64_ZVL256B
|
||||
opts: TARGET=RISCV64_ZVL256B BINARY=64 ARCH=riscv64
|
||||
qemu_cpu: rv64,g=true,c=true,v=true,vext_spec=v1.0,vlen=256,elen=64
|
||||
- target: DYNAMIC_ARCH=1
|
||||
opts: TARGET=RISCV64_GENERIC BINARY=64 ARCH=riscv64 DYNAMIC_ARCH=1
|
||||
qemu_cpu: rv64,g=true,c=true,v=true,vext_spec=v1.0,vlen=256,elen=64
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
|
|
|
@ -109,3 +109,4 @@ benchmark/smallscaling
|
|||
CMakeCache.txt
|
||||
CMakeFiles/*
|
||||
.vscode
|
||||
**/__pycache__
|
||||
|
|
|
@ -22,6 +22,8 @@ option(BUILD_WITHOUT_LAPACK "Do not build LAPACK and LAPACKE (Only BLAS or CBLAS
|
|||
|
||||
option(BUILD_LAPACK_DEPRECATED "When building LAPACK, include also some older, deprecated routines" ON)
|
||||
|
||||
set(LAPACK_STRLEN "" CACHE STRING "When building LAPACK, use this type (e.g. \"int\") for character lengths (defaults to size_t)")
|
||||
|
||||
option(BUILD_TESTING "Build LAPACK testsuite when building LAPACK" ON)
|
||||
|
||||
option(BUILD_BENCHMARKS "Build the collection of BLAS/LAPACK benchmarks" OFF)
|
||||
|
@ -30,7 +32,7 @@ option(C_LAPACK "Build LAPACK from C sources instead of the original Fortran" OF
|
|||
|
||||
option(BUILD_WITHOUT_CBLAS "Do not build the C interface (CBLAS) to the BLAS functions" OFF)
|
||||
|
||||
option(DYNAMIC_ARCH "Include support for multiple CPU targets, with automatic selection at runtime (x86/x86_64, aarch64 or ppc only)" OFF)
|
||||
option(DYNAMIC_ARCH "Include support for multiple CPU targets, with automatic selection at runtime (x86/x86_64, aarch64, ppc or RISCV64-RVV1.0 only)" OFF)
|
||||
|
||||
option(DYNAMIC_OLDER "Include specific support for older x86 cpu models (Penryn,Dunnington,Atom,Nano,Opteron) with DYNAMIC_ARCH" OFF)
|
||||
|
||||
|
@ -256,6 +258,10 @@ if (${CMAKE_SYSTEM_NAME} MATCHES "AIX|Android|Linux|FreeBSD|OpenBSD|NetBSD|Drago
|
|||
endif()
|
||||
endif()
|
||||
|
||||
if (APPLE AND BUILD_SHARED_LIBS)
|
||||
set(CMAKE_MACOSX_RPATH ON)
|
||||
endif()
|
||||
|
||||
# Seems that this hack doesn't required since macOS 11 Big Sur
|
||||
if (APPLE AND BUILD_SHARED_LIBS AND CMAKE_HOST_SYSTEM_VERSION VERSION_LESS 20)
|
||||
set (CMAKE_C_USE_RESPONSE_FILE_FOR_OBJECTS 1)
|
||||
|
|
|
@ -198,6 +198,9 @@ In chronological order:
|
|||
* PingTouGe Semiconductor Co., Ltd.
|
||||
* [2020-10] Add RISC-V Vector (0.7.1) support. Optimize BLAS kernels for Xuantie C910
|
||||
|
||||
* Jake Arkinstall <https://github.com/jake-arkinstall>
|
||||
* [2021-02-10] Remove in-source configure_file to enable builds in read-only contexts (issue #3100, PR #3101)
|
||||
|
||||
* River Dillon <oss@outerpassage.net>
|
||||
* [2021-07-10] fix compilation with musl libc
|
||||
|
||||
|
|
123
Changelog.txt
123
Changelog.txt
|
@ -1,4 +1,127 @@
|
|||
OpenBLAS ChangeLog
|
||||
====================================================================
|
||||
Version 0.3.28
|
||||
8-Aug-2024
|
||||
|
||||
general:
|
||||
- Reworked the unfinished implementation of HUGETLB from GotoBLAS
|
||||
for allocating huge memory pages as buffers on suitable systems
|
||||
- Changed the unfinished implementation of GEMM3M for the generic
|
||||
target on all architectures to at least forward to regular GEMM
|
||||
- Improved multithreaded GEMM performance for large non-skinny matrices
|
||||
- Improved BLAS3 performance on larger multicore systems through improved
|
||||
parallelism
|
||||
- Improved performance of the initial memory allocation by reducing
|
||||
locking overhead
|
||||
- Improved performance of GBMV at small problem sizes by introducing
|
||||
a size barrier for the switch to multithreading
|
||||
- Added an implementation of the CBLAS_GEMM_BATCH extension
|
||||
- Fixed miscompilation of CAXPYC and ZAXPYC on all architectures in
|
||||
CMAKE builds (error introduced in 0.3.27)
|
||||
- Fixed corner cases involving the handling of NAN and INFINITY
|
||||
arguments in ?SCAL on all architectures
|
||||
- Added support for cross-compiling to WEBM with CMAKE (in addition
|
||||
to the already present makefile support)
|
||||
- Fixed NAN handling and potential accuracy issues in compilations with
|
||||
Intel ICX by supplying a suitable fp-model option by default
|
||||
- The contents of the github project wiki have been converted into
|
||||
a new set of documentation included with the source code.
|
||||
- It is now possible to register a callback function that replaces
|
||||
the built-in support for multithreading with an external backend
|
||||
like TBB (openblas_set_threads_callback_function)
|
||||
- Fixed potential duplication of suffixes in shared library naming
|
||||
- Improved C compiler detection by the build system to tolerate more
|
||||
naming variants for gcc builds
|
||||
- Fixed an unnecessary dependency of the utest on CBLAS
|
||||
- Fixed spurious error reports from the BLAS extensions utest
|
||||
- Fixed unwanted invocation of the GEMM3M tests in cross-compilation
|
||||
- Fixed a flaw in the makefile build that could lead to the pkgconfig
|
||||
file containing an entry of UNKNOWN for the target cpu after installing
|
||||
- Integrated fixes from the Reference-LAPACK project:
|
||||
- Fixed uninitialized variables in the LAPACK tests for ?QP3RK (PR 961)
|
||||
- Fixed potential bounds error in ?UNHR_COL/?ORHR_COL (PR 1018)
|
||||
- Fixed potential infinite loop in the LAPACK testsuite (PR 1024)
|
||||
- Make the variable type used for hidden length arguments configurable (PR 1025)
|
||||
- Fixed SYTRD workspace computation and various typos (PR 1030)
|
||||
- Prevent compiler use of FMA that could increase numerical error in ?GEEVX (PR 1033)
|
||||
|
||||
x86-64:
|
||||
- reverted thread management under Windows to its state before 0.3.26
|
||||
due to signs of race conditions in some circumstances now under study
|
||||
- fixed accidental selection of the unoptimized generic SBGEMM kernel
|
||||
in CMAKE builds for CooperLake and SapphireRapids targets
|
||||
- fixed a potential thread buffer overrun in SBSTOBF16 on small systems
|
||||
- fixed an accuracy issue in ZSCAL introduced in 0.3.26
|
||||
- fixed compilation with CMAKE and recent releases of LLVM
|
||||
- added support for Intel Emerald Rapids and Meteor Lake cpus
|
||||
- added autodetection support for the Zhaoxin KX-7000 cpu
|
||||
- fixed autodetection of Intel Prescott (probably broken since 0.3.19)
|
||||
- fixed compilation for older targets with the Yocto SDK
|
||||
- fixed compilation of the converter-generated C versions
|
||||
of the LAPACK sources with gcc-14
|
||||
- improved compiler options when building with CMAKE and LLVM for
|
||||
AVX512-capable targets
|
||||
- added support for supplying the L2 cache size via an environment
|
||||
variable (OPENBLAS_L2_SIZE) in case it is not correctly reported
|
||||
(as in some VM configurations)
|
||||
- improved the error message shown when thread creation fails on startup
|
||||
- fixed setting the rpath entry of the dylib in CMAKE builds on MacOS
|
||||
|
||||
arm:
|
||||
- fixed building for baremetal targets with make
|
||||
|
||||
arm64:
|
||||
- Added a fast path forwarding SGEMM and DGEMM calls with a 1xN or Mx1
|
||||
matrix to the corresponding GEMV kernel
|
||||
- added optimized SGEMV and DGEMV kernels for A64FX
|
||||
- added optimized SVE kernels for small-matrix GEMM
|
||||
- added A64FX to the cpu list for DYNAMIC_ARCH
|
||||
- fixed building with support for cpu affinity
|
||||
- worked around accuracy problems with C/ZNRM2 on NeoverseN1 and
|
||||
Apple M targets
|
||||
- improved GEMM performance on Neoverse V1
|
||||
- fixed compilation for NEOVERSEN2 with older compilers
|
||||
- fixed potential miscompilation of the SVE SDOT and DDOT kernels
|
||||
- fixed potential miscompilation of the non-SVE CDOT and ZDOT kernels
|
||||
- fixed a potential overflow when using very large user-defined BUFFERSIZE
|
||||
- fixed setting the rpath entry of the dylib in CMAKE builds on MacOS
|
||||
|
||||
power:
|
||||
- Added a fast path forwarding SGEMM and DGEMM calls with a 1xN or Mx1
|
||||
matrix to the corresponding GEMV kernel
|
||||
- significantly improved performance of SBGEMM on POWER10
|
||||
- fixed compilation with OpenMP and the XLF compiler
|
||||
- fixed building of the BLAS extension utests under AIX
|
||||
- fixed building of parts of the LAPACK testsuite with XLF
|
||||
- fixed CSWAP/ZSWAP on big-endian POWER10 targets
|
||||
- fixed a performance regression in SAXPY on POWER10 with OpenXL
|
||||
- fixed accuracy issues in CSCAL/ZSCAL when compiled with LLVM
|
||||
- fixed building for POWER9 under FreeBSD
|
||||
- fixed a potential overflow when using very large user-defined BUFFERSIZE
|
||||
- fixed an accuracy issue in the POWER6 kernels for GEMM and GEMV
|
||||
|
||||
riscv64:
|
||||
- Added a fast path forwarding SGEMM and DGEMM calls with a 1xN or Mx1
|
||||
matrix to the corresponding GEMV kernel
|
||||
- fixed building for RISCV64_GENERIC with OpenMP enabled
|
||||
- added DYNAMIC_ARCH support (comprising GENERIC_RISCV64 and the two
|
||||
RVV 1.0 targets with vector length of 128 and 256)
|
||||
- worked around the ZVL128B kernels for AXPBY mishandling the special
|
||||
case of zero Y increment
|
||||
|
||||
loongarch64:
|
||||
- improved GEMM performance on servers of the 3C5000 generation
|
||||
- improved performance and stability of DGEMM
|
||||
- improved GEMV and TRSM kernels for LSX and LASX vector ABIs
|
||||
- fixed CMAKE compilation with the INTERFACE64 option set
|
||||
- fixed compilation with CMAKE
|
||||
- worked around spurious errors flagged by the BLAS3 tests
|
||||
- worked around a miscompilation of the POTRS utest by gcc 14.1
|
||||
|
||||
mips64:
|
||||
- fixed ASUM and SUM kernels to accept negative step sizes in X
|
||||
- fixed complex GEMV kernels for MSA
|
||||
|
||||
====================================================================
|
||||
Version 0.3.27
|
||||
4-Apr-2024
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
pipeline {
|
||||
agent {
|
||||
docker {
|
||||
image 'osuosl/ubuntu-ppc64le'
|
||||
image 'osuosl/ubuntu-ppc64le:18.04'
|
||||
}
|
||||
}
|
||||
stages {
|
||||
|
|
4
Makefile
4
Makefile
|
@ -45,6 +45,10 @@ else
|
|||
LAPACK_NOOPT := $(filter-out -O0 -O1 -O2 -O3 -Ofast -O -Og -Os,$(LAPACK_FFLAGS))
|
||||
endif
|
||||
|
||||
ifdef LAPACK_STRLEN
|
||||
LAPACK_FFLAGS += -DLAPACK_STRLEN=$(LAPACK_STRLEN)
|
||||
endif
|
||||
|
||||
SUBDIRS_ALL = $(SUBDIRS) test ctest utest exports benchmark ../laswp ../bench cpp_thread_test
|
||||
|
||||
.PHONY : all libs netlib $(RELA) test ctest shared install
|
||||
|
|
|
@ -145,13 +145,13 @@ ifeq (1, $(filter 1,$(GCCMINORVERSIONGTEQ4) $(GCCVERSIONGTEQ11) $(ISCLANG)))
|
|||
ifneq ($(OSNAME), Darwin)
|
||||
CCOMMON_OPT += -march=armv8.5-a+sve+sve2+bf16 -mtune=neoverse-n2
|
||||
else
|
||||
CCOMMON_OPT += -march=armv8.2-a+sve -mtune=cortex-a72
|
||||
CCOMMON_OPT += -march=armv8.2-a+sve+bf16 -mtune=cortex-a72
|
||||
endif
|
||||
ifneq ($(F_COMPILER), NAG)
|
||||
FCOMMON_OPT += -march=armv8.5-a+sve+sve2+bf16 -mtune=neoverse-n2
|
||||
endif
|
||||
else
|
||||
CCOMMON_OPT += -march=armv8.5-a+sve
|
||||
CCOMMON_OPT += -march=armv8.5-a+sve+bf16
|
||||
ifneq ($(CROSS), 1)
|
||||
CCOMMON_OPT += -mtune=native
|
||||
endif
|
||||
|
@ -163,19 +163,29 @@ endif
|
|||
endif
|
||||
endif
|
||||
else
|
||||
CCOMMON_OPT += -march=armv8.2-a+sve -mtune=cortex-a72
|
||||
CCOMMON_OPT += -march=armv8.2-a+sve+bf16 -mtune=cortex-a72
|
||||
ifneq ($(F_COMPILER), NAG)
|
||||
FCOMMON_OPT += -march=armv8.2-a -mtune=cortex-a72
|
||||
endif
|
||||
endif
|
||||
else
|
||||
CCOMMON_OPT += -march=armv8-a+sve -mtune=cortex-a72
|
||||
CCOMMON_OPT += -march=armv8-a+sve+bf16 -mtune=cortex-a72
|
||||
ifneq ($(F_COMPILER), NAG)
|
||||
FCOMMON_OPT += -march=armv8-a -mtune=cortex-a72
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
|
||||
# Detect ARM Neoverse V2.
|
||||
ifeq ($(CORE), NEOVERSEV2)
|
||||
ifeq (1, $(filter 1,$(GCCVERSIONGTEQ12) $(ISCLANG)))
|
||||
CCOMMON_OPT += -march=armv9-a -mtune=neoverse-v2
|
||||
ifneq ($(F_COMPILER), NAG)
|
||||
FCOMMON_OPT += -march=armv9-a -mtune=neoverse-v2
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
|
||||
# Use a53 tunings because a55 is only available in GCC>=8.1
|
||||
ifeq ($(CORE), CORTEXA55)
|
||||
ifeq (1, $(filter 1,$(GCCVERSIONGTEQ7) $(ISCLANG)))
|
||||
|
@ -266,12 +276,19 @@ endif
|
|||
endif
|
||||
endif
|
||||
|
||||
ifeq (1, $(filter 1,$(GCCVERSIONGTEQ11) $(ISCLANG)))
|
||||
ifeq ($(CORE), A64FX)
|
||||
ifeq (1, $(filter 1,$(GCCVERSIONGTEQ10) $(ISCLANG)))
|
||||
ifeq (1, $(filter 1,$(GCCMINORVERSIONGTEQ3) $(GCCVERSIONGTEQ11) $(ISCLANG)))
|
||||
CCOMMON_OPT += -march=armv8.2-a+sve -mtune=a64fx
|
||||
ifneq ($(F_COMPILER), NAG)
|
||||
FCOMMON_OPT += -march=armv8.2-a+sve -mtune=a64fx
|
||||
endif
|
||||
else
|
||||
CCOMMON_OPT += -march=armv8.4-a+sve -mtune=neoverse-n1
|
||||
ifneq ($(F_COMPILER), NAG)
|
||||
FCOMMON_OPT += -march=armv8.4-a -mtune=neoverse-n1
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
|
||||
|
|
|
@ -72,18 +72,18 @@ ifndef NO_CBLAS
|
|||
@echo Generating cblas.h in $(DESTDIR)$(OPENBLAS_INCLUDE_DIR)
|
||||
@cp cblas.h cblas.tmp
|
||||
ifdef SYMBOLPREFIX
|
||||
@sed 's/cblas[^( ]*/$(SYMBOLPREFIX)&/g' cblas.tmp > cblas.tmp2
|
||||
@sed 's/openblas[^( ]*/$(SYMBOLPREFIX)&/g' cblas.tmp2 > cblas.tmp
|
||||
@sed 's/cblas[^() ]*/$(SYMBOLPREFIX)&/g' cblas.tmp > cblas.tmp2
|
||||
@sed 's/openblas[^() ]*/$(SYMBOLPREFIX)&/g' cblas.tmp2 > cblas.tmp
|
||||
#change back any openblas_complex_float and double that got hit
|
||||
@sed 's/$(SYMBOLPREFIX)openblas_complex_/openblas_complex_/g' cblas.tmp > cblas.tmp2
|
||||
@sed 's/goto[^( ]*/$(SYMBOLPREFIX)&/g' cblas.tmp2 > cblas.tmp
|
||||
@sed 's/goto[^() ]*/$(SYMBOLPREFIX)&/g' cblas.tmp2 > cblas.tmp
|
||||
endif
|
||||
ifdef SYMBOLSUFFIX
|
||||
@sed 's/cblas[^( ]*/&$(SYMBOLSUFFIX)/g' cblas.tmp > cblas.tmp2
|
||||
@sed 's/openblas[^( ]*/&$(SYMBOLSUFFIX)/g' cblas.tmp2 > cblas.tmp
|
||||
@sed 's/cblas[^() ]*/&$(SYMBOLSUFFIX)/g' cblas.tmp > cblas.tmp2
|
||||
@sed 's/openblas[^() ]*/&$(SYMBOLSUFFIX)/g' cblas.tmp2 > cblas.tmp
|
||||
#change back any openblas_complex_float and double that got hit
|
||||
@sed 's/\(openblas_complex_\)\([^ ]*\)$(SYMBOLSUFFIX)/\1\2 /g' cblas.tmp > cblas.tmp2
|
||||
@sed 's/goto[^( ]*/&$(SYMBOLSUFFIX)/g' cblas.tmp2 > cblas.tmp
|
||||
@sed 's/goto[^() ]*/&$(SYMBOLSUFFIX)/g' cblas.tmp2 > cblas.tmp
|
||||
endif
|
||||
@sed 's/common/openblas_config/g' cblas.tmp > "$(DESTDIR)$(OPENBLAS_INCLUDE_DIR)/cblas.h"
|
||||
endif
|
||||
|
@ -178,7 +178,7 @@ endif
|
|||
@echo 'libnamesuffix='$(LIBNAMESUFFIX) >> "$(PKGFILE)"
|
||||
@echo 'libsuffix='$(SYMBOLSUFFIX) >> "$(PKGFILE)"
|
||||
@echo 'includedir='$(OPENBLAS_INCLUDE_DIR) >> "$(PKGFILE)"
|
||||
@echo 'openblas_config= USE_64BITINT='$(INTERFACE64) 'DYNAMIC_ARCH='$(DYNAMIC_ARCH) 'DYNAMIC_OLDER='$(DYNAMIC_OLDER) 'NO_CBLAS='$(NO_CBLAS) 'NO_LAPACK='$(NO_LAPACK) 'NO_LAPACKE='$(NO_LAPACKE) 'NO_AFFINITY='$(NO_AFFINITY) 'USE_OPENMP='$(USE_OPENMP) $(CORE) 'MAX_THREADS='$(NUM_THREADS)>> "$(PKGFILE)"
|
||||
@echo 'openblas_config= USE_64BITINT='$(INTERFACE64) 'DYNAMIC_ARCH='$(DYNAMIC_ARCH) 'DYNAMIC_OLDER='$(DYNAMIC_OLDER) 'NO_CBLAS='$(NO_CBLAS) 'NO_LAPACK='$(NO_LAPACK) 'NO_LAPACKE='$(NO_LAPACKE) 'NO_AFFINITY='$(NO_AFFINITY) 'USE_OPENMP='$(USE_OPENMP) $(TARGET) 'MAX_THREADS='$(NUM_THREADS)>> "$(PKGFILE)"
|
||||
@echo 'version='$(VERSION) >> "$(PKGFILE)"
|
||||
@echo 'extralib='$(PKG_EXTRALIB) >> "$(PKGFILE)"
|
||||
@cat openblas.pc.in >> "$(PKGFILE)"
|
||||
|
|
|
@ -8,13 +8,13 @@ FCOMMON_OPT += -march=rv64imafdcv_zba_zbb_zfh -mabi=lp64d -static
|
|||
endif
|
||||
ifeq ($(CORE), RISCV64_ZVL256B)
|
||||
CCOMMON_OPT += -march=rv64imafdcv_zvl256b -mabi=lp64d
|
||||
FCOMMON_OPT += -march=rv64imafdcv -mabi=lp64d -static
|
||||
FCOMMON_OPT += -march=rv64imafdcv -mabi=lp64d
|
||||
endif
|
||||
ifeq ($(CORE), RISCV64_ZVL128B)
|
||||
CCOMMON_OPT += -march=rv64imafdcv -mabi=lp64d
|
||||
FCOMMON_OPT += -march=rv64imafdcv -mabi=lp64d -static
|
||||
FCOMMON_OPT += -march=rv64imafdcv -mabi=lp64d
|
||||
endif
|
||||
ifeq ($(CORE), RISCV64_GENERIC)
|
||||
CCOMMON_OPT += -march=rv64imafdc -mabi=lp64d
|
||||
FCOMMON_OPT += -march=rv64imafdc -mabi=lp64d -static
|
||||
FCOMMON_OPT += -march=rv64imafdc -mabi=lp64d
|
||||
endif
|
||||
|
|
|
@ -134,6 +134,12 @@ VERSION = 0.3.28
|
|||
# Build LAPACK Deprecated functions since LAPACK 3.6.0
|
||||
BUILD_LAPACK_DEPRECATED = 1
|
||||
|
||||
# The variable type assumed for the length of character arguments when passing
|
||||
# data between Fortran LAPACK and C BLAS (defaults to "size_t", but older GCC
|
||||
# versions used "int"). Mismatches will not cause runtime failures but may result
|
||||
# in build warnings or errors when building with link-time optimization (LTO)
|
||||
# LAPACK_STRLEN=int
|
||||
|
||||
# Build RecursiveLAPACK on top of LAPACK
|
||||
# BUILD_RELAPACK = 1
|
||||
# Have RecursiveLAPACK actually replace standard LAPACK routines instead of
|
||||
|
@ -173,6 +179,10 @@ NO_AFFINITY = 1
|
|||
# If you are compiling for Linux and you have more than 16 numa nodes or more than 256 cpus
|
||||
# BIGNUMA = 1
|
||||
|
||||
# If you are compiling for an embedded system ("bare metal") like Cortex M series
|
||||
# Note that you will have to provide implementations of malloc() and free() in this case
|
||||
# EMBEDDED = 1
|
||||
|
||||
# Don't use AVX kernel on Sandy Bridge. It is compatible with old compilers
|
||||
# and OS. However, the performance is low.
|
||||
# NO_AVX = 1
|
||||
|
@ -215,6 +225,16 @@ NO_AFFINITY = 1
|
|||
# to the user space. If bigphysarea is enabled, it will use it.
|
||||
# DEVICEDRIVER_ALLOCATION = 1
|
||||
|
||||
# Use large page allocation (called hugepage support in Linux context)
|
||||
# for the thread buffers (with access by shared memory operations)
|
||||
# HUGETLB_ALLOCATION = 1
|
||||
|
||||
# Use large page allocation called hugepages in Linux) based on mmap accessing
|
||||
# a memory-backed pseudofile (requires hugetlbfs to be mounted in the system,
|
||||
# the example below has it mounted on /hugepages. OpenBLAS will create the backing
|
||||
# file as gotoblas.processid in that path)
|
||||
# HUGETLBFILE_ALLOCATION = /hugepages
|
||||
|
||||
# If you need to synchronize FP CSR between threads (for x86/x86_64 and aarch64 only).
|
||||
# CONSISTENT_FPCSR = 1
|
||||
|
||||
|
|
|
@ -268,10 +268,30 @@ SMALL_MATRIX_OPT = 1
|
|||
else ifeq ($(ARCH), power)
|
||||
SMALL_MATRIX_OPT = 1
|
||||
BUILD_BFLOAT16 = 1
|
||||
else ifeq ($(ARCH), arm64)
|
||||
SMALL_MATRIX_OPT = 1
|
||||
endif
|
||||
ifeq ($(ARCH), loongarch64)
|
||||
SMALL_MATRIX_OPT = 1
|
||||
endif
|
||||
ifeq ($(ARCH), arm64)
|
||||
GEMM_GEMV_FORWARD = 1
|
||||
endif
|
||||
ifeq ($(ARCH), riscv)
|
||||
GEMM_GEMV_FORWARD = 1
|
||||
endif
|
||||
ifeq ($(ARCH), power)
|
||||
GEMM_GEMV_FORWARD = 1
|
||||
endif
|
||||
|
||||
ifeq ($(SMALL_MATRIX_OPT), 1)
|
||||
CCOMMON_OPT += -DSMALL_MATRIX_OPT
|
||||
endif
|
||||
ifeq ($(GEMM_GEMV_FORWARD), 1)
|
||||
ifneq ($(ONLY_CBLAS), 1)
|
||||
CCOMMON_OPT += -DGEMM_GEMV_FORWARD
|
||||
endif
|
||||
endif
|
||||
|
||||
# This operation is expensive, so execution should be once.
|
||||
ifndef GOTOBLAS_MAKEFILE
|
||||
|
@ -356,6 +376,9 @@ OBJCONV = $(CROSS_SUFFIX)objconv
|
|||
ifeq ($(NOFORTRAN), 1)
|
||||
C_LAPACK = 1
|
||||
override FEXTRALIB =
|
||||
ifeq ($(C_COMPILER), GCC)
|
||||
CCOMMON_OPT += -Wno-error=incompatible-pointer-types
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(C_COMPILER), GCC)
|
||||
|
@ -683,6 +706,7 @@ ifneq ($(NO_SVE), 1)
|
|||
DYNAMIC_CORE += NEOVERSEV1
|
||||
DYNAMIC_CORE += NEOVERSEN2
|
||||
DYNAMIC_CORE += ARMV8SVE
|
||||
DYNAMIC_CORE += A64FX
|
||||
endif
|
||||
DYNAMIC_CORE += THUNDERX
|
||||
DYNAMIC_CORE += THUNDERX2T99
|
||||
|
@ -709,6 +733,17 @@ ifeq ($(ARCH), loongarch64)
|
|||
DYNAMIC_CORE = LOONGSON3R5 LOONGSON2K1000 LOONGSONGENERIC
|
||||
endif
|
||||
|
||||
ifeq ($(ARCH), riscv64)
|
||||
DYNAMIC_CORE = RISCV64_GENERIC
|
||||
DYNAMIC_CORE += RISCV64_ZVL128B
|
||||
DYNAMIC_CORE += RISCV64_ZVL256B
|
||||
ifdef DYNAMIC_LIST
|
||||
override DYNAMIC_CORE = RISCV64_GENERIC $(DYNAMIC_LIST)
|
||||
XCCOMMON_OPT = -DDYNAMIC_LIST -DDYN_RISCV64_GENERIC
|
||||
XCCOMMON_OPT += $(foreach dcore,$(DYNAMIC_LIST),-DDYN_$(dcore))
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(ARCH), zarch)
|
||||
DYNAMIC_CORE = ZARCH_GENERIC
|
||||
|
||||
|
@ -811,8 +846,12 @@ ifeq ($(ARCH), arm)
|
|||
NO_BINARY_MODE = 1
|
||||
BINARY_DEFINED = 1
|
||||
|
||||
ifneq ($(EMBEDDED), 1)
|
||||
CCOMMON_OPT += -marm
|
||||
FCOMMON_OPT += -marm
|
||||
else
|
||||
CCOMMON_OPT += -DOS_EMBEDDED -mthumb -mcpu=cortex-m4 -mfloat-abi=hard -mfpu=fpv4-sp-d16
|
||||
endif
|
||||
|
||||
# If softfp abi is mentioned on the command line, force it.
|
||||
ifeq ($(ARM_SOFTFP_ABI), 1)
|
||||
|
@ -955,12 +994,18 @@ endif
|
|||
|
||||
ifeq ($(ARCH), loongarch64)
|
||||
LA64_ABI=$(shell $(CC) -mabi=lp64d -c $(TOPDIR)/cpuid_loongarch64.c -o /dev/null > /dev/null 2> /dev/null && echo lp64d)
|
||||
LA64_ARCH=$(shell $(CC) -march=loongarch64 -c $(TOPDIR)/cpuid_loongarch64.c -o /dev/null > /dev/null 2> /dev/null && echo loongarch64)
|
||||
ifneq ($(LA64_ABI), lp64d)
|
||||
LA64_ABI=lp64
|
||||
endif
|
||||
ifneq ($(LA64_ARCH), loongarch64)
|
||||
CCOMMON_OPT += -mabi=$(LA64_ABI)
|
||||
FCOMMON_OPT += -mabi=$(LA64_ABI)
|
||||
else
|
||||
CCOMMON_OPT += -march=loongarch64 -mabi=$(LA64_ABI)
|
||||
FCOMMON_OPT += -march=loongarch64 -mabi=$(LA64_ABI)
|
||||
endif
|
||||
endif
|
||||
|
||||
endif
|
||||
|
||||
|
@ -1195,9 +1240,6 @@ endif
|
|||
else
|
||||
FCOMMON_OPT += -q32
|
||||
endif
|
||||
ifeq ($(USE_OPENMP), 1)
|
||||
FCOMMON_OPT += -openmp
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(F_COMPILER), PGI)
|
||||
|
@ -1582,13 +1624,23 @@ ifdef FUNCTION_PROFILE
|
|||
CCOMMON_OPT += -DFUNCTION_PROFILE
|
||||
endif
|
||||
|
||||
ifdef SHMEM_ALLOCATION
|
||||
ifneq ($(SHMEM_ALLOCATION), 0)
|
||||
CCOMMON_OPT += -DALLOC_SHM
|
||||
endif
|
||||
endif
|
||||
|
||||
ifdef HUGETLB_ALLOCATION
|
||||
ifneq ($(HUGETLB_ALLOCATION), 0)
|
||||
CCOMMON_OPT += -DALLOC_HUGETLB
|
||||
endif
|
||||
endif
|
||||
|
||||
ifdef HUGETLBFILE_ALLOCATION
|
||||
ifneq ($(HUGETLBFILE_ALLOCATION), 0)
|
||||
CCOMMON_OPT += -DALLOC_HUGETLBFILE -DHUGETLB_FILE_NAME=$(HUGETLBFILE_ALLOCATION)
|
||||
endif
|
||||
endif
|
||||
|
||||
ifdef STATIC_ALLOCATION
|
||||
CCOMMON_OPT += -DALLOC_STATIC
|
||||
|
|
|
@ -8,6 +8,11 @@ endif
|
|||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(C_COMPILER), CLANG)
|
||||
ifeq ($(findstring icx,$(CC)),icx)
|
||||
CCOMMON_OPT += -fp-model=consistent
|
||||
endif
|
||||
endif
|
||||
|
||||
ifneq ($(DYNAMIC_ARCH),1)
|
||||
ADD_CPUFLAGS = 1
|
||||
|
|
|
@ -188,7 +188,7 @@ Please read `GotoBLAS_01Readme.txt` for older CPU models already supported by th
|
|||
|
||||
- **AIX**: Dynamic architecture with OpenXL and OpenMP.
|
||||
```sh
|
||||
make CC=ibm-clang_r FC=xlf TARGET=POWER7 BINARY=64 USE_OPENMP=1 INTERFACE64=1 DYNAMIC_ARCH=1 USE_THREAD=1
|
||||
make CC=ibm-clang_r FC=xlf_r TARGET=POWER7 BINARY=64 USE_OPENMP=1 INTERFACE64=1 DYNAMIC_ARCH=1 USE_THREAD=1
|
||||
```
|
||||
|
||||
#### IBM zEnterprise System
|
||||
|
@ -234,6 +234,8 @@ For **POWER**, the list encompasses POWER6, POWER8 and POWER9. POWER10 is additi
|
|||
|
||||
on **ZARCH** it comprises Z13 and Z14 as well as generic zarch support.
|
||||
|
||||
On **riscv64**, DYNAMIC_ARCH enables support for riscv64_zvl128b and riscv64_zvl256b in addition to generic riscv64 support. A compiler that supports RVV 1.0 is required to build OpenBLAS for riscv64 when DYNAMIC_ARCH is enabled.
|
||||
|
||||
The `TARGET` option can be used in conjunction with `DYNAMIC_ARCH=1` to specify which cpu model should be assumed for all the
|
||||
common code in the library, usually you will want to set this to the oldest model you expect to encounter.
|
||||
Please note that it is not possible to combine support for different architectures, so no combined 32 and 64 bit or x86_64 and arm64 in the same library.
|
||||
|
|
|
@ -133,29 +133,29 @@ jobs:
|
|||
mkdir build
|
||||
cd build
|
||||
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
|
||||
cmake -G "Ninja" -DCMAKE_C_COMPILER=cl -DCMAKE_Fortran_COMPILER=flang -DC_LAPACK=1 -DCMAKE_MT=mt -DCMAKE_BUILD_TYPE=Release -DMSVC_STATIC_CRT=ON ..
|
||||
cmake -G "Ninja" -DCMAKE_C_COMPILER=cl -DCMAKE_Fortran_COMPILER=flang-new -DC_LAPACK=1 -DCMAKE_MT=mt -DCMAKE_BUILD_TYPE=Release -DMSVC_STATIC_CRT=ON ..
|
||||
cmake --build . --config Release
|
||||
ctest
|
||||
|
||||
ctest --rerun-failed --output-on-failure
|
||||
|
||||
|
||||
- job: OSX_OpenMP
|
||||
pool:
|
||||
vmImage: 'macOS-11'
|
||||
vmImage: 'macOS-12'
|
||||
steps:
|
||||
- script: |
|
||||
brew update
|
||||
make TARGET=CORE2 DYNAMIC_ARCH=1 USE_OPENMP=1 INTERFACE64=1 CC=gcc-10 FC=gfortran-10
|
||||
make TARGET=CORE2 DYNAMIC_ARCH=1 USE_OPENMP=1 INTERFACE64=1 CC=gcc-10 FC=gfortran-10 PREFIX=../blasinst install
|
||||
make TARGET=CORE2 DYNAMIC_ARCH=1 USE_OPENMP=1 INTERFACE64=1 CC=gcc-13 FC=gfortran-13
|
||||
make TARGET=CORE2 DYNAMIC_ARCH=1 USE_OPENMP=1 INTERFACE64=1 CC=gcc-13 FC=gfortran-13 PREFIX=../blasinst install
|
||||
ls -lR ../blasinst
|
||||
|
||||
- job: OSX_GCC_Nothreads
|
||||
pool:
|
||||
vmImage: 'macOS-11'
|
||||
vmImage: 'macOS-12'
|
||||
steps:
|
||||
- script: |
|
||||
brew update
|
||||
make USE_THREADS=0 CC=gcc-10 FC=gfortran-10
|
||||
make USE_THREADS=0 CC=gcc-13 FC=gfortran-13
|
||||
|
||||
- job: OSX_GCC12
|
||||
pool:
|
||||
|
@ -195,7 +195,7 @@ jobs:
|
|||
|
||||
- job: OSX_dynarch_cmake
|
||||
pool:
|
||||
vmImage: 'macOS-11'
|
||||
vmImage: 'macOS-12'
|
||||
variables:
|
||||
LD_LIBRARY_PATH: /usr/local/opt/llvm/lib
|
||||
LIBRARY_PATH: /usr/local/opt/llvm/lib
|
||||
|
@ -203,7 +203,7 @@ jobs:
|
|||
- script: |
|
||||
mkdir build
|
||||
cd build
|
||||
cmake -DTARGET=CORE2 -DDYNAMIC_ARCH=1 -DDYNAMIC_LIST='NEHALEM HASWELL SKYLAKEX' -DCMAKE_C_COMPILER=gcc-10 -DCMAKE_Fortran_COMPILER=gfortran-10 -DBUILD_SHARED_LIBS=ON ..
|
||||
cmake -DTARGET=CORE2 -DDYNAMIC_ARCH=1 -DDYNAMIC_LIST='NEHALEM HASWELL SKYLAKEX' -DCMAKE_C_COMPILER=gcc-13 -DCMAKE_Fortran_COMPILER=gfortran-13 -DBUILD_SHARED_LIBS=ON ..
|
||||
cmake --build .
|
||||
ctest
|
||||
|
||||
|
@ -242,7 +242,7 @@ jobs:
|
|||
|
||||
- job: OSX_NDK_ARMV7
|
||||
pool:
|
||||
vmImage: 'macOS-11'
|
||||
vmImage: 'macOS-12'
|
||||
steps:
|
||||
- script: |
|
||||
brew update
|
||||
|
@ -252,35 +252,35 @@ jobs:
|
|||
|
||||
- job: OSX_IOS_ARMV8
|
||||
pool:
|
||||
vmImage: 'macOS-11'
|
||||
vmImage: 'macOS-12'
|
||||
variables:
|
||||
CC: /Applications/Xcode_12.4.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
CFLAGS: -O2 -Wno-macro-redefined -isysroot /Applications/Xcode_12.4.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS14.4.sdk -arch arm64 -miphoneos-version-min=10.0
|
||||
CC: /Applications/Xcode_14.2.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
CFLAGS: -O2 -Wno-macro-redefined -isysroot /Applications/Xcode_14.2.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS16.2.sdk -arch arm64 -miphoneos-version-min=10.0
|
||||
steps:
|
||||
- script: |
|
||||
make TARGET=ARMV8 DYNAMIC_ARCH=1 NUM_THREADS=32 HOSTCC=clang NOFORTRAN=1
|
||||
|
||||
- job: OSX_IOS_ARMV7
|
||||
pool:
|
||||
vmImage: 'macOS-11'
|
||||
vmImage: 'macOS-12'
|
||||
variables:
|
||||
CC: /Applications/Xcode_12.4.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
CFLAGS: -O2 -mno-thumb -Wno-macro-redefined -isysroot /Applications/Xcode_12.4.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS14.4.sdk -arch armv7 -miphoneos-version-min=5.1
|
||||
CC: /Applications/Xcode_14.2.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
CFLAGS: -O2 -mno-thumb -Wno-macro-redefined -isysroot /Applications/Xcode_14.2.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS16.2.sdk -arch armv7 -miphoneos-version-min=5.1
|
||||
steps:
|
||||
- script: |
|
||||
make TARGET=ARMV7 DYNAMIC_ARCH=1 NUM_THREADS=32 HOSTCC=clang NOFORTRAN=1
|
||||
|
||||
- job: OSX_xbuild_DYNAMIC_ARM64
|
||||
pool:
|
||||
vmImage: 'macOS-11'
|
||||
vmImage: 'macOS-12'
|
||||
variables:
|
||||
CC: /Applications/Xcode_12.5.1.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
CFLAGS: -O2 -Wno-macro-redefined -isysroot /Applications/Xcode_12.5.1.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX11.3.sdk -arch arm64
|
||||
CC: /Applications/Xcode_14.2.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
CFLAGS: -O2 -Wno-macro-redefined -isysroot /Applications/Xcode_14.2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX13.1.sdk -arch arm64
|
||||
steps:
|
||||
- script: |
|
||||
ls /Applications/Xcode_12.5.1.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs
|
||||
/Applications/Xcode_12.5.1.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang -arch arm64 --print-supported-cpus
|
||||
/Applications/Xcode_11.7.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang --version
|
||||
ls /Applications/Xcode_14.2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs
|
||||
/Applications/Xcode_12.2.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang -arch arm64 --print-supported-cpus
|
||||
/Applications/Xcode_14.2.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang --version
|
||||
make TARGET=ARMV8 DYNAMIC_ARCH=1 NUM_THREADS=32 HOSTCC=clang NOFORTRAN=1
|
||||
|
||||
- job: ALPINE_MUSL
|
||||
|
|
|
@ -0,0 +1,49 @@
|
|||
# Continuous benchmarking of OpenBLAS performance
|
||||
|
||||
We run a set of benchmarks of subset of OpenBLAS functionality.
|
||||
|
||||
## Benchmark runner
|
||||
|
||||
[](https://codspeed.io/OpenMathLib/OpenBLAS/)
|
||||
|
||||
Click on [benchmarks](https://codspeed.io/OpenMathLib/OpenBLAS/benchmarks) to see the performance of a particular benchmark over time;
|
||||
Click on [branches](https://codspeed.io/OpenMathLib/OpenBLAS/branches/) and then on the last PR link to see the flamegraphs.
|
||||
|
||||
## What are the benchmarks
|
||||
|
||||
We run raw BLAS/LAPACK subroutines, via f2py-generated python wrappers. The wrappers themselves are equivalent to [those from SciPy](https://docs.scipy.org/doc/scipy/reference/linalg.lapack.html).
|
||||
In fact, the wrappers _are_ from SciPy, we take a small subset simply to avoid having to build the whole SciPy for each CI run.
|
||||
|
||||
|
||||
## Adding a new benchmark
|
||||
|
||||
`.github/workflows/codspeed-bench.yml` does all the orchestration on CI.
|
||||
|
||||
Benchmarks live in the `benchmark/pybench` directory. It is organized as follows:
|
||||
|
||||
- benchmarks themselves live in the `benchmarks` folder. Note that the LAPACK routines are imported from the `openblas_wrap` package.
|
||||
- the `openblas_wrap` package is a simple trampoline: it contains an f2py extension, `_flapack`, which talks to OpenBLAS, and exports the python names in its `__init__.py`.
|
||||
This way, the `openblas_wrap` package shields the benchmarks from the details of where a particular LAPACK function comes from. If wanted, you may for instance swap the `_flapack` extension to
|
||||
`scipy.linalg.blas` and `scipy.linalg.lapack`.
|
||||
|
||||
To change parameters of an existing benchmark, edit python files in the `benchmark/pybench/benchmarks` directory.
|
||||
|
||||
To add a benchmark for a new BLAS or LAPACK function, you need to:
|
||||
|
||||
- add an f2py wrapper for the bare LAPACK function. You can simply copy a wrapper from SciPy (look for `*.pyf.src` files in https://github.com/scipy/scipy/tree/main/scipy/linalg)
|
||||
- add an import to `benchmark/pybench/openblas_wrap/__init__.py`
|
||||
|
||||
|
||||
## Running benchmarks locally
|
||||
|
||||
This benchmarking layer is orchestrated from python, therefore you'll need to
|
||||
have all what it takes to build OpenBLAS from source, plus `python` and
|
||||
|
||||
```
|
||||
$ python -mpip install numpy meson ninja pytest pytest-benchmark
|
||||
```
|
||||
|
||||
The benchmark syntax is consistent with that of `pytest-benchmark` framework. The incantation to run the suite locally is `$ pytest benchmark/pybench/benchmarks/test_blas.py`.
|
||||
|
||||
An ASV compatible benchmark suite is planned but currently not implemented.
|
||||
|
|
@ -0,0 +1,274 @@
|
|||
import pytest
|
||||
import numpy as np
|
||||
import openblas_wrap as ow
|
||||
|
||||
dtype_map = {
|
||||
's': np.float32,
|
||||
'd': np.float64,
|
||||
'c': np.complex64,
|
||||
'z': np.complex128,
|
||||
'dz': np.complex128,
|
||||
}
|
||||
|
||||
|
||||
# ### BLAS level 1 ###
|
||||
|
||||
# dnrm2
|
||||
|
||||
dnrm2_sizes = [100, 1000]
|
||||
|
||||
def run_dnrm2(n, x, incx, func):
|
||||
res = func(x, n, incx=incx)
|
||||
return res
|
||||
|
||||
|
||||
@pytest.mark.parametrize('variant', ['d', 'dz'])
|
||||
@pytest.mark.parametrize('n', dnrm2_sizes)
|
||||
def test_nrm2(benchmark, n, variant):
|
||||
rndm = np.random.RandomState(1234)
|
||||
dtyp = dtype_map[variant]
|
||||
|
||||
x = np.array(rndm.uniform(size=(n,)), dtype=dtyp)
|
||||
nrm2 = ow.get_func('nrm2', variant)
|
||||
result = benchmark(run_dnrm2, n, x, 1, nrm2)
|
||||
|
||||
|
||||
# ddot
|
||||
|
||||
ddot_sizes = [100, 1000]
|
||||
|
||||
def run_ddot(x, y, func):
|
||||
res = func(x, y)
|
||||
return res
|
||||
|
||||
|
||||
@pytest.mark.parametrize('n', ddot_sizes)
|
||||
def test_dot(benchmark, n):
|
||||
rndm = np.random.RandomState(1234)
|
||||
|
||||
x = np.array(rndm.uniform(size=(n,)), dtype=float)
|
||||
y = np.array(rndm.uniform(size=(n,)), dtype=float)
|
||||
dot = ow.get_func('dot', 'd')
|
||||
result = benchmark(run_ddot, x, y, dot)
|
||||
|
||||
|
||||
# daxpy
|
||||
|
||||
daxpy_sizes = [100, 1000]
|
||||
|
||||
def run_daxpy(x, y, func):
|
||||
res = func(x, y, a=2.0)
|
||||
return res
|
||||
|
||||
|
||||
@pytest.mark.parametrize('variant', ['s', 'd', 'c', 'z'])
|
||||
@pytest.mark.parametrize('n', daxpy_sizes)
|
||||
def test_daxpy(benchmark, n, variant):
|
||||
rndm = np.random.RandomState(1234)
|
||||
dtyp = dtype_map[variant]
|
||||
|
||||
x = np.array(rndm.uniform(size=(n,)), dtype=dtyp)
|
||||
y = np.array(rndm.uniform(size=(n,)), dtype=dtyp)
|
||||
axpy = ow.get_func('axpy', variant)
|
||||
result = benchmark(run_daxpy, x, y, axpy)
|
||||
|
||||
|
||||
# ### BLAS level 2 ###
|
||||
|
||||
gemv_sizes = [100, 1000]
|
||||
|
||||
def run_gemv(a, x, y, func):
|
||||
res = func(1.0, a, x, y=y, overwrite_y=True)
|
||||
return res
|
||||
|
||||
|
||||
@pytest.mark.parametrize('variant', ['s', 'd', 'c', 'z'])
@pytest.mark.parametrize('n', gemv_sizes)
def test_dgemv(benchmark, n, variant):
    """Benchmark ?gemv (y <- alpha*A*x) and check y is updated in place."""
    rndm = np.random.RandomState(1234)
    dtyp = dtype_map[variant]

    # Fix: the original allocated a throwaway x/y pair here and then
    # immediately rebuilt both; the dead first allocations are removed.
    # (Only the consumed random stream changes, which does not affect
    # the assertion below.)
    a = np.array(rndm.uniform(size=(n, n)), dtype=dtyp)
    x = np.array(rndm.uniform(size=(n,)), dtype=dtyp)
    y = np.zeros(n, dtype=dtyp)

    gemv = ow.get_func('gemv', variant)
    result = benchmark(run_gemv, a, x, y, gemv)

    # overwrite_y=True means the wrapper must hand back the same buffer.
    assert result is y
|
||||
|
||||
|
||||
# dgbmv

dgbmv_sizes = [100, 1000]


def run_gbmv(m, n, kl, ku, a, x, y, func):
    """Invoke a gbmv-style banded routine with ``alpha=1.0``, writing into ``y``."""
    return func(m, n, kl, ku, 1.0, a, x, y=y, overwrite_y=True)
|
||||
|
||||
|
||||
|
||||
@pytest.mark.parametrize('variant', ['s', 'd', 'c', 'z'])
@pytest.mark.parametrize('n', dgbmv_sizes)
@pytest.mark.parametrize('kl', [1])
def test_dgbmv(benchmark, n, kl, variant):
    """Benchmark ?gbmv for a square band matrix (kl sub- and super-diagonals)."""
    rng = np.random.RandomState(1234)
    dtype = dtype_map[variant]

    x = np.array(rng.uniform(size=(n,)), dtype=dtype)
    y = np.empty(n, dtype=dtype)

    m = n

    # Banded storage: 2*kl + 1 rows, Fortran order.
    band = np.array(rng.uniform(size=(2 * kl + 1, n)), dtype=dtype, order='F')

    gbmv = ow.get_func('gbmv', variant)
    result = benchmark(run_gbmv, m, n, kl, kl, band, x, y, gbmv)
    assert result is y
|
||||
|
||||
|
||||
# ### BLAS level 3 ###

# dgemm

gemm_sizes = [100, 1000]


def run_gemm(a, b, c, func):
    """Invoke a gemm-style routine with ``alpha=1.0``, writing the result into ``c``."""
    return func(1.0, a, b, c=c, overwrite_c=True)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('variant', ['s', 'd', 'c', 'z'])
@pytest.mark.parametrize('n', gemm_sizes)
def test_gemm(benchmark, n, variant):
    """Benchmark ?gemm (C <- alpha*A*B) and check C is updated in place."""
    rng = np.random.RandomState(1234)
    dtype = dtype_map[variant]
    a = np.array(rng.uniform(size=(n, n)), dtype=dtype, order='F')
    b = np.array(rng.uniform(size=(n, n)), dtype=dtype, order='F')
    c = np.empty((n, n), dtype=dtype, order='F')
    gemm = ow.get_func('gemm', variant)
    result = benchmark(run_gemm, a, b, c, gemm)
    assert result is c
|
||||
|
||||
|
||||
# dsyrk

syrk_sizes = [100, 1000]


def run_syrk(a, c, func):
    """Invoke a syrk-style routine with ``alpha=1.0``, writing the result into ``c``."""
    return func(1.0, a, c=c, overwrite_c=True)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('variant', ['s', 'd', 'c', 'z'])
@pytest.mark.parametrize('n', syrk_sizes)
def test_syrk(benchmark, n, variant):
    """Benchmark ?syrk / ?herk rank-k update and check C is updated in place."""
    rng = np.random.RandomState(1234)
    dtype = dtype_map[variant]
    a = np.array(rng.uniform(size=(n, n)), dtype=dtype, order='F')
    c = np.empty((n, n), dtype=dtype, order='F')
    syrk = ow.get_func('syrk', variant)
    result = benchmark(run_syrk, a, c, syrk)
    assert result is c
|
||||
|
||||
|
||||
# ### LAPACK ###

# linalg.solve

gesv_sizes = [100, 1000]


def run_gesv(a, b, func):
    """Invoke a gesv-style solver, allowing in-place overwrite of both operands."""
    return func(a, b, overwrite_a=True, overwrite_b=True)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('variant', ['s', 'd', 'c', 'z'])
@pytest.mark.parametrize('n', gesv_sizes)
def test_gesv(benchmark, n, variant):
    """Benchmark ?gesv (LU solve); check in-place overwrite and success flag."""
    rng = np.random.RandomState(1234)
    dtype = dtype_map[variant]

    # Adding the identity pushes the random matrix away from singularity.
    a = (np.array(rng.uniform(size=(n, n)), dtype=dtype, order='F') +
         np.eye(n, dtype=dtype, order='F'))
    b = np.array(rng.uniform(size=(n, 1)), dtype=dtype, order='F')
    gesv = ow.get_func('gesv', variant)
    lu, piv, x, info = benchmark(run_gesv, a, b, gesv)
    assert lu is a
    assert x is b
    assert info == 0
|
||||
|
||||
|
||||
# linalg.svd

gesdd_sizes = [(100, 5), (1000, 222)]


def run_gesdd(a, lwork, func):
    """Invoke a gesdd-style SVD driver with reduced matrices and no overwrite."""
    return func(a, lwork=lwork, full_matrices=False, overwrite_a=False)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('variant', ['s', 'd'])
@pytest.mark.parametrize('mn', gesdd_sizes)
def test_gesdd(benchmark, mn, variant):
    """Benchmark ?gesdd (divide-and-conquer SVD); verify A ~= U @ diag(s) @ Vt."""
    m, n = mn
    rng = np.random.RandomState(1234)
    dtype = dtype_map[variant]

    a = np.array(rng.uniform(size=(m, n)), dtype=dtype, order='F')

    # Workspace query first, via the *_lwork companion wrapper.
    gesdd_lwork = ow.get_func('gesdd_lwork', variant)
    lwork, info = gesdd_lwork(m, n)
    lwork = int(lwork)
    assert info == 0

    gesdd = ow.get_func('gesdd', variant)
    u, s, vt, info = benchmark(run_gesdd, a, lwork, gesdd)

    assert info == 0

    tolerances = {'s': 1e-5, 'd': 1e-13}
    np.testing.assert_allclose(u @ np.diag(s) @ vt, a, atol=tolerances[variant])
|
||||
|
||||
|
||||
# linalg.eigh

syev_sizes = [50, 200]


def run_syev(a, lwork, func):
    """Invoke a syev-style symmetric eigensolver, overwriting ``a``."""
    return func(a, lwork=lwork, overwrite_a=True)
|
||||
|
||||
|
||||
@pytest.mark.parametrize('variant', ['s', 'd'])
@pytest.mark.parametrize('n', syev_sizes)
def test_syev(benchmark, n, variant):
    """Benchmark ?syev (symmetric eigendecomposition), overwriting a."""
    rndm = np.random.RandomState(1234)
    dtyp = dtype_map[variant]

    a = rndm.uniform(size=(n, n))
    a = np.asarray(a + a.T, dtype=dtyp, order='F')  # symmetrize
    # Fix: removed the unused local `a_ = a.copy()` from the original
    # (presumably intended for a residual check that was never written).

    dsyev_lwork = ow.get_func('syev_lwork', variant)
    lwork, info = dsyev_lwork(n)
    lwork = int(lwork)
    assert info == 0

    syev = ow.get_func('syev', variant)
    w, v, info = benchmark(run_syev, a, lwork, syev)

    assert info == 0
    assert a is v  # overwrite_a=True: eigenvectors land in a's buffer
|
||||
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
#
# Taken from SciPy (of course)
#
project(
  'openblas-wrap',
  'c', 'fortran',
  version: '0.1',
  license: 'BSD-3',
  meson_version: '>= 1.1.0',
  default_options: [
    'buildtype=debugoptimized',
    'b_ndebug=if-release',
    'c_std=c17',
    'fortran_std=legacy',
  ],
)

py3 = import('python').find_installation(pure: false)
py3_dep = py3.dependency()

cc = meson.get_compiler('c')

# Silence warning classes that the generated f2py C code trips constantly.
_global_c_args = cc.get_supported_arguments(
  '-Wno-unused-but-set-variable',
  '-Wno-unused-function',
  '-Wno-conversion',
  '-Wno-misleading-indentation',
)
add_project_arguments(_global_c_args, language : 'c')

# We need -lm for all C code (assuming it uses math functions, which is safe to
# assume for SciPy). For C++ it isn't needed, because libstdc++/libc++ is
# guaranteed to depend on it. For Fortran code, Meson already adds `-lm`.
m_dep = cc.find_library('m', required : false)
if m_dep.found()
  add_project_link_arguments('-lm', language : 'c')
endif

generate_f2pymod = find_program('openblas_wrap/generate_f2pymod.py')

openblas = dependency('openblas', method: 'pkg-config', required: true)
openblas_dep = declare_dependency(
  dependencies: openblas,
  compile_args: []
)

subdir('openblas_wrap')
|
|
@ -0,0 +1,17 @@
|
|||
"""
|
||||
Trampoline to hide the LAPACK details (scipy.lapack.linalg or scipy_openblas32 or...)
|
||||
from benchmarking.
|
||||
"""
|
||||
|
||||
__version__ = "0.1"
|
||||
|
||||
|
||||
from . import _flapack
|
||||
|
||||
PREFIX = ''
|
||||
|
||||
|
||||
def get_func(name, variant):
|
||||
"""get_func('gesv', 'c') -> cgesv etc."""
|
||||
return getattr(_flapack, PREFIX + variant + name)
|
||||
|
|
@ -0,0 +1,417 @@
|
|||
!
|
||||
! Taken from scipy/linalg
|
||||
!
|
||||
! Shorthand notations
|
||||
!
|
||||
! <tchar=s,d,cs,zd>
|
||||
! <tchar2c=cs,zd>
|
||||
!
|
||||
! <prefix2=s,d>
|
||||
! <prefix2c=c,z>
|
||||
! <prefix3=s,sc>
|
||||
! <prefix4=d,dz>
|
||||
! <prefix6=s,d,c,z,c,z>
|
||||
!
|
||||
! <ftype2=real,double precision>
|
||||
! <ftype2c=complex,double complex>
|
||||
! <ftype3=real,complex>
|
||||
! <ftype4=double precision,double complex>
|
||||
! <ftypereal3=real,real>
|
||||
! <ftypereal4=double precision,double precision>
|
||||
! <ftype6=real,double precision,complex,double complex,\2,\3>
|
||||
! <ftype6creal=real,double precision,complex,double complex,\0,\1>
|
||||
!
|
||||
! <ctype2=float,double>
|
||||
! <ctype2c=complex_float,complex_double>
|
||||
! <ctype3=float,complex_float>
|
||||
! <ctype4=double,complex_double>
|
||||
! <ctypereal3=float,float>
|
||||
! <ctypereal4=double,double>
|
||||
! <ctype6=float,double,complex_float,complex_double,\2,\3>
|
||||
! <ctype6creal=float,double,complex_float,complex_double,\0,\1>
|
||||
!
|
||||
!
|
||||
! Level 1 BLAS
|
||||
!
|
||||
|
||||
|
||||
python module _flapack
|
||||
usercode '''
|
||||
#define F_INT int
|
||||
'''
|
||||
|
||||
interface
|
||||
|
||||
|
||||
subroutine <prefix>axpy(n,a,x,offx,incx,y,offy,incy)
|
||||
! Calculate z = a*x+y, where a is scalar.
|
||||
|
||||
callstatement (*f2py_func)(&n,&a,x+offx,&incx,y+offy,&incy)
|
||||
callprotoargument F_INT*,<ctype>*,<ctype>*,F_INT*,<ctype>*,F_INT*
|
||||
|
||||
<ftype> dimension(*), intent(in) :: x
|
||||
<ftype> dimension(*), intent(in,out,out=z) :: y
|
||||
<ftype> optional, intent(in):: a=<1.0,\0,(1.0\,0.0),\2>
|
||||
integer optional, intent(in),check(incx>0||incx<0) :: incx = 1
|
||||
integer optional, intent(in),check(incy>0||incy<0) :: incy = 1
|
||||
integer optional, intent(in),depend(x) :: offx=0
|
||||
integer optional, intent(in),depend(y) :: offy=0
|
||||
check(offx>=0 && offx<len(x)) :: offx
|
||||
check(offy>=0 && offy<len(y)) :: offy
|
||||
integer optional, intent(in),depend(x,incx,offx,y,incy,offy) :: &
|
||||
n = (len(x)-offx)/abs(incx)
|
||||
check(len(x)-offx>(n-1)*abs(incx)) :: n
|
||||
check(len(y)-offy>(n-1)*abs(incy)) :: n
|
||||
|
||||
end subroutine <prefix>axpy
|
||||
|
||||
function ddot(n,x,offx,incx,y,offy,incy) result (xy)
|
||||
! Computes a vector-vector dot product.
|
||||
|
||||
callstatement ddot_return_value = (*f2py_func)(&n,x+offx,&incx,y+offy,&incy)
|
||||
callprotoargument F_INT*,double*,F_INT*,double*,F_INT*
|
||||
intent(c) ddot
|
||||
fortranname F_FUNC(ddot,DDOT)
|
||||
|
||||
double precision dimension(*), intent(in) :: x
|
||||
double precision dimension(*), intent(in) :: y
|
||||
double precision ddot,xy
|
||||
integer optional, intent(in),check(incx>0||incx<0) :: incx = 1
|
||||
integer optional, intent(in),check(incy>0||incy<0) :: incy = 1
|
||||
integer optional, intent(in),depend(x) :: offx=0
|
||||
integer optional, intent(in),depend(y) :: offy=0
|
||||
check(offx>=0 && offx<len(x)) :: offx
|
||||
check(offy>=0 && offy<len(y)) :: offy
|
||||
integer optional, intent(in),depend(x,incx,offx,y,incy,offy) :: &
|
||||
n = (len(x)-offx)/abs(incx)
|
||||
check(len(x)-offx>(n-1)*abs(incx)) :: n
|
||||
check(len(y)-offy>(n-1)*abs(incy)) :: n
|
||||
|
||||
end function ddot
|
||||
|
||||
|
||||
function <prefix4>nrm2(n,x,offx,incx) result(n2)
|
||||
|
||||
<ftypereal4> <prefix4>nrm2, n2
|
||||
|
||||
callstatement <prefix4>nrm2_return_value = (*f2py_func)(&n,x+offx,&incx)
|
||||
callprotoargument F_INT*,<ctype4>*,F_INT*
|
||||
intent(c) <prefix4>nrm2
|
||||
fortranname F_FUNC(<prefix4>nrm2,<D,DZ>NRM2)
|
||||
|
||||
<ftype4> dimension(*),intent(in) :: x
|
||||
|
||||
integer optional, intent(in),check(incx>0) :: incx = 1
|
||||
|
||||
integer optional,intent(in),depend(x) :: offx=0
|
||||
check(offx>=0 && offx<len(x)) :: offx
|
||||
|
||||
integer optional,intent(in),depend(x,incx,offx) :: n = (len(x)-offx)/abs(incx)
|
||||
check(len(x)-offx>(n-1)*abs(incx)) :: n
|
||||
|
||||
end function <prefix4>nrm2
|
||||
|
||||
|
||||
!
|
||||
! Level 2 BLAS
|
||||
!
|
||||
|
||||
|
||||
subroutine <prefix>gemv(m,n,alpha,a,x,beta,y,offx,incx,offy,incy,trans,rows,cols,ly)
|
||||
! Computes a matrix-vector product using a general matrix
|
||||
!
|
||||
! y = gemv(alpha,a,x,beta=0,y=0,offx=0,incx=1,offy=0,incy=0,trans=0)
|
||||
! Calculate y <- alpha * op(A) * x + beta * y
|
||||
|
||||
callstatement (*f2py_func)((trans?(trans==2?"C":"T"):"N"),&m,&n,&alpha,a,&m, &
|
||||
x+offx,&incx,&beta,y+offy,&incy)
|
||||
callprotoargument char*,F_INT*,F_INT*,<ctype>*,<ctype>*,F_INT*,<ctype>*,F_INT*,<ctype>*, &
|
||||
<ctype>*,F_INT*
|
||||
|
||||
integer optional, intent(in), check(trans>=0 && trans <=2) :: trans = 0
|
||||
integer optional, intent(in), check(incx>0||incx<0) :: incx = 1
|
||||
integer optional, intent(in), check(incy>0||incy<0) :: incy = 1
|
||||
<ftype> intent(in) :: alpha
|
||||
<ftype> intent(in), optional :: beta = <0.0,\0,(0.0\,0.0),\2>
|
||||
|
||||
<ftype> dimension(*), intent(in) :: x
|
||||
<ftype> dimension(ly), intent(in,copy,out), depend(ly),optional :: y
|
||||
integer intent(hide), depend(incy,rows,offy) :: ly = &
|
||||
(y_capi==Py_None?1+offy+(rows-1)*abs(incy):-1)
|
||||
<ftype> dimension(m,n), intent(in) :: a
|
||||
integer depend(a), intent(hide):: m = shape(a,0)
|
||||
integer depend(a), intent(hide):: n = shape(a,1)
|
||||
|
||||
integer optional, intent(in) :: offx=0
|
||||
integer optional, intent(in) :: offy=0
|
||||
check(offx>=0 && offx<len(x)) :: x
|
||||
check(len(x)>offx+(cols-1)*abs(incx)) :: x
|
||||
depend(offx,cols,incx) :: x
|
||||
|
||||
check(offy>=0 && offy<len(y)) :: y
|
||||
check(len(y)>offy+(rows-1)*abs(incy)) :: y
|
||||
depend(offy,rows,incy) :: y
|
||||
|
||||
integer depend(m,n,trans), intent(hide) :: rows = (trans?n:m)
|
||||
integer depend(m,n,trans), intent(hide) :: cols = (trans?m:n)
|
||||
|
||||
end subroutine <prefix>gemv
|
||||
|
||||
|
||||
subroutine <prefix>gbmv(m,n,kl,ku,alpha,a,lda,x,incx,offx,beta,y,incy,offy,trans,ly)
|
||||
! Performs one of the matrix-vector operations
|
||||
!
|
||||
! y := alpha*A*x + beta*y, or y := alpha*A**T*x + beta*y,
|
||||
! or y := alpha*A**H*x + beta*y,
|
||||
!
|
||||
! where alpha and beta are scalars, x and y are vectors and A is an
|
||||
! m by n band matrix, with kl sub-diagonals and ku super-diagonals.
|
||||
|
||||
callstatement (*f2py_func)((trans?(trans==2?"C":"T"):"N"),&m,&n,&kl,&ku,&alpha,a,&lda,x+offx,&incx,&beta,y+offy,&incy)
|
||||
callprotoargument char*,F_INT*,F_INT*,F_INT*,F_INT*,<ctype>*,<ctype>*,F_INT*,<ctype>*,F_INT*,<ctype>*,<ctype>*,F_INT*
|
||||
|
||||
integer optional,intent(in),check(trans>=0 && trans <=2) :: trans = 0
|
||||
integer intent(in), depend(ku,kl),check(m>=ku+kl+1) :: m
|
||||
integer intent(in),check(n>=0&&n==shape(a,1)),depend(a) :: n
|
||||
integer intent(in),check(kl>=0) :: kl
|
||||
integer intent(in),check(ku>=0) :: ku
|
||||
integer intent(hide),depend(a) :: lda = MAX(shape(a,0),1)
|
||||
integer optional, intent(in),check(incx>0||incx<0) :: incx = 1
|
||||
integer optional, intent(in),check(incy>0||incy<0) :: incy = 1
|
||||
integer intent(hide),depend(m,n,incy,offy,trans) :: ly = &
|
||||
(y_capi==Py_None?1+offy+(trans==0?m-1:n-1)*abs(incy):-1)
|
||||
integer optional, intent(in) :: offx=0
|
||||
integer optional, intent(in) :: offy=0
|
||||
|
||||
<ftype> intent(in) :: alpha
|
||||
<ftype> intent(in),optional :: beta = <0.0,\0,(0.0\,0.0),\2>
|
||||
|
||||
<ftype> dimension(lda,n),intent(in) :: a
|
||||
|
||||
<ftype> dimension(ly), intent(in,out,copy,out=yout),depend(ly),optional :: y
|
||||
check(offy>=0 && offy<len(y)) :: y
|
||||
check(len(y)>offy+(trans==0?m-1:n-1)*abs(incy)) :: y
|
||||
depend(offy,n,incy) :: y
|
||||
|
||||
<ftype> dimension(*), intent(in) :: x
|
||||
check(offx>=0 && offx<len(x)) :: x
|
||||
check(len(x)>offx+(trans==0?n-1:m-1)*abs(incx)) :: x
|
||||
depend(offx,n,incx) :: x
|
||||
|
||||
end subroutine <prefix>gbmv
|
||||
|
||||
|
||||
|
||||
!
|
||||
! Level 3 BLAS
|
||||
!
|
||||
|
||||
|
||||
subroutine <prefix>gemm(m,n,k,alpha,a,b,beta,c,trans_a,trans_b,lda,ka,ldb,kb)
|
||||
! Computes a scalar-matrix-matrix product and adds the result to a
|
||||
! scalar-matrix product.
|
||||
!
|
||||
! c = gemm(alpha,a,b,beta=0,c=0,trans_a=0,trans_b=0,overwrite_c=0)
|
||||
! Calculate C <- alpha * op(A) * op(B) + beta * C
|
||||
|
||||
callstatement (*f2py_func)((trans_a?(trans_a==2?"C":"T"):"N"), &
|
||||
(trans_b?(trans_b==2?"C":"T"):"N"),&m,&n,&k,&alpha,a,&lda,b,&ldb,&beta,c,&m)
|
||||
callprotoargument char*,char*,F_INT*,F_INT*,F_INT*,<ctype>*,<ctype>*,F_INT*,<ctype>*, &
|
||||
F_INT*,<ctype>*,<ctype>*,F_INT*
|
||||
|
||||
integer optional,intent(in),check(trans_a>=0 && trans_a <=2) :: trans_a = 0
|
||||
integer optional,intent(in),check(trans_b>=0 && trans_b <=2) :: trans_b = 0
|
||||
<ftype> intent(in) :: alpha
|
||||
<ftype> intent(in),optional :: beta = <0.0,\0,(0.0\,0.0),\2>
|
||||
|
||||
<ftype> dimension(lda,ka),intent(in) :: a
|
||||
<ftype> dimension(ldb,kb),intent(in) :: b
|
||||
<ftype> dimension(m,n),intent(in,out,copy),depend(m,n),optional :: c
|
||||
check(shape(c,0)==m && shape(c,1)==n) :: c
|
||||
|
||||
integer depend(a),intent(hide) :: lda = shape(a,0)
|
||||
integer depend(a),intent(hide) :: ka = shape(a,1)
|
||||
integer depend(b),intent(hide) :: ldb = shape(b,0)
|
||||
integer depend(b),intent(hide) :: kb = shape(b,1)
|
||||
|
||||
integer depend(a,trans_a,ka,lda),intent(hide):: m = (trans_a?ka:lda)
|
||||
integer depend(a,trans_a,ka,lda),intent(hide):: k = (trans_a?lda:ka)
|
||||
integer depend(b,trans_b,kb,ldb,k),intent(hide),check(trans_b?kb==k:ldb==k) :: &
|
||||
n = (trans_b?ldb:kb)
|
||||
|
||||
end subroutine <prefix>gemm
|
||||
|
||||
|
||||
subroutine <prefix6><sy,\0,\0,\0,he,he>rk(n,k,alpha,a,beta,c,trans,lower,lda,ka)
|
||||
! performs one of the symmetric rank k operations
|
||||
! C := alpha*A*A**T + beta*C, or C := alpha*A**T*A + beta*C,
|
||||
!
|
||||
! c = syrk(alpha,a,beta=0,c=0,trans=0,lower=0,overwrite_c=0)
|
||||
!
|
||||
callstatement (*f2py_func)((lower?"L":"U"), &
|
||||
(trans?(trans==2?"C":"T"):"N"), &n,&k,&alpha,a,&lda,&beta,c,&n)
|
||||
callprotoargument char*,char*,F_INT*,F_INT*,<ctype6>*,<ctype6>*,F_INT*,<ctype6>*, &
|
||||
<ctype6>*,F_INT*
|
||||
|
||||
integer optional, intent(in),check(lower==0||lower==1) :: lower = 0
|
||||
integer optional,intent(in),check(trans>=0 && trans <=2) :: trans = 0
|
||||
|
||||
<ftype6> intent(in) :: alpha
|
||||
<ftype6> intent(in),optional :: beta = <0.0,\0,(0.0\,0.0),\2,\2,\2>
|
||||
|
||||
<ftype6> dimension(lda,ka),intent(in) :: a
|
||||
<ftype6> dimension(n,n),intent(in,out,copy),depend(n),optional :: c
|
||||
check(shape(c,0)==n && shape(c,1)==n) :: c
|
||||
|
||||
integer depend(a),intent(hide) :: lda = shape(a,0)
|
||||
integer depend(a),intent(hide) :: ka = shape(a,1)
|
||||
|
||||
integer depend(a, trans, ka, lda), intent(hide) :: n = (trans ? ka : lda)
|
||||
integer depend(a, trans, ka, lda), intent(hide) :: k = (trans ? lda : ka)
|
||||
|
||||
end subroutine <prefix6><sy,\0,\0,\0,he,he>rk
|
||||
|
||||
|
||||
!
|
||||
! LAPACK
|
||||
!
|
||||
|
||||
subroutine <prefix>gesv(n,nrhs,a,piv,b,info)
|
||||
! lu,piv,x,info = gesv(a,b,overwrite_a=0,overwrite_b=0)
|
||||
! Solve A * X = B.
|
||||
! A = P * L * U
|
||||
! U is upper diagonal triangular, L is unit lower triangular,
|
||||
! piv pivots columns.
|
||||
|
||||
callstatement {F_INT i;(*f2py_func)(&n,&nrhs,a,&n,piv,b,&n,&info);for(i=0;i\<n;--piv[i++]);}
|
||||
callprotoargument F_INT*,F_INT*,<ctype>*,F_INT*,F_INT*,<ctype>*,F_INT*,F_INT*
|
||||
|
||||
integer depend(a),intent(hide):: n = shape(a,0)
|
||||
integer depend(b),intent(hide):: nrhs = shape(b,1)
|
||||
<ftype> dimension(n,n),check(shape(a,0)==shape(a,1)) :: a
|
||||
integer dimension(n),depend(n),intent(out) :: piv
|
||||
<ftype> dimension(n,nrhs),check(shape(a,0)==shape(b,0)),depend(n) :: b
|
||||
integer intent(out)::info
|
||||
intent(in,out,copy,out=x) b
|
||||
intent(in,out,copy,out=lu) a
|
||||
end subroutine <prefix>gesv
|
||||
|
||||
|
||||
subroutine <prefix2>gesdd(m,n,minmn,u0,u1,vt0,vt1,a,compute_uv,full_matrices,u,s,vt,work,lwork,iwork,info)
|
||||
! u,s,vt,info = gesdd(a,compute_uv=1,lwork=..,overwrite_a=0)
|
||||
! Compute the singular value decomposition (SVD) using divide and conquer:
|
||||
! A = U * SIGMA * transpose(V)
|
||||
! A - M x N matrix
|
||||
! U - M x M matrix or min(M,N) x N if full_matrices=False
|
||||
! SIGMA - M x N zero matrix with a main diagonal filled with min(M,N)
|
||||
! singular values
|
||||
! transpose(V) - N x N matrix or N x min(M,N) if full_matrices=False
|
||||
|
||||
callstatement (*f2py_func)((compute_uv?(full_matrices?"A":"S"):"N"),&m,&n,a,&m,s,u,&u0,vt,&vt0,work,&lwork,iwork,&info)
|
||||
callprotoargument char*,F_INT*,F_INT*,<ctype2>*,F_INT*,<ctype2>*,<ctype2>*,F_INT*,<ctype2>*,F_INT*,<ctype2>*,F_INT*,F_INT*,F_INT*
|
||||
|
||||
integer intent(in),optional,check(compute_uv==0||compute_uv==1):: compute_uv = 1
|
||||
integer intent(in),optional,check(full_matrices==0||full_matrices==1):: full_matrices = 1
|
||||
integer intent(hide),depend(a):: m = shape(a,0)
|
||||
integer intent(hide),depend(a):: n = shape(a,1)
|
||||
integer intent(hide),depend(m,n):: minmn = MIN(m,n)
|
||||
integer intent(hide),depend(compute_uv,minmn) :: u0 = (compute_uv?m:1)
|
||||
integer intent(hide),depend(compute_uv,minmn, full_matrices) :: u1 = (compute_uv?(full_matrices?m:minmn):1)
|
||||
integer intent(hide),depend(compute_uv,minmn, full_matrices) :: vt0 = (compute_uv?(full_matrices?n:minmn):1)
|
||||
integer intent(hide),depend(compute_uv,minmn) :: vt1 = (compute_uv?n:1)
|
||||
<ftype2> dimension(m,n),intent(in,copy,aligned8) :: a
|
||||
<ftype2> dimension(minmn),intent(out),depend(minmn) :: s
|
||||
<ftype2> dimension(u0,u1),intent(out),depend(u0, u1) :: u
|
||||
<ftype2> dimension(vt0,vt1),intent(out),depend(vt0, vt1) :: vt
|
||||
<ftype2> dimension(lwork),intent(hide,cache),depend(lwork) :: work
|
||||
integer optional,intent(in),depend(minmn,compute_uv) &
|
||||
:: lwork = max((compute_uv?4*minmn*minmn+MAX(m,n)+9*minmn:MAX(14*minmn+4,10*minmn+2+25*(25+8))+MAX(m,n)),1)
|
||||
integer intent(hide,cache),dimension(8*minmn),depend(minmn) :: iwork
|
||||
integer intent(out)::info
|
||||
|
||||
end subroutine <prefix2>gesdd
|
||||
|
||||
subroutine <prefix2>gesdd_lwork(m,n,minmn,u0,vt0,a,compute_uv,full_matrices,u,s,vt,work,lwork,iwork,info)
|
||||
! LWORK computation for (S/D)GESDD
|
||||
|
||||
fortranname <prefix2>gesdd
|
||||
callstatement (*f2py_func)((compute_uv?(full_matrices?"A":"S"):"N"),&m,&n,&a,&m,&s,&u,&u0,&vt,&vt0,&work,&lwork,&iwork,&info)
|
||||
callprotoargument char*,F_INT*,F_INT*,<ctype2>*,F_INT*,<ctype2>*,<ctype2>*,F_INT*,<ctype2>*,F_INT*,<ctype2>*,F_INT*,F_INT*,F_INT*
|
||||
|
||||
integer intent(in),optional,check(compute_uv==0||compute_uv==1):: compute_uv = 1
|
||||
integer intent(in),optional,check(full_matrices==0||full_matrices==1):: full_matrices = 1
|
||||
integer intent(in) :: m
|
||||
integer intent(in) :: n
|
||||
integer intent(hide),depend(m,n):: minmn = MIN(m,n)
|
||||
integer intent(hide),depend(compute_uv,minmn) :: u0 = (compute_uv?m:1)
|
||||
integer intent(hide),depend(compute_uv,minmn, full_matrices) :: vt0 = (compute_uv?(full_matrices?n:minmn):1)
|
||||
<ftype2> intent(hide) :: a
|
||||
<ftype2> intent(hide) :: s
|
||||
<ftype2> intent(hide) :: u
|
||||
<ftype2> intent(hide) :: vt
|
||||
<ftype2> intent(out) :: work
|
||||
integer intent(hide) :: lwork = -1
|
||||
integer intent(hide) :: iwork
|
||||
integer intent(out) :: info
|
||||
|
||||
end subroutine <prefix2>gesdd_lwork
|
||||
|
||||
|
||||
subroutine <prefix2>syev(compute_v,lower,n,w,a,lda,work,lwork,info)
|
||||
! w,v,info = syev(a,compute_v=1,lower=0,lwork=3*n-1,overwrite_a=0)
|
||||
! Compute all eigenvalues and, optionally, eigenvectors of a
|
||||
! real symmetric matrix A.
|
||||
!
|
||||
! Performance tip:
|
||||
! If compute_v=0 then set also overwrite_a=1.
|
||||
|
||||
callstatement (*f2py_func)((compute_v?"V":"N"),(lower?"L":"U"),&n,a,&lda,w,work,&lwork,&info)
|
||||
callprotoargument char*,char*,F_INT*,<ctype2>*,F_INT*,<ctype2>*,<ctype2>*,F_INT*,F_INT*
|
||||
|
||||
integer optional,intent(in):: compute_v = 1
|
||||
check(compute_v==1||compute_v==0) compute_v
|
||||
integer optional,intent(in),check(lower==0||lower==1) :: lower = 0
|
||||
|
||||
integer intent(hide),depend(a):: n = shape(a,0)
|
||||
integer intent(hide),depend(a):: lda = MAX(1,shape(a,0))
|
||||
<ftype2> dimension(n,n),check(shape(a,0)==shape(a,1)) :: a
|
||||
intent(in,copy,out,out=v) :: a
|
||||
|
||||
<ftype2> dimension(n),intent(out),depend(n) :: w
|
||||
|
||||
integer optional,intent(in),depend(n) :: lwork=max(3*n-1,1)
|
||||
check(lwork>=3*n-1) :: lwork
|
||||
<ftype2> dimension(lwork),intent(hide),depend(lwork) :: work
|
||||
|
||||
integer intent(out) :: info
|
||||
|
||||
end subroutine <prefix2>syev
|
||||
|
||||
|
||||
subroutine <prefix2>syev_lwork(lower,n,w,a,lda,work,lwork,info)
|
||||
! LWORK routines for syev
|
||||
|
||||
fortranname <prefix2>syev
|
||||
|
||||
callstatement (*f2py_func)("N",(lower?"L":"U"),&n,&a,&lda,&w,&work,&lwork,&info)
|
||||
callprotoargument char*,char*,F_INT*,<ctype2>*,F_INT*,<ctype2>*,<ctype2>*,F_INT*,F_INT*
|
||||
|
||||
integer intent(in):: n
|
||||
integer optional,intent(in),check(lower==0||lower==1) :: lower = 0
|
||||
|
||||
integer intent(hide),depend(n):: lda = MAX(1, n)
|
||||
<ftype2> intent(hide):: a
|
||||
<ftype2> intent(hide):: w
|
||||
integer intent(hide):: lwork = -1
|
||||
|
||||
<ftype2> intent(out):: work
|
||||
integer intent(out):: info
|
||||
|
||||
end subroutine <prefix2>syev_lwork
|
||||
|
||||
end interface
|
||||
|
||||
end python module _flapack
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,299 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
Process f2py template files (`filename.pyf.src` -> `filename.pyf`)
|
||||
|
||||
Usage: python generate_pyf.py filename.pyf.src -o filename.pyf
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import subprocess
|
||||
import argparse
|
||||
|
||||
|
||||
# START OF CODE VENDORED FROM `numpy.distutils.from_template`
|
||||
#############################################################
|
||||
"""
|
||||
process_file(filename)
|
||||
|
||||
takes templated file .xxx.src and produces .xxx file where .xxx
|
||||
is .pyf .f90 or .f using the following template rules:
|
||||
|
||||
'<..>' denotes a template.
|
||||
|
||||
All function and subroutine blocks in a source file with names that
|
||||
contain '<..>' will be replicated according to the rules in '<..>'.
|
||||
|
||||
The number of comma-separated words in '<..>' will determine the number of
|
||||
replicates.
|
||||
|
||||
'<..>' may have two different forms, named and short. For example,
|
||||
|
||||
named:
|
||||
<p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
|
||||
'd', 's', 'z', and 'c' for each replicate of the block.
|
||||
|
||||
<_c> is already defined: <_c=s,d,c,z>
|
||||
<_t> is already defined: <_t=real,double precision,complex,double complex>
|
||||
|
||||
short:
|
||||
<s,d,c,z>, a short form of the named, useful when no <p> appears inside
|
||||
a block.
|
||||
|
||||
In general, '<..>' contains a comma separated list of arbitrary
|
||||
expressions. If these expression must contain a comma|leftarrow|rightarrow,
|
||||
then prepend the comma|leftarrow|rightarrow with a backslash.
|
||||
|
||||
If an expression matches '\\<index>' then it will be replaced
|
||||
by <index>-th expression.
|
||||
|
||||
Note that all '<..>' forms in a block must have the same number of
|
||||
comma-separated entries.
|
||||
|
||||
Predefined named template rules:
|
||||
<prefix=s,d,c,z>
|
||||
<ftype=real,double precision,complex,double complex>
|
||||
<ftypereal=real,double precision,\\0,\\1>
|
||||
<ctype=float,double,complex_float,complex_double>
|
||||
<ctypereal=float,double,\\0,\\1>
|
||||
"""
|
||||
|
||||
routine_start_re = re.compile(
    r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b',
    re.I
)
routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
function_start_re = re.compile(r'\n     (\$|\*)\s*function\b', re.I)


def parse_structure(astr):
    """Return (start, end) spans, one per subroutine/function block in astr,
    marking the regions to be template-expanded.
    """
    spans = []
    pos = 0
    while True:
        m = routine_start_re.search(astr, pos)
        if m is None:
            break
        start = m.start()
        if function_start_re.match(astr, start, m.end()):
            # The match begins on a fixed-form continuation line; walk back
            # over preceding '\n     $' continuations so the whole statement
            # is inside the span.
            while True:
                i = astr.rfind('\n', pos, start)
                if i == -1:
                    break
                start = i
                if astr[i:i+7] != '\n     $':
                    break
            start += 1
        m = routine_end_re.search(astr, m.end())
        pos = end = m.end() - 1 if m else len(astr)
        spans.append((start, end))
    return spans
|
||||
|
||||
template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
|
||||
named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
|
||||
list_re = re.compile(r"<\s*((.*?))\s*>")
|
||||
|
||||
def find_repl_patterns(astr):
|
||||
reps = named_re.findall(astr)
|
||||
names = {}
|
||||
for rep in reps:
|
||||
name = rep[0].strip() or unique_key(names)
|
||||
repl = rep[1].replace(r'\,', '@comma@')
|
||||
thelist = conv(repl)
|
||||
names[name] = thelist
|
||||
return names
|
||||
|
||||
def find_and_remove_repl_patterns(astr):
|
||||
names = find_repl_patterns(astr)
|
||||
astr = re.subn(named_re, '', astr)[0]
|
||||
return astr, names
|
||||
|
||||
item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
|
||||
def conv(astr):
|
||||
b = astr.split(',')
|
||||
l = [x.strip() for x in b]
|
||||
for i in range(len(l)):
|
||||
m = item_re.match(l[i])
|
||||
if m:
|
||||
j = int(m.group('index'))
|
||||
l[i] = l[j]
|
||||
return ','.join(l)
|
||||
|
||||
def unique_key(adict):
|
||||
""" Obtain a unique key given a dictionary."""
|
||||
allkeys = list(adict.keys())
|
||||
done = False
|
||||
n = 1
|
||||
while not done:
|
||||
newkey = '__l%s' % (n)
|
||||
if newkey in allkeys:
|
||||
n += 1
|
||||
else:
|
||||
done = True
|
||||
return newkey
|
||||
|
||||
|
||||
template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
def expand_sub(substr, names):
    """Replicate one routine block once per entry in its template rules."""
    # Protect escaped angle brackets while <...> templates are manipulated.
    substr = substr.replace(r'\>', '@rightarrow@')
    substr = substr.replace(r'\<', '@leftarrow@')
    lnames = find_repl_patterns(substr)
    substr = named_re.sub(r"<\1>", substr)  # strip definitions, keep markers

    def listrepl(mobj):
        # Turn a short-form list '<a,b,...>' into a reference to a named rule.
        thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
        if template_name_re.match(thelist):
            return "<%s>" % (thelist)
        found = None
        for key in lnames.keys():  # reuse an existing rule when possible
            if lnames[key] == thelist:
                found = key
        if found is None:  # otherwise register the list under a fresh name
            found = unique_key(lnames)
            lnames[found] = thelist
        return "<%s>" % found

    substr = list_re.sub(listrepl, substr)  # all lists become named templates
    # new names are constructed as needed

    numsubs = None
    base_rule = None
    rules = {}
    for r in template_re.findall(substr):
        if r in rules:
            continue
        thelist = lnames.get(r, names.get(r, None))
        if thelist is None:
            raise ValueError('No replicates found for <%s>' % (r))
        if r not in names and not thelist.startswith('_'):
            names[r] = thelist
        rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
        num = len(rule)

        if numsubs is None:
            # First rule fixes the replication count for the whole block.
            numsubs = num
            rules[r] = rule
            base_rule = r
        elif num == numsubs:
            rules[r] = rule
        else:
            print("Mismatch in number of replacements (base <{}={}>) "
                  "for <{}={}>. Ignoring."
                  .format(base_rule, ','.join(rules[base_rule]), r, thelist))
    if not rules:
        return substr

    def namerepl(mobj):
        # k is the current replicate index from the loop below.
        name = mobj.group(1)
        return rules.get(name, (k + 1) * [name])[k]

    newstr = ''
    for k in range(numsubs):
        newstr += template_re.sub(namerepl, substr) + '\n\n'

    newstr = newstr.replace('@rightarrow@', '>')
    newstr = newstr.replace('@leftarrow@', '<')
    return newstr
|
||||
|
||||
def process_str(allstr):
|
||||
newstr = allstr
|
||||
writestr = ''
|
||||
|
||||
struct = parse_structure(newstr)
|
||||
|
||||
oldend = 0
|
||||
names = {}
|
||||
names.update(_special_names)
|
||||
for sub in struct:
|
||||
cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]])
|
||||
writestr += cleanedstr
|
||||
names.update(defs)
|
||||
writestr += expand_sub(newstr[sub[0]:sub[1]], names)
|
||||
oldend = sub[1]
|
||||
writestr += newstr[oldend:]
|
||||
|
||||
return writestr
|
||||
|
||||
include_src_re = re.compile(
|
||||
r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+\.src)['\"]",
|
||||
re.I
|
||||
)
|
||||
|
||||
def resolve_includes(source):
|
||||
d = os.path.dirname(source)
|
||||
with open(source) as fid:
|
||||
lines = []
|
||||
for line in fid:
|
||||
m = include_src_re.match(line)
|
||||
if m:
|
||||
fn = m.group('name')
|
||||
if not os.path.isabs(fn):
|
||||
fn = os.path.join(d, fn)
|
||||
if os.path.isfile(fn):
|
||||
lines.extend(resolve_includes(fn))
|
||||
else:
|
||||
lines.append(line)
|
||||
else:
|
||||
lines.append(line)
|
||||
return lines
|
||||
|
||||
def process_file(source):
|
||||
lines = resolve_includes(source)
|
||||
return process_str(''.join(lines))
|
||||
|
||||
_special_names = find_repl_patterns('''
|
||||
<_c=s,d,c,z>
|
||||
<_t=real,double precision,complex,double complex>
|
||||
<prefix=s,d,c,z>
|
||||
<ftype=real,double precision,complex,double complex>
|
||||
<ctype=float,double,complex_float,complex_double>
|
||||
<ftypereal=real,double precision,\\0,\\1>
|
||||
<ctypereal=float,double,\\0,\\1>
|
||||
''')
|
||||
|
||||
# END OF CODE VENDORED FROM `numpy.distutils.from_template`
|
||||
###########################################################
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("infile", type=str,
|
||||
help="Path to the input file")
|
||||
parser.add_argument("-o", "--outdir", type=str,
|
||||
help="Path to the output directory")
|
||||
args = parser.parse_args()
|
||||
|
||||
if not args.infile.endswith(('.pyf', '.pyf.src', '.f.src')):
|
||||
raise ValueError(f"Input file has unknown extension: {args.infile}")
|
||||
|
||||
outdir_abs = os.path.join(os.getcwd(), args.outdir)
|
||||
|
||||
# Write out the .pyf/.f file
|
||||
if args.infile.endswith(('.pyf.src', '.f.src')):
|
||||
code = process_file(args.infile)
|
||||
fname_pyf = os.path.join(args.outdir,
|
||||
os.path.splitext(os.path.split(args.infile)[1])[0])
|
||||
|
||||
with open(fname_pyf, 'w') as f:
|
||||
f.write(code)
|
||||
else:
|
||||
fname_pyf = args.infile
|
||||
|
||||
# Now invoke f2py to generate the C API module file
|
||||
if args.infile.endswith(('.pyf.src', '.pyf')):
|
||||
p = subprocess.Popen([sys.executable, '-m', 'numpy.f2py', fname_pyf,
|
||||
'--build-dir', outdir_abs], #'--quiet'],
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
||||
cwd=os.getcwd())
|
||||
out, err = p.communicate()
|
||||
if not (p.returncode == 0):
|
||||
raise RuntimeError(f"Writing {args.outfile} with f2py failed!\n"
|
||||
f"{out}\n"
|
||||
r"{err}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@ -0,0 +1,50 @@
|
|||
# find numpy & f2py includes
|
||||
inc_numpy = run_command(py3,
|
||||
['-c', 'import os; os.chdir(".."); import numpy; print(numpy.get_include())'],
|
||||
check : true
|
||||
).stdout().strip()
|
||||
|
||||
inc_f2py = run_command(py3,
|
||||
['-c', 'import os; os.chdir(".."); import numpy.f2py; print(numpy.f2py.get_include())'],
|
||||
check : true
|
||||
).stdout().strip()
|
||||
|
||||
|
||||
inc_np = include_directories(inc_numpy, inc_f2py)
|
||||
fortranobject_c = inc_f2py / 'fortranobject.c'
|
||||
|
||||
|
||||
fortranobject_lib = static_library('_fortranobject',
|
||||
fortranobject_c,
|
||||
# c_args: numpy_nodepr_api,
|
||||
dependencies: py3_dep,
|
||||
include_directories: [inc_np, inc_f2py],
|
||||
gnu_symbol_visibility: 'hidden',
|
||||
)
|
||||
fortranobject_dep = declare_dependency(
|
||||
link_with: fortranobject_lib,
|
||||
include_directories: [inc_np, inc_f2py],
|
||||
)
|
||||
|
||||
|
||||
# f2py generated wrappers
|
||||
|
||||
flapack_module = custom_target('flapack_module',
|
||||
output: ['_flapackmodule.c'],
|
||||
input: 'blas_lapack.pyf.src',
|
||||
command: [generate_f2pymod, '@INPUT@', '-o', '@OUTDIR@'],
|
||||
)
|
||||
|
||||
py3.extension_module('_flapack',
|
||||
flapack_module,
|
||||
link_args: [], # version_link_args,
|
||||
dependencies: [openblas_dep, fortranobject_dep],
|
||||
install: true,
|
||||
subdir: 'openblas_wrap'
|
||||
)
|
||||
|
||||
|
||||
py3.install_sources(
|
||||
['__init__.py'],
|
||||
subdir: 'openblas_wrap'
|
||||
)
|
|
@ -0,0 +1,12 @@
|
|||
libdir=/home/br/repos/OpenBLAS/
|
||||
includedir=/home/br/repos/OpenBLAS/
|
||||
openblas_config= OpenBLAS 0.3.27 DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=64
|
||||
version=0.3.27
|
||||
extralib=-lm -lpthread -lgfortran -lquadmath -L${libdir} -lopenblas
|
||||
Name: openblas
|
||||
Description: OpenBLAS is an optimized BLAS library based on GotoBLAS2 1.13 BSD version
|
||||
Version: ${version}
|
||||
URL: https://github.com/xianyi/OpenBLAS
|
||||
Libs: -L${libdir} -lopenblas
|
||||
Libs.private: ${extralib}
|
||||
Cflags: -I${includedir}
|
18
c_check
18
c_check
|
@ -197,10 +197,22 @@ fi
|
|||
no_lsx=0
|
||||
no_lasx=0
|
||||
if [ "$architecture" = "loongarch64" ]; then
|
||||
lasx_flags='-march=loongarch64'
|
||||
lsx_flags='-march=loongarch64'
|
||||
|
||||
tmpd="$(mktemp -d)"
|
||||
tmparch="$tmpd/arch.c"
|
||||
printf "void main(void){ }\n" >> "$tmparch"
|
||||
args="-march=loongarch64 -o $tmparch.o $tmparch"
|
||||
{
|
||||
$compiler_name $flags $args >/dev/null 2>&1
|
||||
} || {
|
||||
lasx_flags=''
|
||||
lsx_flags=''
|
||||
}
|
||||
|
||||
tmplsx="$tmpd/lsx.c"
|
||||
codelsx='"vadd.b $vr0, $vr0, $vr0"'
|
||||
lsx_flags='-march=loongarch64'
|
||||
printf "void main(void){ __asm__ volatile(%s);}\n" "$codelsx" >> "$tmplsx"
|
||||
args="$lsx_flags -o $tmplsx.o $tmplsx"
|
||||
{
|
||||
|
@ -211,7 +223,6 @@ if [ "$architecture" = "loongarch64" ]; then
|
|||
|
||||
tmplasx="$tmpd/lasx.c"
|
||||
codelasx='"xvadd.b $xr0, $xr0, $xr0"'
|
||||
lasx_flags='-march=loongarch64'
|
||||
printf "void main(void){ __asm__ volatile(%s);}\n" "$codelasx" >> "$tmplasx"
|
||||
args="$lasx_flags -o $tmplasx.o $tmplasx"
|
||||
{
|
||||
|
@ -345,6 +356,9 @@ if [ "$compiler" = "GCC" ]; then
|
|||
no_avx2=0
|
||||
oldgcc=0
|
||||
data=`$compiler_name -dumpversion`
|
||||
case "$data" in *-*)
|
||||
data="${data%-*}"
|
||||
esac
|
||||
case "$data" in *.*.*)
|
||||
data="${data%.*}"
|
||||
esac
|
||||
|
|
20
cblas.h
20
cblas.h
|
@ -26,6 +26,11 @@ char* openblas_get_config(void);
|
|||
/*Get the CPU corename on runtime.*/
|
||||
char* openblas_get_corename(void);
|
||||
|
||||
/*Set the threading backend to a custom callback.*/
|
||||
typedef void (*openblas_dojob_callback)(int thread_num, void *jobdata, int dojob_data);
|
||||
typedef void (*openblas_threads_callback)(int sync, openblas_dojob_callback dojob, int numjobs, size_t jobdata_elsize, void *jobdata, int dojob_data);
|
||||
void openblas_set_threads_callback_function(openblas_threads_callback callback);
|
||||
|
||||
#ifdef OPENBLAS_OS_LINUX
|
||||
/* Sets thread affinity for OpenBLAS threads. `thread_idx` is in [0, openblas_get_num_threads()-1]. */
|
||||
int openblas_setaffinity(int thread_idx, size_t cpusetsize, cpu_set_t* cpu_set);
|
||||
|
@ -411,6 +416,18 @@ void cblas_cgeadd(OPENBLAS_CONST enum CBLAS_ORDER CORDER,OPENBLAS_CONST blasint
|
|||
void cblas_zgeadd(OPENBLAS_CONST enum CBLAS_ORDER CORDER,OPENBLAS_CONST blasint crows, OPENBLAS_CONST blasint ccols, OPENBLAS_CONST double *calpha, double *a, OPENBLAS_CONST blasint clda, OPENBLAS_CONST double *cbeta,
|
||||
double *c, OPENBLAS_CONST blasint cldc);
|
||||
|
||||
void cblas_sgemm_batch(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE * TransA_array, OPENBLAS_CONST enum CBLAS_TRANSPOSE * TransB_array, OPENBLAS_CONST blasint * M_array, OPENBLAS_CONST blasint * N_array, OPENBLAS_CONST blasint * K_array,
|
||||
OPENBLAS_CONST float * alpha_array, OPENBLAS_CONST float ** A_array, OPENBLAS_CONST blasint * lda_array, OPENBLAS_CONST float ** B_array, OPENBLAS_CONST blasint * ldb_array, OPENBLAS_CONST float * beta_array, float ** C_array, OPENBLAS_CONST blasint * ldc_array, OPENBLAS_CONST blasint group_count, OPENBLAS_CONST blasint * group_size);
|
||||
|
||||
void cblas_dgemm_batch(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE * TransA_array, OPENBLAS_CONST enum CBLAS_TRANSPOSE * TransB_array, OPENBLAS_CONST blasint * M_array, OPENBLAS_CONST blasint * N_array, OPENBLAS_CONST blasint * K_array,
|
||||
OPENBLAS_CONST double * alpha_array, OPENBLAS_CONST double ** A_array, OPENBLAS_CONST blasint * lda_array, OPENBLAS_CONST double ** B_array, OPENBLAS_CONST blasint * ldb_array, OPENBLAS_CONST double * beta_array, double ** C_array, OPENBLAS_CONST blasint * ldc_array, OPENBLAS_CONST blasint group_count, OPENBLAS_CONST blasint * group_size);
|
||||
|
||||
void cblas_cgemm_batch(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE * TransA_array, OPENBLAS_CONST enum CBLAS_TRANSPOSE * TransB_array, OPENBLAS_CONST blasint * M_array, OPENBLAS_CONST blasint * N_array, OPENBLAS_CONST blasint * K_array,
|
||||
OPENBLAS_CONST void * alpha_array, OPENBLAS_CONST void ** A_array, OPENBLAS_CONST blasint * lda_array, OPENBLAS_CONST void ** B_array, OPENBLAS_CONST blasint * ldb_array, OPENBLAS_CONST void * beta_array, void ** C_array, OPENBLAS_CONST blasint * ldc_array, OPENBLAS_CONST blasint group_count, OPENBLAS_CONST blasint * group_size);
|
||||
|
||||
void cblas_zgemm_batch(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE * TransA_array, OPENBLAS_CONST enum CBLAS_TRANSPOSE * TransB_array, OPENBLAS_CONST blasint * M_array, OPENBLAS_CONST blasint * N_array, OPENBLAS_CONST blasint * K_array,
|
||||
OPENBLAS_CONST void * alpha_array, OPENBLAS_CONST void ** A_array, OPENBLAS_CONST blasint * lda_array, OPENBLAS_CONST void ** B_array, OPENBLAS_CONST blasint * ldb_array, OPENBLAS_CONST void * beta_array, void ** C_array, OPENBLAS_CONST blasint * ldc_array, OPENBLAS_CONST blasint group_count, OPENBLAS_CONST blasint * group_size);
|
||||
|
||||
/*** BFLOAT16 and INT8 extensions ***/
|
||||
/* convert float array to BFLOAT16 array by rounding */
|
||||
void cblas_sbstobf16(OPENBLAS_CONST blasint n, OPENBLAS_CONST float *in, OPENBLAS_CONST blasint incin, bfloat16 *out, OPENBLAS_CONST blasint incout);
|
||||
|
@ -426,6 +443,9 @@ void cblas_sbgemv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum
|
|||
|
||||
void cblas_sbgemm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransB, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K,
|
||||
OPENBLAS_CONST float alpha, OPENBLAS_CONST bfloat16 *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST bfloat16 *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST float beta, float *C, OPENBLAS_CONST blasint ldc);
|
||||
void cblas_sbgemm_batch(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE * TransA_array, OPENBLAS_CONST enum CBLAS_TRANSPOSE * TransB_array, OPENBLAS_CONST blasint * M_array, OPENBLAS_CONST blasint * N_array, OPENBLAS_CONST blasint * K_array,
|
||||
OPENBLAS_CONST float * alpha_array, OPENBLAS_CONST bfloat16 ** A_array, OPENBLAS_CONST blasint * lda_array, OPENBLAS_CONST bfloat16 ** B_array, OPENBLAS_CONST blasint * ldb_array, OPENBLAS_CONST float * beta_array, float ** C_array, OPENBLAS_CONST blasint * ldc_array, OPENBLAS_CONST blasint group_count, OPENBLAS_CONST blasint * group_size);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif /* __cplusplus */
|
||||
|
|
|
@ -46,7 +46,7 @@ if (DYNAMIC_ARCH)
|
|||
if (ARM64)
|
||||
set(DYNAMIC_CORE ARMV8 CORTEXA53 CORTEXA57 THUNDERX THUNDERX2T99 TSV110 EMAG8180 NEOVERSEN1 THUNDERX3T110)
|
||||
if (${CMAKE_C_COMPILER_VERSION} VERSION_GREATER 9.99)
|
||||
set(DYNAMIC_CORE ${DYNAMIC_CORE} NEOVERSEV1 NEOVERSEN2 ARMV8SVE)
|
||||
set(DYNAMIC_CORE ${DYNAMIC_CORE} NEOVERSEV1 NEOVERSEN2 ARMV8SVE A64FX)
|
||||
endif ()
|
||||
if (DYNAMIC_LIST)
|
||||
set(DYNAMIC_CORE ARMV8 ${DYNAMIC_LIST})
|
||||
|
@ -58,6 +58,10 @@ if (DYNAMIC_ARCH)
|
|||
set(CCOMMON_OPT "${CCOMMON_OPT} -DHAVE_P10_SUPPORT")
|
||||
endif ()
|
||||
|
||||
if (RISCV64)
|
||||
set(DYNAMIC_CORE RISCV64_GENERIC RISCV64_ZVL128B RISCV64_ZVL256B)
|
||||
endif ()
|
||||
|
||||
if (X86)
|
||||
set(DYNAMIC_CORE KATMAI COPPERMINE NORTHWOOD PRESCOTT BANIAS CORE2 PENRYN DUNNINGTON NEHALEM ATHLON OPTERON OPTERON_SSE3 BARCELONA BOBCAT ATOM NANO)
|
||||
endif ()
|
||||
|
|
|
@ -2,12 +2,18 @@
|
|||
## Author: Hank Anderson <hank@statease.com>
|
||||
## Description: Ported from portion of OpenBLAS/Makefile.system
|
||||
## Sets C related variables.
|
||||
include(CheckCCompilerFlag)
|
||||
|
||||
if (${CMAKE_C_COMPILER_ID} MATCHES "IntelLLVM")
|
||||
set(CCOMMON_OPT "${CCOMMON_OPT} -fp-model=consistent")
|
||||
set(GCC_VERSION 100)
|
||||
endif ()
|
||||
|
||||
if (${CMAKE_C_COMPILER_ID} STREQUAL "GNU" OR ${CMAKE_C_COMPILER_ID} STREQUAL "LSB" OR ${CMAKE_C_COMPILER_ID} MATCHES "Clang")
|
||||
|
||||
set(CCOMMON_OPT "${CCOMMON_OPT} -Wall")
|
||||
set(COMMON_PROF "${COMMON_PROF} -fno-inline")
|
||||
set(NO_UNINITIALIZED_WARN "-Wno-uninitialized")
|
||||
set(GCC_VERSION ${CMAKE_C_COMPILER_VERSION})
|
||||
|
||||
if (QUIET_MAKE)
|
||||
set(CCOMMON_OPT "${CCOMMON_OPT} ${NO_UNINITIALIZED_WARN} -Wno-unused")
|
||||
|
@ -36,14 +42,14 @@ if (${CMAKE_C_COMPILER_ID} STREQUAL "GNU" OR ${CMAKE_C_COMPILER_ID} STREQUAL "LS
|
|||
|
||||
if (LOONGARCH64)
|
||||
if (BINARY64)
|
||||
CHECK_CXX_COMPILER_FLAG("-mabi=lp64d" COMPILER_SUPPORT_LP64D_ABI)
|
||||
CHECK_C_COMPILER_FLAG("-mabi=lp64d" COMPILER_SUPPORT_LP64D_ABI)
|
||||
if(COMPILER_SUPPORT_LP64D_ABI)
|
||||
set(CCOMMON_OPT "${CCOMMON_OPT} -mabi=lp64d")
|
||||
else()
|
||||
set(CCOMMON_OPT "${CCOMMON_OPT} -mabi=lp64")
|
||||
endif ()
|
||||
else ()
|
||||
CHECK_CXX_COMPILER_FLAG("-mabi=ilp32d" COMPILER_SUPPORT_ILP32D_ABI)
|
||||
CHECK_C_COMPILER_FLAG("-mabi=ilp32d" COMPILER_SUPPORT_ILP32D_ABI)
|
||||
if(COMPILER_SUPPORT_ILP32D_ABI)
|
||||
set(CCOMMON_OPT "${CCOMMON_OPT} -mabi=ilp32d")
|
||||
else()
|
||||
|
@ -139,7 +145,6 @@ endif ()
|
|||
if (${CORE} STREQUAL COOPERLAKE)
|
||||
if (NOT DYNAMIC_ARCH)
|
||||
if (NOT NO_AVX512)
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${GCC_VERSION} VERSION_GREATER 10.1 OR ${GCC_VERSION} VERSION_EQUAL 10.1)
|
||||
set (CCOMMON_OPT "${CCOMMON_OPT} -march=cooperlake")
|
||||
else ()
|
||||
|
@ -152,7 +157,6 @@ endif ()
|
|||
if (${CORE} STREQUAL SAPPHIRERAPIDS)
|
||||
if (NOT DYNAMIC_ARCH)
|
||||
if (NOT NO_AVX512)
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${GCC_VERSION} VERSION_GREATER 11.0 OR ${GCC_VERSION} VERSION_EQUAL 11.0)
|
||||
set (CCOMMON_OPT "${CCOMMON_OPT} -march=sapphirerapids")
|
||||
else ()
|
||||
|
@ -166,7 +170,6 @@ if (${CORE} STREQUAL ZEN)
|
|||
if (HAVE_AVX512VL)
|
||||
if (NOT DYNAMIC_ARCH)
|
||||
if (NOT NO_AVX512)
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${GCC_VERSION} VERSION_GREATER 13.0 OR ${GCC_VERSION} VERSION_EQUAL 13.0)
|
||||
set (CCOMMON_OPT "${CCOMMON_OPT} -march=znver4")
|
||||
else ()
|
||||
|
@ -179,7 +182,6 @@ endif ()
|
|||
|
||||
if (${CORE} STREQUAL A64FX)
|
||||
if (NOT DYNAMIC_ARCH)
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${GCC_VERSION} VERSION_GREATER 11.0 OR ${GCC_VERSION} VERSION_EQUAL 11.0)
|
||||
set (CCOMMON_OPT "${CCOMMON_OPT} -march=armv8.2-a+sve -mtune=a64fx")
|
||||
else ()
|
||||
|
@ -193,7 +195,6 @@ if (${CORE} STREQUAL NEOVERSEN2)
|
|||
if (${CMAKE_C_COMPILER_ID} STREQUAL "PGI" AND NOT NO_SVE)
|
||||
set (CCOMMON_OPT "${CCOMMON_OPT} -Msve_intrinsics -march=armv8.5-a+sve+sve2+bf16 -mtune=neoverse-n2")
|
||||
else ()
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${GCC_VERSION} VERSION_GREATER 10.4 OR ${GCC_VERSION} VERSION_EQUAL 10.4)
|
||||
set (CCOMMON_OPT "${CCOMMON_OPT} -march=armv8.5-a+sve+sve2+bf16 -mtune=neoverse-n2")
|
||||
else ()
|
||||
|
@ -208,7 +209,6 @@ if (${CORE} STREQUAL NEOVERSEV1)
|
|||
if (${CMAKE_C_COMPILER_ID} STREQUAL "PGI" AND NOT NO_SVE)
|
||||
set (CCOMMON_OPT "${CCOMMON_OPT} -Msve_intrinsics -march=armv8.4-a+sve -mtune=neoverse-v1")
|
||||
else ()
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${GCC_VERSION} VERSION_GREATER 10.4 OR ${GCC_VERSION} VERSION_EQUAL 10.4)
|
||||
set (CCOMMON_OPT "${CCOMMON_OPT} -march=armv8.4-a+sve -mtune=neoverse-v1")
|
||||
else ()
|
||||
|
@ -220,7 +220,6 @@ endif ()
|
|||
|
||||
if (${CORE} STREQUAL NEOVERSEN1)
|
||||
if (NOT DYNAMIC_ARCH)
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${GCC_VERSION} VERSION_GREATER 9.4 OR ${GCC_VERSION} VERSION_EQUAL 9.4)
|
||||
set (CCOMMON_OPT "${CCOMMON_OPT} -march=armv8.2-a+sve -mtune=neoverse-n1")
|
||||
else ()
|
||||
|
@ -265,23 +264,21 @@ endif ()
|
|||
|
||||
if (${CORE} STREQUAL POWER10)
|
||||
if (NOT DYNAMIC_ARCH)
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${GCC_VERSION} VERSION_GREATER 10.2 OR ${GCC_VERSION} VERSION_EQUAL 10.2)
|
||||
set (CCOMMON_OPT "${CCOMMON_OPT} -mcpu=power10 -mtune=power10 -mvsx -fno-fast-math")
|
||||
else ()
|
||||
message(FATAL_ERROR "Compiler GCC.${GCC_VERSION} does not support Power10." )
|
||||
message(FATAL_ERROR "Compiler GCC ${GCC_VERSION} does not support Power10." )
|
||||
endif()
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
if (${CORE} STREQUAL POWER9)
|
||||
if (NOT DYNAMIC_ARCH)
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${GCC_VERSION} VERSION_GREATER 5.0 OR ${GCC_VERSION} VERSION_EQUAL 5.0)
|
||||
set (CCOMMON_OPT "${CCOMMON_OPT} -mcpu=power9 -mtune=power9 -mvsx -fno-fast-math")
|
||||
else ()
|
||||
set (CCOMMON_OPT "${CCOMMON_OPT} -mcpu=power8 -mtune=power8 -mvsx -fno-fast-math")
|
||||
message(WARNING "Compiler GCC.${GCC_VERSION} does not fully support Power9.")
|
||||
message(WARNING "Compiler GCC ${GCC_VERSION} does not fully support Power9.")
|
||||
endif ()
|
||||
endif ()
|
||||
endif ()
|
||||
|
|
|
@ -61,14 +61,17 @@ if (${F_COMPILER} STREQUAL "GFORTRAN" OR ${F_COMPILER} STREQUAL "F95" OR CMAKE_F
|
|||
endif ()
|
||||
if (LOONGARCH64)
|
||||
if (BINARY64)
|
||||
CHECK_CXX_COMPILER_FLAG("-mabi=lp64d" COMPILER_SUPPORT_LP64D_ABI)
|
||||
CHECK_C_COMPILER_FLAG("-mabi=lp64d" COMPILER_SUPPORT_LP64D_ABI)
|
||||
if(COMPILER_SUPPORT_LP64D_ABI)
|
||||
set(FCOMMON_OPT "${FCOMMON_OPT} -mabi=lp64d")
|
||||
else()
|
||||
set(FCOMMON_OPT "${FCOMMON_OPT} -mabi=lp64")
|
||||
endif ()
|
||||
if (INTERFACE64)
|
||||
set(FCOMMON_OPT "${FCOMMON_OPT} -fdefault-integer-8")
|
||||
endif ()
|
||||
else ()
|
||||
CHECK_CXX_COMPILER_FLAG("-mabi=ilp32d" COMPILER_SUPPORT_ILP32D_ABI)
|
||||
CHECK_C_COMPILER_FLAG("-mabi=ilp32d" COMPILER_SUPPORT_ILP32D_ABI)
|
||||
if(COMPILER_SUPPORT_ILP32D_ABI)
|
||||
set(FCOMMON_OPT "${FCOMMON_OPT} -mabi=ilp32d")
|
||||
else()
|
||||
|
@ -114,12 +117,12 @@ if (${F_COMPILER} STREQUAL "GFORTRAN" OR ${F_COMPILER} STREQUAL "F95" OR CMAKE_F
|
|||
endif ()
|
||||
endif ()
|
||||
|
||||
if (${F_COMPILER} STREQUAL "INTEL")
|
||||
if (${F_COMPILER} STREQUAL "INTEL" OR CMAKE_Fortran_COMPILER_ID MATCHES "Intel")
|
||||
set(CCOMMON_OPT "${CCOMMON_OPT} -DF_INTERFACE_INTEL")
|
||||
if (INTERFACE64)
|
||||
set(FCOMMON_OPT "${FCOMMON_OPT} -i8")
|
||||
endif ()
|
||||
set(FCOMMON_OPT "${FCOMMON_OPT} -recursive")
|
||||
set(FCOMMON_OPT "${FCOMMON_OPT} -recursive -fp-model=consistent")
|
||||
if (USE_OPENMP)
|
||||
set(FCOMMON_OPT "${FCOMMON_OPT} -openmp")
|
||||
endif ()
|
||||
|
|
|
@ -38,7 +38,7 @@ if (${CMAKE_SYSTEM_NAME} STREQUAL "Windows")
|
|||
|
||||
# Test for supporting MS_ABI
|
||||
# removed string parsing in favor of CMake's version comparison -hpa
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
set(GCC_VERSION ${CMAKE_C_COMPILER_VERSION})
|
||||
if (${GCC_VERSION} VERSION_GREATER 4.7 OR ${GCC_VERSION} VERSION_EQUAL 4.7)
|
||||
# GCC Version >=4.7
|
||||
# It is compatible with MSVC ABI.
|
||||
|
|
|
@ -1218,6 +1218,37 @@ endif ()
|
|||
set(ZGEMM_UNROLL_M 4)
|
||||
set(ZGEMM_UNROLL_N 4)
|
||||
set(SYMV_P 16)
|
||||
elseif ("${TCORE}" STREQUAL "A64FX")
|
||||
file(APPEND ${TARGET_CONF_TEMP}
|
||||
"#define L1_CODE_SIZE\t65536\n"
|
||||
"#define L1_CODE_LINESIZE\t256\n"
|
||||
"#define L1_CODE_ASSOCIATIVE\t8\n"
|
||||
"#define L1_DATA_SIZE\t32768\n"
|
||||
"#define L1_DATA_LINESIZE\t256\n"
|
||||
"#define L1_DATA_ASSOCIATIVE\t8\n"
|
||||
"#define L2_SIZE\t8388608\n\n"
|
||||
"#define L2_LINESIZE\t256\n"
|
||||
"#define L2_ASSOCIATIVE\t8\n"
|
||||
"#define L3_SIZE\t0\n\n"
|
||||
"#define L3_LINESIZE\t0\n\n"
|
||||
"#define L3_ASSOCIATIVE\t0\n\n"
|
||||
"#define DTB_DEFAULT_ENTRIES\t64\n"
|
||||
"#define DTB_SIZE\t4096\n"
|
||||
"#define HAVE_VFPV4\n"
|
||||
"#define HAVE_VFPV3\n"
|
||||
"#define HAVE_VFP\n"
|
||||
"#define HAVE_NEON\n"
|
||||
"#define HAVE_SVE\n"
|
||||
"#define ARMV8\n")
|
||||
set(SGEMM_UNROLL_M 4)
|
||||
set(SGEMM_UNROLL_N 8)
|
||||
set(DGEMM_UNROLL_M 2)
|
||||
set(DGEMM_UNROLL_N 8)
|
||||
set(CGEMM_UNROLL_M 2)
|
||||
set(CGEMM_UNROLL_N 4)
|
||||
set(ZGEMM_UNROLL_M 2)
|
||||
set(ZGEMM_UNROLL_N 4)
|
||||
set(SYMV_P 16)
|
||||
elseif ("${TCORE}" STREQUAL "P5600")
|
||||
file(APPEND ${TARGET_CONF_TEMP}
|
||||
"#define L2_SIZE 1048576\n"
|
||||
|
@ -1309,6 +1340,15 @@ endif ()
|
|||
"#define DTB_DEFAULT_ENTRIES 128\n"
|
||||
"#define DTB_SIZE 4096\n"
|
||||
"#define L2_ASSOCIATIVE 8\n")
|
||||
elseif ("${TCORE}" STREQUAL "RISCV64_GENERIC")
|
||||
file(APPEND ${TARGET_CONF_TEMP}
|
||||
"#define L1_DATA_SIZE 32768\n"
|
||||
"#define L1_DATA_LINESIZE 32\n"
|
||||
"#define L2_SIZE 1048576\n"
|
||||
"#define L2_LINESIZE 32 \n"
|
||||
"#define DTB_DEFAULT_ENTRIES 128\n"
|
||||
"#define DTB_SIZE 4096\n"
|
||||
"#define L2_ASSOCIATIVE 4\n")
|
||||
endif()
|
||||
set(SBGEMM_UNROLL_M 8)
|
||||
set(SBGEMM_UNROLL_N 4)
|
||||
|
@ -1342,7 +1382,7 @@ else(NOT CMAKE_CROSSCOMPILING)
|
|||
|
||||
if ("${CMAKE_C_COMPILER_ID}" STREQUAL "MSVC")
|
||||
#Use generic for MSVC now
|
||||
message("MSVC")
|
||||
message(STATUS "MSVC")
|
||||
set(GETARCH_FLAGS ${GETARCH_FLAGS} -DFORCE_GENERIC)
|
||||
else()
|
||||
list(APPEND GETARCH_SRC ${PROJECT_SOURCE_DIR}/cpuid.S)
|
||||
|
|
|
@ -160,11 +160,16 @@ else()
|
|||
endif ()
|
||||
endif ()
|
||||
|
||||
if (C_LAPACK)
|
||||
if (${CMAKE_C_COMPILER_ID} STREQUAL "GNU")
|
||||
set(CCOMMON_OPT "${CCOMMON_OPT} -Wno-error=incompatible-pointer-types")
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
include("${PROJECT_SOURCE_DIR}/cmake/prebuild.cmake")
|
||||
if (DEFINED TARGET)
|
||||
if (${TARGET} STREQUAL COOPERLAKE AND NOT NO_AVX512)
|
||||
if (${CMAKE_C_COMPILER_ID} STREQUAL "GNU")
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${CMAKE_C_COMPILER_VERSION} VERSION_GREATER 10.09)
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=cooperlake")
|
||||
else()
|
||||
|
@ -172,15 +177,14 @@ if (DEFINED TARGET)
|
|||
endif()
|
||||
elseif (${CMAKE_C_COMPILER_ID} STREQUAL "Clang" OR ${CMAKE_C_COMPILER_ID} STREQUAL "AppleClang")
|
||||
if (${CMAKE_C_COMPILER_VERSION} VERSION_GREATER 8.99)
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=cooperlake -exhaustive-register-search")
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=cooperlake -mllvm -exhaustive-register-search")
|
||||
else()
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=skylake-avx512 -exhaustive-register-search")
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=skylake-avx512 -mllvm -exhaustive-register-search")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
if (${TARGET} STREQUAL SAPPHIRERAPIDS AND NOT NO_AVX512)
|
||||
if (${CMAKE_C_COMPILER_ID} STREQUAL "GNU")
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${CMAKE_C_COMPILER_VERSION} VERSION_GREATER 11.0)
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=sapphirerapids")
|
||||
else()
|
||||
|
@ -188,22 +192,21 @@ if (DEFINED TARGET)
|
|||
endif()
|
||||
elseif (${CMAKE_C_COMPILER_ID} STREQUAL "Clang" OR ${CMAKE_C_COMPILER_ID} STREQUAL "AppleClang")
|
||||
if (${CMAKE_C_COMPILER_VERSION} VERSION_GREATER 12.0)
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=sapphirerapids -exhaustive-register-search")
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=sapphirerapids -mllvm -exhaustive-register-search")
|
||||
else()
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=skylake-avx512 -exhaustive-register-search")
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=skylake-avx512 -mllvm -exhaustive-register-search")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
if (${TARGET} STREQUAL SKYLAKEX AND NOT NO_AVX512)
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=skylake-avx512")
|
||||
if (${CMAKE_C_COMPILER_ID} STREQUAL "Clang" OR ${CMAKE_C_COMPILER_ID} STREQUAL "AppleClang")
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -exhaustive-register-search")
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mllvm -exhaustive-register-search")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if (((${TARGET} STREQUAL ZEN) AND HAVE_AVX512VL) AND NOT NO_AVX512)
|
||||
if (${CMAKE_C_COMPILER_ID} STREQUAL "GNU")
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${CMAKE_C_COMPILER_VERSION} VERSION_GREATER 12.99)
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=znver4")
|
||||
else()
|
||||
|
@ -215,14 +218,13 @@ if (DEFINED TARGET)
|
|||
else()
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=skylake-avx512")
|
||||
endif()
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -exhaustive-register-search")
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mllvm -exhaustive-register-search")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if ((${TARGET} STREQUAL HASWELL OR (${TARGET} STREQUAL ZEN AND NOT HAVE_AVX512VL)) AND NOT NO_AVX2)
|
||||
if (${CMAKE_C_COMPILER_ID} STREQUAL "GNU")
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${GCC_VERSION} VERSION_GREATER 4.7 OR ${GCC_VERSION} VERSION_EQUAL 4.7)
|
||||
if (CMAKE_C_COMPILER_VERSION VERSION_GREATER 4.7 OR CMAKE_C_COMPILER_VERSION VERSION_EQUAL 4.7)
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mavx2")
|
||||
endif()
|
||||
elseif (${CMAKE_C_COMPILER_ID} STREQUAL "CLANG")
|
||||
|
@ -261,20 +263,18 @@ if (DEFINED TARGET)
|
|||
endif()
|
||||
|
||||
if (${TARGET} STREQUAL POWER10)
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${GCC_VERSION} VERSION_GREATER 10.2 OR ${GCC_VERSION} VERSION_EQUAL 10.2)
|
||||
if (CMAKE_C_COMPILER VERSION VERSION_GREATER 10.2 OR CMAKE_C_COMPILER_VERSION VERSION_EQUAL 10.2)
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mcpu=power10 -mtune=power10 -mvsx -fno-fast-math")
|
||||
else ()
|
||||
message(FATAL_ERROR "Compiler GCC.${GCC_VERSION} does not support Power10.")
|
||||
message(FATAL_ERROR "Compiler GCC ${CMAKE_C_COMPILER_VERSION} does not support Power10.")
|
||||
endif()
|
||||
endif()
|
||||
if (${TARGET} STREQUAL POWER9)
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${GCC_VERSION} VERSION_GREATER 5.0 OR ${GCC_VERSION} VERSION_EQUAL 5.0)
|
||||
if (CMAKE_C_COMPILER_VERSION VERSION_GREATER 5.0 OR CMAKE_C_COMPILER_VERSION VERSION_EQUAL 5.0)
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mcpu=power9 -mtune=power9 -mvsx -fno-fast-math")
|
||||
else ()
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -mcpu=power8 -mtune=power8 -mvsx -fno-fast-math")
|
||||
message(WARNING "Compiler GCC.${GCC_VERSION} does not support fully Power9.")
|
||||
message(WARNING "Compiler GCC ${CMAKE_C_COMPILER_VERSION} does not support fully Power9.")
|
||||
endif()
|
||||
endif()
|
||||
if (${TARGET} STREQUAL POWER8)
|
||||
|
@ -285,11 +285,10 @@ if (${TARGET} STREQUAL NEOVERSEV1)
|
|||
if (${CMAKE_C_COMPILER_ID} STREQUAL "PGI" AND NOT NO_SVE)
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -Msve_intrinsics -march=armv8.4-a+sve -mtune=neoverse-v1")
|
||||
else ()
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${GCC_VERSION} VERSION_GREATER 10.4 OR ${GCC_VERSION} VERSION_EQUAL 10.4)
|
||||
if (CMAKE_C_COMPILER_VERSION VERSION_GREATER 10.4 OR CMAKE_C_COMPILER_VERSION VERSION_EQUAL 10.4)
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=armv8.4-a+sve -mtune=neoverse-v1")
|
||||
else ()
|
||||
message(FATAL_ERROR "Compiler ${CMAKE_C_COMPILER} ${GCC_VERSION} does not support Neoverse V1.")
|
||||
message(FATAL_ERROR "Compiler ${CMAKE_C_COMPILER} ${CMAKE_C_COMPILER_VERSION} does not support Neoverse V1.")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
@ -297,11 +296,10 @@ if (${TARGET} STREQUAL NEOVERSEV1)
|
|||
if (${CMAKE_C_COMPILER_ID} STREQUAL "PGI" AND NOT NO_SVE)
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -Msve-intrinsics -march=armv8.5-a+sve+sve2+bf16 -mtune=neoverse-n2")
|
||||
else ()
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${GCC_VERSION} VERSION_GREATER 10.4 OR ${GCC_VERSION} VERSION_EQUAL 10.4)
|
||||
if (CMAKE_C_COMPILER_VERSION VERSION_GREATER 10.4 OR CMAKE_C_COMPILER_VERSION VERSION_EQUAL 10.4)
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=armv8.5-a+sve+sve2+bf16 -mtune=neoverse-n2")
|
||||
else ()
|
||||
message(FATAL_ERROR "Compiler $${CMAKE_C_COMPILER} {GCC_VERSION} does not support Neoverse N2.")
|
||||
message(FATAL_ERROR "Compiler $${CMAKE_C_COMPILER} ${CMAKE_C_COMPILER_VERSION} does not support Neoverse N2.")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
@ -312,6 +310,18 @@ if (${TARGET} STREQUAL NEOVERSEV1)
|
|||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=armv8.2-a+sve")
|
||||
endif()
|
||||
endif()
|
||||
if (${TARGET} STREQUAL A64FX)
|
||||
if (${CMAKE_C_COMPILER_ID} STREQUAL "PGI" AND NOT NO_SVE)
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -Msve-intrinsics -march=armv8.2-a+sve -mtune=a64fx")
|
||||
else ()
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (${GCC_VERSION} VERSION_GREATER 10.4 OR ${GCC_VERSION} VERSION_EQUAL 10.4)
|
||||
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=armv8.2-a+sve -mtune=a64fx")
|
||||
else ()
|
||||
message(FATAL_ERROR "Compiler $${CMAKE_C_COMPILER} {GCC_VERSION} does not support A64FX.")
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
endif()
|
||||
|
||||
|
@ -381,12 +391,19 @@ endif ()
|
|||
if (X86_64 OR ${CORE} STREQUAL POWER10)
|
||||
set(SMALL_MATRIX_OPT TRUE)
|
||||
endif ()
|
||||
if (ARM64)
|
||||
set(GEMM_GEMV_FORWARD TRUE)
|
||||
endif ()
|
||||
|
||||
if (GEMM_GEMV_FORWARD AND NOT ONLY_CBLAS)
|
||||
set(CCOMMON_OPT "${CCOMMON_OPT} -DGEMM_GEMV_FORWARD")
|
||||
endif ()
|
||||
if (SMALL_MATRIX_OPT)
|
||||
set(CCOMMON_OPT "${CCOMMON_OPT} -DSMALL_MATRIX_OPT")
|
||||
endif ()
|
||||
|
||||
if (DYNAMIC_ARCH)
|
||||
if (X86 OR X86_64 OR ARM64 OR POWER)
|
||||
if (X86 OR X86_64 OR ARM64 OR POWER OR RISCV64)
|
||||
set(CCOMMON_OPT "${CCOMMON_OPT} -DDYNAMIC_ARCH")
|
||||
if (DYNAMIC_OLDER)
|
||||
set(CCOMMON_OPT "${CCOMMON_OPT} -DDYNAMIC_OLDER")
|
||||
|
@ -604,7 +621,10 @@ set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} ${FCOMMON_OPT}")
|
|||
set(FPFLAGS "${FPFLAGS} ${FCOMMON_OPT} ${COMMON_PROF}")
|
||||
|
||||
#For LAPACK Fortran codes.
|
||||
set(LAPACK_FFLAGS "${LAPACK_FFLAGS} ${CMAKE_Fortran_FLAGS}")
|
||||
set(LAPACK_FFLAGS "${LAPACK_FFLAGS} ${CMAKE_Fortran_FLAGS}" )
|
||||
if (LAPACK_STRLEN)
|
||||
set (LAPACK_FFLAGS "${LAPACK_FFLAGS} -DLAPACK_STRLEN=${LAPACK_STRLEN}")
|
||||
endif()
|
||||
set(LAPACK_FPFLAGS "${LAPACK_FPFLAGS} ${FPFLAGS}")
|
||||
|
||||
#Disable -fopenmp for LAPACK Fortran codes on Windows.
|
||||
|
@ -617,7 +637,7 @@ if (${CMAKE_SYSTEM_NAME} STREQUAL "Windows")
|
|||
endif ()
|
||||
|
||||
if (CMAKE_Fortran_COMPILER)
|
||||
if (${F_COMPILER} STREQUAL "NAG" OR ${F_COMPILER} STREQUAL "CRAY" OR CMAKE_Fortran_COMPILER_ID MATCHES "LLVMFlang.*")
|
||||
if ("${F_COMPILER}" STREQUAL "NAG" OR "${F_COMPILER}" STREQUAL "CRAY" OR CMAKE_Fortran_COMPILER_ID MATCHES "LLVMFlang.*")
|
||||
set(FILTER_FLAGS "-msse3;-mssse3;-msse4.1;-mavx;-mavx2,-mskylake-avx512")
|
||||
if (CMAKE_Fortran_COMPILER_ID MATCHES "LLVMFlang.*")
|
||||
message(STATUS "removing fortran flags")
|
||||
|
|
|
@ -187,8 +187,8 @@ macro(ParseMakefileVars MAKEFILE_IN)
|
|||
set (HasValidGroup 1)
|
||||
set (STR ${CMAKE_MATCH_4})
|
||||
endif ()
|
||||
if (DEFINED ${CMAKE_MATCH_1} AND ${HasValidGroup} EQUAL 1)
|
||||
if (NOT (${${CMAKE_MATCH_1}} STREQUAL ${STR}))
|
||||
if (DEFINED CMAKE_MATCH_1 AND ${HasValidGroup} EQUAL 1)
|
||||
if (NOT (CMAKE_MATCH_1 STREQUAL ${STR}))
|
||||
#message (STATUS "condition is true")
|
||||
set (IfElse 1)
|
||||
continue ()
|
||||
|
|
|
@ -55,6 +55,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
#ifndef ASSEMBLER
|
||||
|
||||
|
||||
static __inline int WhereAmI(void){
|
||||
uint64_t ret;
|
||||
__asm__ volatile (
|
||||
" mrs x0, mpidr_el1 \n"
|
||||
" and x0, x0, 0xff \n"
|
||||
:"=r" (ret)
|
||||
:: "memory"
|
||||
);
|
||||
ret +=1;
|
||||
if ((int)ret <0) ret = 0;
|
||||
return (int)ret;
|
||||
}
|
||||
|
||||
static __inline void blas_lock(volatile BLASULONG *address){
|
||||
|
||||
BLASULONG ret;
|
||||
|
|
|
@ -47,6 +47,11 @@ int BLASFUNC(xerbla)(char *, blasint *info, blasint);
|
|||
|
||||
void openblas_set_num_threads_(int *);
|
||||
|
||||
/*Set the threading backend to a custom callback.*/
|
||||
typedef void (*openblas_dojob_callback)(int thread_num, void *jobdata, int dojob_data);
|
||||
typedef void (*openblas_threads_callback)(int sync, openblas_dojob_callback dojob, int numjobs, size_t jobdata_elsize, void *jobdata, int dojob_data);
|
||||
extern openblas_threads_callback openblas_threads_callback_;
|
||||
|
||||
FLOATRET BLASFUNC(sdot) (blasint *, float *, blasint *, float *, blasint *);
|
||||
FLOATRET BLASFUNC(sdsdot)(blasint *, float *, float *, blasint *, float *, blasint *);
|
||||
|
||||
|
|
|
@ -1939,6 +1939,11 @@ int dgeadd_k(BLASLONG, BLASLONG, double, double*, BLASLONG, double, double *, BL
|
|||
int cgeadd_k(BLASLONG, BLASLONG, float, float, float*, BLASLONG, float, float, float *, BLASLONG);
|
||||
int zgeadd_k(BLASLONG, BLASLONG, double,double, double*, BLASLONG, double, double, double *, BLASLONG);
|
||||
|
||||
int sgemm_batch_thread(blas_arg_t * queue, BLASLONG nums);
|
||||
int dgemm_batch_thread(blas_arg_t * queue, BLASLONG nums);
|
||||
int cgemm_batch_thread(blas_arg_t * queue, BLASLONG nums);
|
||||
int zgemm_batch_thread(blas_arg_t * queue, BLASLONG nums);
|
||||
int sbgemm_batch_thread(blas_arg_t * queue, BLASLONG nums);
|
||||
|
||||
#ifdef __CUDACC__
|
||||
}
|
||||
|
|
|
@ -96,6 +96,32 @@ static inline int WhereAmI(void){
|
|||
}
|
||||
#endif
|
||||
|
||||
static inline int get_cpu_model(char *model_name) {
|
||||
FILE *cpuinfo_file = fopen("/proc/cpuinfo", "r");
|
||||
if (!cpuinfo_file) {
|
||||
return 0;
|
||||
}
|
||||
char line[1024];
|
||||
while (fgets(line, sizeof(line), cpuinfo_file)) {
|
||||
if (strstr(line, "model name")) {
|
||||
char *token = strtok(line, ":");
|
||||
token = strtok(NULL, ":");
|
||||
while (*token == ' ')
|
||||
token++;
|
||||
char *end = token + strlen(token) - 1;
|
||||
while (end > token && (*end == '\n' || *end == '\r')) {
|
||||
*end = '\0';
|
||||
end--;
|
||||
}
|
||||
strcpy(model_name, token);
|
||||
fclose(cpuinfo_file);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
fclose(cpuinfo_file);
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef DOUBLE
|
||||
#define GET_IMAGE(res) __asm__ __volatile__("fmov.d %0, $f2" : "=f"(res) : : "memory")
|
||||
#else
|
||||
|
|
|
@ -2655,9 +2655,20 @@ typedef struct {
|
|||
BLASLONG prea, preb, prec, pred;
|
||||
#endif
|
||||
|
||||
|
||||
//for gemm_batch
|
||||
void * routine;
|
||||
int routine_mode;
|
||||
|
||||
} blas_arg_t;
|
||||
#endif
|
||||
|
||||
#ifdef SMALL_MATRIX_OPT
|
||||
#define BLAS_SMALL_OPT 0x10000U
|
||||
#define BLAS_SMALL_B0_OPT 0x30000U
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef XDOUBLE
|
||||
|
||||
#define TRSV_NUU qtrsv_NUU
|
||||
|
|
|
@ -841,17 +841,17 @@ Lmcount$lazy_ptr:
|
|||
#endif
|
||||
|
||||
#if defined(PPC440)
|
||||
#define BUFFER_SIZE ( 2 << 20)
|
||||
#define BUFFER_SIZE ( 2UL << 20)
|
||||
#elif defined(PPC440FP2)
|
||||
#define BUFFER_SIZE ( 16 << 20)
|
||||
#define BUFFER_SIZE ( 16UL << 20)
|
||||
#elif defined(POWER6) || defined(POWER8) || defined(POWER9) || defined(POWER10)
|
||||
#define BUFFER_SIZE ( 64 << 22)
|
||||
#define BUFFER_SIZE ( 64UL << 22)
|
||||
#else
|
||||
#define BUFFER_SIZE ( 16 << 20)
|
||||
#define BUFFER_SIZE ( 16UL << 20)
|
||||
#endif
|
||||
#ifdef DYNAMIC_ARCH
|
||||
#undef BUFFER_SIZE
|
||||
#define BUFFER_SIZE (64 << 22)
|
||||
#define BUFFER_SIZE (64UL << 22)
|
||||
#endif
|
||||
|
||||
#ifndef PAGESIZE
|
||||
|
|
|
@ -111,8 +111,8 @@ typedef struct blas_queue {
|
|||
struct blas_queue *next;
|
||||
|
||||
#if defined( __WIN32__) || defined(__CYGWIN32__) || defined(_WIN32) || defined(__CYGWIN__)
|
||||
// CRITICAL_SECTION lock;
|
||||
// HANDLE finish;
|
||||
CRITICAL_SECTION lock;
|
||||
HANDLE finish;
|
||||
volatile int finished;
|
||||
#else
|
||||
pthread_mutex_t lock;
|
||||
|
|
|
@ -253,7 +253,7 @@ static __inline unsigned int blas_quickdivide(unsigned int x, unsigned int y){
|
|||
#ifndef BUFFERSIZE
|
||||
#define BUFFER_SIZE (32 << 22)
|
||||
#else
|
||||
#define BUFFER_SIZE (32 << BUFFERSIZE)
|
||||
#define BUFFER_SIZE (32UL << BUFFERSIZE)
|
||||
#endif
|
||||
|
||||
#define SEEK_ADDRESS
|
||||
|
|
|
@ -46,6 +46,7 @@ size_t length64=sizeof(value64);
|
|||
#define CPU_NEOVERSEN1 11
|
||||
#define CPU_NEOVERSEV1 16
|
||||
#define CPU_NEOVERSEN2 17
|
||||
#define CPU_NEOVERSEV2 24
|
||||
#define CPU_CORTEXX1 18
|
||||
#define CPU_CORTEXX2 19
|
||||
#define CPU_CORTEXA510 20
|
||||
|
@ -91,7 +92,8 @@ static char *cpuname[] = {
|
|||
"CORTEXA510",
|
||||
"CORTEXA710",
|
||||
"FT2000",
|
||||
"CORTEXA76"
|
||||
"CORTEXA76",
|
||||
"NEOVERSEV2"
|
||||
};
|
||||
|
||||
static char *cpuname_lower[] = {
|
||||
|
@ -118,7 +120,8 @@ static char *cpuname_lower[] = {
|
|||
"cortexa510",
|
||||
"cortexa710",
|
||||
"ft2000",
|
||||
"cortexa76"
|
||||
"cortexa76",
|
||||
"neoversev2"
|
||||
};
|
||||
|
||||
int get_feature(char *search)
|
||||
|
@ -213,6 +216,8 @@ int detect(void)
|
|||
return CPU_CORTEXX2;
|
||||
else if (strstr(cpu_part, "0xd4e")) //X3
|
||||
return CPU_CORTEXX2;
|
||||
else if (strstr(cpu_part, "0xd4f")) //NVIDIA Grace et al.
|
||||
return CPU_NEOVERSEV2;
|
||||
else if (strstr(cpu_part, "0xd0b"))
|
||||
return CPU_CORTEXA76;
|
||||
}
|
||||
|
@ -425,6 +430,23 @@ void get_cpuconfig(void)
|
|||
printf("#define DTB_DEFAULT_ENTRIES 48\n");
|
||||
printf("#define DTB_SIZE 4096\n");
|
||||
break;
|
||||
case CPU_NEOVERSEV2:
|
||||
printf("#define ARMV9\n");
|
||||
printf("#define %s\n", cpuname[d]);
|
||||
printf("#define L1_CODE_SIZE 65536\n");
|
||||
printf("#define L1_CODE_LINESIZE 64\n");
|
||||
printf("#define L1_CODE_ASSOCIATIVE 4\n");
|
||||
printf("#define L1_DATA_SIZE 65536\n");
|
||||
printf("#define L1_DATA_LINESIZE 64\n");
|
||||
printf("#define L1_DATA_ASSOCIATIVE 4\n");
|
||||
printf("#define L2_SIZE 1048576\n");
|
||||
printf("#define L2_LINESIZE 64\n");
|
||||
printf("#define L2_ASSOCIATIVE 8\n");
|
||||
// L1 Data TLB = 48 entries
|
||||
// L2 Data TLB = 2048 entries
|
||||
printf("#define DTB_DEFAULT_ENTRIES 48\n");
|
||||
printf("#define DTB_SIZE 4096\n"); // Set to 4096 for symmetry with other configs.
|
||||
break;
|
||||
case CPU_CORTEXA510:
|
||||
case CPU_CORTEXA710:
|
||||
case CPU_CORTEXX1:
|
||||
|
|
|
@ -33,6 +33,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|||
|
||||
#include <stdint.h>
|
||||
#include <sys/auxv.h>
|
||||
#include <stdio.h>
|
||||
|
||||
/* If LASX extension instructions supported,
|
||||
* using core LOONGSON3R5
|
||||
|
|
18
cpuid_x86.c
18
cpuid_x86.c
|
@ -1529,12 +1529,14 @@ int get_cpuname(void){
|
|||
switch (model) {
|
||||
case 5: // Comet Lake H and S
|
||||
case 6: // Comet Lake U
|
||||
case 10: // Meteor Lake
|
||||
if(support_avx2())
|
||||
return CPUTYPE_HASWELL;
|
||||
if(support_avx())
|
||||
return CPUTYPE_SANDYBRIDGE;
|
||||
else
|
||||
return CPUTYPE_NEHALEM;
|
||||
case 0: // Meteor Lake
|
||||
case 7: // Rocket Lake
|
||||
if(support_avx512())
|
||||
return CPUTYPE_SKYLAKEX;
|
||||
|
@ -1560,6 +1562,19 @@ int get_cpuname(void){
|
|||
return CPUTYPE_NEHALEM;
|
||||
}
|
||||
break;
|
||||
case 12: //family 6 exmodel 12
|
||||
switch (model) {
|
||||
case 15:
|
||||
if(support_avx512())
|
||||
return CPUTYPE_SAPPHIRERAPIDS;
|
||||
if(support_avx2())
|
||||
return CPUTYPE_HASWELL;
|
||||
if(support_avx())
|
||||
return CPUTYPE_SANDYBRIDGE;
|
||||
else
|
||||
return CPUTYPE_NEHALEM;
|
||||
}
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 0x7:
|
||||
|
@ -2377,12 +2392,12 @@ int get_coretype(void){
|
|||
else
|
||||
return CORE_NEHALEM;
|
||||
}
|
||||
}
|
||||
case 15:
|
||||
if (model <= 0x2) return CORE_NORTHWOOD;
|
||||
else return CORE_PRESCOTT;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (vendor == VENDOR_AMD){
|
||||
if (family <= 0x5) return CORE_80486;
|
||||
|
@ -2511,6 +2526,7 @@ int get_coretype(void){
|
|||
case 0x7:
|
||||
switch (exmodel) {
|
||||
case 5:
|
||||
case 6:
|
||||
if (support_avx2())
|
||||
return CORE_ZEN;
|
||||
else
|
||||
|
|
|
@ -25,6 +25,9 @@ endif
|
|||
|
||||
override CFLAGS += -DADD$(BU) -DCBLAS
|
||||
ifeq ($(F_COMPILER),GFORTRAN)
|
||||
ifneq (, $(filter $(CORE),LOONGSON3R3 LOONGSON3R4))
|
||||
override FFLAGS = $(filter_out(-O2 -O3,$(FFLAGS))) -O0
|
||||
endif
|
||||
override FFLAGS += -fno-tree-vectorize
|
||||
endif
|
||||
override TARGET_ARCH=
|
||||
|
@ -203,7 +206,6 @@ ifeq ($(BUILD_COMPLEX16),1)
|
|||
OPENBLAS_NUM_THREADS=2 ./xzcblat3 < zin3
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(SUPPORT_GEMM3M),1)
|
||||
ifeq ($(USE_OPENMP), 1)
|
||||
|
@ -222,7 +224,7 @@ ifeq ($(BUILD_COMPLEX16),1)
|
|||
endif
|
||||
endif
|
||||
endif
|
||||
|
||||
endif
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1,53 @@
|
|||
## Mailing list
|
||||
|
||||
We have a [GitHub discussions](https://github.com/OpenMathLib/OpenBLAS/discussions/) forum to discuss usage and development of OpenBLAS. We also have a [Google group for *users*](https://groups.google.com/forum/#!forum/openblas-users) and a [Google group for *development of*](https://groups.google.com/forum/#!forum/openblas-dev) OpenBLAS.
|
||||
|
||||
## Acknowledgements
|
||||
|
||||
This work was or is partially supported by the following grants, contracts and institutions:
|
||||
|
||||
* Research and Development of Compiler System and Toolchain for Domestic CPU, National S&T Major Projects: Core Electronic Devices, High-end General Chips and Fundamental Software (No.2009ZX01036-001-002)
|
||||
* National High-tech R&D Program of China (Grant No.2012AA010903)
|
||||
* [PerfXLab](http://www.perfxlab.com/)
|
||||
* Chan Zuckerberg Initiative's Essential Open Source Software for Science program:
|
||||
* Cycle 1 grant: [Strengthening NumPy's foundations - growing beyond code](https://figshare.com/articles/journal_contribution/Proposal_NumPy_OpenBLAS_for_Chan_Zuckerberg_Initiative_EOSS_2019_round_1/10302167) (2019-2020)
|
||||
* Cycle 3 grant: [Improving usability and sustainability for NumPy and OpenBLAS](https://chanzuckerberg.com/eoss/proposals/improving-usability-and-sustainability-for-numpy-and-openblas/) (2020-2021)
|
||||
* Sovereign Tech Fund funding: [Keeping high performance linear algebra computation accessible and open for all](https://www.sovereigntechfund.de/tech/openblas) (2023-2024)
|
||||
|
||||
Over the course of OpenBLAS development, a number of donations were received.
|
||||
You can read OpenBLAS's statement of receipts and disbursement and cash balance in
|
||||
[this Google doc](https://docs.google.com/spreadsheet/ccc?key=0AghkTjXe2lDndE1UZml0dGpaUzJmZGhvenBZd1F2R1E&usp=sharing) (covers 2013-2016).
|
||||
A list of backers is available [in BACKERS.md](https://github.com/OpenMathLib/OpenBLAS/blob/develop/BACKERS.md) in the main repo.
|
||||
|
||||
### Donations
|
||||
|
||||
We welcome hardware donations, including the latest CPUs and motherboards.
|
||||
|
||||
|
||||
## Open source users of OpenBLAS
|
||||
|
||||
Prominent open source users of OpenBLAS include:
|
||||
|
||||
* [Julia](https://julialang.org) - a high-level, high-performance dynamic programming language for technical computing
|
||||
* [NumPy](https://numpy.org) - the fundamental package for scientific computing with Python
|
||||
* [SciPy](https://scipy.org) - fundamental algorithms for scientific computing in Python
|
||||
* [R](https://www.r-project.org/) - a free software environment for statistical computing and graphics
|
||||
* [OpenCV](https://opencv.org/) - the world's biggest computer vision library
|
||||
|
||||
OpenBLAS is packaged in most major Linux distros, as well as general and
|
||||
numerical computing-focused packaging ecosystems like Nix, Homebrew, Spack and
|
||||
conda-forge.
|
||||
|
||||
OpenBLAS is used directly by libraries written in C, C++ and Fortran (and
|
||||
probably other languages), and directly by end users in those languages.
|
||||
|
||||
|
||||
## Publications
|
||||
|
||||
### 2013
|
||||
|
||||
* Wang Qian, Zhang Xianyi, Zhang Yunquan, Qing Yi, **AUGEM: Automatically Generate High Performance Dense Linear Algebra Kernels on x86 CPUs**, In the International Conference for High Performance Computing, Networking, Storage and Analysis (SC'13), Denver CO, November 2013. [[pdf](http://xianyi.github.io/paper/augem_SC13.pdf)]
|
||||
|
||||
### 2012
|
||||
|
||||
* Zhang Xianyi, Wang Qian, Zhang Yunquan, **Model-driven Level 3 BLAS Performance Optimization on Loongson 3A Processor**, 2012 IEEE 18th International Conference on Parallel and Distributed Systems (ICPADS), 17-19 Dec. 2012.
|
|
@ -0,0 +1,120 @@
|
|||
This page describes the Make-based build, which is the default/authoritative
|
||||
build method. Note that the OpenBLAS repository also supports building with
|
||||
CMake (not described here) - that generally works and is tested, however there
|
||||
may be small differences between the Make and CMake builds.
|
||||
|
||||
!!! warning
|
||||
This page is made by someone who is not the developer and should not be considered as an official documentation of the build system. For getting the full picture, it is best to read the Makefiles and understand them yourself.
|
||||
|
||||
## Makefile dep graph
|
||||
|
||||
```
|
||||
Makefile
|
||||
|
|
||||
|----- Makefile.system # !!! this is included by many of the Makefiles in the subdirectories !!!
|
||||
| |
|
||||
| |===== Makefile.prebuild # This is triggered (not included) once by Makefile.system
|
||||
| | | # and runs before any of the actual library code is built.
|
||||
| | | # (builds and runs the "getarch" tool for cpu identification,
|
||||
| | | # runs the compiler detection scripts c_check and f_check)
|
||||
| | |
|
||||
| | ----- (Makefile.conf) [ either this or Makefile_kernel.conf is generated ]
|
||||
| | | { Makefile.system#L243 }
|
||||
| | ----- (Makefile_kernel.conf) [ temporary Makefile.conf during DYNAMIC_ARCH builds ]
|
||||
| |
|
||||
| |----- Makefile.rule # defaults for build options that can be given on the make command line
|
||||
| |
|
||||
| |----- Makefile.$(ARCH) # architecture-specific compiler options and OpenBLAS buffer size values
|
||||
|
|
||||
|~~~~~ exports/
|
||||
|
|
||||
|~~~~~ test/
|
||||
|
|
||||
|~~~~~ utest/
|
||||
|
|
||||
|~~~~~ ctest/
|
||||
|
|
||||
|~~~~~ cpp_thread_test/
|
||||
|
|
||||
|~~~~~ kernel/
|
||||
|
|
||||
|~~~~~ ${SUBDIRS}
|
||||
|
|
||||
|~~~~~ ${BLASDIRS}
|
||||
|
|
||||
|~~~~~ ${NETLIB_LAPACK_DIR}{,/timing,/testing/{EIG,LIN}}
|
||||
|
|
||||
|~~~~~ relapack/
|
||||
```
|
||||
|
||||
## Important Variables
|
||||
|
||||
Most of the tunable variables are found in [Makefile.rule](https://github.com/xianyi/OpenBLAS/blob/develop/Makefile.rule), along with their detailed descriptions.<br/>
|
||||
Most of the variables are detected automatically in [Makefile.prebuild](https://github.com/xianyi/OpenBLAS/blob/develop/Makefile.prebuild), if they are not set in the environment.
|
||||
|
||||
### CPU related
|
||||
```
|
||||
ARCH - Target architecture (eg. x86_64)
|
||||
TARGET - Target CPU architecture, in case of DYNAMIC_ARCH=1 means library will not be usable on less capable CPUs
|
||||
TARGET_CORE - TARGET_CORE will override TARGET internally during each cpu-specific cycle of the build for DYNAMIC_ARCH
|
||||
DYNAMIC_ARCH - For building library for multiple TARGETs (does not lose any optimizations, but increases library size)
|
||||
DYNAMIC_LIST - optional user-provided subset of the DYNAMIC_CORE list in Makefile.system
|
||||
```
|
||||
|
||||
### Toolchain related
|
||||
```
|
||||
CC - TARGET C compiler used for compilation (can be cross-toolchains)
|
||||
FC - TARGET Fortran compiler used for compilation (can be cross-toolchains, set NOFORTRAN=1 if used cross-toolchain has no fortran compiler)
|
||||
AR, AS, LD, RANLIB - TARGET toolchain helpers used for compilation (can be cross-toolchains)
|
||||
|
||||
HOSTCC - compiler of build machine, needed to create proper config files for target architecture
|
||||
HOST_CFLAGS - flags for build machine compiler
|
||||
```
|
||||
|
||||
### Library related
|
||||
```
|
||||
BINARY - 32/64 bit library
|
||||
|
||||
BUILD_SHARED - Create shared library
|
||||
BUILD_STATIC - Create static library
|
||||
|
||||
QUAD_PRECISION - enable support for IEEE quad precision [ largely unimplemented leftover from GotoBLAS, do not use ]
|
||||
EXPRECISION - Obsolete option to use float80 of SSE on BSD-like systems
|
||||
INTERFACE64 - Build with 64bit integer representations to support large array index values [ incompatible with standard API ]
|
||||
|
||||
BUILD_SINGLE - build the single-precision real functions of BLAS [and optionally LAPACK]
|
||||
BUILD_DOUBLE - build the double-precision real functions
|
||||
BUILD_COMPLEX - build the single-precision complex functions
|
||||
BUILD_COMPLEX16 - build the double-precision complex functions
|
||||
(all four types are included in the build by default when none was specifically selected)
|
||||
|
||||
BUILD_BFLOAT16 - build the "half precision brainfloat" real functions
|
||||
|
||||
USE_THREAD - Use a multithreading backend (default to pthread)
|
||||
USE_LOCKING - implement locking for thread safety even when USE_THREAD is not set (so that the singlethreaded library can
|
||||
safely be called from multithreaded programs)
|
||||
USE_OPENMP - Use OpenMP as multithreading backend
|
||||
NUM_THREADS - define this to the maximum number of parallel threads you expect to need (defaults to the number of cores in the build cpu)
|
||||
NUM_PARALLEL - define this to the number of OpenMP instances that your code may use for parallel calls into OpenBLAS (default 1,see below)
|
||||
|
||||
```
|
||||
|
||||
|
||||
OpenBLAS uses a fixed set of memory buffers internally, used for communicating
|
||||
and compiling partial results from individual threads. For efficiency, the
|
||||
management array structure for these buffers is sized at build time - this
|
||||
makes it necessary to know in advance how many threads need to be supported on
|
||||
the target system(s).
|
||||
|
||||
With OpenMP, there is an additional level of complexity as there may be calls
|
||||
originating from a parallel region in the calling program. If OpenBLAS gets
|
||||
called from a single parallel region, it runs single-threaded automatically to
|
||||
avoid overloading the system by fanning out its own set of threads. In the case
|
||||
that an OpenMP program makes multiple calls from independent regions or
|
||||
instances in parallel, this default serialization is not sufficient as the
|
||||
additional caller(s) would compete for the original set of buffers already in
|
||||
use by the first call. So if multiple OpenMP runtimes call into OpenBLAS at the
|
||||
same time, then only one of them will be able to make progress while all the
|
||||
rest of them spin-wait for the one available buffer. Setting `NUM_PARALLEL` to
|
||||
the upper bound on the number of OpenMP runtimes that you can have in a process
|
||||
ensures that there are a sufficient number of buffer sets available.
|
|
@ -0,0 +1,56 @@
|
|||
# CI jobs
|
||||
|
||||
| Arch|Target CPU|OS|Build system|XComp to|C Compiler|Fortran Compiler|threading|DYN_ARCH|INT64|Libraries| CI Provider| CPU count|
|
||||
| ------------|---|---|-----------|-------------|----------|----------------|------|------------|----------|-----------|----------|-------|
|
||||
| x86_64 |Intel 32bit|Windows|CMAKE/VS2015| -|mingw6.3| - | pthreads | - | - | static | Appveyor| |
|
||||
| x86_64 |Intel |Windows|CMAKE/VS2015| -|mingw5.3| - | pthreads | - | - | static | Appveyor| |
|
||||
| x86_64 |Intel |Centos5|gmake | -|gcc 4.8 |gfortran| pthreads | + | - | both | Azure | |
|
||||
| x86_64 |SDE (SkylakeX)|Ubuntu| CMAKE| - | gcc | gfortran | pthreads | - | - | both | Azure | |
|
||||
| x86_64 |Haswell/ SkylakeX|Windows|CMAKE/VS2017| - | VS2017| - | | - | - | static | Azure | |
|
||||
| x86_64 | " | Windows|mingw32-make| - |gcc | gfortran | | list | - | both | Azure | |
|
||||
| x86_64 | " |Windows|CMAKE/Ninja| - |LLVM | - | | - | - | static | Azure | |
|
||||
| x86_64 | " |Windows|CMAKE/Ninja| - |LLVM | flang | | - | - | static | Azure | |
|
||||
| x86_64 | " |Windows|CMAKE/Ninja| - |VS2022| flang* | | - | - | static | Azure | |
|
||||
| x86_64 | " |macOS11|gmake | - | gcc-10|gfortran| OpenMP | + | - | both | Azure | |
|
||||
| x86_64 | " |macOS11|gmake | - | gcc-10|gfortran| none | - | - | both | Azure | |
|
||||
| x86_64 | " |macOS12|gmake | - | gcc-12|gfortran|pthreads| - | - | both | Azure | |
|
||||
| x86_64 | " |macOS11|gmake | - | llvm | - | OpenMP | + | - | both | Azure | |
|
||||
| x86_64 | " |macOS11|CMAKE | - | llvm | - | OpenMP | no_avx512 | - | static | Azure | |
|
||||
| x86_64 | " |macOS11|CMAKE | - | gcc-10| gfortran| pthreads | list | - | shared | Azure | |
|
||||
| x86_64 | " |macOS11|gmake | - | llvm | ifort | pthreads | - | - | both | Azure | |
|
||||
| x86_64 | " |macOS11|gmake |arm| AndroidNDK-llvm | - | | - | - | both | Azure | |
|
||||
| x86_64 | " |macOS11|gmake |arm64| XCode 12.4 | - | | + | - | both | Azure | |
|
||||
| x86_64 | " |macOS11|gmake |arm | XCode 12.4 | - | | + | - | both | Azure | |
|
||||
| x86_64 | " |Alpine Linux(musl)|gmake| - | gcc | gfortran | pthreads | + | - | both | Azure | |
|
||||
| arm64 |Apple M1 |OSX |CMAKE/XCode| - | LLVM | - | OpenMP | - | - | static | Cirrus | |
|
||||
| arm64 |Apple M1 |OSX |CMAKE/Xcode| - | LLVM | - | OpenMP | - | + | static | Cirrus | |
|
||||
| arm64 |Apple M1 |OSX |CMAKE/XCode|x86_64| LLVM| - | - | + | - | static | Cirrus | |
|
||||
| arm64 |Neoverse N1|Linux |gmake | - |gcc10.2| -| pthreads| - | - | both | Cirrus | |
|
||||
| arm64 |Neoverse N1|Linux |gmake | - |gcc10.2| -| pthreads| - | + | both | Cirrus | |
|
||||
| arm64 |Neoverse N1|Linux |gmake |- |gcc10.2| -| OpenMP | - | - | both |Cirrus | 8 |
|
||||
| x86_64 | Ryzen| FreeBSD |gmake | - | gcc12.2|gfortran| pthreads| - | - | both | Cirrus | |
|
||||
| x86_64 | Ryzen| FreeBSD |gmake | | gcc12.2|gfortran| pthreads| - | + | both | Cirrus | |
|
||||
| x86_64 |GENERIC |QEMU |gmake| mips64 | gcc | gfortran | pthreads | - | - | static | Github | |
|
||||
| x86_64 |SICORTEX |QEMU |gmake| mips64 | gcc | gfortran | pthreads | - | - | static | Github | |
|
||||
| x86_64 |I6400 |QEMU |gmake| mips64 | gcc | gfortran | pthreads | - | - | static | Github | |
|
||||
| x86_64 |P6600 |QEMU |gmake| mips64 | gcc | gfortran | pthreads | - | - | static | Github | |
|
||||
| x86_64 |I6500 |QEMU |gmake| mips64 | gcc | gfortran | pthreads | - | - | static | Github | |
|
||||
| x86_64 |Intel |Ubuntu |CMAKE| - | gcc-11.3 | gfortran | pthreads | + | - | static | Github | |
|
||||
| x86_64 |Intel |Ubuntu |gmake| - | gcc-11.3 | gfortran | pthreads | + | - | both | Github | |
|
||||
| x86_64 |Intel |Ubuntu |CMAKE| - | gcc-11.3 | flang-classic | pthreads | + | - | static | Github | |
|
||||
| x86_64 |Intel |Ubuntu |gmake| - | gcc-11.3 | flang-classic | pthreads | + | - | both | Github | |
|
||||
| x86_64 |Intel |macOS12 | CMAKE| - | AppleClang 14 | gfortran | pthreads | + | - | static | Github | |
|
||||
| x86_64 |Intel |macOS12 | gmake| - | AppleClang 14 | gfortran | pthreads | + | - | both | Github | |
|
||||
| x86_64 |Intel |Windows2022 | CMAKE/Ninja| - | mingw gcc 13 | gfortran | | + | - | static | Github | |
|
||||
| x86_64 |Intel |Windows2022 | CMAKE/Ninja| - | mingw gcc 13 | gfortran | | + | + | static | Github | |
|
||||
| x86_64 |Intel 32bit|Windows2022 | CMAKE/Ninja| - | mingw gcc 13 | gfortran | | + | - | static | Github | |
|
||||
| x86_64 |Intel |Windows2022 | CMAKE/Ninja| - | LLVM 16 | - | | + | - | static | Github | |
|
||||
| x86_64 |Intel | Windows2022 |CMAKE/Ninja| - | LLVM 16 | - | | + | + | static | Github | |
|
||||
| x86_64 |Intel | Windows2022 |CMAKE/Ninja| - | gcc 13| - | | + | - | static | Github | |
|
||||
| x86_64 |Intel| Ubuntu |gmake |mips64|gcc|gfortran|pthreads|+|-|both|Github| |
|
||||
| x86_64 |generic|Ubuntu |gmake |riscv64|gcc|gfortran|pthreads|-|-|both|Github| |
|
||||
| x86_64 |Intel|Ubuntu |gmake |mips32|gcc|gfortran|pthreads|-|-|both|Github | |
|
||||
| x86_64 |Intel|Ubuntu |gmake |ia64|gcc|gfortran|pthreads|-|-|both|Github| |
|
||||
| x86_64 |C910V|QEmu |gmake |riscv64|gcc|gfortran|pthreads|-|-|both|Github| |
|
||||
|power |pwr9| Ubuntu |gmake | - |gcc|gfortran|OpenMP|-|-|both|OSUOSL| |
|
||||
|zarch |z14 | Ubuntu |gmake | - |gcc|gfortran|OpenMP|-|-|both|OSUOSL| |
|
|
@ -0,0 +1,192 @@
|
|||
# Developer manual
|
||||
|
||||
## Source code layout
|
||||
|
||||
```
|
||||
OpenBLAS/
|
||||
├── benchmark Benchmark codes for BLAS
|
||||
├── cmake CMakefiles
|
||||
├── ctest Test codes for CBLAS interfaces
|
||||
├── driver Implemented in C
|
||||
│ ├── level2
|
||||
│ ├── level3
|
||||
│ ├── mapper
|
||||
│ └── others Memory management, threading, etc
|
||||
├── exports Generate shared library
|
||||
├── interface Implement BLAS and CBLAS interfaces (calling driver or kernel)
|
||||
│ ├── lapack
|
||||
│ └── netlib
|
||||
├── kernel Optimized assembly kernels for CPU architectures
|
||||
│ ├── alpha Original GotoBLAS kernels for DEC Alpha
|
||||
│ ├── arm ARMV5,V6,V7 kernels (including generic C codes used by other architectures)
|
||||
│ ├── arm64 ARMV8
|
||||
│ ├── generic General kernel codes written in plain C, parts used by many architectures.
|
||||
│ ├── ia64 Original GotoBLAS kernels for Intel Itanium
|
||||
│ ├── mips
|
||||
│ ├── mips64
|
||||
│ ├── power
|
||||
| ├── riscv64
|
||||
| ├── simd Common code for Universal Intrinsics, used by some x86_64 and arm64 kernels
|
||||
│ ├── sparc
|
||||
│ ├── x86
|
||||
│ ├── x86_64
|
||||
│ └── zarch
|
||||
├── lapack Optimized LAPACK codes (replacing those in regular LAPACK)
|
||||
│ ├── getf2
|
||||
│ ├── getrf
|
||||
│ ├── getrs
|
||||
│ ├── laswp
|
||||
│ ├── lauu2
|
||||
│ ├── lauum
|
||||
│ ├── potf2
|
||||
│ ├── potrf
|
||||
│ ├── trti2
|
||||
│ ├── trtri
|
||||
│ └── trtrs
|
||||
├── lapack-netlib LAPACK codes from netlib reference implementation
|
||||
├── reference BLAS Fortran reference implementation (unused)
|
||||
├── relapack Elmar Peise's recursive LAPACK (implemented on top of regular LAPACK)
|
||||
├── test Test codes for BLAS
|
||||
└── utest Regression test
|
||||
|
||||
```
|
||||
|
||||
A call tree for `dgemm` looks as follows:
|
||||
```
|
||||
interface/gemm.c
|
||||
│
|
||||
driver/level3/level3.c
|
||||
│
|
||||
gemm assembly kernels at kernel/
|
||||
```
|
||||
|
||||
To find the kernel currently used for a particular supported CPU, please check the corresponding `kernel/$(ARCH)/KERNEL.$(CPU)` file.
|
||||
|
||||
Here is an example for `kernel/x86_64/KERNEL.HASWELL`:
|
||||
```
|
||||
...
|
||||
DTRMMKERNEL = dtrmm_kernel_4x8_haswell.c
|
||||
DGEMMKERNEL = dgemm_kernel_4x8_haswell.S
|
||||
...
|
||||
```
|
||||
According to the above `KERNEL.HASWELL`, OpenBLAS Haswell dgemm kernel file is `dgemm_kernel_4x8_haswell.S`.
|
||||
|
||||
|
||||
## Optimizing GEMM for a given hardware
|
||||
|
||||
!!! abstract "Read the Goto paper to understand the algorithm"
|
||||
|
||||
Goto, Kazushige; van de Geijn, Robert A. (2008).
|
||||
["Anatomy of High-Performance Matrix Multiplication"](http://delivery.acm.org/10.1145/1360000/1356053/a12-goto.pdf?ip=155.68.162.54&id=1356053&acc=ACTIVE%20SERVICE&key=A79D83B43E50B5B8%2EF070BBE7E45C3F17%2E4D4702B0C3E38B35%2E4D4702B0C3E38B35&__acm__=1517932837_edfe766f1e295d9a7830812371e1d173).
|
||||
ACM Transactions on Mathematical Software 34 (3): Article 12
|
||||
|
||||
(The above link is available only to ACM members, but this and many related
|
||||
papers is also available on [the pages of van de Geijn's FLAME project](http://www.cs.utexas.edu/~flame/web/FLAMEPublications.html))
|
||||
|
||||
The `driver/level3/level3.c` is the implementation of Goto's algorithm.
|
||||
Meanwhile, you can look at `kernel/generic/gemmkernel_2x2.c`, which is a naive
|
||||
`2x2` register blocking `gemm` kernel in C. Then:
|
||||
|
||||
* Write optimized assembly kernels. Consider instruction pipeline, available registers, memory/cache access.
|
||||
* Tune cache block sizes (`Mc`, `Kc`, and `Nc`)
|
||||
|
||||
Note that not all of the CPU-specific parameters in `param.h` are actively used in algorithms.
|
||||
`DNUMOPT` only appears as a scale factor in profiling output of the level3 `syrk` interface code,
|
||||
while its counterpart `SNUMOPT` (aliased as `NUMOPT` in `common.h`) is not used anywhere at all.
|
||||
|
||||
`SYMV_P` is only used in the generic kernels for the `symv` and `chemv`/`zhemv` functions -
|
||||
at least some of those are usually overridden by CPU-specific implementations, so if you start
|
||||
by cloning the existing implementation for a related CPU you need to check its `KERNEL` file
|
||||
to see if tuning `SYMV_P` would have any effect at all.
|
||||
|
||||
`GEMV_UNROLL` is only used by some older x86-64 kernels, so not all sections in `param.h` define it.
|
||||
Similarly, not all of the CPU parameters like L2 or L3 cache sizes are necessarily used in current
|
||||
kernels for a given model - by all indications the CPU identification code was imported from some
|
||||
other project originally.
|
||||
|
||||
|
||||
## Running OpenBLAS tests
|
||||
|
||||
We use tests for Netlib BLAS, CBLAS, and LAPACK. In addition, we use
|
||||
OpenBLAS-specific regression tests. They can be run with Make:
|
||||
|
||||
* `make -C test` for BLAS tests
|
||||
* `make -C ctest` for CBLAS tests
|
||||
* `make -C utest` for OpenBLAS regression tests
|
||||
* `make lapack-test` for LAPACK tests
|
||||
|
||||
We also use the [BLAS-Tester](https://github.com/xianyi/BLAS-Tester) tests for regression testing.
|
||||
It is basically the ATLAS test suite adapted for building with OpenBLAS.
|
||||
|
||||
The project makes use of several Continuous Integration (CI) services
|
||||
conveniently interfaced with GitHub to automatically run tests on a number of
|
||||
platforms and build configurations.
|
||||
|
||||
Also note that the test suites included with "numerically heavy" projects like
|
||||
Julia, NumPy, SciPy, Octave or QuantumEspresso can be used for regression
|
||||
testing, when those projects are built such that they use OpenBLAS.
|
||||
|
||||
|
||||
## Benchmarking
|
||||
|
||||
A number of benchmarking methods are used by OpenBLAS:
|
||||
|
||||
- Several simple C benchmarks for performance testing individual BLAS functions
|
||||
are available in the `benchmark` folder. They can be run locally through the
|
||||
`Makefile` in that directory. And the `benchmark/scripts` subdirectory
|
||||
contains similar benchmarks that use OpenBLAS via NumPy, SciPy, Octave and R.
|
||||
- On pull requests, a representative set of functions is tested for performance
|
||||
regressions with Codspeed; results can be viewed at
|
||||
[https://codspeed.io/OpenMathLib/OpenBLAS](https://codspeed.io/OpenMathLib/OpenBLAS).
|
||||
- The [OpenMathLib/BLAS-Benchmarks](https://github.com/OpenMathLib/BLAS-Benchmarks) repository
|
||||
contains an [Airspeed Velocity](https://github.com/airspeed-velocity/asv/)-based benchmark
|
||||
suite which is run on several CPU architectures in cron jobs. Results are published
|
||||
to a dashboard: [http://www.openmathlib.org/BLAS-Benchmarks/](http://www.openmathlib.org/BLAS-Benchmarks/).
|
||||
|
||||
Benchmarking code for BLAS libraries, and specific performance analysis results, can be found
|
||||
in a number of places. For example:
|
||||
|
||||
* [MatlabJuliaMatrixOperationsBenchmark](https://github.com/RoyiAvital/MatlabJuliaMatrixOperationsBenchmark)
|
||||
(various matrix operations in Julia and Matlab)
|
||||
* [mmperf/mmperf](https://github.com/mmperf/mmperf/) (single-core matrix multiplication)
|
||||
|
||||
|
||||
## Adding autodetection support for a new revision or variant of a supported CPU
|
||||
|
||||
Especially relevant for x86-64, a new CPU model may be a "refresh" (die shrink and/or different number of cores) within an existing
|
||||
model family without significant changes to its instruction set (e.g., Intel Skylake and Kaby Lake still are fundamentally the same architecture as Haswell,
|
||||
low end Goldmont etc. are Nehalem). In this case, compilation with the appropriate older `TARGET` will already lead to a satisfactory build.
|
||||
|
||||
To achieve autodetection of the new model, its CPUID (or an equivalent identifier) needs to be added in the `cpuid_<architecture>.c`
|
||||
relevant for its general architecture, with the returned name for the new type set appropriately. For x86, which has the most complex
|
||||
`cpuid` file, there are two functions that need to be edited: `get_cpuname()` to return, e.g., `CPUTYPE_HASWELL` and `get_corename()` for the (broader)
|
||||
core family returning, e.g., `CORE_HASWELL`.[^1]
|
||||
|
||||
[^1]:
|
||||
This information ends up in the `Makefile.conf` and `config.h` files generated by `getarch`. Failure to
|
||||
set either will typically lead to a missing definition of the `GEMM_UNROLL` parameters later in the build,
|
||||
as `getarch_2nd` will be unable to find a matching parameter section in `param.h`.
|
||||
|
||||
For architectures where `DYNAMIC_ARCH` builds are supported, a similar but simpler code section for the corresponding
|
||||
runtime detection of the CPU exists in `driver/others/dynamic.c` (for x86), and `driver/others/dynamic_<arch>.c` for other architectures.
|
||||
Note that for x86 the CPUID is compared after splitting it into its family, extended family, model and extended model parts, so the single decimal
|
||||
number returned by Linux in `/proc/cpuinfo` for the model has to be converted back to hexadecimal before splitting into its constituent
|
||||
digits. For example, `142 == 8E` translates to extended model 8, model 14.
|
||||
|
||||
|
||||
## Adding dedicated support for a new CPU model
|
||||
|
||||
Usually it will be possible to start from an existing model, clone its `KERNEL` configuration file to the new name to use for this
|
||||
`TARGET` and eventually replace individual kernels with versions better suited for peculiarities of the new CPU model.
|
||||
In addition, it is necessary to add (or clone at first) the corresponding section of `GEMM_UNROLL` parameters in the top-level `param.h`,
|
||||
and possibly to add definitions such as `USE_TRMM` (governing whether `TRMM` functions use the respective `GEMM` kernel or a separate source file)
|
||||
to the `Makefile`s (and `CMakeLists.txt`) in the kernel directory. The new CPU name needs to be added to `TargetList.txt`,
|
||||
and the CPU auto-detection code used by the `getarch` helper program - contained in
|
||||
the `cpuid_<architecture>.c` file amended to include the CPUID (or equivalent) information processing required (see preceding section).
|
||||
|
||||
|
||||
## Adding support for an entirely new architecture
|
||||
|
||||
This endeavour is best started by cloning the entire support structure for 32-bit ARM, and within that the ARMv5 CPU in particular,
|
||||
as this is implemented through plain C kernels only. An example providing a convenient "shopping list" can be seen in pull request
|
||||
[#1526](https://github.com/OpenMathLib/OpenBLAS/pull/1526).
|
|
@ -1,11 +1,12 @@
|
|||
# Guidance for redistributing OpenBLAS
|
||||
# Redistributing OpenBLAS
|
||||
|
||||
*We note that this document contains recommendations only - packagers and other
|
||||
redistributors are in charge of how OpenBLAS is built and distributed in their
|
||||
systems, and may have good reasons to deviate from the guidance given on this
|
||||
page. These recommendations are aimed at general packaging systems, with a user
|
||||
base that typically is large, open source (or freely available at least), and
|
||||
doesn't behave uniformly or that the packager is directly connected with.*
|
||||
!!! note
|
||||
This document contains recommendations only - packagers and other
|
||||
redistributors are in charge of how OpenBLAS is built and distributed in their
|
||||
systems, and may have good reasons to deviate from the guidance given on this
|
||||
page. These recommendations are aimed at general packaging systems, with a user
|
||||
base that typically is large, open source (or freely available at least), and
|
||||
doesn't behave uniformly or that the packager is directly connected with.
|
||||
|
||||
OpenBLAS has a large number of build-time options which can be used to change
|
||||
how it behaves at runtime, how artifacts or symbols are named, etc. Variation
|
||||
|
@ -48,7 +49,7 @@ settings):
|
|||
to provide an ILP64 interface build as well, use a symbol suffix to avoid
|
||||
symbol name clashes (see the next section).
|
||||
|
||||
[^1] All major distributions do include LAPACK as of mid 2023 as far as we
|
||||
[^1]: All major distributions do include LAPACK as of mid 2023 as far as we
|
||||
know. Older versions of Arch Linux did not, and that was known to cause
|
||||
problems.
|
||||
|
||||
|
|
|
@ -0,0 +1,39 @@
|
|||
OpenBLAS for the most part contains implementations of the reference (Netlib)
|
||||
BLAS, CBLAS, LAPACK and LAPACKE interfaces. A few OpenBLAS-specific functions
|
||||
are also provided however, which mostly can be seen as "BLAS extensions".
|
||||
This page documents those non-standard APIs.
|
||||
|
||||
## BLAS-like extensions
|
||||
|
||||
| Routine | Data Types | Description |
|
||||
| ------------- |:------------- | :---------------|
|
||||
| ?axpby | s,d,c,z | like axpy with a multiplier for y |
|
||||
| ?gemm3m | c,z | gemm3m |
|
||||
| ?imatcopy | s,d,c,z | in-place transposition/copying |
|
||||
| ?omatcopy | s,d,c,z | out-of-place transposition/copying |
|
||||
| ?geadd | s,d,c,z | matrix add |
|
||||
| ?gemmt | s,d,c,z | gemm but only a triangular part updated|
|
||||
|
||||
|
||||
## bfloat16 functionality
|
||||
|
||||
BLAS-like and conversion functions for `bfloat16` (available when OpenBLAS was compiled with `BUILD_BFLOAT16=1`):
|
||||
|
||||
* `void cblas_sbstobf16` converts a float array to an array of bfloat16 values by rounding
|
||||
* `void cblas_sbdtobf16` converts a double array to an array of bfloat16 values by rounding
|
||||
* `void cblas_sbf16tos` converts a bfloat16 array to an array of floats
|
||||
* `void cblas_dbf16tod` converts a bfloat16 array to an array of doubles
|
||||
* `float cblas_sbdot` computes the dot product of two bfloat16 arrays
|
||||
* `void cblas_sbgemv` performs the matrix-vector operations of GEMV with the input matrix and X vector as bfloat16
|
||||
* `void cblas_sbgemm` performs the matrix-matrix operations of GEMM with both input arrays containing bfloat16
|
||||
|
||||
## Utility functions
|
||||
|
||||
* `openblas_get_num_threads`
|
||||
* `openblas_set_num_threads`
|
||||
* `int openblas_get_num_procs(void)` returns the number of processors available on the system (may include "hyperthreading cores")
|
||||
* `int openblas_get_parallel(void)` returns 0 for sequential use, 1 for platform-based threading and 2 for OpenMP-based threading
|
||||
* `char * openblas_get_config()` returns the options OpenBLAS was built with, something like `NO_LAPACKE DYNAMIC_ARCH NO_AFFINITY Haswell`
|
||||
* `int openblas_set_affinity(int thread_index, size_t cpusetsize, cpu_set_t *cpuset)` sets the CPU affinity mask of the given thread
|
||||
to the provided cpuset. Only available on Linux, with semantics identical to `pthread_setaffinity_np`.
|
||||
|
|
@ -0,0 +1,345 @@
|
|||
---
|
||||
title: FAQ
|
||||
---
|
||||
|
||||
<!-- Note: Using title metadata instead of markdown header to avoid "FAQ" being present in the generated TOC. -->
|
||||
|
||||
[TOC]
|
||||
|
||||
## General questions
|
||||
|
||||
### <a name="whatblas"></a>What is BLAS? Why is it important?
|
||||
|
||||
[BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) stands for Basic Linear Algebra Subprograms. BLAS provides standard interfaces for [linear algebra](https://en.wikipedia.org/wiki/Linear_algebra), including BLAS1 (vector-vector operations), BLAS2 (matrix-vector operations), and BLAS3 (matrix-matrix operations). In general, BLAS is the computational kernel ("the bottom of the food chain") in linear algebra or scientific applications. Thus, if BLAS implementation is highly optimized, the whole application can get substantial benefit.
|
||||
|
||||
### <a name="whatsinblas"></a>What functions are there and how can I call them from my C code?
|
||||
|
||||
As BLAS is a standardized interface, you can refer to the documentation of its reference implementation at [netlib.org](http://netlib.org/blas/index.html#_blas_routines). Calls from C go through its CBLAS interface,
|
||||
so your code will need to include the provided cblas.h in addition to linking with -lopenblas.
|
||||
A single-precision matrix multiplication will look like
|
||||
```
|
||||
#include <cblas.h>
|
||||
...
|
||||
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, M, N, K, 1.0, A, K, B, N, 0.0, result, N);
|
||||
```
|
||||
where M,N,K are the dimensions of your data - see https://petewarden.files.wordpress.com/2015/04/gemm_corrected.png
|
||||
(This image is part of an article on GEMM in the context of deep learning that is well worth reading in full -
|
||||
https://petewarden.com/2015/04/20/why-gemm-is-at-the-heart-of-deep-learning/)
|
||||
|
||||
### <a name="what"></a>What is OpenBLAS? Why did you create this project?
|
||||
|
||||
OpenBLAS is an open source BLAS library forked from the GotoBLAS2-1.13 BSD version. Since Mr. Kazushige Goto left TACC, GotoBLAS is no longer being maintained. Thus, we created this project to continue developing OpenBLAS/GotoBLAS.
|
||||
|
||||
### <a name="gotoblas"></a>What's the difference between OpenBLAS and GotoBLAS?
|
||||
|
||||
In OpenBLAS 0.2.0, we optimized level 3 BLAS on the Intel Sandy Bridge 64-bit OS. We obtained a performance comparable with that of Intel MKL.
|
||||
|
||||
We optimized level 3 BLAS performance on the [ICT Loongson-3A](http://en.wikipedia.org/wiki/Loongson) CPU. It outperformed GotoBLAS by 135% in a single thread and 120% in 4 threads.
|
||||
|
||||
We fixed some GotoBLAS bugs including a SEGFAULT bug on the new Linux kernel, MingW32/64 bugs, and a ztrmm computing error bug on Intel Nehalem.
|
||||
|
||||
We also added some minor features, e.g. supporting "make install", compiling without LAPACK and upgrading the LAPACK version to 3.4.2.
|
||||
|
||||
You can find the full list of modifications in Changelog.txt.
|
||||
|
||||
### <a name="gemmpqr"></a>Where do parameters GEMM_P, GEMM_Q, GEMM_R come from?
|
||||
|
||||
The detailed explanation is probably in the original publication authored by Kazushige Goto - Goto, Kazushige; van de Geijn, Robert A; Anatomy of high-performance matrix multiplication. ACM Transactions on Mathematical Software (TOMS). Volume 34 Issue 3, May 2008
|
||||
While this article is paywalled and too old for preprints to be available on arxiv.org, more recent
|
||||
publications like https://arxiv.org/pdf/1609.00076 contain at least a brief description of the algorithm.
|
||||
In practice, the values are derived by experimentation to yield the block sizes that give the highest performance. A general rule of thumb for selecting a starting point seems to be that PxQ is about half the size of L2 cache.
|
||||
|
||||
### <a name="reportbug"></a>How can I report a bug?
|
||||
|
||||
Please file an issue at this [issue page](https://github.com/xianyi/OpenBLAS/issues) or send mail to the [OpenBLAS mailing list](https://groups.google.com/forum/#!forum/openblas-users).
|
||||
|
||||
Please provide the following information: CPU, OS, compiler, and OpenBLAS compiling flags (Makefile.rule). In addition, please describe how to reproduce this bug.
|
||||
|
||||
### <a name="publication"></a>How to reference OpenBLAS.
|
||||
|
||||
You can reference our papers in [this page](about.md#publications). Alternatively, you can cite the OpenBLAS homepage http://www.openblas.net.
|
||||
|
||||
### <a name="multi-threaded"></a>How can I use OpenBLAS in multi-threaded applications?
|
||||
|
||||
If your application is already multi-threaded, it will conflict with OpenBLAS multi-threading. Thus, you must set OpenBLAS to use a single thread, as follows.
|
||||
|
||||
* export OPENBLAS_NUM_THREADS=1 in the environment variables.
|
||||
Or
|
||||
* Call openblas_set_num_threads(1) in the application on runtime.
|
||||
Or
|
||||
* Build OpenBLAS single thread version, e.g. make USE_THREAD=0 USE_LOCKING=1 (see comment below)
|
||||
|
||||
If the application is parallelized by OpenMP, please build OpenBLAS with USE_OPENMP=1
|
||||
|
||||
With the increased availability of fast multicore hardware it has unfortunately become clear that the thread management provided by OpenMP is not sufficient to prevent race conditions when OpenBLAS was built single-threaded by USE_THREAD=0 and there are concurrent calls from multiple threads to OpenBLAS functions. In this case,
|
||||
it is vital to also specify USE_LOCKING=1 (introduced with OpenBLAS 0.3.7).
|
||||
|
||||
### <a name="sparse"></a>Does OpenBLAS support sparse matrices and/or vectors ?
|
||||
|
||||
OpenBLAS implements only the standard (dense) BLAS and LAPACK functions with a select few extensions popularized by Intel's MKL. Some
|
||||
cases can probably be made to work using e.g. GEMV or AXPBY, in general using a dedicated package like SuiteSparse (which can make use of OpenBLAS or equivalent for standard operations) is recommended.
|
||||
|
||||
### <a name="recent_hardware"></a>What support is there for recent PC hardware ? What about GPU ?
|
||||
|
||||
As OpenBLAS is a volunteer project, it can take some time for the combination of a capable developer,
|
||||
free time, and particular hardware to come along, even for relatively common processors. Starting from 0.3.1, support
|
||||
is being added for AVX 512 (TARGET=SKYLAKEX), requiring a compiler that is capable of handling avx512 intrinsics.
|
||||
While AMD Zen processors should be autodetected by the build system, as of 0.3.2 they are still handled exactly
|
||||
like Intel Haswell. There once was an effort to build an OpenCL implementation that one can still find at https://github.com/xianyi/clOpenBLAS , but work on this stopped in 2015.
|
||||
|
||||
### <a name="sandybridge_perf"></a>How about the level 3 BLAS performance on Intel Sandy Bridge?
|
||||
|
||||
We obtained a performance comparable with Intel MKL that actually outperformed Intel MKL in some cases.
|
||||
Here is the result of the DGEMM subroutine's performance on Intel Core i5-2500K Windows 7 SP1 64-bit:
|
||||

|
||||
|
||||
<hr noshade="noshade">
|
||||
|
||||
## OS and Compiler
|
||||
|
||||
### <a name="MSVC"></a>How can I call an OpenBLAS function in Microsoft Visual Studio?
|
||||
|
||||
Please read [this page](install.md#visual-studio).
|
||||
|
||||
### <a name="C99_complex_number"></a>How can I use CBLAS and LAPACKE without C99 complex number support (e.g. in Visual Studio)?
|
||||
|
||||
Zaheer has fixed this bug. You can now use the structure instead of C99 complex numbers. Please read [this issue page](http://github.com/xianyi/OpenBLAS/issues/95) for details.
|
||||
|
||||
[This issue](https://github.com/xianyi/OpenBLAS/issues/305) is for using LAPACKE in Visual Studio.
|
||||
|
||||
### <a name="Linux_SEGFAULT"></a>I get a SEGFAULT with multi-threading on Linux. What's wrong?
|
||||
|
||||
This may be related to a bug in the Linux kernel 2.6.32 (?). Try applying the patch segfaults.patch to disable mbind using
|
||||
|
||||
patch < segfaults.patch
|
||||
|
||||
and see if the crashes persist. Note that this patch will lead to many compiler warnings.
|
||||
|
||||
### <a name="xgetbv"></a>When I make the library, there is no such instruction: `xgetbv' error. What's wrong?
|
||||
|
||||
Please use GCC 4.4 and later version. This version supports xgetbv instruction. If you use the library for Sandy Bridge with AVX instructions, you should use GCC 4.6 and later version.
|
||||
|
||||
On Mac OS X, please use Clang 3.1 and later version. For example, make CC=clang
|
||||
|
||||
For the compatibility with old compilers (GCC < 4.4), you can enable NO_AVX flag. For example, make NO_AVX=1
|
||||
|
||||
### <a name="patch_missing"></a>My build fails due to the linker error "multiple definition of `dlamc3_'". What is the problem?
|
||||
|
||||
This linker error occurs if GNU patch is missing or if our patch for LAPACK fails to apply.
|
||||
|
||||
Background: OpenBLAS implements optimized versions of some LAPACK functions, so we need to disable the reference versions. If this process fails we end with duplicated implementations of the same function.
|
||||
|
||||
### <a name="xeigtst"></a>My build worked fine and passed all tests, but running `make lapack-test` ends with segfaults
|
||||
|
||||
Some of the LAPACK tests, notably in xeigtstz, try to allocate around 10MB on the stack. You may need to use
|
||||
`ulimit -s` to change the default limits on your system to allow this.
|
||||
|
||||
### <a name="no_affinity"></a>How could I disable OpenBLAS threading affinity on runtime?
|
||||
|
||||
You can define the OPENBLAS_MAIN_FREE or GOTOBLAS_MAIN_FREE environment variable to disable threading affinity on runtime. For example, before the running,
|
||||
```
|
||||
export OPENBLAS_MAIN_FREE=1
|
||||
```
|
||||
|
||||
Alternatively, you can disable affinity feature with enabling NO_AFFINITY=1 in Makefile.rule.
|
||||
|
||||
### <a name="static_link"></a>How to solve undefined reference errors when statically linking against libopenblas.a
|
||||
|
||||
On Linux, if OpenBLAS was compiled with threading support (`USE_THREAD=1` by default), custom programs statically linked against `libopenblas.a` should also link to the pthread library e.g.:
|
||||
|
||||
```
|
||||
gcc -static -I/opt/OpenBLAS/include -L/opt/OpenBLAS/lib -o my_program my_program.c -lopenblas -lpthread
|
||||
```
|
||||
|
||||
Failing to add the `-lpthread` flag will cause errors such as:
|
||||
|
||||
```
|
||||
/opt/OpenBLAS/libopenblas.a(memory.o): In function `_touch_memory':
|
||||
memory.c:(.text+0x15): undefined reference to `pthread_mutex_lock'
|
||||
memory.c:(.text+0x41): undefined reference to `pthread_mutex_unlock'
|
||||
/opt/OpenBLAS/libopenblas.a(memory.o): In function `openblas_fork_handler':
|
||||
memory.c:(.text+0x440): undefined reference to `pthread_atfork'
|
||||
/opt/OpenBLAS/libopenblas.a(memory.o): In function `blas_memory_alloc':
|
||||
memory.c:(.text+0x7a5): undefined reference to `pthread_mutex_lock'
|
||||
memory.c:(.text+0x825): undefined reference to `pthread_mutex_unlock'
|
||||
/opt/OpenBLAS/libopenblas.a(memory.o): In function `blas_shutdown':
|
||||
memory.c:(.text+0x9e1): undefined reference to `pthread_mutex_lock'
|
||||
memory.c:(.text+0xa6e): undefined reference to `pthread_mutex_unlock'
|
||||
/opt/OpenBLAS/libopenblas.a(blas_server.o): In function `blas_thread_server':
|
||||
blas_server.c:(.text+0x273): undefined reference to `pthread_mutex_lock'
|
||||
blas_server.c:(.text+0x287): undefined reference to `pthread_mutex_unlock'
|
||||
blas_server.c:(.text+0x33f): undefined reference to `pthread_cond_wait'
|
||||
/opt/OpenBLAS/libopenblas.a(blas_server.o): In function `blas_thread_init':
|
||||
blas_server.c:(.text+0x416): undefined reference to `pthread_mutex_lock'
|
||||
blas_server.c:(.text+0x4be): undefined reference to `pthread_mutex_init'
|
||||
blas_server.c:(.text+0x4ca): undefined reference to `pthread_cond_init'
|
||||
blas_server.c:(.text+0x4e0): undefined reference to `pthread_create'
|
||||
blas_server.c:(.text+0x50f): undefined reference to `pthread_mutex_unlock'
|
||||
...
|
||||
```
|
||||
|
||||
The `-lpthread` is not required when linking dynamically against `libopenblas.so.0`.
|
||||
|
||||
### <a name="binutils"></a>Building OpenBLAS for Haswell or Dynamic Arch on RHEL-6, CentOS-6, Rocks-6.1,Scientific Linux 6
|
||||
|
||||
Minimum requirement to actually run AVX2-enabled software like OpenBLAS is kernel-2.6.32-358, shipped with EL6U4 in 2013
|
||||
|
||||
The `binutils` package from RHEL6 does not know the instruction `vpermpd` or any other AVX2 instruction. You can download a newer `binutils` package from Enterprise Linux software collections, following instructions here: <br>
|
||||
https://www.softwarecollections.org/en/scls/rhscl/devtoolset-3/ <br>
|
||||
After configuring repository you need to install devtoolset-?-binutils to get later usable binutils package
|
||||
```
|
||||
$ yum search devtoolset-\?-binutils
|
||||
$ sudo yum install devtoolset-3-binutils
|
||||
```
|
||||
once packages are installed check the correct name for SCL redirection set to enable new version
|
||||
```
|
||||
$ scl --list
|
||||
devtoolset-3
|
||||
rh-python35
|
||||
```
|
||||
Now just prefix your build commands with respective redirection:
|
||||
```
|
||||
$ scl enable devtoolset-3 -- make DYNAMIC_ARCH=1
|
||||
```
|
||||
AVX-512 (SKYLAKEX) support requires devtoolset-8-gcc-gfortran (which exceeds formal requirement for AVX-512 because of packaging issues in earlier packages) which dependency-installs respective binutils and gcc or later and kernel 2.6.32-696 aka 6U9 or 3.10.0-327 aka 7U2 or later to run. In absence of abovementioned toolset OpenBLAS will fall back to AVX2 instructions in place of AVX512 sacrificing some performance on SKYLAKE-X platform.
|
||||
|
||||
### <a name="qemu"></a>Building OpenBLAS in QEMU/KVM/XEN
|
||||
|
||||
By default, QEMU reports the CPU as "QEMU Virtual CPU version 2.2.0", which shares CPUID with existing 32bit CPU even in 64bit virtual machine, and OpenBLAS recognizes it as PENTIUM2. Depending on the exact combination of CPU features the hypervisor chooses to expose, this may not correspond to any CPU that exists, and OpenBLAS will error when trying to build. To fix this, pass `-cpu host` or `-cpu passthrough` to QEMU, or another CPU model.
|
||||
Similarly, the XEN hypervisor may not pass through all features of the host cpu while reporting the cpu type itself correctly, which can
|
||||
lead to compiler error messages about an "ABI change" when compiling AVX512 code. Again changing the Xen configuration by running e.g.
|
||||
"xen-cmdline --set-xen cpuid=avx512" should get around this (as would building OpenBLAS for an older cpu lacking that particular feature, e.g. TARGET=HASWELL)
|
||||
|
||||
### <a name="ppcxl"></a>Building OpenBLAS on POWER fails with IBM XL
|
||||
|
||||
Trying to compile OpenBLAS with IBM XL ends with error messages about unknown register names
|
||||
like "vs32". Working around these by using known alternate names for the vector registers only leads to another assembler error about unsupported constraints. This is a known deficiency in the IBM compiler at least up to and including 16.1.0 (and in the POWER version of clang, from which it is derived) - use gcc instead. (See issues #1078
|
||||
and #1699 for related discussions)
|
||||
|
||||
### <a name="debianlts"></a>Replacing system BLAS/updating APT OpenBLAS in Mint/Ubuntu/Debian
|
||||
|
||||
Debian and Ubuntu LTS versions provide OpenBLAS package which is not updated after initial release, and under circumstances one might want to use more recent version of OpenBLAS e.g. to get support for newer CPUs
|
||||
|
||||
Ubuntu and Debian provides 'alternatives' mechanism to comfortably replace BLAS and LAPACK libraries systemwide.
|
||||
|
||||
After successful build of OpenBLAS (with DYNAMIC_ARCH set to 1)
|
||||
|
||||
```
|
||||
$ make clean
|
||||
$ make DYNAMIC_ARCH=1
|
||||
$ sudo make DYNAMIC_ARCH=1 install
|
||||
```
|
||||
One can redirect BLAS and LAPACK alternatives to point to source-built OpenBLAS
|
||||
First you have to install NetLib LAPACK reference implementation (to have alternatives to replace):
|
||||
```
|
||||
$ sudo apt install libblas-dev liblapack-dev
|
||||
```
|
||||
Then we can set alternative to our freshly-built library:
|
||||
```
|
||||
$ sudo update-alternatives --install /usr/lib/libblas.so.3 libblas.so.3 /opt/OpenBLAS/lib/libopenblas.so.0 41 \
|
||||
--slave /usr/lib/liblapack.so.3 liblapack.so.3 /opt/OpenBLAS/lib/libopenblas.so.0
|
||||
```
|
||||
Or remove redirection and switch back to APT-provided BLAS implementation order:
|
||||
```
|
||||
$ sudo update-alternatives --remove libblas.so.3 /opt/OpenBLAS/lib/libopenblas.so.0
|
||||
```
|
||||
In recent versions of the distributions, the installation path for the libraries has been changed to include the name of the host architecture, like /usr/lib/x86_64-linux-gnu/blas/libblas.so.3 or libblas.so.3.x86_64-linux-gnu. Use ```$ update-alternatives --display libblas.so.3```
|
||||
to find out what layout your system has.
|
||||
|
||||
### <a name="findblas"></a>I built OpenBLAS for use with some other software, but that software cannot find it
|
||||
|
||||
OpenBLAS installs as a single library named libopenblas.so, while some programs may be searching for a separate libblas.so and liblapack.so so you may need to create appropriate symbolic links (`ln -s libopenblas.so libblas.so;
|
||||
ln -s libopenblas.so liblapack.so`) or copies. Also make sure that the installation location (usually /opt/OpenBLAS/lib or /usr/local/lib) is among the library search paths of your system.
|
||||
|
||||
### <a name="installincludes"></a>I included cblas.h in my program, but the compiler complains about a missing common.h or functions from it
|
||||
|
||||
You probably tried to include a cblas.h that you simply copied from the OpenBLAS source, instead you need to run
|
||||
`make install` after building OpenBLAS and then use the modified cblas.h that this step builds in the installation
|
||||
path (usually either /usr/local/include, /opt/OpenBLAS/include or whatever you specified as PREFIX= on the `make install`)
|
||||
|
||||
### <a name="boundscheck"></a>Compiling OpenBLAS with gcc's -fbounds-check actually triggers aborts in programs
|
||||
|
||||
This is due to different interpretations of the (informal) standard for passing characters as arguments between C and FORTRAN functions. As the method for storing text differs in the two languages, when C calls Fortran the text length is passed as an "invisible" additional parameter.
|
||||
Historically, this has not been required when the text is just a single character, so older code like the Reference-LAPACK bundled with OpenBLAS
|
||||
does not do it. Recently gcc's checking has changed to require it, but there is no consensus yet if and how the existing LAPACK (and many other codebases) should adapt. (And for actual compilation, gcc has mostly backtracked and provided compatibility options - hence the default build settings in the OpenBLAS Makefiles add -fno-optimize-sibling-calls to the gfortran options to prevent miscompilation with "affected" versions. See ticket 2154 in the issue tracker for more details and links)
|
||||
<hr noshade="noshade">
|
||||
|
||||
### <a name="newcpu"></a>Build fails with lots of errors about undefined ?GEMM_UNROLL_M
|
||||
|
||||
Your cpu is apparently too new to be recognized by the build scripts, so they failed to assign appropriate parameters for the block algorithm.
|
||||
Do a `make clean` and try again with TARGET set to one of the cpu models listed in `TargetList.txt` - for x86_64 this will usually be HASWELL.
|
||||
|
||||
### <a name="cmakeosx"></a>CMAKE/OSX: Build fails with 'argument list too long'
|
||||
|
||||
This is a limitation in the maximum length of a command on OSX, coupled with how CMAKE works. You should be able to work around this
|
||||
by adding the option `-DCMAKE_Fortran_USE_RESPONSE_FILE_FOR_OBJECTS=1` to your CMAKE arguments.
|
||||
|
||||
### <a name="xhyve"></a>Likely problems with AVX2 support in Docker Desktop for OSX
|
||||
|
||||
There have been a few reports of wrong calculation results and build-time test failures when building in a container environment managed by the OSX version of Docker Desktop, which uses the xhyve virtualizer underneath. Judging from these reports, AVX2 support in xhyve appears to be subtly broken but a corresponding ticket in the xhyve issue tracker has not drawn any reaction or comment since 2019. Therefore it is strongly recommended to build OpenBLAS with the NO_AVX2=1 option when inside a container under (or for later use with) the Docker Desktop environment on Intel-based Apple hardware.
|
||||
|
||||
## Usage
|
||||
|
||||
### <a name="allocmorebuffers"></a>Program is Terminated. Because you tried to allocate too many memory regions
|
||||
|
||||
In OpenBLAS, we manage a pool of memory buffers and allocate the number of buffers as follows.
|
||||
```
|
||||
#define NUM_BUFFERS (MAX_CPU_NUMBER * 2)
|
||||
```
|
||||
This error indicates that the program exceeded the number of buffers.
|
||||
|
||||
Please build OpenBLAS with larger `NUM_THREADS`. For example, `make NUM_THREADS=32` or `make NUM_THREADS=64`.
|
||||
In `Makefile.system`, we will set `MAX_CPU_NUMBER=NUM_THREADS`.
|
||||
|
||||
### <a name="choose_target_dynamic"></a>How to choose TARGET manually at runtime when compiled with DYNAMIC_ARCH
|
||||
|
||||
The environment variable that controls the kernel selection is `OPENBLAS_CORETYPE` (see `driver/others/dynamic.c`).
|
||||
e.g. `export OPENBLAS_CORETYPE=Haswell`. And the function `char* openblas_get_corename()` returns the used target.
|
||||
|
||||
### <a name="missgoto"></a>After updating the installed OpenBLAS, a program complains about "undefined symbol gotoblas"
|
||||
|
||||
This symbol gets defined only when OpenBLAS is built with "make DYNAMIC_ARCH=1" (which is what distributors will choose to ensure support for more than just one CPU type).
|
||||
|
||||
### <a name="buildoptions"></a>How can I find out at runtime what options the library was built with ?
|
||||
|
||||
OpenBLAS has two utility functions that may come in here:
|
||||
|
||||
openblas_get_parallel() will return 0 for a single-threaded library, 1 if multithreading without OpenMP, 2 if built with USE_OPENMP=1
|
||||
|
||||
openblas_get_config() will return a string containing settings such as USE64BITINT or DYNAMIC_ARCH that were active at build time, as well as the target cpu (or in case of a dynamic_arch build, the currently detected one).
|
||||
|
||||
### <a name="wronglibrary"></a>After making OpenBLAS, I find that the static library is multithreaded, but the dynamic one is not ?
|
||||
|
||||
The shared OpenBLAS library you built is probably working fine as well, but your program may be picking up a different (probably single-threaded) version from one of the standard system paths like /usr/lib on startup.
|
||||
Running `ldd /path/to/your/program` will tell you which library the linkage loader will actually use.
|
||||
|
||||
Specifying the "correct" library location with the `-L` flag (like `-L /opt/OpenBLAS/lib`) when linking your program only defines which library will be used to see if all symbols _can_ be resolved; you will need to add an rpath entry to the binary (using `-Wl,-rpath=/opt/OpenBLAS/lib`) to make it request searching that location. Alternatively, remove the "wrong old" library (if you can), or set LD_LIBRARY_PATH to the desired location before running your program.
|
||||
|
||||
### <a name="cudahpl"></a>I want to use OpenBLAS with CUDA in the HPL 2.3 benchmark code but it keeps looking for Intel MKL
|
||||
|
||||
You need to edit file src/cuda/cuda_dgemm.c in the NVIDIA version of HPL, change the "handle2" and "handle" dlopen calls to use libopenblas.so instead of libmkl_intel_lp64.so, and add a trailing underscore in the dlsym lines for dgemm_mkl and dtrsm_mkl (like `dgemm_mkl = (void(*)())dlsym(handle, "dgemm_");`)
|
||||
|
||||
### <a name="cpusoffline"></a>Multithreaded OpenBLAS runs no faster or is even slower than singlethreaded on my ARMV7 board
|
||||
|
||||
The power saving mechanisms of your board may have shut down some cores, making them invisible to OpenBLAS in its startup phase. Try bringing them online before starting your calculation.
|
||||
|
||||
### <a name="biglittle"></a>Speed varies wildly between individual runs on a typical ARMV8 smartphone processor
|
||||
|
||||
Check the technical specifications, it could be that the SoC combines fast and slow cpus and threads can end up on either. In that case, binding the process to specific cores e.g. by setting `OMP_PLACES=cores` may help. (You may need to experiment with OpenMP options, it has been reported that using `OMP_NUM_THREADS=2 OMP_PLACES=cores` caused
|
||||
a huge drop in performance on a 4+4 core chip while `OMP_NUM_THREADS=2 OMP_PLACES=cores(2)` worked as intended - as did OMP_PLACES=cores with 4 threads)
|
||||
|
||||
### <a name="numthreads"></a>I cannot get OpenBLAS to use more than a small subset of available cores on a big system
|
||||
|
||||
Multithreading support in OpenBLAS requires the use of internal buffers for sharing partial results, the number and size of which is defined at compile time. Unless you specify NUM_THREADS in your make or cmake command, the build scripts try to autodetect the number of cores available in your build host to size the library to match. This unfortunately means that if you move the resulting binary from a small "front-end node" to a larger "compute node" later, it will still be limited to the hardware capabilities of the original system. The solution is to set NUM_THREADS to a number big enough to encompass the biggest systems you expect to run the binary on - at runtime, it will scale down the maximum number of threads it uses to match the number of cores physically available.
|
||||
|
||||
### <a name="ELFoffset"></a>Getting "ELF load command address/offset not properly aligned" when loading libopenblas.so
|
||||
|
||||
If you get a message "error while loading shared libraries: libopenblas.so.0: ELF load command address/offset not properly aligned" when starting a program that is (dynamically) linked to OpenBLAS, this is very likely due to a bug in the GNU linker (ld) that is part of the
|
||||
GNU binutils package. This error was specifically observed on older versions of Ubuntu Linux updated with the (at the time) most recent binutils version 2.38, but an internet search turned up sporadic reports involving various other libraries dating back several years. A bugfix was created by the binutils developers and should be available in later versions of binutils.(See issue 3708 for details)
|
||||
|
||||
#### <a name="OpenMP"></a>Using OpenBLAS with OpenMP
|
||||
|
||||
OpenMP provides its own locking mechanisms, so when your code makes BLAS/LAPACK calls from inside OpenMP parallel regions it is imperative
|
||||
that you use an OpenBLAS that is built with USE_OPENMP=1, as otherwise deadlocks might occur. Furthermore, OpenBLAS will automatically restrict itself to using only a single thread when called from an OpenMP parallel region. When it is certain that calls will only occur
|
||||
from the main thread of your program (i.e. outside of omp parallel constructs), a standard pthreads build of OpenBLAS can be used as well. In that case it may be useful to tune the linger behaviour of idle threads in both your OpenMP program (e.g. set OMP_WAIT_POLICY=passive) and OpenBLAS (by redefining the THREAD_TIMEOUT variable at build time, or setting the environment variable OPENBLAS_THREAD_TIMEOUT smaller than the default 26) so that the two alternating thread pools do not unnecessarily hog the cpu during the handover.
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
## Introduction
|
||||
|
||||
OpenBLAS is an optimized Basic Linear Algebra Subprograms (BLAS) library based on [GotoBLAS2](https://www.tacc.utexas.edu/research-development/tacc-software/gotoblas2) 1.13 BSD version.
|
||||
|
||||
OpenBLAS implements low-level routines for performing linear algebra operations such as vector addition, scalar multiplication, dot products, linear combinations, and matrix multiplication. OpenBLAS makes these routines available on multiple platforms, covering server, desktop and mobile operating systems, as well as different architectures including x86, ARM, MIPS, PPC, RISC-V, and zarch.
|
||||
|
||||
The old GotoBLAS documentation can be found [on GitHub](https://github.com/OpenMathLib/OpenBLAS/blob/develop/GotoBLAS_01Readme.txt).
|
||||
|
||||
## License
|
||||
|
||||
OpenBLAS is licensed under the 3-clause BSD license. The full license can be found [on GitHub](https://github.com/OpenMathLib/OpenBLAS/blob/develop/LICENSE).
|
|
@ -0,0 +1,777 @@
|
|||
# Install OpenBLAS
|
||||
|
||||
OpenBLAS can be installed through package managers or from source. If you only
|
||||
want to use OpenBLAS rather than make changes to it, we recommend installing a
|
||||
pre-built binary package with your package manager of choice.
|
||||
|
||||
This page contains an overview of installing with package managers as well as
|
||||
from source. For the latter, see [further down on this page](#building-from-source).
|
||||
|
||||
|
||||
## Installing with a package manager
|
||||
|
||||
!!! note
|
||||
Almost every package manager provides OpenBLAS packages; the list on this
|
||||
page is not comprehensive. If your package manager of choice isn't shown
|
||||
here, please search its package database for `openblas` or `libopenblas`.
|
||||
|
||||
|
||||
### Linux
|
||||
|
||||
On Linux, OpenBLAS can be installed with the system package manager, or with a
|
||||
package manager like [Conda](https://docs.conda.io/en/latest/)
|
||||
(or alternative package managers for the conda-forge ecosystem, like
|
||||
[Mamba](https://mamba.readthedocs.io/en/latest/),
|
||||
[Micromamba](https://mamba.readthedocs.io/en/latest/user_guide/micromamba.html),
|
||||
or [Pixi](https://pixi.sh/latest/#windows-installer)),
|
||||
[Spack](https://spack.io/), or [Nix](https://nixos.org/). For the latter set of
|
||||
tools, the package name in all cases is `openblas`. Since package management in
|
||||
quite a few of these tools is declarative (i.e., managed by adding `openblas`
|
||||
to a metadata file describing the dependencies for your project or
|
||||
environment), we won't attempt to give detailed instructions for these tools here.
|
||||
|
||||
Linux distributions typically split OpenBLAS up in two packages: one containing
|
||||
the library itself (typically named `openblas` or `libopenblas`), and one containing headers,
|
||||
pkg-config and CMake files (typically named the same as the package for the
|
||||
library with `-dev` or `-devel` appended; e.g., `openblas-devel`). Please keep
|
||||
in mind that if you want to install OpenBLAS in order to use it directly in
|
||||
your own project, you will need to install both of those packages.
|
||||
|
||||
Distro-specific installation commands:
|
||||
|
||||
=== "Debian/Ubuntu/Mint/Kali"
|
||||
|
||||
```bash
|
||||
$ sudo apt update
|
||||
$ sudo apt install libopenblas-dev
|
||||
```
|
||||
OpenBLAS can be configured as the default BLAS through the `update-alternatives` mechanism:
|
||||
|
||||
```bash
|
||||
$ sudo update-alternatives --config libblas.so.3
|
||||
```
|
||||
|
||||
=== "openSUSE/SLE"
|
||||
|
||||
```bash
|
||||
$ sudo zypper refresh
|
||||
$ sudo zypper install openblas-devel
|
||||
```
|
||||
|
||||
OpenBLAS can be configured as the default BLAS through the `update-alternatives` mechanism:
|
||||
```bash
|
||||
$ sudo update-alternatives --config libblas.so.3
|
||||
```
|
||||
|
||||
=== "Fedora/CentOS/RHEL"
|
||||
|
||||
```bash
|
||||
$ dnf check-update
|
||||
$ dnf install openblas-devel
|
||||
```
|
||||
|
||||
!!! warning
|
||||
|
||||
Fedora does not ship the pkg-config files for OpenBLAS. Instead, it wants you to
|
||||
link against [FlexiBLAS](https://www.mpi-magdeburg.mpg.de/projects/flexiblas) (which
|
||||
uses OpenBLAS by default as its backend on Fedora), which you can install with:
|
||||
|
||||
```bash
|
||||
$ dnf install flexiblas-devel
|
||||
```
|
||||
|
||||
For CentOS and RHEL, OpenBLAS packages are provided via the [Fedora EPEL repository](https://fedoraproject.org/wiki/EPEL).
|
||||
After adding that repository and its repository keys, you can install
|
||||
`openblas-devel` with either `dnf` or `yum`.
|
||||
|
||||
=== "Arch/Manjaro/Antergos"
|
||||
|
||||
```bash
|
||||
$ sudo pacman -S openblas
|
||||
```
|
||||
|
||||
|
||||
### Windows
|
||||
|
||||
=== "Conda-forge"
|
||||
|
||||
OpenBLAS can be installed with `conda` (or `mamba`, `micromamba`, or
|
||||
`pixi`) from conda-forge:
|
||||
```
|
||||
conda install openblas
|
||||
```
|
||||
|
||||
Conda-forge provides a method for switching the default BLAS implementation
|
||||
used by all packages. To use that for OpenBLAS, install `libblas=*=*openblas`
|
||||
(see [the docs on this mechanism](https://conda-forge.org/docs/maintainer/knowledge_base/#switching-blas-implementation)
|
||||
for more details).
|
||||
|
||||
=== "vcpkg"
|
||||
|
||||
OpenBLAS can be installed with vcpkg:
|
||||
```cmd
|
||||
# In classic mode:
|
||||
vcpkg install openblas
|
||||
|
||||
# Or in manifest mode:
|
||||
vcpkg add port openblas
|
||||
```
|
||||
|
||||
=== "OpenBLAS releases"
|
||||
|
||||
Windows is the only platform for which binaries are made available by the
|
||||
OpenBLAS project itself. They can be downloaded from the GitHub
|
||||
[Releases](https://github.com/OpenMathLib/OpenBLAS/releases) page. These
|
||||
binaries are built with MinGW, using the following build options:
|
||||
```
|
||||
NUM_THREADS=64 TARGET=GENERIC DYNAMIC_ARCH=1 DYNAMIC_OLDER=1 CONSISTENT_FPCSR=1 INTERFACE=0
|
||||
```
|
||||
There are separate packages for x86-64 and x86. The zip archive contains
|
||||
the include files, static and shared libraries, as well as configuration
|
||||
files for getting them found via CMake or pkg-config. To use these
|
||||
binaries, create a suitable folder for your OpenBLAS installation and unzip
|
||||
the `.zip` bundle there (note that you will need to edit the provided
|
||||
`openblas.pc` and `OpenBLASConfig.cmake` to reflect the installation path
|
||||
on your computer, as distributed they have "win" or "win64" reflecting the
|
||||
local paths on the system they were built on).
|
||||
|
||||
Note that the same binaries can be downloaded
|
||||
[from SourceForge](http://sourceforge.net/projects/openblas/files); this is
|
||||
mostly of historical interest.
|
||||
|
||||
|
||||
### macOS
|
||||
|
||||
To install OpenBLAS with a package manager on macOS, run:
|
||||
|
||||
=== "Homebrew"
|
||||
|
||||
```zsh
|
||||
% brew install openblas
|
||||
```
|
||||
|
||||
=== "MacPorts"
|
||||
|
||||
```zsh
|
||||
% sudo port install OpenBLAS-devel
|
||||
```
|
||||
|
||||
=== "Conda-forge"
|
||||
|
||||
```zsh
|
||||
% conda install openblas
|
||||
```
|
||||
|
||||
Conda-forge provides a method for switching the default BLAS implementation
|
||||
used by all packages. To use that for OpenBLAS, install `libblas=*=*openblas`
|
||||
(see [the docs on this mechanism](https://conda-forge.org/docs/maintainer/knowledge_base/#switching-blas-implementation)
|
||||
for more details).
|
||||
|
||||
|
||||
### FreeBSD
|
||||
|
||||
You can install OpenBLAS from the FreeBSD [Ports collection](https://www.freebsd.org/ports/index.html):
|
||||
```
|
||||
pkg install openblas
|
||||
```
|
||||
|
||||
|
||||
## Building from source
|
||||
|
||||
We recommend downloading the latest [stable version](https://github.com/OpenMathLib/OpenBLAS/releases)
|
||||
from the GitHub Releases page, or checking it out from a git tag, rather than a
|
||||
dev version from the `develop` branch.
|
||||
|
||||
!!! tip
|
||||
|
||||
The User manual contains [a section with detailed information on compiling OpenBLAS](user_manual.md#compiling-openblas),
|
||||
including how to customize builds and how to cross-compile. Please read
|
||||
that documentation first. This page contains only platform-specific build
|
||||
information, and assumes you already understand the general build system
|
||||
invocations to build OpenBLAS, with the specific build options you want to
|
||||
control multi-threading and other non-platform-specific behavior.
|
||||
|
||||
|
||||
### Linux and macOS
|
||||
|
||||
Ensure you have C and Fortran compilers installed, then simply type `make` to compile the library.
|
||||
There are no other build dependencies, nor unusual platform-specific
|
||||
environment variables to set or other system setup to do.
|
||||
|
||||
!!! note
|
||||
|
||||
When building in an emulator (KVM, QEMU, etc.), please make sure that the combination of CPU features exposed to
|
||||
the virtual environment matches that of an existing CPU to allow detection of the CPU model to succeed.
|
||||
(With `qemu`, this can be done by passing `-cpu host` or a supported model name at invocation).
|
||||
|
||||
|
||||
### Windows
|
||||
|
||||
We support building OpenBLAS with either MinGW or Visual Studio on Windows.
|
||||
Using MSVC will yield an OpenBLAS build with the Windows platform-native ABI.
|
||||
Using MinGW will yield a different ABI. We'll describe both methods in detail
|
||||
in this section, since the process for each is quite different.
|
||||
|
||||
#### Visual Studio & native Windows ABI
|
||||
|
||||
For Visual Studio, you can use CMake to generate Visual Studio solution files;
|
||||
note that you will need at least CMake 3.11 for linking to work correctly.
|
||||
|
||||
Note that you need a Fortran compiler if you plan to build and use the LAPACK
|
||||
functions included with OpenBLAS. The sections below describe using either
|
||||
`flang` as an add-on to clang/LLVM or `gfortran` as part of MinGW for this
|
||||
purpose. If you want to use the Intel Fortran compiler (`ifort` or `ifx`) for
|
||||
this, be sure to also use the Intel C compiler (`icc` or `icx`) for building
|
||||
the C parts, as the ABI imposed by `ifort` is incompatible with MSVC.
|
||||
|
||||
A fully-optimized OpenBLAS that can be statically or dynamically linked to your
|
||||
application can currently be built for the 64-bit architecture with the LLVM
|
||||
compiler infrastructure. We're going to use [Miniconda3](https://docs.anaconda.com/miniconda/)
|
||||
to grab all of the tools we need, since some of them are in an experimental
|
||||
status. Before you begin, you'll need to have Microsoft Visual Studio 2015 or
|
||||
newer installed.
|
||||
|
||||
1. Install Miniconda3 for 64-bit Windows using `winget install --id Anaconda.Miniconda3`,
|
||||
or easily download from [conda.io](https://docs.conda.io/en/latest/miniconda.html).
|
||||
2. Open the "Anaconda Command Prompt" now available in the Start Menu, or at `%USERPROFILE%\miniconda3\shell\condabin\conda-hook.ps1`.
|
||||
3. In that command prompt window, use `cd` to change to the directory where you want to build OpenBLAS.
|
||||
4. Now install all of the tools we need:
|
||||
```
|
||||
conda update -n base conda
|
||||
conda config --add channels conda-forge
|
||||
conda install -y cmake flang clangdev perl libflang ninja
|
||||
```
|
||||
5. Still in the Anaconda Command Prompt window, activate the 64-bit MSVC environment with `vcvarsall x64`.
|
||||
On Windows 11 with Visual Studio 2022, this would be done by invoking:
|
||||
|
||||
```shell
|
||||
"c:\Program Files\Microsoft Visual Studio\2022\Community\vc\Auxiliary\Build\vcvars64.bat"
|
||||
```
|
||||
|
||||
With VS2019, the command should be the same (except for the year number of course).
|
||||
For other versions of MSVC, please check the Visual Studio documentation for
|
||||
exactly how to invoke the `vcvars64.bat` script.
|
||||
|
||||
Confirm that the environment is active by typing `link`. This should return
|
||||
a long list of possible options for the `link` command. If it just returns
|
||||
_"command not found"_ or similar, review and retype the call to `vcvars64.bat`.
|
||||
|
||||
!!! note
|
||||
|
||||
if you are working from a Visual Studio command prompt window instead
|
||||
(so that you do not have to do the `vcvars` call), you need to invoke
|
||||
`conda activate` so that `CONDA_PREFIX` etc. get set up correctly before
|
||||
proceeding to step 6. Failing to do so will lead to link errors like
|
||||
`libflangmain.lib` not getting found later in the build.
|
||||
|
||||
6. Now configure the project with CMake. Starting in the project directory, execute the following:
|
||||
```
|
||||
set "LIB=%CONDA_PREFIX%\Library\lib;%LIB%"
|
||||
set "CPATH=%CONDA_PREFIX%\Library\include;%CPATH%"
|
||||
mkdir build
|
||||
cd build
|
||||
cmake .. -G "Ninja" -DCMAKE_CXX_COMPILER=clang-cl -DCMAKE_C_COMPILER=clang-cl -DCMAKE_Fortran_COMPILER=flang -DCMAKE_MT=mt -DBUILD_WITHOUT_LAPACK=no -DNOFORTRAN=0 -DDYNAMIC_ARCH=ON -DCMAKE_BUILD_TYPE=Release
|
||||
```
|
||||
|
||||
You may want to add further options in the `cmake` command here. For
|
||||
instance, the default only produces a static `.lib` version of the library.
|
||||
If you would rather have a DLL, add `-DBUILD_SHARED_LIBS=ON` above. Note that
|
||||
this step only creates some command files and directories, the actual build
|
||||
happens next.
|
||||
|
||||
7. Build the project:
|
||||
|
||||
```
|
||||
cmake --build . --config Release
|
||||
```
|
||||
This step will create the OpenBLAS library in the `lib` directory, and
|
||||
various build-time tests in the `test`, `ctest` and `openblas_utest`
|
||||
directories. However it will not separate the header files you might need
|
||||
for building your own programs from those used internally. To put all
|
||||
relevant files in a more convenient arrangement, run the next step.
|
||||
|
||||
8. Install all relevant files created by the build:
|
||||
|
||||
```
|
||||
cmake --install . --prefix c:\opt -v
|
||||
```
|
||||
This will copy all files that are needed for building and running your own
|
||||
programs with OpenBLAS to the given location, creating appropriate
|
||||
subdirectories for the individual kinds of files. In the case of `C:\opt` as
|
||||
given above, this would be:
|
||||
|
||||
- `C:\opt\include\openblas` for the header files,
|
||||
- `C:\opt\bin` for the `libopenblas.dll` shared library,
|
||||
- `C:\opt\lib` for the static library, and
|
||||
- `C:\opt\share` holds various support files that enable other cmake-based
|
||||
build scripts to find OpenBLAS automatically.
|
||||
|
||||
|
||||
!!! tip "Change in complex types for Visual Studio 2017 and up"
|
||||
|
||||
In newer Visual Studio versions, Microsoft has changed
|
||||
[how it handles complex types](https://docs.microsoft.com/en-us/cpp/c-runtime-library/complex-math-support?view=msvc-170#types-used-in-complex-math).
|
||||
Even when using a precompiled version of OpenBLAS, you might need to define
|
||||
`LAPACK_COMPLEX_CUSTOM` in order to define complex types properly for MSVC.
|
||||
For example, some variant of the following might help:
|
||||
|
||||
```c
|
||||
#if defined(_MSC_VER)
|
||||
#include <complex.h>
|
||||
#define LAPACK_COMPLEX_CUSTOM
|
||||
#define lapack_complex_float _Fcomplex
|
||||
#define lapack_complex_double _Dcomplex
|
||||
#endif
|
||||
```
|
||||
|
||||
For reference, see
|
||||
[openblas#3661](https://github.com/OpenMathLib/OpenBLAS/issues/3661),
|
||||
[lapack#683](https://github.com/Reference-LAPACK/lapack/issues/683), and
|
||||
[this Stack Overflow question](https://stackoverflow.com/questions/47520244/using-openblas-lapacke-in-visual-studio).
|
||||
|
||||
|
||||
!!! warning "Building 32-bit binaries with MSVC"
|
||||
|
||||
This method may produce binaries which demonstrate significantly lower
|
||||
performance than those built with the other methods. The Visual Studio
|
||||
compiler does not support the dialect of assembly used in the cpu-specific
|
||||
optimized files, so only the "generic" `TARGET` which is written in pure C
|
||||
will get built. For the same reason it is not possible (and not necessary)
|
||||
to use `-DDYNAMIC_ARCH=ON` in a Visual Studio build. You may consider
|
||||
building for the 32-bit architecture using the GNU (MinGW) ABI instead.
|
||||
|
||||
##### CMake & Visual Studio integration
|
||||
|
||||
To generate Visual Studio solution files, ensure CMake is installed and then run:
|
||||
```
|
||||
# Do this from Powershell so cmake can find visual studio
|
||||
cmake -G "Visual Studio 14 Win64" -DCMAKE_BUILD_TYPE=Release .
|
||||
```
|
||||
|
||||
To then build OpenBLAS using those solution files from within Visual Studio, we
|
||||
also need Perl. Please install it and ensure it's on the `PATH` (see, e.g.,
|
||||
[this Stack Overflow question for how](http://stackoverflow.com/questions/3051049/active-perl-installation-on-windows-operating-system)).
|
||||
|
||||
If you build from within Visual Studio, the dependencies may not be
|
||||
automatically configured: if you try to build `libopenblas` directly, it may
|
||||
fail with a message saying that some `.obj` files aren't found. If this
|
||||
happens, you can work around the problem by building the projects that
|
||||
`libopenblas` depends on before building `libopenblas` itself.
|
||||
|
||||
###### Build OpenBLAS for Universal Windows Platform
|
||||
|
||||
OpenBLAS can be built targeting [Universal Windows Platform](https://en.wikipedia.org/wiki/Universal_Windows_Platform)
|
||||
(UWP) like this:
|
||||
|
||||
1. Follow the steps above to build the Visual Studio solution files for
|
||||
Windows. This builds the helper executables which are required when building
|
||||
the OpenBLAS Visual Studio solution files for UWP in step 2.
|
||||
2. Remove the generated `CMakeCache.txt` and the `CMakeFiles` directory from
|
||||
the OpenBLAS source directory, then re-run CMake with the following options:
|
||||
|
||||
```
|
||||
# do this to build UWP compatible solution files
|
||||
cmake -G "Visual Studio 14 Win64" -DCMAKE_SYSTEM_NAME=WindowsStore -DCMAKE_SYSTEM_VERSION="10.0" -DCMAKE_SYSTEM_PROCESSOR=AMD64 -DVS_WINRT_COMPONENT=TRUE -DCMAKE_BUILD_TYPE=Release .
|
||||
```
|
||||
3. Now build the solution with Visual Studio.
|
||||
|
||||
|
||||
#### MinGW & GNU ABI
|
||||
|
||||
!!! note
|
||||
|
||||
The resulting library from building with MinGW as described below can be
|
||||
used in Visual Studio, but it can only be linked dynamically. This
|
||||
configuration has not been thoroughly tested and should be considered
|
||||
experimental.
|
||||
|
||||
|
||||
To build OpenBLAS on Windows with MinGW:
|
||||
|
||||
1. Install the MinGW (GCC) compiler suite, either the 32-bit
|
||||
[MinGW](http://www.mingw.org/) or the 64-bit
|
||||
[MinGW-w64](http://mingw-w64.sourceforge.net/) toolchain. Be sure to install
|
||||
its `gfortran` package as well (unless you really want to build the BLAS part
|
||||
of OpenBLAS only) and check that `gcc` and `gfortran` are the same version.
|
||||
In addition, please install MSYS2 with MinGW.
|
||||
2. Build OpenBLAS in the MSYS2 shell. Usually, you can just type `make`.
|
||||
OpenBLAS will detect the compiler and CPU automatically.
|
||||
3. After the build is complete, OpenBLAS will generate the static library
|
||||
`libopenblas.a` and the shared library `libopenblas.dll` in the folder. You
|
||||
can type `make PREFIX=/your/installation/path install` to install the
|
||||
library to a certain location.
|
||||
|
||||
Note that OpenBLAS will generate the import library `libopenblas.dll.a` for
|
||||
`libopenblas.dll` by default.
|
||||
|
||||
If you want to generate Windows-native PDB files from a MinGW build, you can
|
||||
use the [cv2pdb](https://github.com/rainers/cv2pdb) tool to do so.
|
||||
|
||||
To then use the built OpenBLAS shared library in Visual Studio:
|
||||
|
||||
1. Copy the import library (`OPENBLAS_TOP_DIR/libopenblas.dll.a`) and the
|
||||
shared library (`libopenblas.dll`) into the same folder (this must be the
|
||||
folder of your project that is going to use the BLAS library. You may need
|
||||
to add `libopenblas.dll.a` to the linker input list: `properties->Linker->Input`).
|
||||
2. Please follow the Visual Studio documentation about using third-party .dll
|
||||
libraries, and make sure to link against a library for the correct
|
||||
architecture.[^1]
|
||||
3. If you need CBLAS, you should include `cblas.h` in
|
||||
`/your/installation/path/include` in Visual Studio. Please see
|
||||
[openblas#95](http://github.com/OpenMathLib/OpenBLAS/issues/95) for more details.
|
||||
|
||||
[^1]:
|
||||
If the OpenBLAS DLLs are not linked correctly, you may see an error like
|
||||
_"The application was unable to start correctly (0xc000007b)"_, which typically
|
||||
indicates a mismatch between 32-bit and 64-bit libraries.
|
||||
|
||||
!!! info "Limitations of using the MinGW build within Visual Studio"
|
||||
|
||||
- Both static and dynamic linking are supported with MinGW. With Visual
|
||||
Studio, however, only dynamic linking is supported and so you should use
|
||||
the import library.
|
||||
- Debugging from Visual Studio does not work because MinGW and Visual
|
||||
Studio have incompatible formats for debug information (PDB vs.
|
||||
DWARF/STABS). You should either debug with GDB on the command line or
|
||||
with a visual frontend, for instance [Eclipse](http://www.eclipse.org/cdt/) or
|
||||
[Qt Creator](http://qt.nokia.com/products/developer-tools/).
|
||||
|
||||
|
||||
#### Windows on Arm
|
||||
|
||||
The following tools need to be installed to build for Windows on Arm (WoA):
|
||||
|
||||
- Clang for Windows on Arm.
|
||||
Find the latest LLVM build for WoA from [LLVM release page](https://releases.llvm.org/).
|
||||
E.g., the LLVM 12 build for WoA64 can be found [here](https://github.com/llvm/llvm-project/releases/download/llvmorg-12.0.0/LLVM-12.0.0-woa64.exe).
|
||||
Run the LLVM installer and ensure that LLVM is added to environment PATH.
|
||||
- Download and install classic Flang for Windows on Arm.
|
||||
Classic Flang is the only available Fortran compiler for Windows on Arm for now.
|
||||
A pre-release build can be found [here](https://github.com/kaadam/flang/releases/tag/v0.1)
|
||||
There is no installer for classic flang and the zip package can be
|
||||
extracted and the path needs to be added to environment `PATH`.
|
||||
E.g., in PowerShell:
|
||||
```
|
||||
$env:Path += ";C:\flang_woa\bin"
|
||||
```
|
||||
|
||||
The following steps describe how to build the static library for OpenBLAS with and without LAPACK:
|
||||
|
||||
1. Build OpenBLAS static library with BLAS and LAPACK routines with Make:
|
||||
|
||||
```bash
|
||||
$ make CC="clang-cl" HOSTCC="clang-cl" AR="llvm-ar" BUILD_WITHOUT_LAPACK=0 NOFORTRAN=0 DYNAMIC_ARCH=0 TARGET=ARMV8 ARCH=arm64 BINARY=64 USE_OPENMP=0 PARALLEL=1 RANLIB="llvm-ranlib" MAKE=make F_COMPILER=FLANG FC=FLANG FFLAGS_NOOPT="-march=armv8-a -cpp" FFLAGS="-march=armv8-a -cpp" NEED_PIC=0 HOSTARCH=arm64 libs netlib
|
||||
```
|
||||
|
||||
2. Build static library with BLAS routines using CMake:
|
||||
|
||||
Classic Flang has compatibility issues with CMake, hence only BLAS routines can be compiled with CMake:
|
||||
|
||||
```bash
|
||||
$ mkdir build
|
||||
$ cd build
|
||||
$ cmake .. -G Ninja -DCMAKE_C_COMPILER=clang -DBUILD_WITHOUT_LAPACK=1 -DNOFORTRAN=1 -DDYNAMIC_ARCH=0 -DTARGET=ARMV8 -DARCH=arm64 -DBINARY=64 -DUSE_OPENMP=0 -DCMAKE_SYSTEM_PROCESSOR=ARM64 -DCMAKE_CROSSCOMPILING=1 -DCMAKE_SYSTEM_NAME=Windows
|
||||
$ cmake --build . --config Release
|
||||
```
|
||||
|
||||
!!! tip "`getarch.exe` execution error"
|
||||
|
||||
If you notice that platform-specific headers by `getarch.exe` are not
|
||||
generated correctly, this could be due to a known debug runtime DLL issue for
|
||||
arm64 platforms. Please check out [this page](https://linaro.atlassian.net/wiki/spaces/WOAR/pages/28677636097/Debug+run-time+DLL+issue#Workaround)
|
||||
for a workaround.
|
||||
|
||||
|
||||
#### Generating an import library
|
||||
|
||||
Microsoft Windows has this thing called "import libraries". You need it for
|
||||
MSVC; you don't need it for MinGW because the `ld` linker is smart enough -
|
||||
however, you may still want it for some reason, so we'll describe the process
|
||||
for both MSVC and MinGW.
|
||||
|
||||
Import libraries are compiled from a list of what symbols to use, which are
|
||||
contained in a `.def` file. A `.def` file should be already be present in the
|
||||
`exports` directory under the top-level OpenBLAS directory after you've run a build.
|
||||
In your shell, move to this directory: `cd exports`.
|
||||
|
||||
=== "MSVC"
|
||||
|
||||
Unlike MinGW, MSVC absolutely requires an import library. Now the C ABI of
|
||||
MSVC and MinGW are actually identical, so linking is actually okay (any
|
||||
incompatibility in the C ABI would be a bug).
|
||||
|
||||
The import libraries of MSVC have the suffix `.lib`. They are generated
|
||||
from a `.def` file using MSVC's `lib.exe`. See [the MSVC instructions](use_visual_studio.md#generate-import-library-before-0210-version).
|
||||
|
||||
=== "MinGW"
|
||||
|
||||
MinGW import libraries have the suffix `.a`, just like static libraries.
|
||||
Our goal is to produce the file `libopenblas.dll.a`.
|
||||
|
||||
You need to first insert a line `LIBRARY libopenblas.dll` in `libopenblas.def`:
|
||||
```
|
||||
cat <(echo "LIBRARY libopenblas.dll") libopenblas.def > libopenblas.def.1
|
||||
mv libopenblas.def.1 libopenblas.def
|
||||
```
|
||||
|
||||
Now the `.def` file probably looks like:
|
||||
```
|
||||
LIBRARY libopenblas.dll
|
||||
EXPORTS
|
||||
caxpy=caxpy_ @1
|
||||
caxpy_=caxpy_ @2
|
||||
...
|
||||
```
|
||||
Then, generate the import library: `dlltool -d libopenblas.def -l libopenblas.dll.a`
|
||||
|
||||
_Again, there is basically **no point** in making an import library for use in MinGW. It actually slows down linking._
|
||||
|
||||
|
||||
### Android
|
||||
|
||||
To build OpenBLAS for Android, you will need the following tools installed on your machine:
|
||||
|
||||
- [The Android NDK](https://developer.android.com/ndk/)
|
||||
- Perl
|
||||
- Clang compiler on the build machine
|
||||
|
||||
The next two sections below describe how to build with Clang for ARMV7 and
|
||||
ARMV8 targets, respectively. The same basic principles as described below for
|
||||
ARMV8 should also apply to building an x86 or x86-64 version (substitute
|
||||
something like `NEHALEM` for the target instead of `ARMV8`, and replace all the
|
||||
`aarch64` in the toolchain paths with `x86` or `x86_64` as appropriate).
|
||||
|
||||
!!! info "Historic note"
|
||||
|
||||
Since NDK version 19, the default toolchain is provided as a standalone
|
||||
toolchain, so building one yourself following
|
||||
[building a standalone toolchain](http://developer.android.com/ndk/guides/standalone_toolchain.html)
|
||||
should no longer be necessary.
|
||||
|
||||
|
||||
#### Building for ARMV7
|
||||
|
||||
```bash
|
||||
# Set path to ndk-bundle
|
||||
export NDK_BUNDLE_DIR=/path/to/ndk-bundle
|
||||
|
||||
# Set the PATH to contain paths to clang and arm-linux-androideabi-* utilities
|
||||
export PATH=${NDK_BUNDLE_DIR}/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/bin:${NDK_BUNDLE_DIR}/toolchains/llvm/prebuilt/linux-x86_64/bin:$PATH
|
||||
|
||||
# Set LDFLAGS so that the linker finds the appropriate libgcc
|
||||
export LDFLAGS="-L${NDK_BUNDLE_DIR}/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/lib/gcc/arm-linux-androideabi/4.9.x"
|
||||
|
||||
# Set the clang cross compile flags
|
||||
export CLANG_FLAGS="-target arm-linux-androideabi -marm -mfpu=vfp -mfloat-abi=softfp --sysroot ${NDK_BUNDLE_DIR}/platforms/android-23/arch-arm -gcc-toolchain ${NDK_BUNDLE_DIR}/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/"
|
||||
|
||||
#OpenBLAS Compile
|
||||
make TARGET=ARMV7 ONLY_CBLAS=1 AR=ar CC="clang ${CLANG_FLAGS}" HOSTCC=gcc ARM_SOFTFP_ABI=1 -j4
|
||||
```
|
||||
|
||||
On macOS, it may also be necessary to give the complete path to the `ar`
|
||||
utility in the make command above, like so:
|
||||
```bash
|
||||
AR=${NDK_BUNDLE_DIR}/toolchains/arm-linux-androideabi-4.9/prebuilt/darwin-x86_64/bin/arm-linux-androideabi-gcc-ar
|
||||
```
|
||||
otherwise you may get a linker error complaining like `malformed archive header
|
||||
name at 8` when the native macOS `ar` command was invoked instead.
|
||||
|
||||
|
||||
#### Building for ARMV8
|
||||
|
||||
```bash
|
||||
# Set path to ndk-bundle
|
||||
export NDK_BUNDLE_DIR=/path/to/ndk-bundle/
|
||||
|
||||
# Export PATH to contain directories of clang and aarch64-linux-android-* utilities
|
||||
export PATH=${NDK_BUNDLE_DIR}/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/bin/:${NDK_BUNDLE_DIR}/toolchains/llvm/prebuilt/linux-x86_64/bin:$PATH
|
||||
|
||||
# Setup LDFLAGS so that loader can find libgcc and pass -lm for sqrt
|
||||
export LDFLAGS="-L${NDK_BUNDLE_DIR}/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/lib/gcc/aarch64-linux-android/4.9.x -lm"
|
||||
|
||||
# Setup the clang cross compile options
|
||||
export CLANG_FLAGS="-target aarch64-linux-android --sysroot ${NDK_BUNDLE_DIR}/platforms/android-23/arch-arm64 -gcc-toolchain ${NDK_BUNDLE_DIR}/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/"
|
||||
|
||||
# Compile
|
||||
make TARGET=ARMV8 ONLY_CBLAS=1 AR=ar CC="clang ${CLANG_FLAGS}" HOSTCC=gcc -j4
|
||||
```
|
||||
Note: using `TARGET=CORTEXA57` in place of `ARMV8` will pick up better
|
||||
optimized routines. Implementations for the `CORTEXA57` target are compatible
|
||||
with all other `ARMV8` targets.
|
||||
|
||||
Note: for NDK 23b, something as simple as:
|
||||
```bash
|
||||
export PATH=/opt/android-ndk-r23b/toolchains/llvm/prebuilt/linux-x86_64/bin/:$PATH
|
||||
make HOSTCC=gcc CC=/opt/android-ndk-r23b/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android31-clang ONLY_CBLAS=1 TARGET=ARMV8
|
||||
```
|
||||
appears to be sufficient on Linux.
|
||||
|
||||
|
||||
??? note "Alternative build script for 3 architectures"
|
||||
|
||||
This script will build OpenBLAS for 3 architecture (`ARMV7`, `ARMV8`, `X86`) and install them to `/opt/OpenBLAS/lib`.
|
||||
It was tested on macOS with NDK version 21.3.6528147.
|
||||
|
||||
```bash
|
||||
export NDK=YOUR_PATH_TO_SDK/Android/sdk/ndk/21.3.6528147
|
||||
export TOOLCHAIN=$NDK/toolchains/llvm/prebuilt/darwin-x86_64
|
||||
|
||||
make clean
|
||||
make \
|
||||
TARGET=ARMV7 \
|
||||
ONLY_CBLAS=1 \
|
||||
CC="$TOOLCHAIN"/bin/armv7a-linux-androideabi21-clang \
|
||||
AR="$TOOLCHAIN"/bin/arm-linux-androideabi-ar \
|
||||
HOSTCC=gcc \
|
||||
ARM_SOFTFP_ABI=1 \
|
||||
-j4
|
||||
sudo make install
|
||||
|
||||
make clean
|
||||
make \
|
||||
TARGET=CORTEXA57 \
|
||||
ONLY_CBLAS=1 \
|
||||
CC=$TOOLCHAIN/bin/aarch64-linux-android21-clang \
|
||||
AR=$TOOLCHAIN/bin/aarch64-linux-android-ar \
|
||||
HOSTCC=gcc \
|
||||
-j4
|
||||
sudo make install
|
||||
|
||||
make clean
|
||||
make \
|
||||
TARGET=ATOM \
|
||||
ONLY_CBLAS=1 \
|
||||
CC="$TOOLCHAIN"/bin/i686-linux-android21-clang \
|
||||
AR="$TOOLCHAIN"/bin/i686-linux-android-ar \
|
||||
HOSTCC=gcc \
|
||||
ARM_SOFTFP_ABI=1 \
|
||||
-j4
|
||||
sudo make install
|
||||
|
||||
## This will build for x86_64
|
||||
make clean
|
||||
make \
|
||||
TARGET=ATOM BINARY=64\
|
||||
ONLY_CBLAS=1 \
|
||||
CC="$TOOLCHAIN"/bin/x86_64-linux-android21-clang \
|
||||
AR="$TOOLCHAIN"/bin/x86_64-linux-android-ar \
|
||||
HOSTCC=gcc \
|
||||
ARM_SOFTFP_ABI=1 \
|
||||
-j4
|
||||
sudo make install
|
||||
```
|
||||
You can find full list of target architectures in [TargetList.txt](https://github.com/OpenMathLib/OpenBLAS/blob/develop/TargetList.txt)
|
||||
|
||||
|
||||
### iPhone/iOS
|
||||
|
||||
As none of the current developers uses iOS, the following instructions are what
|
||||
was found to work in our Azure CI setup, but as far as we know this builds a
|
||||
fully working OpenBLAS for this platform.
|
||||
|
||||
Go to the directory where you unpacked OpenBLAS, and enter the following commands:
|
||||
```bash
|
||||
CC=/Applications/Xcode_12.4.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
|
||||
CFLAGS="-O2 -Wno-macro-redefined -isysroot /Applications/Xcode_12.4.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS14.4.sdk -arch arm64 -miphoneos-version-min=10.0"
|
||||
|
||||
make TARGET=ARMV8 DYNAMIC_ARCH=1 NUM_THREADS=32 HOSTCC=clang NOFORTRAN=1
|
||||
```
|
||||
Adjust the `-miphoneos-version-min` flag as necessary for your installation. E.g., change the version number
|
||||
to the minimum iOS version you want to target and execute this file to build the library.
|
||||
|
||||
|
||||
### MIPS
|
||||
|
||||
For MIPS targets you will need latest toolchains:
|
||||
|
||||
- P5600 - MTI GNU/Linux Toolchain
|
||||
- I6400, P6600 - IMG GNU/Linux Toolchain
|
||||
|
||||
You can use following commandlines for builds:
|
||||
|
||||
```bash
|
||||
IMG_TOOLCHAIN_DIR={full IMG GNU/Linux Toolchain path including "bin" directory -- for example, /opt/linux_toolchain/bin}
|
||||
IMG_GCC_PREFIX=mips-img-linux-gnu
|
||||
IMG_TOOLCHAIN=${IMG_TOOLCHAIN_DIR}/${IMG_GCC_PREFIX}
|
||||
|
||||
# I6400 Build (n32):
|
||||
make BINARY=32 BINARY32=1 CC=$IMG_TOOLCHAIN-gcc AR=$IMG_TOOLCHAIN-ar FC="$IMG_TOOLCHAIN-gfortran -EL -mabi=n32" RANLIB=$IMG_TOOLCHAIN-ranlib HOSTCC=gcc CFLAGS="-EL" FFLAGS=$CFLAGS LDFLAGS=$CFLAGS TARGET=I6400
|
||||
|
||||
# I6400 Build (n64):
|
||||
make BINARY=64 BINARY64=1 CC=$IMG_TOOLCHAIN-gcc AR=$IMG_TOOLCHAIN-ar FC="$IMG_TOOLCHAIN-gfortran -EL" RANLIB=$IMG_TOOLCHAIN-ranlib HOSTCC=gcc CFLAGS="-EL" FFLAGS=$CFLAGS LDFLAGS=$CFLAGS TARGET=I6400
|
||||
|
||||
# P6600 Build (n32):
|
||||
make BINARY=32 BINARY32=1 CC=$IMG_TOOLCHAIN-gcc AR=$IMG_TOOLCHAIN-ar FC="$IMG_TOOLCHAIN-gfortran -EL -mabi=n32" RANLIB=$IMG_TOOLCHAIN-ranlib HOSTCC=gcc CFLAGS="-EL" FFLAGS=$CFLAGS LDFLAGS=$CFLAGS TARGET=P6600
|
||||
|
||||
# P6600 Build (n64):
|
||||
make BINARY=64 BINARY64=1 CC=$IMG_TOOLCHAIN-gcc AR=$IMG_TOOLCHAIN-ar FC="$IMG_TOOLCHAIN-gfortran -EL" RANLIB=$IMG_TOOLCHAIN-ranlib HOSTCC=gcc CFLAGS="-EL" FFLAGS="$CFLAGS" LDFLAGS="$CFLAGS" TARGET=P6600
|
||||
|
||||
MTI_TOOLCHAIN_DIR={full MTI GNU/Linux Toolchain path including "bin" directory -- for example, /opt/linux_toolchain/bin}
|
||||
MTI_GCC_PREFIX=mips-mti-linux-gnu
|
||||
# Prefix for the MTI toolchain binaries (was mistakenly built from the IMG_* variables)
MTI_TOOLCHAIN=${MTI_TOOLCHAIN_DIR}/${MTI_GCC_PREFIX}
|
||||
|
||||
# P5600 Build:
|
||||
|
||||
make BINARY=32 BINARY32=1 CC=$MTI_TOOLCHAIN-gcc AR=$MTI_TOOLCHAIN-ar FC="$MTI_TOOLCHAIN-gfortran -EL" RANLIB=$MTI_TOOLCHAIN-ranlib HOSTCC=gcc CFLAGS="-EL" FFLAGS=$CFLAGS LDFLAGS=$CFLAGS TARGET=P5600
|
||||
```
|
||||
|
||||
|
||||
### FreeBSD
|
||||
|
||||
You will need to install the following tools from the FreeBSD ports tree:
|
||||
|
||||
* lang/gcc
|
||||
* lang/perl5.12
|
||||
* ftp/curl
|
||||
* devel/gmake
|
||||
* devel/patch
|
||||
|
||||
To compile run the command:
|
||||
```bash
|
||||
$ gmake CC=gcc FC=gfortran
|
||||
```
|
||||
|
||||
|
||||
### Cortex-M
|
||||
|
||||
Cortex-M is a widely used microcontroller that is present in a variety of
|
||||
industrial and consumer electronics. A common variant of the Cortex-M is the
|
||||
`STM32F4xx` series. Here, we will give instructions for building for that
|
||||
series.
|
||||
|
||||
First, install the embedded Arm GCC compiler from the Arm website. Then, create
|
||||
the following `toolchain.cmake` file:
|
||||
|
||||
```cmake
|
||||
set(CMAKE_SYSTEM_NAME Generic)
|
||||
set(CMAKE_SYSTEM_PROCESSOR arm)
|
||||
|
||||
set(CMAKE_C_COMPILER "arm-none-eabi-gcc.exe")
|
||||
set(CMAKE_CXX_COMPILER "arm-none-eabi-g++.exe")
|
||||
|
||||
set(CMAKE_EXE_LINKER_FLAGS "--specs=nosys.specs" CACHE INTERNAL "")
|
||||
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
|
||||
```
|
||||
|
||||
Then build OpenBLAS with:
|
||||
```bash
|
||||
$ cmake .. -G Ninja -DCMAKE_C_COMPILER=arm-none-eabi-gcc -DCMAKE_TOOLCHAIN_FILE:PATH="toolchain.cmake" -DNOFORTRAN=1 -DTARGET=ARMV5 -DEMBEDDED=1
|
||||
```
|
||||
|
||||
In your embedded application, the following functions need to be provided for OpenBLAS to work correctly:
|
||||
```C
|
||||
void free(void* ptr);
|
||||
void* malloc(size_t size);
|
||||
```
|
||||
|
||||
!!! note
|
||||
|
||||
If you are developing for an embedded platform, it is your responsibility
|
||||
to make sure that the device has sufficient memory for `malloc` calls.
|
||||
[Libmemory](https://github.com/embeddedartistry/libmemory)
|
||||
provides one implementation of `malloc` for embedded platforms.
|
|
@ -0,0 +1,450 @@
|
|||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<svg
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
role="img"
|
||||
viewBox="0 -1766.2 402.80686 546.01356"
|
||||
version="1.1"
|
||||
id="svg128"
|
||||
width="800"
|
||||
height="546.01355">
|
||||
<metadata
|
||||
id="metadata132">
|
||||
<rdf:RDF>
|
||||
<cc:Work
|
||||
rdf:about="">
|
||||
<dc:format>image/svg+xml</dc:format>
|
||||
<dc:type
|
||||
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
|
||||
<dc:title>OpenBLAS logo</dc:title>
|
||||
</cc:Work>
|
||||
</rdf:RDF>
|
||||
</metadata>
|
||||
<title
|
||||
id="title2">OpenBLAS logo</title>
|
||||
<defs
|
||||
id="defs16">
|
||||
<path
|
||||
id="a"
|
||||
d="m 247,-949 v 2399 h 269 v -62 H 309 V -887 h 207 v -62 z" />
|
||||
<path
|
||||
id="b"
|
||||
d="M 740,435 Q 740,320 676,213 612,106 511,42 410,-22 304,-22 207,-22 138,35 69,92 51,201 q -1,8 -1,43 0,102 48,194 48,92 129,163 124,103 249,103 38,0 48,-1 97,-14 156,-86 59,-72 60,-182 z m -103,41 q 0,89 -46,139 -46,50 -115,50 -80,0 -154,-60 -80,-63 -122,-177 -42,-114 -43,-212 0,-90 43,-143 43,-53 114,-54 90,0 171,79 81,79 123,215 29,95 29,163 z" />
|
||||
<path
|
||||
id="c"
|
||||
d="m 23,287 q 1,3 2,8 1,5 5,22 4,17 10,31 6,14 15,33 9,19 20,30 11,11 26,22 15,11 33,9 75,0 96,-64 l 10,9 q 62,55 118,55 65,0 102,-47 37,-47 37,-114 0,-108 -76,-199 -76,-91 -172,-92 -22,0 -39,6 -11,5 -23,15 -12,10 -19,17 l -7,8 q -1,-1 -22,-87 -21,-86 -21,-87 0,-6 8,-7 8,-1 37,-3 h 25 q 6,-7 6,-9 0,-2 -3,-18 -3,-12 -6,-15 -3,-3 -13,-4 h -11 q -9,0 -34,1 -25,1 -62,1 -70,0 -89,-2 h -8 q -7,7 -7,11 2,27 13,35 h 20 q 34,1 39,12 3,6 61,239 58,233 61,247 1,5 1,14 0,41 -25,41 -22,0 -37,-28 Q 79,349 71,316 63,283 59,280 57,278 43,278 H 29 q -6,6 -6,9 z M 178,102 q 22,-76 74,-76 30,0 58,23 28,23 46,58 18,34 36,108 18,74 19,110 v 6 q 0,74 -61,74 -11,0 -22,-3 -11,-3 -22,-9 -11,-6 -20,-13 -9,-7 -17,-15 -8,-8 -15,-15 -7,-7 -11,-14 -4,-7 -8,-10 l -3,-4 q 0,-1 -3,-14 -3,-13 -11,-44 -8,-31 -14,-52 -26,-106 -26,-110 z" />
|
||||
<path
|
||||
id="d"
|
||||
d="m 231,637 q -27,0 -32,1 -5,1 -5,11 0,27 11,33 1,1 130,1 259,0 273,-2 63,-10 105,-45 42,-35 43,-92 0,-64 -58,-115 -58,-51 -133,-69 l -10,-3 q 64,-9 105,-46 41,-37 42,-92 Q 702,146 630,78 558,10 453,1 446,0 242,0 42,0 39,2 q -4,3 -4,8 0,7 2,14 5,19 10,21 4,1 15,1 h 6 q 27,0 60,3 14,3 19,12 3,4 72,278 69,274 69,289 0,7 -57,9 z m 418,-93 q 0,30 -15,56 -15,26 -49,34 -7,2 -92,3 h -42 q -22,0 -35,-1 h -13 q -15,-1 -19,-10 -2,-4 -32,-120 0,-3 -1,-6 L 320,374 h 81 q 81,0 93,2 60,10 107,58 47,48 48,110 z M 595,229 q 0,44 -23,73 -23,29 -60,34 -6,1 -83,1 -118,0 -119,-1 0,-2 -17,-73 -17,-71 -35,-141 L 240,52 q 0,-4 12,-4 12,0 81,-2 89,0 96,1 62,7 114,58 52,51 52,124 z" />
|
||||
<path
|
||||
id="e"
|
||||
d="m 228,637 q -34,0 -36,4 -1,2 -1,8 0,24 11,33 2,1 15,1 54,-3 127,-3 141,0 162,3 h 12 q 6,-6 6,-9 0,-3 -2,-18 -5,-15 -9,-19 h -38 q -69,-1 -81,-9 -7,-4 -14,-28 Q 373,576 313,336 297,271 279,198 261,125 252,88 l -9,-36 q 0,-4 9,-4 9,0 59,-2 h 17 q 32,0 51,1 19,1 49,7 30,6 50,18 20,12 44,34 24,22 42,55 16,30 30,67 14,37 17,42 5,3 17,3 h 13 q 6,-9 6,-11 0,-2 -20,-59 Q 607,146 583,83 559,20 557,9 555,4 553,3 551,2 537,0 523,-2 494,-1 H 418 Q 353,-1 294,0 H 116 q -84,0 -84,10 0,7 2,14 5,19 10,21 4,1 15,1 h 6 q 27,0 60,3 14,3 19,12 3,4 72,278 69,274 69,289 0,7 -57,9 z" />
|
||||
<path
|
||||
id="f"
|
||||
d="m 11,1388 v 62 H 280 V -949 H 11 v 62 h 207 v 2275 z" />
|
||||
<path
|
||||
id="g"
|
||||
d="m 55,642 v 6 l 4,11 7,7 5,2 h 637 q 15,-8 15,-20 0,-12 -15,-20 H 409 V 15 Q 402,2 391,0 q -4,0 -7,1 -3,1 -5,2 -2,1 -4,3 -2,2 -2,3 0,1 -2,4 -2,3 -2,3 V 628 H 71 q -1,0 -4,2 -3,2 -8,7 z" />
|
||||
<path
|
||||
id="h"
|
||||
d="M 630,29 Q 630,9 609,9 604,9 587,25 570,41 493,118 L 389,222 284,117 Q 178,13 175,11 q -4,-2 -7,-2 -8,0 -14,6 -6,6 -7,14 0,7 14,22 14,15 94,95 L 359,250 255,354 q -81,81 -94,95 -13,14 -14,22 0,9 6,14 6,5 15,5 5,0 7,-1 3,-2 109,-106 L 389,278 493,382 q 77,77 94,93 17,16 22,16 21,0 21,-20 0,-7 -10,-18 -10,-11 -98,-98 L 418,250 522,145 q 84,-84 96,-97 12,-13 12,-19 z" />
|
||||
<path
|
||||
id="i"
|
||||
d="m 39,168 q 0,57 19,104 19,47 49,78 30,31 67,52 37,21 70,31 33,10 63,9 h 3 q 45,0 78,-22 33,-22 33,-65 0,-90 -111,-118 -49,-13 -134,-14 -37,0 -38,-2 0,-2 -6,-35 -6,-33 -7,-58 0,-47 21,-74 21,-27 63,-28 42,-1 93,19 51,20 92,66 9,10 12,10 4,0 13,-9 9,-9 10,-14 1,-5 -9,-16 Q 410,71 390,55 370,39 344,24 318,9 281,-1 244,-11 205,-11 126,-11 83,42 40,95 39,168 Z m 334,185 q -6,52 -68,52 -33,0 -61,-14 -28,-14 -45,-34 -17,-20 -29,-41 -12,-21 -16,-36 -4,-15 -5,-19 0,-1 20,-1 113,0 158,24 45,24 46,69 z" />
|
||||
<path
|
||||
id="j"
|
||||
d="m 21,287 q 1,6 3,16 2,10 12,38 10,28 20,47 10,19 33,37 23,18 46,17 36,0 60,-18 24,-18 30,-34 6,-16 6,-21 0,-2 1,-2 l 11,11 q 61,64 139,64 54,0 87,-27 33,-27 34,-79 1,-52 -38,-157 -39,-105 -38,-127 0,-26 17,-26 6,0 9,1 29,5 52,38 23,33 35,80 2,8 20,8 20,0 20,-8 0,-1 -4,-15 -8,-29 -22,-57 -14,-28 -46,-56 -32,-28 -69,-27 -47,0 -68,27 -21,27 -21,56 0,19 36,120 36,101 37,152 0,59 -44,59 h -5 Q 288,404 229,303 L 222,291 189,157 Q 156,26 151,16 138,-11 108,-11 95,-11 87,-5 79,1 76,7 q -3,6 -2,10 0,13 38,163 38,150 40,163 1,5 1,23 0,39 -24,39 -38,0 -63,-100 -6,-20 -6,-21 -2,-6 -19,-6 H 27 q -6,6 -6,9 z" />
|
||||
<path
|
||||
id="k"
|
||||
d="m 208,74 q 0,-24 46,-28 18,0 18,-11 0,-1 -2,-13 Q 267,8 264,4 261,0 251,0 H 239 Q 229,0 205,1 181,2 141,2 70,2 50,0 h -8 q -7,7 -7,11 2,27 13,35 h 14 q 70,3 102,50 6,6 181,305 175,299 178,303 7,12 24,12 h 25 q 6,-9 6,-10 l 28,-323 q 28,-323 30,-326 5,-11 65,-11 25,0 25,-10 0,-2 -3,-14 Q 720,7 718,4 716,1 704,0 H 690 Q 679,0 651,1 623,2 578,2 484,2 455,0 h -12 q -6,6 -6,9 0,3 2,18 4,13 6,16 l 4,3 h 20 q 54,3 64,17 L 521,213 H 283 L 249,155 Q 208,86 208,74 Z m 308,186 q 0,11 -12,156 -12,145 -14,146 L 463,519 Q 447,492 400,412 l -90,-152 103,-1 q 103,0 103,1 z" />
|
||||
<path
|
||||
id="l"
|
||||
d="m 308,24 q 59,0 108,52 49,52 50,121 0,63 -52,87 -106,27 -136,37 -30,10 -42,20 -60,42 -60,121 0,61 32,111 32,50 65,75 29,25 70,40 41,15 64,16 h 18 q 96,0 139,-64 1,0 13,13 12,13 26,29 14,16 20,22 h 4 q 3,0 5,1 13,0 13,-7 0,-7 -28,-121 -28,-114 -32,-118 -4,-4 -16,-3 -20,0 -20,9 0,6 1,10 0,3 1,19 1,16 2,26 0,34 -9,59 -9,25 -18,37 -9,12 -25,25 -36,21 -82,21 -57,0 -106,-46 -49,-46 -50,-106 0,-30 15,-52 15,-22 41,-31 4,-2 70,-19 66,-17 67,-18 34,-11 66,-48 32,-37 32,-100 0,-26 -8,-56 -5,-22 -18,-49 -13,-27 -36,-59 -23,-32 -66,-60 -43,-28 -94,-38 -12,-2 -34,-2 -99,0 -154,55 L 134,44 106,13 q -23,-27 -28,-31 -5,-4 -13,-4 -13,0 -13,8 0,3 58,235 2,6 20,6 h 13 q 6,-6 6,-11 0,-2 -1,-9 -1,-7 -4,-21 -3,-14 -2,-33 2,-39 18,-66 16,-27 43,-40 27,-13 52,-18 25,-5 53,-5 z" />
|
||||
<filter
|
||||
style="color-interpolation-filters:sRGB"
|
||||
id="filter4682"
|
||||
x="-0.23803717"
|
||||
width="1.4760743"
|
||||
y="-0.02669112"
|
||||
height="1.0533822">
|
||||
<feGaussianBlur
|
||||
stdDeviation="26.68"
|
||||
id="feGaussianBlur4684" />
|
||||
</filter>
|
||||
<filter
|
||||
style="color-interpolation-filters:sRGB"
|
||||
id="filter4686"
|
||||
x="-0.23803717"
|
||||
width="1.4760743"
|
||||
y="-0.02669112"
|
||||
height="1.0533822">
|
||||
<feGaussianBlur
|
||||
stdDeviation="26.68"
|
||||
id="feGaussianBlur4688" />
|
||||
</filter>
|
||||
<filter
|
||||
style="color-interpolation-filters:sRGB"
|
||||
id="filter4690"
|
||||
x="-0.23803717"
|
||||
width="1.4760743"
|
||||
y="-0.02669112"
|
||||
height="1.0533822">
|
||||
<feGaussianBlur
|
||||
stdDeviation="26.68"
|
||||
id="feGaussianBlur4692" />
|
||||
</filter>
|
||||
<filter
|
||||
style="color-interpolation-filters:sRGB"
|
||||
id="filter4694"
|
||||
x="-0.23803717"
|
||||
width="1.4760743"
|
||||
y="-0.02669112"
|
||||
height="1.0533822">
|
||||
<feGaussianBlur
|
||||
stdDeviation="26.68"
|
||||
id="feGaussianBlur4696" />
|
||||
</filter>
|
||||
<filter
|
||||
style="color-interpolation-filters:sRGB"
|
||||
id="filter4706"
|
||||
x="-0.14424978"
|
||||
width="1.2884996"
|
||||
y="-0.14424978"
|
||||
height="1.2884996">
|
||||
<feGaussianBlur
|
||||
stdDeviation="12.211888"
|
||||
id="feGaussianBlur4708" />
|
||||
</filter>
|
||||
<filter
|
||||
style="color-interpolation-filters:sRGB"
|
||||
id="filter4820"
|
||||
x="-1.0222467"
|
||||
width="3.0444934"
|
||||
y="-0.11330566"
|
||||
height="1.2266113">
|
||||
<feGaussianBlur
|
||||
stdDeviation="18.128906"
|
||||
id="feGaussianBlur4822" />
|
||||
</filter>
|
||||
</defs>
|
||||
<g
|
||||
id="g945"
|
||||
transform="matrix(0.19294479,0,0,0.20162962,-4.9748406,-1413.1818)">
|
||||
<g
|
||||
transform="translate(-1204.9765,-59.854446)"
|
||||
style="fill:#000080;stroke:currentColor;stroke-width:0"
|
||||
id="g126">
|
||||
<g
|
||||
style="fill:#000080"
|
||||
id="g124"
|
||||
data-mml-node="math">
|
||||
<g
|
||||
transform="translate(-31.5)"
|
||||
style="display:inline;fill:#000080"
|
||||
id="g72"
|
||||
data-mml-node="msup">
|
||||
<g
|
||||
style="fill:#000080"
|
||||
id="g62"
|
||||
data-mml-node="TeXAtom">
|
||||
<g
|
||||
style="fill:#000080;stroke:#02a0c6"
|
||||
id="g60"
|
||||
data-mml-node="mstyle">
|
||||
<g
|
||||
style="fill:#000080"
|
||||
id="g58"
|
||||
data-mml-node="mrow">
|
||||
<g
|
||||
style="fill:#000080;fill-opacity:1;filter:url(#filter4686)"
|
||||
id="g20"
|
||||
transform="matrix(1.0114584,0,0,-0.94200534,149.28532,-51.320028)"
|
||||
data-mml-node="mo">
|
||||
<use
|
||||
height="100%"
|
||||
width="100%"
|
||||
y="0"
|
||||
x="0"
|
||||
style="fill:#000080;fill-opacity:1"
|
||||
id="use18"
|
||||
xlink:href="#a" />
|
||||
</g>
|
||||
<g
|
||||
transform="translate(0,151.87763)"
|
||||
style="fill:#000080"
|
||||
id="g52"
|
||||
data-mml-node="mtable">
|
||||
<g
|
||||
transform="translate(0,-39.676711)"
|
||||
style="fill:#000080"
|
||||
id="g34"
|
||||
data-mml-node="mtr">
|
||||
<g
|
||||
transform="translate(0,63.961331)"
|
||||
style="fill:#000080"
|
||||
id="g32"
|
||||
data-mml-node="mtd">
|
||||
<g
|
||||
style="fill:#000080;stroke:#44a8fc"
|
||||
id="g30"
|
||||
data-mml-node="mstyle">
|
||||
<g
|
||||
style="fill:#000080"
|
||||
id="g24"
|
||||
transform="matrix(1,0,0,-1,615,-682.3)"
|
||||
data-mml-node="mi">
|
||||
<use
|
||||
height="100%"
|
||||
width="100%"
|
||||
y="0"
|
||||
x="0"
|
||||
style="fill:#000080"
|
||||
id="use22"
|
||||
xlink:href="#b" />
|
||||
</g>
|
||||
<g
|
||||
style="fill:#000080"
|
||||
id="g28"
|
||||
transform="matrix(1,0,0,-1,1378,-682.3)"
|
||||
data-mml-node="mi">
|
||||
<use
|
||||
height="100%"
|
||||
width="100%"
|
||||
y="0"
|
||||
x="0"
|
||||
style="fill:#000080"
|
||||
id="use26"
|
||||
xlink:href="#c" />
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<g
|
||||
transform="translate(0,-195.40765)"
|
||||
style="fill:#800000"
|
||||
id="g48"
|
||||
data-mml-node="mtr">
|
||||
<g
|
||||
style="fill:#800000"
|
||||
id="g46"
|
||||
data-mml-node="mtd">
|
||||
<g
|
||||
style="fill:#800000;stroke:#ee82ee"
|
||||
id="g44"
|
||||
data-mml-node="mstyle">
|
||||
<g
|
||||
style="fill:#800000"
|
||||
id="g38"
|
||||
transform="matrix(1,0,0,-1,528,682.3)"
|
||||
data-mml-node="mi">
|
||||
<use
|
||||
height="100%"
|
||||
width="100%"
|
||||
y="0"
|
||||
x="0"
|
||||
style="fill:#800000"
|
||||
id="use36"
|
||||
xlink:href="#d" />
|
||||
</g>
|
||||
<g
|
||||
style="fill:#800000"
|
||||
id="g42"
|
||||
transform="matrix(1,0,0,-1,1287,682.3)"
|
||||
data-mml-node="mi">
|
||||
<use
|
||||
height="100%"
|
||||
width="100%"
|
||||
y="0"
|
||||
x="0"
|
||||
style="fill:#800000"
|
||||
id="use40"
|
||||
xlink:href="#e" />
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<g
|
||||
style="fill:#000080;fill-opacity:1;filter:url(#filter4682)"
|
||||
id="g56"
|
||||
transform="matrix(1.0115444,0,0,-0.9420306,1854.1256,-51.345617)"
|
||||
data-mml-node="mo">
|
||||
<use
|
||||
height="100%"
|
||||
width="100%"
|
||||
y="0"
|
||||
x="0"
|
||||
style="fill:#000080;fill-opacity:1"
|
||||
id="use54"
|
||||
xlink:href="#f" />
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<g
|
||||
transform="translate(-1724.9,4)"
|
||||
style="fill:#800000;stroke:#02a0c6"
|
||||
id="g122"
|
||||
data-mml-node="mstyle">
|
||||
<g
|
||||
style="fill:#800000"
|
||||
id="g120"
|
||||
data-mml-node="TeXAtom">
|
||||
<g
|
||||
style="fill:#800000"
|
||||
id="g118"
|
||||
data-mml-node="mrow">
|
||||
<g
|
||||
style="fill:#800000;fill-opacity:1;filter:url(#filter4694)"
|
||||
id="g80"
|
||||
transform="matrix(1.0117369,0,0,-0.94026514,3995.3166,-53.55715)"
|
||||
data-mml-node="mo">
|
||||
<use
|
||||
height="100%"
|
||||
width="100%"
|
||||
y="0"
|
||||
x="0"
|
||||
style="fill:#800000;fill-opacity:1"
|
||||
id="use78"
|
||||
xlink:href="#a" />
|
||||
</g>
|
||||
<g
|
||||
transform="matrix(1,0,0,0.9988905,-33.172206,176.08811)"
|
||||
style="fill:#800000"
|
||||
id="g112"
|
||||
data-mml-node="mtable">
|
||||
<g
|
||||
transform="translate(0,-1.9281484)"
|
||||
style="fill:#000080"
|
||||
id="g94"
|
||||
data-mml-node="mtr">
|
||||
<g
|
||||
style="fill:#000080"
|
||||
id="g92"
|
||||
data-mml-node="mtd">
|
||||
<g
|
||||
style="fill:#000080;stroke:#44a8fc"
|
||||
id="g90"
|
||||
data-mml-node="mstyle">
|
||||
<g
|
||||
style="fill:#000080"
|
||||
id="g84"
|
||||
transform="matrix(1,0,0,-1,4564.1,-682.3)"
|
||||
data-mml-node="mi">
|
||||
<use
|
||||
height="100%"
|
||||
width="100%"
|
||||
y="0"
|
||||
x="0"
|
||||
style="fill:#000080"
|
||||
id="use82"
|
||||
xlink:href="#i" />
|
||||
</g>
|
||||
<g
|
||||
style="fill:#000080"
|
||||
id="g88"
|
||||
transform="matrix(1,0,0,-1,5030.1,-682.3)"
|
||||
data-mml-node="mi">
|
||||
<use
|
||||
height="100%"
|
||||
width="100%"
|
||||
y="0"
|
||||
x="0"
|
||||
style="fill:#000080"
|
||||
id="use86"
|
||||
xlink:href="#j" />
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<g
|
||||
transform="translate(0,-236.82271)"
|
||||
style="fill:#800000"
|
||||
id="g108"
|
||||
data-mml-node="mtr">
|
||||
<g
|
||||
style="fill:#800000"
|
||||
id="g106"
|
||||
data-mml-node="mtd">
|
||||
<g
|
||||
style="fill:#800000;stroke:#ee82ee"
|
||||
id="g104"
|
||||
data-mml-node="mstyle">
|
||||
<g
|
||||
style="fill:#800000"
|
||||
id="g98"
|
||||
transform="matrix(1,0,0,-1,4415.6,682.3)"
|
||||
data-mml-node="mi">
|
||||
<use
|
||||
height="100%"
|
||||
width="100%"
|
||||
y="0"
|
||||
x="0"
|
||||
style="fill:#800000"
|
||||
id="use96"
|
||||
xlink:href="#k" />
|
||||
</g>
|
||||
<g
|
||||
style="fill:#800000"
|
||||
id="g102"
|
||||
transform="matrix(1,0,0,-1,5165.6,682.3)"
|
||||
data-mml-node="mi">
|
||||
<use
|
||||
height="100%"
|
||||
width="100%"
|
||||
y="0"
|
||||
x="0"
|
||||
style="fill:#800000"
|
||||
id="use100"
|
||||
xlink:href="#l" />
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<g
|
||||
style="fill:#800000;filter:url(#filter4690)"
|
||||
id="g116"
|
||||
transform="matrix(1.0115372,0,0,-0.94204911,5706.1889,-55.364368)"
|
||||
data-mml-node="mo">
|
||||
<use
|
||||
height="100%"
|
||||
width="100%"
|
||||
y="0"
|
||||
x="0"
|
||||
style="fill:#800000"
|
||||
id="use114"
|
||||
xlink:href="#f" />
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
<text
|
||||
transform="rotate(45,-2691.9673,-4469.3442)"
|
||||
id="text4704"
|
||||
y="-4166.0034"
|
||||
x="2812.582"
|
||||
style="font-style:normal;font-weight:normal;font-size:192px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#806600;fill-opacity:1;stroke:none;filter:url(#filter4706)"
|
||||
xml:space="preserve"><tspan
|
||||
style="font-size:229.33332825px;fill:#806600"
|
||||
y="-4166.0034"
|
||||
x="2812.582"
|
||||
id="tspan4702">+</tspan></text>
|
||||
<text
|
||||
id="text4746"
|
||||
y="-1429.2532"
|
||||
x="994.73688"
|
||||
style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:384px;line-height:1.25;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';letter-spacing:0px;word-spacing:0px;fill:#806600;fill-opacity:1;stroke:none;filter:url(#filter4820)"
|
||||
xml:space="preserve"><tspan
|
||||
y="-1429.2532"
|
||||
x="994.73688"
|
||||
id="tspan4744">|</tspan></text>
|
||||
</g>
|
||||
</svg>
|
After Width: | Height: | Size: 20 KiB |
|
@ -0,0 +1,305 @@
|
|||
|
||||
This user manual covers compiling OpenBLAS itself, linking your code to OpenBLAS,
|
||||
example code to use the C (CBLAS) and Fortran (BLAS) APIs, and some troubleshooting
|
||||
tips. Compiling OpenBLAS is optional, since you may be able to install with a
|
||||
package manager.
|
||||
|
||||
!!! note "BLAS API reference documentation"
|
||||
|
||||
The OpenBLAS documentation does not contain API reference documentation for
|
||||
BLAS or LAPACK, since these are standardized APIs, the documentation for
|
||||
which can be found in other places. If you want to understand every BLAS
|
||||
and LAPACK function and definition, we recommend reading the
|
||||
[Netlib BLAS](http://netlib.org/blas/) and [Netlib LAPACK](http://netlib.org/lapack/)
|
||||
documentation.
|
||||
|
||||
OpenBLAS does contain a limited number of functions that are non-standard,
|
||||
these are documented at [OpenBLAS extension functions](extensions.md).
|
||||
|
||||
|
||||
## Compiling OpenBLAS
|
||||
|
||||
### Normal compile
|
||||
|
||||
The default way to build and install OpenBLAS from source is with Make:
|
||||
```
|
||||
make # add `-j4` to compile in parallel with 4 processes
|
||||
make install
|
||||
```
|
||||
|
||||
By default, the CPU architecture is detected automatically when invoking
|
||||
`make`, and the build is optimized for the detected CPU. To override the
|
||||
autodetection, use the `TARGET` flag:
|
||||
|
||||
```
|
||||
# `make TARGET=xxx` sets target CPU: e.g. for an Intel Nehalem CPU:
|
||||
make TARGET=NEHALEM
|
||||
```
|
||||
The full list of known target CPU architectures can be found in
|
||||
`TargetList.txt` in the root of the repository.
|
||||
|
||||
### Cross compile
|
||||
|
||||
For a basic cross-compilation with Make, three steps need to be taken:
|
||||
|
||||
- Set the `CC` and `FC` environment variables to select the cross toolchains
|
||||
for C and Fortran.
|
||||
- Set the `HOSTCC` environment variable to select the host C compiler (i.e. the
|
||||
regular C compiler for the machine on which you are invoking the build).
|
||||
- Set `TARGET` explicitly to the CPU architecture on which the produced
|
||||
OpenBLAS binaries will be used.
|
||||
|
||||
#### Cross-compilation examples
|
||||
|
||||
Compile the library for ARM Cortex-A9 linux on an x86-64 machine
|
||||
_(note: install only `gnueabihf` versions of the cross toolchain - see
|
||||
[this issue comment](https://github.com/OpenMathLib/OpenBLAS/issues/936#issuecomment-237596847)
|
||||
for why_):
|
||||
```
|
||||
make CC=arm-linux-gnueabihf-gcc FC=arm-linux-gnueabihf-gfortran HOSTCC=gcc TARGET=CORTEXA9
|
||||
```
|
||||
|
||||
Compile OpenBLAS for a loongson3a CPU on an x86-64 machine:
|
||||
```
|
||||
make BINARY=64 CC=mips64el-unknown-linux-gnu-gcc FC=mips64el-unknown-linux-gnu-gfortran HOSTCC=gcc TARGET=LOONGSON3A
|
||||
```
|
||||
|
||||
Compile OpenBLAS for loongson3a CPU with the `loongcc` (based on Open64) compiler on an x86-64 machine:
|
||||
```
|
||||
make CC=loongcc FC=loongf95 HOSTCC=gcc TARGET=LOONGSON3A CROSS=1 CROSS_SUFFIX=mips64el-st-linux-gnu- NO_LAPACKE=1 NO_SHARED=1 BINARY=32
|
||||
```
|
||||
|
||||
### Building a debug version
|
||||
|
||||
Add `DEBUG=1` to your build command, e.g.:
|
||||
```
|
||||
make DEBUG=1
|
||||
```
|
||||
|
||||
### Install to a specific directory
|
||||
|
||||
!!! note
|
||||
|
||||
Installing to a directory is optional; it is also possible to use the shared or static
|
||||
libraries directly from the build directory.
|
||||
|
||||
Use `make install` with the `PREFIX` flag to install to a specific directory:
|
||||
|
||||
```
|
||||
make install PREFIX=/path/to/installation/directory
|
||||
```
|
||||
|
||||
The default directory is `/opt/OpenBLAS`.
|
||||
|
||||
!!! important
|
||||
|
||||
Note that any flags passed to `make` during build should also be passed to
|
||||
`make install` to circumvent any install errors, i.e. some headers not
|
||||
being copied over correctly.
|
||||
|
||||
For more detailed information on building/installing from source, please read
|
||||
the [Installation Guide](install.md).
|
||||
|
||||
|
||||
## Linking to OpenBLAS
|
||||
|
||||
OpenBLAS can be used as a shared or a static library.
|
||||
|
||||
### Link a shared library
|
||||
|
||||
The shared library is normally called `libopenblas.so`, but note that the name
|
||||
may be different as a result of build flags used or naming choices by a distro
|
||||
packager (see [distributing.md](distributing.md) for details). To link a shared library named
|
||||
`libopenblas.so`, the flag `-lopenblas` is needed. To find the OpenBLAS headers,
|
||||
a `-I/path/to/includedir` is needed. And unless the library is installed in a
|
||||
directory that the linker searches by default, also `-L` and `-Wl,-rpath` flags
|
||||
are needed. For a source file `test.c` (e.g., the example code under _Call
|
||||
CBLAS interface_ further down), the shared library can then be linked with:
|
||||
```
|
||||
gcc -o test test.c -I/your_path/OpenBLAS/include/ -L/your_path/OpenBLAS/lib -Wl,-rpath,/your_path/OpenBLAS/lib -lopenblas
|
||||
```
|
||||
|
||||
The `-Wl,-rpath,/your_path/OpenBLAS/lib` linker flag can be omitted if you
|
||||
ran `ldconfig` to update linker cache, put `/your_path/OpenBLAS/lib` in
|
||||
`/etc/ld.so.conf` or a file in `/etc/ld.so.conf.d`, or installed OpenBLAS in a
|
||||
location that is part of the `ld.so` default search path (usually `/lib`,
|
||||
`/usr/lib` and `/usr/local/lib`). Alternatively, you can set the environment
|
||||
variable `LD_LIBRARY_PATH` to point to the folder that contains `libopenblas.so`.
|
||||
Otherwise, the build may succeed but at runtime loading the library will fail
|
||||
with a message like:
|
||||
```
|
||||
cannot open shared object file: no such file or directory
|
||||
```
|
||||
|
||||
More flags may be needed, depending on how OpenBLAS was built:
|
||||
|
||||
- If `libopenblas` is multi-threaded, please add `-lpthread`.
|
||||
- If the library contains LAPACK functions (usually also true), please add
|
||||
`-lgfortran` (other Fortran libraries may also be needed, e.g. `-lquadmath`).
|
||||
Note that if you only make calls to LAPACKE routines, i.e. your code has
|
||||
`#include "lapacke.h"` and makes calls to methods like `LAPACKE_dgeqrf`,
|
||||
then `-lgfortran` is not needed.
|
||||
|
||||
!!! tip "Use pkg-config"
|
||||
|
||||
Usually a pkg-config file (e.g., `openblas.pc`) is installed together
|
||||
with a `libopenblas` shared library. pkg-config is a tool that will
|
||||
tell you the exact flags needed for linking. For example:
|
||||
|
||||
```
|
||||
$ pkg-config --cflags openblas
|
||||
-I/usr/local/include
|
||||
$ pkg-config --libs openblas
|
||||
-L/usr/local/lib -lopenblas
|
||||
```
|
||||
|
||||
### Link a static library
|
||||
|
||||
Linking a static library is simpler - add the path to the static OpenBLAS
|
||||
library to the compile command:
|
||||
```
|
||||
gcc -o test test.c /your/path/libopenblas.a
|
||||
```
|
||||
|
||||
|
||||
## Code examples
|
||||
|
||||
### Call CBLAS interface
|
||||
|
||||
This example shows calling `cblas_dgemm` in C:
|
||||
|
||||
<!-- Source: https://gist.github.com/xianyi/6930656 -->
|
||||
```c
|
||||
#include <cblas.h>
|
||||
#include <stdio.h>
|
||||
|
||||
int main()
|
||||
{
|
||||
int i=0;
|
||||
double A[6] = {1.0,2.0,1.0,-3.0,4.0,-1.0};
|
||||
double B[6] = {1.0,2.0,1.0,-3.0,4.0,-1.0};
|
||||
double C[9] = {.5,.5,.5,.5,.5,.5,.5,.5,.5};
|
||||
cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans,3,3,2,1,A, 3, B, 3,2,C,3);
|
||||
|
||||
for(i=0; i<9; i++)
|
||||
printf("%lf ", C[i]);
|
||||
printf("\n");
|
||||
}
|
||||
```
|
||||
|
||||
To compile this file, save it as `test_cblas_dgemm.c` and then run:
|
||||
```
|
||||
gcc -o test_cblas_open test_cblas_dgemm.c -I/your_path/OpenBLAS/include/ -L/your_path/OpenBLAS/lib -lopenblas -lpthread -lgfortran
|
||||
```
|
||||
will result in a `test_cblas_open` executable.
|
||||
|
||||
### Call BLAS Fortran interface
|
||||
|
||||
This example shows calling the `dgemm` Fortran interface in C:
|
||||
|
||||
<!-- Source: https://gist.github.com/xianyi/5780018 -->
|
||||
```c
|
||||
#include "stdio.h"
|
||||
#include "stdlib.h"
|
||||
#include "sys/time.h"
|
||||
#include "time.h"
|
||||
|
||||
extern void dgemm_(char*, char*, int*, int*,int*, double*, double*, int*, double*, int*, double*, double*, int*);
|
||||
|
||||
int main(int argc, char* argv[])
|
||||
{
|
||||
int i;
|
||||
printf("test!\n");
|
||||
if(argc<4){
|
||||
printf("Input Error\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
int m = atoi(argv[1]);
|
||||
int n = atoi(argv[2]);
|
||||
int k = atoi(argv[3]);
|
||||
int sizeofa = m * k;
|
||||
int sizeofb = k * n;
|
||||
int sizeofc = m * n;
|
||||
char ta = 'N';
|
||||
char tb = 'N';
|
||||
double alpha = 1.2;
|
||||
double beta = 0.001;
|
||||
|
||||
struct timeval start,finish;
|
||||
double duration;
|
||||
|
||||
double* A = (double*)malloc(sizeof(double) * sizeofa);
|
||||
double* B = (double*)malloc(sizeof(double) * sizeofb);
|
||||
double* C = (double*)malloc(sizeof(double) * sizeofc);
|
||||
|
||||
srand((unsigned)time(NULL));
|
||||
|
||||
for (i=0; i<sizeofa; i++)
|
||||
A[i] = i%3+1;//(rand()%100)/10.0;
|
||||
|
||||
for (i=0; i<sizeofb; i++)
|
||||
B[i] = i%3+1;//(rand()%100)/10.0;
|
||||
|
||||
for (i=0; i<sizeofc; i++)
|
||||
C[i] = i%3+1;//(rand()%100)/10.0;
|
||||
//#if 0
|
||||
printf("m=%d,n=%d,k=%d,alpha=%lf,beta=%lf,sizeofc=%d\n",m,n,k,alpha,beta,sizeofc);
|
||||
gettimeofday(&start, NULL);
|
||||
dgemm_(&ta, &tb, &m, &n, &k, &alpha, A, &m, B, &k, &beta, C, &m);
|
||||
gettimeofday(&finish, NULL);
|
||||
|
||||
duration = ((double)(finish.tv_sec-start.tv_sec)*1000000 + (double)(finish.tv_usec-start.tv_usec)) / 1000000;
|
||||
double gflops = 2.0 * m *n*k;
|
||||
gflops = gflops/duration*1.0e-6;
|
||||
|
||||
FILE *fp;
|
||||
fp = fopen("timeDGEMM.txt", "a");
|
||||
fprintf(fp, "%dx%dx%d\t%lf s\t%lf MFLOPS\n", m, n, k, duration, gflops);
|
||||
fclose(fp);
|
||||
|
||||
free(A);
|
||||
free(B);
|
||||
free(C);
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
To compile this file, save it as `time_dgemm.c` and then run:
|
||||
```
|
||||
gcc -o time_dgemm time_dgemm.c /your/path/libopenblas.a -lpthread
|
||||
```
|
||||
You can then run it as: `./time_dgemm <m> <n> <k>`, with `m`, `n`, and `k` input
|
||||
parameters to the `time_dgemm` executable.
|
||||
|
||||
!!! note
|
||||
|
||||
When calling the Fortran interface from C, you have to deal with symbol name
|
||||
differences caused by compiler conventions. That is why the `dgemm_` function
|
||||
call in the example above has a trailing underscore. This is what it looks like
|
||||
when using `gcc`/`gfortran`, however such details may change for different
|
||||
compilers. Hence it requires extra support code. The CBLAS interface may be
|
||||
more portable when writing C code.
|
||||
|
||||
When writing code that needs to be portable and work across different
|
||||
platforms and compilers, the above code example is not recommended for
|
||||
usage. Instead, we advise looking at how OpenBLAS (or BLAS in general, since
|
||||
this problem isn't specific to OpenBLAS) functions are called in widely
|
||||
used projects like Julia, SciPy, or R.
|
||||
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
* Please read the [FAQ](faq.md) first, your problem may be described there.
|
||||
* Please ensure you are using a recent enough compiler, that supports the
|
||||
features your CPU provides (example: GCC versions before 4.6 were known to
|
||||
not support AVX kernels, and before 6.1 AVX512CD kernels).
|
||||
* The number of CPU cores supported by default is <=256. On Linux x86-64, there
|
||||
is experimental support for up to 1024 cores and 128 NUMA nodes if you build
|
||||
the library with `BIGNUMA=1`.
|
||||
* OpenBLAS does not set processor affinity by default. On Linux, you can enable
|
||||
processor affinity by commenting out the line `NO_AFFINITY=1` in
|
||||
`Makefile.rule`.
|
||||
* On Loongson 3A, `make test` is known to fail with a `pthread_create` error
|
||||
and an `EAGAIN` error code. However, it will be OK when you run the same
|
||||
testcase in a shell.
|
|
@ -68,6 +68,8 @@ if (USE_THREAD)
|
|||
endif ()
|
||||
|
||||
foreach (float_type ${FLOAT_TYPES})
|
||||
GenerateNamedObjects("gemm_batch_thread.c" "" "gemm_batch_thread" 0 "" "" false ${float_type})
|
||||
|
||||
if (${float_type} STREQUAL "COMPLEX" OR ${float_type} STREQUAL "ZCOMPLEX")
|
||||
GenerateCombinationObjects("zherk_kernel.c" "LOWER;CONJ" "U;N" "HERK" 2 "herk_kernel" false ${float_type})
|
||||
# TRANS needs to be set/unset when CONJ is set/unset, so can't use it as a combination
|
||||
|
|
|
@ -37,7 +37,7 @@ SBLASOBJS += \
|
|||
ssyrk_UN.$(SUFFIX) ssyrk_UT.$(SUFFIX) ssyrk_LN.$(SUFFIX) ssyrk_LT.$(SUFFIX) \
|
||||
ssyr2k_UN.$(SUFFIX) ssyr2k_UT.$(SUFFIX) ssyr2k_LN.$(SUFFIX) ssyr2k_LT.$(SUFFIX) \
|
||||
ssyrk_kernel_U.$(SUFFIX) ssyrk_kernel_L.$(SUFFIX) \
|
||||
ssyr2k_kernel_U.$(SUFFIX) ssyr2k_kernel_L.$(SUFFIX)
|
||||
ssyr2k_kernel_U.$(SUFFIX) ssyr2k_kernel_L.$(SUFFIX) sgemm_batch_thread.$(SUFFIX)
|
||||
|
||||
DBLASOBJS += \
|
||||
dgemm_nn.$(SUFFIX) dgemm_nt.$(SUFFIX) dgemm_tn.$(SUFFIX) dgemm_tt.$(SUFFIX) \
|
||||
|
@ -53,7 +53,7 @@ DBLASOBJS += \
|
|||
dsyrk_UN.$(SUFFIX) dsyrk_UT.$(SUFFIX) dsyrk_LN.$(SUFFIX) dsyrk_LT.$(SUFFIX) \
|
||||
dsyr2k_UN.$(SUFFIX) dsyr2k_UT.$(SUFFIX) dsyr2k_LN.$(SUFFIX) dsyr2k_LT.$(SUFFIX) \
|
||||
dsyrk_kernel_U.$(SUFFIX) dsyrk_kernel_L.$(SUFFIX) \
|
||||
dsyr2k_kernel_U.$(SUFFIX) dsyr2k_kernel_L.$(SUFFIX)
|
||||
dsyr2k_kernel_U.$(SUFFIX) dsyr2k_kernel_L.$(SUFFIX) dgemm_batch_thread.$(SUFFIX)
|
||||
|
||||
QBLASOBJS += \
|
||||
qgemm_nn.$(SUFFIX) qgemm_nt.$(SUFFIX) qgemm_tn.$(SUFFIX) qgemm_tt.$(SUFFIX) \
|
||||
|
@ -103,7 +103,7 @@ CBLASOBJS += \
|
|||
cherk_kernel_LN.$(SUFFIX) cherk_kernel_LC.$(SUFFIX) \
|
||||
csyr2k_kernel_U.$(SUFFIX) csyr2k_kernel_L.$(SUFFIX) \
|
||||
cher2k_kernel_UN.$(SUFFIX) cher2k_kernel_UC.$(SUFFIX) \
|
||||
cher2k_kernel_LN.$(SUFFIX) cher2k_kernel_LC.$(SUFFIX)
|
||||
cher2k_kernel_LN.$(SUFFIX) cher2k_kernel_LC.$(SUFFIX) cgemm_batch_thread.$(SUFFIX)
|
||||
|
||||
ZBLASOBJS += \
|
||||
zgemm_nn.$(SUFFIX) zgemm_cn.$(SUFFIX) zgemm_tn.$(SUFFIX) zgemm_nc.$(SUFFIX) \
|
||||
|
@ -137,7 +137,7 @@ ZBLASOBJS += \
|
|||
zherk_kernel_LN.$(SUFFIX) zherk_kernel_LC.$(SUFFIX) \
|
||||
zsyr2k_kernel_U.$(SUFFIX) zsyr2k_kernel_L.$(SUFFIX) \
|
||||
zher2k_kernel_UN.$(SUFFIX) zher2k_kernel_UC.$(SUFFIX) \
|
||||
zher2k_kernel_LN.$(SUFFIX) zher2k_kernel_LC.$(SUFFIX)
|
||||
zher2k_kernel_LN.$(SUFFIX) zher2k_kernel_LC.$(SUFFIX) zgemm_batch_thread.$(SUFFIX)
|
||||
|
||||
|
||||
XBLASOBJS += \
|
||||
|
@ -2942,6 +2942,21 @@ gemm_thread_variable.$(PSUFFIX) : gemm_thread_variable.c ../../common.h
|
|||
beta_thread.$(PSUFFIX) : beta_thread.c ../../common.h
|
||||
$(CC) -c $(PFLAGS) $< -o $(@F)
|
||||
|
||||
sbgemm_batch_thread.$(SUFFIX) : gemm_batch_thread.c ../../common.h
|
||||
$(CC) -c $(CFLAGS) $< -o $(@F)
|
||||
|
||||
sgemm_batch_thread.$(SUFFIX) : gemm_batch_thread.c ../../common.h
|
||||
$(CC) -c $(CFLAGS) $< -o $(@F)
|
||||
|
||||
dgemm_batch_thread.$(SUFFIX) : gemm_batch_thread.c ../../common.h
|
||||
$(CC) -c $(CFLAGS) $< -o $(@F)
|
||||
|
||||
cgemm_batch_thread.$(SUFFIX) : gemm_batch_thread.c ../../common.h
|
||||
$(CC) -c $(CFLAGS) $< -o $(@F)
|
||||
|
||||
zgemm_batch_thread.$(SUFFIX) : gemm_batch_thread.c ../../common.h
|
||||
$(CC) -c $(CFLAGS) $< -o $(@F)
|
||||
|
||||
|
||||
sbgemm_thread_nn.$(PSUFFIX) : gemm.c level3_thread.c ../../param.h
|
||||
$(CC) $(PFLAGS) $(BLOCKS) -c -DTHREADED_LEVEL3 -DHALF -UDOUBLE -UCOMPLEX -DNN $< -o $(@F)
|
||||
|
|
|
@ -0,0 +1,156 @@
|
|||
/*****************************************************************************
|
||||
Copyright (c) 2020, The OpenBLAS Project
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
3. Neither the name of the OpenBLAS project nor the names of
|
||||
its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
**********************************************************************************/
|
||||
|
||||
#include "common.h"
|
||||
|
||||
void openblas_warning(int verbose, const char * msg);
|
||||
|
||||
#ifdef SMALL_MATRIX_OPT
/* Run one batch entry through its small-matrix GEMM kernel.
 *
 * args->routine holds either a beta==0 kernel (BLAS_SMALL_B0_OPT) or a
 * general small kernel (BLAS_SMALL_OPT); alpha/beta scalars are read from
 * the argument block.  range_m/range_n/sa/sb/mypos are unused here and
 * exist only so the signature matches the generic queue-routine type.
 *
 * Returns 0 when a small kernel handled the entry, 1 when the caller must
 * fall back to the regular blocked routine.
 */
static int inner_small_matrix_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, IFLOAT *sa, IFLOAT *sb, BLASLONG mypos){
	const int mode = args->routine_mode;
#ifndef COMPLEX
	int (*small_kernel)(BLASLONG, BLASLONG, BLASLONG, FLOAT *, BLASLONG, FLOAT ,FLOAT *, BLASLONG, FLOAT, FLOAT *, BLASLONG);
	int (*small_kernel_b0)(BLASLONG, BLASLONG, BLASLONG, FLOAT *, BLASLONG, FLOAT, FLOAT *, BLASLONG, FLOAT *, BLASLONG);
#else
	int (*small_kernel)(BLASLONG, BLASLONG, BLASLONG, FLOAT *, BLASLONG, FLOAT , FLOAT, FLOAT *, BLASLONG, FLOAT , FLOAT, FLOAT *, BLASLONG);
	int (*small_kernel_b0)(BLASLONG, BLASLONG, BLASLONG, FLOAT *, BLASLONG, FLOAT , FLOAT, FLOAT *, BLASLONG, FLOAT *, BLASLONG);
#endif

	if ((mode & BLAS_SMALL_B0_OPT) == BLAS_SMALL_B0_OPT) {
		/* beta == 0 variant: C is written, never read */
		small_kernel_b0 = args->routine;
#ifndef COMPLEX
		small_kernel_b0(args->m, args->n, args->k, args->a, args->lda,
		                *(FLOAT *)(args->alpha),
		                args->b, args->ldb, args->c, args->ldc);
#else
		small_kernel_b0(args->m, args->n, args->k, args->a, args->lda,
		                ((FLOAT *)args->alpha)[0], ((FLOAT *)args->alpha)[1],
		                args->b, args->ldb, args->c, args->ldc);
#endif
		return 0;
	}

	if (mode & BLAS_SMALL_OPT) {
		/* general small kernel: C = alpha*A*B + beta*C */
		small_kernel = args->routine;
#ifndef COMPLEX
		small_kernel(args->m, args->n, args->k, args->a, args->lda,
		             *(FLOAT *)(args->alpha),
		             args->b, args->ldb,
		             *(FLOAT *)(args->beta),
		             args->c, args->ldc);
#else
		small_kernel(args->m, args->n, args->k, args->a, args->lda,
		             ((FLOAT *)args->alpha)[0], ((FLOAT *)args->alpha)[1],
		             args->b, args->ldb,
		             ((FLOAT *)args->beta)[0], ((FLOAT *)args->beta)[1],
		             args->c, args->ldc);
#endif
		return 0;
	}

	return 1;	/* not a small-matrix entry */
}
#endif
|
||||
|
||||
/* Execute a batch of `nums` GEMM-style jobs described by args_array.
 *
 * Each entry carries its own routine pointer (args_array[i].routine) and
 * routine_mode.  With SMP enabled the entries are chained into a
 * blas_queue_t list and dispatched to the thread pool in groups of at most
 * `nthreads`; without SMP (or when only one CPU is available) they run
 * serially on the shared sa/sb packing buffer.
 *
 * Returns 0 on success, 1 if the queue allocation failed.
 */
int CNAME(blas_arg_t * args_array, BLASLONG nums){
	XFLOAT *buffer;
	XFLOAT *sa, *sb;
	int nthreads=1;
	int (*routine)(blas_arg_t *, void *, void *, XFLOAT *, XFLOAT *, BLASLONG);
	int i=0, current_nums;

#ifdef SMP
	blas_queue_t *queue=NULL;
#endif

	if (nums <= 0) return 0;

	/* One shared packing buffer; sa/sb are carved out of it with the
	   usual GEMM offsets and alignment mask. */
	buffer = (XFLOAT *)blas_memory_alloc(0);
	sa = (XFLOAT *)((BLASLONG)buffer + GEMM_OFFSET_A);
	sb = (XFLOAT *)(((BLASLONG)sa + ((GEMM_P * GEMM_Q * COMPSIZE * SIZE + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);

#ifdef SMP
	nthreads = num_cpu_avail(3);

	if (nthreads == 1) {
#endif
		/* single thread: run every entry in order */
		for (i = 0; i < nums; i++) {
			routine = args_array[i].routine;
#ifdef SMALL_MATRIX_OPT
			if (args_array[i].routine_mode & BLAS_SMALL_OPT) {
				inner_small_matrix_thread(&args_array[i], NULL, NULL, NULL, NULL, 0);
			} else {
#endif
				routine(&args_array[i], NULL, NULL, sa, sb, 0);
#ifdef SMALL_MATRIX_OPT
			}
#endif
		}
#ifdef SMP
	} else {
		/* multi thread: build one queue entry per job */

		queue = (blas_queue_t *)malloc((nums + 1) * sizeof(blas_queue_t));
		if (queue == NULL) {
			openblas_warning(0, "memory alloc failed!\n");
			/* fix: release the packing buffer on this error path
			   (previously leaked) */
			blas_memory_free(buffer);
			return (1);
		}
		for (i = 0; i < nums; i++) {
			queue[i].args = &args_array[i];
			queue[i].range_m = NULL;
			queue[i].range_n = NULL;
			queue[i].sa = NULL;
			queue[i].sb = NULL;
			queue[i].next = &queue[i + 1];

			queue[i].mode = args_array[i].routine_mode;
			queue[i].routine = args_array[i].routine;

#ifdef SMALL_MATRIX_OPT
			if ((args_array[i].routine_mode & BLAS_SMALL_B0_OPT) || (args_array[i].routine_mode & BLAS_SMALL_OPT)) {
				queue[i].routine = inner_small_matrix_thread;
			}
#endif
		}

		/* Dispatch in chunks of at most nthreads jobs; the first entry
		   of each chunk reuses the shared sa/sb buffer, and the chunk's
		   last entry terminates the queue list. */
		for (i = 0; i < nums; i += nthreads) {
			current_nums = ((nums - i) > nthreads) ? nthreads : (nums - i);

			queue[i].sa = sa;
			queue[i].sb = sb;
			queue[i + current_nums - 1].next = NULL;

			exec_blas(current_nums, &queue[i]);
		}
		free(queue);
	}
#endif
	blas_memory_free(buffer);
	return 0;
}
|
|
@ -570,6 +570,8 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG
|
|||
InitializeCriticalSection((PCRITICAL_SECTION)&level3_lock);
|
||||
#else
|
||||
static pthread_mutex_t level3_lock = PTHREAD_MUTEX_INITIALIZER;
|
||||
static pthread_cond_t level3_wakeup = PTHREAD_COND_INITIALIZER;
|
||||
volatile static BLASLONG CPU_AVAILABLE = MAX_CPU_NUMBER;
|
||||
#endif
|
||||
|
||||
blas_arg_t newarg;
|
||||
|
@ -639,6 +641,12 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG
|
|||
EnterCriticalSection((PCRITICAL_SECTION)&level3_lock);
|
||||
#else
|
||||
pthread_mutex_lock(&level3_lock);
|
||||
while(CPU_AVAILABLE < nthreads) {
|
||||
pthread_cond_wait(&level3_wakeup, &level3_lock);
|
||||
}
|
||||
CPU_AVAILABLE -= nthreads;
|
||||
WMB;
|
||||
pthread_mutex_unlock(&level3_lock);
|
||||
#endif
|
||||
|
||||
#ifdef USE_ALLOC_HEAP
|
||||
|
@ -783,6 +791,10 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG
|
|||
#elif defined(OS_WINDOWS)
|
||||
LeaveCriticalSection((PCRITICAL_SECTION)&level3_lock);
|
||||
#else
|
||||
pthread_mutex_lock(&level3_lock);
|
||||
CPU_AVAILABLE += nthreads;
|
||||
WMB;
|
||||
pthread_cond_signal(&level3_wakeup);
|
||||
pthread_mutex_unlock(&level3_lock);
|
||||
#endif
|
||||
|
||||
|
@ -826,6 +838,16 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, IFLOAT *sa, IF
|
|||
if (nthreads_m * nthreads_n > args -> nthreads) {
|
||||
nthreads_n = blas_quickdivide(args -> nthreads, nthreads_m);
|
||||
}
|
||||
/* The nthreads_m and nthreads_n are adjusted so that the submatrix */
|
||||
/* to be handled by each thread preferably becomes a square matrix */
|
||||
/* by minimizing an objective function 'n * nthreads_m + m * nthreads_n'. */
|
||||
/* Objective function come from sum of partitions in m and n. */
|
||||
/* (n / nthreads_n) + (m / nthreads_m) */
|
||||
/* = (n * nthreads_m + m * nthreads_n) / (nthreads_n * nthreads_m) */
|
||||
while (nthreads_m % 2 == 0 && n * nthreads_m + m * nthreads_n > n * (nthreads_m / 2) + m * (nthreads_n * 2)) {
|
||||
nthreads_m /= 2;
|
||||
nthreads_n *= 2;
|
||||
}
|
||||
}
|
||||
|
||||
/* Execute serial or parallel computation */
|
||||
|
|
|
@ -25,6 +25,7 @@ if (USE_THREAD)
|
|||
${BLAS_SERVER}
|
||||
divtable.c # TODO: Makefile has -UDOUBLE
|
||||
blas_l1_thread.c
|
||||
blas_server_callback.c
|
||||
)
|
||||
|
||||
if (NOT NO_AFFINITY)
|
||||
|
@ -51,6 +52,8 @@ if (DYNAMIC_ARCH)
|
|||
list(APPEND COMMON_SOURCES dynamic_arm64.c)
|
||||
elseif (POWER)
|
||||
list(APPEND COMMON_SOURCES dynamic_power.c)
|
||||
elseif (RISCV64)
|
||||
list(APPEND COMMON_SOURCES dynamic_riscv64.c detect_riscv64.c)
|
||||
else ()
|
||||
list(APPEND COMMON_SOURCES dynamic.c)
|
||||
endif ()
|
||||
|
|
|
@ -6,7 +6,7 @@ COMMONOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) c_abs.$(SUFFIX) z_abs.$(SUFFIX)
|
|||
#COMMONOBJS += slamch.$(SUFFIX) slamc3.$(SUFFIX) dlamch.$(SUFFIX) dlamc3.$(SUFFIX)
|
||||
|
||||
ifdef SMP
|
||||
COMMONOBJS += blas_server.$(SUFFIX) divtable.$(SUFFIX) blasL1thread.$(SUFFIX)
|
||||
COMMONOBJS += blas_server.$(SUFFIX) divtable.$(SUFFIX) blasL1thread.$(SUFFIX) blas_server_callback.$(SUFFIX)
|
||||
ifneq ($(NO_AFFINITY), 1)
|
||||
COMMONOBJS += init.$(SUFFIX)
|
||||
endif
|
||||
|
@ -30,12 +30,16 @@ else
|
|||
ifeq ($(ARCH),loongarch64)
|
||||
COMMONOBJS += dynamic_loongarch64.$(SUFFIX)
|
||||
else
|
||||
ifeq ($(ARCH),riscv64)
|
||||
COMMONOBJS += dynamic_riscv64.$(SUFFIX) detect_riscv64.$(SUFFIX)
|
||||
else
|
||||
COMMONOBJS += dynamic.$(SUFFIX)
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
else
|
||||
COMMONOBJS += parameter.$(SUFFIX)
|
||||
endif
|
||||
|
@ -106,12 +110,16 @@ else
|
|||
ifeq ($(ARCH),loongarch64)
|
||||
HPLOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) dynamic_loongarch64.$(SUFFIX)
|
||||
else
|
||||
ifeq ($(ARCH),riscv64)
|
||||
HPLOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) dynamic_riscv64.$(SUFFIX) detect_riscv64.$(SUFFIX)
|
||||
else
|
||||
HPLOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) dynamic.$(SUFFIX)
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
else
|
||||
HPLOBJS = memory.$(SUFFIX) xerbla.$(SUFFIX) parameter.$(SUFFIX)
|
||||
endif
|
||||
|
@ -140,6 +148,9 @@ memory.$(SUFFIX) : $(MEMORY) ../../common.h ../../param.h
|
|||
blas_server.$(SUFFIX) : $(BLAS_SERVER) ../../common.h ../../common_thread.h ../../param.h
|
||||
$(CC) $(CFLAGS) -c $< -o $(@F)
|
||||
|
||||
blas_server_callback.$(SUFFIX) : blas_server_callback.c ../../common.h
|
||||
$(CC) $(CFLAGS) -c $< -o $(@F)
|
||||
|
||||
openblas_set_num_threads.$(SUFFIX) : openblas_set_num_threads.c
|
||||
$(CC) $(CFLAGS) -c $< -o $(@F)
|
||||
|
||||
|
@ -206,6 +217,9 @@ addx.$(SUFFIX) : $(ARCH)/addx.c
|
|||
mulx.$(SUFFIX) : $(ARCH)/mulx.c
|
||||
$(CC) $(CFLAGS) -c -DXDOUBLE -UCOMPLEX $< -o $(@F)
|
||||
|
||||
detect_riscv64.$(SUFFIX): detect_riscv64.c
|
||||
$(CC) $(CFLAGS) -c -march=rv64imafdcv $< -o $(@F)
|
||||
|
||||
xerbla.$(PSUFFIX) : xerbla.c
|
||||
$(CC) $(PFLAGS) -c $< -o $(@F)
|
||||
|
||||
|
|
|
@ -115,6 +115,8 @@ int blas_server_avail __attribute__((aligned(ATTRIBUTE_SIZE))) = 0;
|
|||
|
||||
int blas_omp_threads_local = 1;
|
||||
|
||||
static void * blas_thread_buffer[MAX_CPU_NUMBER];
|
||||
|
||||
/* Local Variables */
|
||||
#if defined(USE_PTHREAD_LOCK)
|
||||
static pthread_mutex_t server_lock = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
@ -190,6 +192,10 @@ static int main_status[MAX_CPU_NUMBER];
|
|||
BLASLONG exit_time[MAX_CPU_NUMBER];
|
||||
#endif
|
||||
|
||||
//Prototypes
|
||||
static void exec_threads(int , blas_queue_t *, int);
|
||||
static void adjust_thread_buffers();
|
||||
|
||||
static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){
|
||||
|
||||
if (!(mode & BLAS_COMPLEX)){
|
||||
|
@ -375,7 +381,6 @@ static void* blas_thread_server(void *arg){
|
|||
/* Thread identifier */
|
||||
BLASLONG cpu = (BLASLONG)arg;
|
||||
unsigned int last_tick;
|
||||
void *buffer, *sa, *sb;
|
||||
blas_queue_t *queue;
|
||||
|
||||
blas_queue_t *tscq;
|
||||
|
@ -395,8 +400,6 @@ blas_queue_t *tscq;
|
|||
main_status[cpu] = MAIN_ENTER;
|
||||
#endif
|
||||
|
||||
buffer = blas_memory_alloc(2);
|
||||
|
||||
#ifdef SMP_DEBUG
|
||||
fprintf(STDERR, "Server[%2ld] Thread has just been spawned!\n", cpu);
|
||||
#endif
|
||||
|
@ -415,7 +418,7 @@ blas_queue_t *tscq;
|
|||
|
||||
tscq = atomic_load_queue(&thread_status[cpu].queue);
|
||||
|
||||
while(!tscq) {
|
||||
while(!tscq || tscq == 0x1) {
|
||||
YIELDING;
|
||||
|
||||
if ((unsigned int)rpcc() - last_tick > thread_timeout) {
|
||||
|
@ -456,108 +459,8 @@ blas_queue_t *tscq;
|
|||
start = rpcc();
|
||||
#endif
|
||||
|
||||
if (queue) {
|
||||
int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = (int (*)(blas_arg_t *, void *, void *, void *, void *, BLASLONG))queue -> routine;
|
||||
|
||||
atomic_store_queue(&thread_status[cpu].queue, (blas_queue_t *)1);
|
||||
|
||||
sa = queue -> sa;
|
||||
sb = queue -> sb;
|
||||
|
||||
#ifdef SMP_DEBUG
|
||||
if (queue -> args) {
|
||||
fprintf(STDERR, "Server[%2ld] Calculation started. Mode = 0x%03x M = %3ld N=%3ld K=%3ld\n",
|
||||
cpu, queue->mode, queue-> args ->m, queue->args->n, queue->args->k);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONSISTENT_FPCSR
|
||||
#ifdef __aarch64__
|
||||
__asm__ __volatile__ ("msr fpcr, %0" : : "r" (queue -> sse_mode));
|
||||
#else
|
||||
__asm__ __volatile__ ("ldmxcsr %0" : : "m" (queue -> sse_mode));
|
||||
__asm__ __volatile__ ("fldcw %0" : : "m" (queue -> x87_mode));
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef MONITOR
|
||||
main_status[cpu] = MAIN_RUNNING1;
|
||||
#endif
|
||||
|
||||
if (sa == NULL) sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A);
|
||||
|
||||
if (sb == NULL) {
|
||||
if (!(queue -> mode & BLAS_COMPLEX)){
|
||||
#ifdef EXPRECISION
|
||||
if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){
|
||||
sb = (void *)(((BLASLONG)sa + ((QGEMM_P * QGEMM_Q * sizeof(xdouble)
|
||||
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
|
||||
} else
|
||||
#endif
|
||||
if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE) {
|
||||
#ifdef BUILD_DOUBLE
|
||||
sb = (void *)(((BLASLONG)sa + ((DGEMM_P * DGEMM_Q * sizeof(double)
|
||||
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
|
||||
#endif
|
||||
} else if ((queue -> mode & BLAS_PREC) == BLAS_SINGLE) {
|
||||
#ifdef BUILD_SINGLE
|
||||
sb = (void *)(((BLASLONG)sa + ((SGEMM_P * SGEMM_Q * sizeof(float)
|
||||
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
|
||||
#endif
|
||||
} else {
|
||||
/* Other types in future */
|
||||
}
|
||||
} else {
|
||||
#ifdef EXPRECISION
|
||||
if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){
|
||||
sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * 2 * sizeof(xdouble)
|
||||
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
|
||||
} else
|
||||
#endif
|
||||
if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE){
|
||||
#ifdef BUILD_COMPLEX16
|
||||
sb = (void *)(((BLASLONG)sa + ((ZGEMM_P * ZGEMM_Q * 2 * sizeof(double)
|
||||
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
|
||||
#endif
|
||||
} else if ((queue -> mode & BLAS_PREC) == BLAS_SINGLE) {
|
||||
#ifdef BUILD_COMPLEX
|
||||
sb = (void *)(((BLASLONG)sa + ((CGEMM_P * CGEMM_Q * 2 * sizeof(float)
|
||||
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
|
||||
#endif
|
||||
} else {
|
||||
/* Other types in future */
|
||||
}
|
||||
}
|
||||
queue->sb=sb;
|
||||
}
|
||||
|
||||
#ifdef MONITOR
|
||||
main_status[cpu] = MAIN_RUNNING2;
|
||||
#endif
|
||||
|
||||
if (queue -> mode & BLAS_LEGACY) {
|
||||
legacy_exec(routine, queue -> mode, queue -> args, sb);
|
||||
} else
|
||||
if (queue -> mode & BLAS_PTHREAD) {
|
||||
void (*pthreadcompat)(void *) = (void(*)(void*))queue -> routine;
|
||||
(pthreadcompat)(queue -> args);
|
||||
} else
|
||||
(routine)(queue -> args, queue -> range_m, queue -> range_n, sa, sb, queue -> position);
|
||||
|
||||
#ifdef SMP_DEBUG
|
||||
fprintf(STDERR, "Server[%2ld] Calculation finished!\n", cpu);
|
||||
#endif
|
||||
|
||||
#ifdef MONITOR
|
||||
main_status[cpu] = MAIN_FINISH;
|
||||
#endif
|
||||
|
||||
// arm: make sure all results are written out _before_
|
||||
// thread is marked as done and other threads use them
|
||||
MB;
|
||||
atomic_store_queue(&thread_status[cpu].queue, (blas_queue_t *)0);
|
||||
|
||||
|
||||
if(queue) {
|
||||
exec_threads(cpu, queue, 0);
|
||||
}
|
||||
|
||||
#ifdef MONITOR
|
||||
|
@ -580,8 +483,6 @@ blas_queue_t *tscq;
|
|||
fprintf(STDERR, "Server[%2ld] Shutdown!\n", cpu);
|
||||
#endif
|
||||
|
||||
blas_memory_free(buffer);
|
||||
|
||||
//pthread_exit(NULL);
|
||||
|
||||
return NULL;
|
||||
|
@ -663,6 +564,9 @@ int blas_thread_init(void){
|
|||
|
||||
LOCK_COMMAND(&server_lock);
|
||||
|
||||
// Adjust thread buffers
|
||||
adjust_thread_buffers();
|
||||
|
||||
if (!blas_server_avail){
|
||||
|
||||
thread_timeout_env=openblas_thread_timeout();
|
||||
|
@ -691,6 +595,8 @@ int blas_thread_init(void){
|
|||
struct rlimit rlim;
|
||||
const char *msg = strerror(ret);
|
||||
fprintf(STDERR, "OpenBLAS blas_thread_init: pthread_create failed for thread %ld of %d: %s\n", i+1,blas_num_threads,msg);
|
||||
fprintf(STDERR, "OpenBLAS blas_thread_init: ensure that your address space and process count limits are big enough (ulimit -a)\n");
|
||||
fprintf(STDERR, "OpenBLAS blas_thread_init: or set a smaller OPENBLAS_NUM_THREADS to fit into what you have available\n");
|
||||
#ifdef RLIMIT_NPROC
|
||||
if(0 == getrlimit(RLIMIT_NPROC, &rlim)) {
|
||||
fprintf(STDERR, "OpenBLAS blas_thread_init: RLIMIT_NPROC "
|
||||
|
@ -893,6 +799,18 @@ int exec_blas(BLASLONG num, blas_queue_t *queue){
|
|||
fprintf(STDERR, "Exec_blas is called. Number of executing threads : %ld\n", num);
|
||||
#endif
|
||||
|
||||
//Redirect to caller's callback routine
|
||||
if (openblas_threads_callback_) {
|
||||
int buf_index = 0, i = 0;
|
||||
#ifndef USE_SIMPLE_THREADED_LEVEL3
|
||||
for (i = 0; i < num; i ++)
|
||||
queue[i].position = i;
|
||||
#endif
|
||||
openblas_threads_callback_(1, (openblas_dojob_callback) exec_threads, num, sizeof(blas_queue_t), (void*) queue, buf_index);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
#ifdef __ELF__
|
||||
if (omp_in_parallel && (num > 1)) {
|
||||
if (omp_in_parallel() > 0) {
|
||||
|
@ -1006,7 +924,7 @@ void goto_set_num_threads(int num_threads) {
|
|||
|
||||
blas_cpu_number = num_threads;
|
||||
|
||||
#if defined(ARCH_MIPS64)
|
||||
#if defined(ARCH_MIPS64) || defined(ARCH_LOONGARCH64)
|
||||
#ifndef DYNAMIC_ARCH
|
||||
//set parameters for different number of threads.
|
||||
blas_set_parameter();
|
||||
|
@ -1066,6 +984,14 @@ int BLASFUNC(blas_thread_shutdown)(void){
|
|||
|
||||
LOCK_COMMAND(&server_lock);
|
||||
|
||||
//Free buffers allocated for threads
|
||||
for(i=0; i<MAX_CPU_NUMBER; i++){
|
||||
if(blas_thread_buffer[i]!=NULL){
|
||||
blas_memory_free(blas_thread_buffer[i]);
|
||||
blas_thread_buffer[i]=NULL;
|
||||
}
|
||||
}
|
||||
|
||||
if (blas_server_avail) {
|
||||
|
||||
for (i = 0; i < blas_num_threads - 1; i++) {
|
||||
|
@ -1102,5 +1028,132 @@ int BLASFUNC(blas_thread_shutdown)(void){
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void adjust_thread_buffers() {
|
||||
|
||||
int i=0;
|
||||
|
||||
//adjust buffer for each thread
|
||||
for(i=0; i < blas_cpu_number; i++){
|
||||
if(blas_thread_buffer[i] == NULL){
|
||||
blas_thread_buffer[i] = blas_memory_alloc(2);
|
||||
}
|
||||
}
|
||||
for(; i < MAX_CPU_NUMBER; i++){
|
||||
if(blas_thread_buffer[i] != NULL){
|
||||
blas_memory_free(blas_thread_buffer[i]);
|
||||
blas_thread_buffer[i] = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Execute one queued job on worker `cpu` using per-thread scratch memory.
 *
 * Marks the worker busy, resolves the job's packing buffers (falling back
 * to the thread-local buffer when the queue entry did not supply sa/sb),
 * optionally restores the submitter's FP control state, invokes the
 * routine in the matching calling convention (legacy / pthread-compat /
 * native), and finally marks the worker idle again.
 * buf_index is currently unused here; presumably kept for the
 * openblas_dojob_callback ABI — TODO confirm.
 */
static void exec_threads(int cpu, blas_queue_t *queue, int buf_index) {

  int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = (int (*)(blas_arg_t *, void *, void *, void *, void *, BLASLONG))queue -> routine;

  /* flag this worker as busy (sentinel 1) before touching the job */
  atomic_store_queue(&thread_status[cpu].queue, (blas_queue_t *)1);

  void *buffer = blas_thread_buffer[cpu];
  void *sa = queue -> sa;
  void *sb = queue -> sb;

#ifdef SMP_DEBUG
  if (queue -> args) {
    fprintf(STDERR, "Server[%2ld] Calculation started. Mode = 0x%03x M = %3ld N=%3ld K=%3ld\n",
            cpu, queue->mode, queue-> args ->m, queue->args->n, queue->args->k);
  }
#endif

#ifdef CONSISTENT_FPCSR
  /* replicate the submitting thread's FP control/status registers so all
     workers round identically */
#ifdef __aarch64__
  __asm__ __volatile__ ("msr fpcr, %0" : : "r" (queue -> sse_mode));
#else
  __asm__ __volatile__ ("ldmxcsr %0" : : "m" (queue -> sse_mode));
  __asm__ __volatile__ ("fldcw %0" : : "m" (queue -> x87_mode));
#endif
#endif

#ifdef MONITOR
  main_status[cpu] = MAIN_RUNNING1;
#endif

//For target LOONGSON3R5, applying an offset to the buffer is essential
//for minimizing cache conflicts and optimizing performance.
#if defined(ARCH_LOONGARCH64) && !defined(NO_AFFINITY)
  if (sa == NULL) sa = (void *)((BLASLONG)buffer + (WhereAmI() & 0xf) * GEMM_OFFSET_A);
#endif
  /* no A-panel buffer supplied: carve it out of the thread-local buffer */
  if (sa == NULL) sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A);

  if (sb == NULL) {
    /* no B-panel buffer supplied: place it after the A panel, sized by the
       precision/complexity encoded in queue->mode */
    if (!(queue -> mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
      if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){
        sb = (void *)(((BLASLONG)sa + ((QGEMM_P * QGEMM_Q * sizeof(xdouble)
                        + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
      } else
#endif
        if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE) {
#ifdef BUILD_DOUBLE
          sb = (void *)(((BLASLONG)sa + ((DGEMM_P * DGEMM_Q * sizeof(double)
                          + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
#endif
        } else if ((queue -> mode & BLAS_PREC) == BLAS_SINGLE) {
#ifdef BUILD_SINGLE
          sb = (void *)(((BLASLONG)sa + ((SGEMM_P * SGEMM_Q * sizeof(float)
                          + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
#endif
        } else {
          /* Other types in future */
        }
    } else {
#ifdef EXPRECISION
      if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){
        sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * 2 * sizeof(xdouble)
                        + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
      } else
#endif
        if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE){
#ifdef BUILD_COMPLEX16
          sb = (void *)(((BLASLONG)sa + ((ZGEMM_P * ZGEMM_Q * 2 * sizeof(double)
                          + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
#endif
        } else if ((queue -> mode & BLAS_PREC) == BLAS_SINGLE) {
#ifdef BUILD_COMPLEX
          sb = (void *)(((BLASLONG)sa + ((CGEMM_P * CGEMM_Q * 2 * sizeof(float)
                          + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
#endif
        } else {
          /* Other types in future */
        }
    }
    /* publish the resolved B buffer back to the queue entry */
    queue->sb=sb;
  }

#ifdef MONITOR
  main_status[cpu] = MAIN_RUNNING2;
#endif

  if (queue -> mode & BLAS_LEGACY) {
    legacy_exec(routine, queue -> mode, queue -> args, sb);
  } else
    if (queue -> mode & BLAS_PTHREAD) {
      /* pthread-compat entries take a single void* argument */
      void (*pthreadcompat)(void *) = (void(*)(void*))queue -> routine;
      (pthreadcompat)(queue -> args);
    } else
      (routine)(queue -> args, queue -> range_m, queue -> range_n, sa, sb, queue -> position);

#ifdef SMP_DEBUG
  fprintf(STDERR, "Server[%2ld] Calculation finished!\n", cpu);
#endif

#ifdef MONITOR
  main_status[cpu] = MAIN_FINISH;
#endif

  // arm: make sure all results are written out _before_
  // thread is marked as done and other threads use them
  MB;
  atomic_store_queue(&thread_status[cpu].queue, (blas_queue_t *)0);

}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
#include "common.h"
|
||||
|
||||
/* global variable to change threading backend from openblas-managed to caller-managed */
|
||||
openblas_threads_callback openblas_threads_callback_ = 0;
|
||||
|
||||
/* non-threadsafe function should be called before any other
|
||||
openblas function to change how threads are managed */
|
||||
|
||||
void openblas_set_threads_callback_function(openblas_threads_callback callback)
|
||||
{
|
||||
openblas_threads_callback_ = callback;
|
||||
}
|
|
@ -113,7 +113,7 @@ void goto_set_num_threads(int num_threads) {
|
|||
blas_cpu_number = num_threads;
|
||||
|
||||
adjust_thread_buffers();
|
||||
#if defined(ARCH_MIPS64)
|
||||
#if defined(ARCH_MIPS64) || defined(ARCH_LOONGARCH64)
|
||||
//set parameters for different number of threads.
|
||||
blas_set_parameter();
|
||||
#endif
|
||||
|
@ -285,7 +285,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){
|
|||
}
|
||||
}
|
||||
|
||||
static void exec_threads(blas_queue_t *queue, int buf_index){
|
||||
static void exec_threads(int thread_num, blas_queue_t *queue, int buf_index){
|
||||
|
||||
void *buffer, *sa, *sb;
|
||||
int pos=0, release_flag=0;
|
||||
|
@ -305,7 +305,7 @@ static void exec_threads(blas_queue_t *queue, int buf_index){
|
|||
|
||||
if ((sa == NULL) && (sb == NULL) && ((queue -> mode & BLAS_PTHREAD) == 0)) {
|
||||
|
||||
pos = omp_get_thread_num();
|
||||
pos= thread_num;
|
||||
buffer = blas_thread_buffer[buf_index][pos];
|
||||
|
||||
//fallback
|
||||
|
@ -420,18 +420,25 @@ while (true) {
|
|||
break;
|
||||
}
|
||||
}
|
||||
if (i != MAX_PARALLEL_NUMBER)
|
||||
if(i != MAX_PARALLEL_NUMBER)
|
||||
break;
|
||||
}
|
||||
if (openblas_omp_adaptive_env() != 0) {
|
||||
#pragma omp parallel for num_threads(num) schedule(OMP_SCHED)
|
||||
for (i = 0; i < num; i ++) {
|
||||
}
|
||||
/*For caller-managed threading, if caller has registered the callback, pass exec_thread as callback function*/
|
||||
if (openblas_threads_callback_) {
|
||||
#ifndef USE_SIMPLE_THREADED_LEVEL3
|
||||
for (i = 0; i < num; i ++)
|
||||
queue[i].position = i;
|
||||
#endif
|
||||
openblas_threads_callback_(1, (openblas_dojob_callback) exec_threads, num, sizeof(blas_queue_t), (void*) queue, buf_index);
|
||||
} else {
|
||||
|
||||
if (openblas_omp_adaptive_env() != 0) {
|
||||
#pragma omp parallel for num_threads(num) schedule(OMP_SCHED)
|
||||
for (i = 0; i < num; i ++) {
|
||||
#ifndef USE_SIMPLE_THREADED_LEVEL3
|
||||
queue[i].position = i;
|
||||
#endif
|
||||
|
||||
exec_threads(&queue[i], buf_index);
|
||||
exec_threads(omp_get_thread_num(), &queue[i], buf_index);
|
||||
}
|
||||
} else {
|
||||
#pragma omp parallel for schedule(OMP_SCHED)
|
||||
|
@ -441,9 +448,10 @@ if (openblas_omp_adaptive_env() != 0) {
|
|||
queue[i].position = i;
|
||||
#endif
|
||||
|
||||
exec_threads(&queue[i], buf_index);
|
||||
exec_threads(omp_get_thread_num(), &queue[i], buf_index);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef HAVE_C11
|
||||
atomic_store(&blas_buffer_inuse[buf_index], false);
|
||||
|
|
|
@ -48,38 +48,34 @@
|
|||
#endif
|
||||
#endif
|
||||
|
||||
#ifdef SMP_DEBUG
|
||||
# define MT_TRACE(...) fprintf(stderr, __VA_ARGS__)
|
||||
#else
|
||||
# define MT_TRACE(...)
|
||||
#endif
|
||||
|
||||
/* This is a thread implementation for Win32 lazy implementation */
|
||||
|
||||
/* Thread server common information */
|
||||
typedef struct{
|
||||
CRITICAL_SECTION lock;
|
||||
HANDLE filled;
|
||||
HANDLE killed;
|
||||
|
||||
static blas_queue_t *work_queue = NULL;
|
||||
static HANDLE kickoff_event = NULL;
|
||||
static CRITICAL_SECTION queue_lock;
|
||||
blas_queue_t *queue; /* Parameter Pointer */
|
||||
int shutdown; /* server shutdown flag */
|
||||
|
||||
} blas_pool_t;
|
||||
|
||||
/* We need this global for checking if initialization is finished. */
|
||||
int blas_server_avail = 0;
|
||||
|
||||
int blas_omp_threads_local = 1;
|
||||
|
||||
/* Local Variables */
|
||||
static BLASULONG server_lock = 0;
|
||||
|
||||
static blas_pool_t pool;
|
||||
static HANDLE blas_threads [MAX_CPU_NUMBER];
|
||||
static DWORD blas_threads_id[MAX_CPU_NUMBER];
|
||||
static volatile int thread_target; // target num of live threads, volatile for cross-thread reads
|
||||
|
||||
//
|
||||
// Legacy code path
|
||||
//
|
||||
static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb) {
|
||||
|
||||
if (!(mode & BLAS_COMPLEX)) {
|
||||
|
||||
static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){
|
||||
|
||||
if (!(mode & BLAS_COMPLEX)){
|
||||
#ifdef EXPRECISION
|
||||
if ((mode & BLAS_PREC) == BLAS_XDOUBLE){
|
||||
/* REAL / Extended Double */
|
||||
|
@ -94,7 +90,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb) {
|
|||
args -> c, args -> ldc, sb);
|
||||
} else
|
||||
#endif
|
||||
if ((mode & BLAS_PREC) == BLAS_DOUBLE) {
|
||||
if ((mode & BLAS_PREC) == BLAS_DOUBLE){
|
||||
/* REAL / Double */
|
||||
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double,
|
||||
double *, BLASLONG, double *, BLASLONG,
|
||||
|
@ -105,7 +101,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb) {
|
|||
args -> a, args -> lda,
|
||||
args -> b, args -> ldb,
|
||||
args -> c, args -> ldc, sb);
|
||||
} else if ((mode & BLAS_PREC) == BLAS_SINGLE) {
|
||||
} else if ((mode & BLAS_PREC) == BLAS_SINGLE){
|
||||
/* REAL / Single */
|
||||
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float,
|
||||
float *, BLASLONG, float *, BLASLONG,
|
||||
|
@ -117,7 +113,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb) {
|
|||
args -> b, args -> ldb,
|
||||
args -> c, args -> ldc, sb);
|
||||
#ifdef BUILD_BFLOAT16
|
||||
} else if ((mode & BLAS_PREC) == BLAS_BFLOAT16) {
|
||||
} else if ((mode & BLAS_PREC) == BLAS_BFLOAT16){
|
||||
/* REAL / BFLOAT16 */
|
||||
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, bfloat16,
|
||||
bfloat16 *, BLASLONG, bfloat16 *, BLASLONG,
|
||||
|
@ -128,7 +124,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb) {
|
|||
args -> a, args -> lda,
|
||||
args -> b, args -> ldb,
|
||||
args -> c, args -> ldc, sb);
|
||||
} else if ((mode & BLAS_PREC) == BLAS_STOBF16) {
|
||||
} else if ((mode & BLAS_PREC) == BLAS_STOBF16){
|
||||
/* REAL / BLAS_STOBF16 */
|
||||
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float,
|
||||
float *, BLASLONG, bfloat16 *, BLASLONG,
|
||||
|
@ -139,7 +135,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb) {
|
|||
args -> a, args -> lda,
|
||||
args -> b, args -> ldb,
|
||||
args -> c, args -> ldc, sb);
|
||||
} else if ((mode & BLAS_PREC) == BLAS_DTOBF16) {
|
||||
} else if ((mode & BLAS_PREC) == BLAS_DTOBF16){
|
||||
/* REAL / BLAS_DTOBF16 */
|
||||
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double,
|
||||
double *, BLASLONG, bfloat16 *, BLASLONG,
|
||||
|
@ -156,7 +152,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb) {
|
|||
}
|
||||
} else {
|
||||
#ifdef EXPRECISION
|
||||
if ((mode & BLAS_PREC) == BLAS_XDOUBLE) {
|
||||
if ((mode & BLAS_PREC) == BLAS_XDOUBLE){
|
||||
/* COMPLEX / Extended Double */
|
||||
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble,
|
||||
xdouble *, BLASLONG, xdouble *, BLASLONG,
|
||||
|
@ -170,7 +166,7 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb) {
|
|||
args -> c, args -> ldc, sb);
|
||||
} else
|
||||
#endif
|
||||
if ((mode & BLAS_PREC) == BLAS_DOUBLE) {
|
||||
if ((mode & BLAS_PREC) == BLAS_DOUBLE){
|
||||
/* COMPLEX / Double */
|
||||
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double,
|
||||
double *, BLASLONG, double *, BLASLONG,
|
||||
|
@ -200,78 +196,88 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb) {
|
|||
}
|
||||
}
|
||||
|
||||
//
|
||||
// This is a main routine of threads. Each thread waits until job is queued.
|
||||
//
|
||||
static DWORD WINAPI blas_thread_server(void *arg) {
|
||||
/* This is a main routine of threads. Each thread waits until job is */
|
||||
/* queued. */
|
||||
|
||||
static DWORD WINAPI blas_thread_server(void *arg){
|
||||
|
||||
/* Thread identifier */
|
||||
#ifdef SMP_DEBUG
|
||||
BLASLONG cpu = (BLASLONG)arg;
|
||||
#endif
|
||||
|
||||
void *buffer, *sa, *sb;
|
||||
blas_queue_t *queue;
|
||||
DWORD action;
|
||||
HANDLE handles[] = {pool.filled, pool.killed};
|
||||
|
||||
/* Each server needs each buffer */
|
||||
buffer = blas_memory_alloc(2);
|
||||
|
||||
MT_TRACE("Server[%2ld] Thread is started!\n", cpu);
|
||||
#ifdef SMP_DEBUG
|
||||
fprintf(STDERR, "Server[%2ld] Thread is started!\n", cpu);
|
||||
#endif
|
||||
|
||||
while (1) {
|
||||
while (1){
|
||||
|
||||
/* Waiting for Queue */
|
||||
|
||||
MT_TRACE("Server[%2ld] Waiting for Queue.\n", cpu);
|
||||
#ifdef SMP_DEBUG
|
||||
fprintf(STDERR, "Server[%2ld] Waiting for Queue.\n", cpu);
|
||||
#endif
|
||||
|
||||
// event raised when work is added to the queue
|
||||
WaitForSingleObject(kickoff_event, INFINITE);
|
||||
do {
|
||||
action = WaitForMultipleObjects(2, handles, FALSE, INFINITE);
|
||||
} while ((action != WAIT_OBJECT_0) && (action != WAIT_OBJECT_0 + 1));
|
||||
|
||||
if (cpu > thread_target - 2) {
|
||||
//MT_TRACE("thread [%d] exiting.\n", cpu);
|
||||
break; // excess thread, so worker thread exits
|
||||
}
|
||||
if (action == WAIT_OBJECT_0 + 1) break;
|
||||
|
||||
MT_TRACE("Server[%2ld] Got it.\n", cpu);
|
||||
#ifdef SMP_DEBUG
|
||||
fprintf(STDERR, "Server[%2ld] Got it.\n", cpu);
|
||||
#endif
|
||||
|
||||
EnterCriticalSection(&queue_lock);
|
||||
EnterCriticalSection(&pool.lock);
|
||||
|
||||
queue = work_queue;
|
||||
if (queue)
|
||||
work_queue = work_queue->next;
|
||||
queue = pool.queue;
|
||||
if (queue) pool.queue = queue->next;
|
||||
|
||||
LeaveCriticalSection(&queue_lock);
|
||||
LeaveCriticalSection(&pool.lock);
|
||||
|
||||
if (queue) {
|
||||
int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = queue -> routine;
|
||||
|
||||
if (pool.queue) SetEvent(pool.filled);
|
||||
|
||||
sa = queue -> sa;
|
||||
sb = queue -> sb;
|
||||
|
||||
#ifdef CONSISTENT_FPCSR
|
||||
#ifdef CONSISTENT_FPCSR
|
||||
__asm__ __volatile__ ("ldmxcsr %0" : : "m" (queue -> sse_mode));
|
||||
__asm__ __volatile__ ("fldcw %0" : : "m" (queue -> x87_mode));
|
||||
#endif
|
||||
#endif
|
||||
|
||||
MT_TRACE("Server[%2ld] Started. Mode = 0x%03x M = %3ld N=%3ld K=%3ld\n",
|
||||
#ifdef SMP_DEBUG
|
||||
fprintf(STDERR, "Server[%2ld] Started. Mode = 0x%03x M = %3ld N=%3ld K=%3ld\n",
|
||||
cpu, queue->mode, queue-> args ->m, queue->args->n, queue->args->k);
|
||||
#endif
|
||||
|
||||
// fprintf(stderr, "queue start[%ld]!!!\n", cpu);
|
||||
|
||||
#ifdef MONITOR
|
||||
#ifdef MONITOR
|
||||
main_status[cpu] = MAIN_RUNNING1;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
if (sa == NULL)
|
||||
sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A);
|
||||
if (sa == NULL) sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A);
|
||||
|
||||
if (sb == NULL) {
|
||||
if (!(queue -> mode & BLAS_COMPLEX)) {
|
||||
if (!(queue -> mode & BLAS_COMPLEX)){
|
||||
#ifdef EXPRECISION
|
||||
if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE) {
|
||||
if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){
|
||||
sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * sizeof(xdouble)
|
||||
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
|
||||
} else
|
||||
#endif
|
||||
if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE) {
|
||||
if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE){
|
||||
#ifdef BUILD_DOUBLE
|
||||
sb = (void *)(((BLASLONG)sa + ((DGEMM_P * DGEMM_Q * sizeof(double)
|
||||
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
|
||||
|
@ -308,56 +314,67 @@ static DWORD WINAPI blas_thread_server(void *arg) {
|
|||
queue->sb=sb;
|
||||
}
|
||||
|
||||
#ifdef MONITOR
|
||||
#ifdef MONITOR
|
||||
main_status[cpu] = MAIN_RUNNING2;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
if (!(queue -> mode & BLAS_LEGACY)) {
|
||||
|
||||
(routine)(queue -> args, queue -> range_m, queue -> range_n, sa, sb, queue -> position);
|
||||
} else {
|
||||
legacy_exec(routine, queue -> mode, queue -> args, sb);
|
||||
}
|
||||
} else {
|
||||
}else{
|
||||
continue; //if queue == NULL
|
||||
}
|
||||
|
||||
MT_TRACE("Server[%2ld] Finished!\n", cpu);
|
||||
#ifdef SMP_DEBUG
|
||||
fprintf(STDERR, "Server[%2ld] Finished!\n", cpu);
|
||||
#endif
|
||||
|
||||
queue->finished = 1;
|
||||
EnterCriticalSection(&queue->lock);
|
||||
|
||||
queue -> status = BLAS_STATUS_FINISHED;
|
||||
|
||||
LeaveCriticalSection(&queue->lock);
|
||||
|
||||
SetEvent(queue->finish);
|
||||
}
|
||||
|
||||
/* Shutdown procedure */
|
||||
|
||||
MT_TRACE("Server[%2ld] Shutdown!\n", cpu);
|
||||
#ifdef SMP_DEBUG
|
||||
fprintf(STDERR, "Server[%2ld] Shutdown!\n", cpu);
|
||||
#endif
|
||||
|
||||
blas_memory_free(buffer);
|
||||
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Initializing routine
|
||||
//
|
||||
int blas_thread_init(void) {
|
||||
/* Initializing routine */
|
||||
int blas_thread_init(void){
|
||||
BLASLONG i;
|
||||
|
||||
if (blas_server_avail || (blas_cpu_number <= 1)) return 0;
|
||||
|
||||
LOCK_COMMAND(&server_lock);
|
||||
|
||||
MT_TRACE("Initializing Thread(Num. threads = %d)\n", blas_cpu_number);
|
||||
#ifdef SMP_DEBUG
|
||||
fprintf(STDERR, "Initializing Thread(Num. threads = %d)\n",
|
||||
blas_cpu_number);
|
||||
#endif
|
||||
|
||||
if (!blas_server_avail) {
|
||||
// create the kickoff Event
|
||||
kickoff_event = CreateEvent(NULL, TRUE, FALSE, NULL);
|
||||
if (!blas_server_avail){
|
||||
|
||||
thread_target = blas_cpu_number;
|
||||
InitializeCriticalSection(&pool.lock);
|
||||
pool.filled = CreateEvent(NULL, FALSE, FALSE, NULL);
|
||||
pool.killed = CreateEvent(NULL, TRUE, FALSE, NULL);
|
||||
|
||||
InitializeCriticalSection(&queue_lock);
|
||||
|
||||
for(i = 0; i < blas_cpu_number - 1; i++) {
|
||||
//MT_TRACE("thread_init: creating thread [%d]\n", i);
|
||||
pool.shutdown = 0;
|
||||
pool.queue = NULL;
|
||||
|
||||
for(i = 0; i < blas_cpu_number - 1; i++){
|
||||
blas_threads[i] = CreateThread(NULL, 0,
|
||||
blas_thread_server, (void *)i,
|
||||
0, &blas_threads_id[i]);
|
||||
|
@ -371,12 +388,15 @@ int blas_thread_init(void) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
//
|
||||
// User can call one of two routines.
|
||||
// exec_blas_async ... immediately returns after jobs are queued.
|
||||
// exec_blas ... returns after jobs are finished.
|
||||
//
|
||||
int exec_blas_async(BLASLONG pos, blas_queue_t *queue) {
|
||||
/*
|
||||
User can call one of two routines.
|
||||
|
||||
exec_blas_async ... immediately returns after jobs are queued.
|
||||
|
||||
exec_blas ... returns after jobs are finished.
|
||||
*/
|
||||
|
||||
int exec_blas_async(BLASLONG pos, blas_queue_t *queue){
|
||||
|
||||
#if defined(SMP_SERVER)
|
||||
// Handle lazy re-init of the thread-pool after a POSIX fork
|
||||
|
@ -389,6 +409,8 @@ int exec_blas_async(BLASLONG pos, blas_queue_t *queue) {
|
|||
current = queue;
|
||||
|
||||
while (current) {
|
||||
InitializeCriticalSection(¤t -> lock);
|
||||
current -> finish = CreateEvent(NULL, FALSE, FALSE, NULL);
|
||||
current -> position = pos;
|
||||
|
||||
#ifdef CONSISTENT_FPCSR
|
||||
|
@ -396,71 +418,56 @@ int exec_blas_async(BLASLONG pos, blas_queue_t *queue) {
|
|||
__asm__ __volatile__ ("stmxcsr %0" : "=m" (current -> sse_mode));
|
||||
#endif
|
||||
|
||||
current->finished = 0;
|
||||
current = current -> next;
|
||||
pos ++;
|
||||
}
|
||||
|
||||
EnterCriticalSection(&queue_lock);
|
||||
EnterCriticalSection(&pool.lock);
|
||||
|
||||
if (!work_queue)
|
||||
{
|
||||
work_queue = queue;
|
||||
}
|
||||
else
|
||||
{
|
||||
blas_queue_t *queue_item = work_queue;
|
||||
|
||||
// find the end of the work queue
|
||||
while (queue_item->next)
|
||||
queue_item = queue_item->next;
|
||||
|
||||
// add new work to the end
|
||||
queue_item->next = queue;
|
||||
if (pool.queue) {
|
||||
current = pool.queue;
|
||||
while (current -> next) current = current -> next;
|
||||
current -> next = queue;
|
||||
} else {
|
||||
pool.queue = queue;
|
||||
}
|
||||
|
||||
LeaveCriticalSection(&queue_lock);
|
||||
LeaveCriticalSection(&pool.lock);
|
||||
|
||||
SetEvent(kickoff_event);
|
||||
SetEvent(pool.filled);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
//
|
||||
// Join. Wait for all queued tasks to complete
|
||||
//
|
||||
int exec_blas_async_wait(BLASLONG num, blas_queue_t *queue) {
|
||||
int exec_blas_async_wait(BLASLONG num, blas_queue_t *queue){
|
||||
|
||||
MT_TRACE("Synchronization Waiting.\n");
|
||||
#ifdef SMP_DEBUG
|
||||
fprintf(STDERR, "Synchronization Waiting.\n");
|
||||
#endif
|
||||
|
||||
while (num) {
|
||||
MT_TRACE("Waiting Queue ..\n");
|
||||
while (num){
|
||||
#ifdef SMP_DEBUG
|
||||
fprintf(STDERR, "Waiting Queue ..\n");
|
||||
#endif
|
||||
|
||||
while (!queue->finished)
|
||||
YIELDING;
|
||||
WaitForSingleObject(queue->finish, INFINITE);
|
||||
|
||||
queue = queue->next;
|
||||
num--;
|
||||
CloseHandle(queue->finish);
|
||||
DeleteCriticalSection(&queue -> lock);
|
||||
|
||||
queue = queue -> next;
|
||||
num --;
|
||||
}
|
||||
|
||||
MT_TRACE("Completely Done.\n\n");
|
||||
|
||||
// if work was added to the queue after this batch we can't sleep the worker threads
|
||||
// by resetting the event
|
||||
EnterCriticalSection(&queue_lock);
|
||||
|
||||
if (work_queue == NULL)
|
||||
ResetEvent(kickoff_event);
|
||||
|
||||
LeaveCriticalSection(&queue_lock);
|
||||
#ifdef SMP_DEBUG
|
||||
fprintf(STDERR, "Completely Done.\n\n");
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
//
|
||||
// Execute Threads
|
||||
//
|
||||
int exec_blas(BLASLONG num, blas_queue_t *queue) {
|
||||
/* Execute Threads */
|
||||
int exec_blas(BLASLONG num, blas_queue_t *queue){
|
||||
|
||||
#if defined(SMP_SERVER) && defined(OS_CYGWIN_NT)
|
||||
// Handle lazy re-init of the thread-pool after a POSIX fork
|
||||
|
@ -473,33 +480,29 @@ int exec_blas(BLASLONG num, blas_queue_t *queue) {
|
|||
|
||||
if ((num <= 0) || (queue == NULL)) return 0;
|
||||
|
||||
if ((num > 1) && queue -> next)
|
||||
exec_blas_async(1, queue -> next);
|
||||
if ((num > 1) && queue -> next) exec_blas_async(1, queue -> next);
|
||||
|
||||
routine = queue -> routine;
|
||||
|
||||
if (queue -> mode & BLAS_LEGACY) {
|
||||
legacy_exec(routine, queue -> mode, queue -> args, queue -> sb);
|
||||
} else {
|
||||
} else
|
||||
if (queue -> mode & BLAS_PTHREAD) {
|
||||
void (*pthreadcompat)(void *) = queue -> routine;
|
||||
(pthreadcompat)(queue -> args);
|
||||
} else
|
||||
(routine)(queue -> args, queue -> range_m, queue -> range_n,
|
||||
queue -> sa, queue -> sb, 0);
|
||||
}
|
||||
|
||||
if ((num > 1) && queue -> next)
|
||||
exec_blas_async_wait(num - 1, queue -> next);
|
||||
if ((num > 1) && queue -> next) exec_blas_async_wait(num - 1, queue -> next);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
//
|
||||
// Shutdown procedure, but user don't have to call this routine. The
|
||||
// kernel automatically kill threads.
|
||||
//
|
||||
int BLASFUNC(blas_thread_shutdown)(void) {
|
||||
/* Shutdown procedure, but user don't have to call this routine. The */
|
||||
/* kernel automatically kill threads. */
|
||||
|
||||
int BLASFUNC(blas_thread_shutdown)(void){
|
||||
|
||||
int i;
|
||||
|
||||
|
@ -507,9 +510,11 @@ int BLASFUNC(blas_thread_shutdown)(void) {
|
|||
|
||||
LOCK_COMMAND(&server_lock);
|
||||
|
||||
if (blas_server_avail) {
|
||||
if (blas_server_avail){
|
||||
|
||||
for (i = 0; i < blas_num_threads - 1; i++) {
|
||||
SetEvent(pool.killed);
|
||||
|
||||
for(i = 0; i < blas_num_threads - 1; i++){
|
||||
// Could also just use WaitForMultipleObjects
|
||||
DWORD wait_thread_value = WaitForSingleObject(blas_threads[i], 50);
|
||||
|
||||
|
@ -523,6 +528,9 @@ int BLASFUNC(blas_thread_shutdown)(void) {
|
|||
CloseHandle(blas_threads[i]);
|
||||
}
|
||||
|
||||
CloseHandle(pool.filled);
|
||||
CloseHandle(pool.killed);
|
||||
|
||||
blas_server_avail = 0;
|
||||
}
|
||||
|
||||
|
@ -531,9 +539,6 @@ int BLASFUNC(blas_thread_shutdown)(void) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
//
|
||||
// Legacy function to set numbef of threads
|
||||
//
|
||||
void goto_set_num_threads(int num_threads)
|
||||
{
|
||||
long i;
|
||||
|
@ -547,48 +552,23 @@ void goto_set_num_threads(int num_threads)
|
|||
|
||||
if (num_threads > MAX_CPU_NUMBER) num_threads = MAX_CPU_NUMBER;
|
||||
|
||||
if (blas_server_avail && num_threads < blas_num_threads) {
|
||||
LOCK_COMMAND(&server_lock);
|
||||
|
||||
thread_target = num_threads;
|
||||
|
||||
SetEvent(kickoff_event);
|
||||
|
||||
for (i = num_threads - 1; i < blas_num_threads - 1; i++) {
|
||||
//MT_TRACE("set_num_threads: waiting on thread [%d] to quit.\n", i);
|
||||
|
||||
WaitForSingleObject(blas_threads[i], INFINITE);
|
||||
|
||||
//MT_TRACE("set_num_threads: thread [%d] has quit.\n", i);
|
||||
|
||||
CloseHandle(blas_threads[i]);
|
||||
}
|
||||
|
||||
blas_num_threads = num_threads;
|
||||
|
||||
ResetEvent(kickoff_event);
|
||||
|
||||
UNLOCK_COMMAND(&server_lock);
|
||||
}
|
||||
|
||||
if (num_threads > blas_num_threads) {
|
||||
|
||||
LOCK_COMMAND(&server_lock);
|
||||
|
||||
thread_target = num_threads;
|
||||
|
||||
//increased_threads = 1;
|
||||
if (!blas_server_avail) {
|
||||
// create the kickoff Event
|
||||
kickoff_event = CreateEvent(NULL, TRUE, FALSE, NULL);
|
||||
if (!blas_server_avail){
|
||||
|
||||
InitializeCriticalSection(&queue_lock);
|
||||
InitializeCriticalSection(&pool.lock);
|
||||
pool.filled = CreateEvent(NULL, FALSE, FALSE, NULL);
|
||||
pool.killed = CreateEvent(NULL, TRUE, FALSE, NULL);
|
||||
|
||||
pool.shutdown = 0;
|
||||
pool.queue = NULL;
|
||||
blas_server_avail = 1;
|
||||
}
|
||||
|
||||
for (i = (blas_num_threads > 0) ? blas_num_threads - 1 : 0; i < num_threads - 1; i++) {
|
||||
//MT_TRACE("set_num_threads: creating thread [%d]\n", i);
|
||||
for(i = (blas_num_threads > 0) ? blas_num_threads - 1 : 0; i < num_threads - 1; i++){
|
||||
|
||||
blas_threads[i] = CreateThread(NULL, 0,
|
||||
blas_thread_server, (void *)i,
|
||||
|
@ -603,9 +583,6 @@ void goto_set_num_threads(int num_threads)
|
|||
blas_cpu_number = num_threads;
|
||||
}
|
||||
|
||||
//
|
||||
// Openblas function to set thread count
|
||||
//
|
||||
void openblas_set_num_threads(int num)
|
||||
{
|
||||
goto_set_num_threads(num);
|
||||
|
|
|
@ -0,0 +1,75 @@
|
|||
/*****************************************************************************
|
||||
Copyright (c) 2024, The OpenBLAS Project
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
3. Neither the name of the OpenBLAS project nor the names of
|
||||
its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
**********************************************************************************/
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#ifdef __riscv_v_intrinsic
|
||||
#include <riscv_vector.h>
|
||||
#endif
|
||||
|
||||
unsigned detect_riscv64_get_vlenb(void) {
|
||||
#ifdef __riscv_v_intrinsic
|
||||
return __riscv_vlenb();
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Based on the approach taken here:
|
||||
* https://code.videolan.org/videolan/dav1d/-/merge_requests/1629
|
||||
*
|
||||
* Only to be called after we've determined we have some sort of
|
||||
* RVV support.
|
||||
*/
|
||||
|
||||
uint64_t detect_riscv64_rvv100(void)
|
||||
{
|
||||
uint64_t rvv10_supported;
|
||||
|
||||
/*
|
||||
* After the vsetvli statement vtype will either be a value > 0 if the
|
||||
* vsetvli succeeded or less than 0 if it failed. If 0 < vtype
|
||||
* we're good and the function will return 1, otherwise there's no
|
||||
* RVV 1.0 and we return 0.
|
||||
*/
|
||||
|
||||
asm volatile("vsetvli x0, x0, e8, m1, ta, ma\n\t"
|
||||
"csrr %0, vtype\n\t"
|
||||
"slt %0, x0, %0\n"
|
||||
: "=r" (rvv10_supported)
|
||||
:
|
||||
:);
|
||||
|
||||
return rvv10_supported;
|
||||
}
|
||||
|
|
@ -927,6 +927,7 @@ static gotoblas_t *get_coretype(void){
|
|||
case 0x7:
|
||||
switch (exmodel) {
|
||||
case 5:
|
||||
case 6:
|
||||
if (support_avx2())
|
||||
return &gotoblas_ZEN;
|
||||
else
|
||||
|
|
|
@ -120,6 +120,11 @@ extern gotoblas_t gotoblas_CORTEXA55;
|
|||
#else
|
||||
#define gotoblas_CORTEXA55 gotoblas_ARMV8
|
||||
#endif
|
||||
#ifdef DYN_A64FX
|
||||
extern gotoblas_t gotoblas_A64FX;
|
||||
#else
|
||||
#define gotoblas_A64FX gotoblas_ARMV8
|
||||
#endif
|
||||
#else
|
||||
extern gotoblas_t gotoblas_CORTEXA53;
|
||||
#define gotoblas_CORTEXA55 gotoblas_CORTEXA53
|
||||
|
@ -136,10 +141,12 @@ extern gotoblas_t gotoblas_NEOVERSEN1;
|
|||
extern gotoblas_t gotoblas_NEOVERSEV1;
|
||||
extern gotoblas_t gotoblas_NEOVERSEN2;
|
||||
extern gotoblas_t gotoblas_ARMV8SVE;
|
||||
extern gotoblas_t gotoblas_A64FX;
|
||||
#else
|
||||
#define gotoblas_NEOVERSEV1 gotoblas_ARMV8
|
||||
#define gotoblas_NEOVERSEN2 gotoblas_ARMV8
|
||||
#define gotoblas_ARMV8SVE gotoblas_ARMV8
|
||||
#define gotoblas_A64FX gotoblas_ARMV8
|
||||
#endif
|
||||
extern gotoblas_t gotoblas_THUNDERX3T110;
|
||||
#endif
|
||||
|
@ -149,7 +156,7 @@ extern void openblas_warning(int verbose, const char * msg);
|
|||
#define FALLBACK_VERBOSE 1
|
||||
#define NEOVERSEN1_FALLBACK "OpenBLAS : Your OS does not support SVE instructions. OpenBLAS is using Neoverse N1 kernels as a fallback, which may give poorer performance.\n"
|
||||
|
||||
#define NUM_CORETYPES 17
|
||||
#define NUM_CORETYPES 18
|
||||
|
||||
/*
|
||||
* In case asm/hwcap.h is outdated on the build system, make sure
|
||||
|
@ -184,6 +191,7 @@ static char *corename[] = {
|
|||
"thunderx3t110",
|
||||
"cortexa55",
|
||||
"armv8sve",
|
||||
"a64fx",
|
||||
"unknown"
|
||||
};
|
||||
|
||||
|
@ -205,6 +213,7 @@ char *gotoblas_corename(void) {
|
|||
if (gotoblas == &gotoblas_THUNDERX3T110) return corename[14];
|
||||
if (gotoblas == &gotoblas_CORTEXA55) return corename[15];
|
||||
if (gotoblas == &gotoblas_ARMV8SVE) return corename[16];
|
||||
if (gotoblas == &gotoblas_A64FX) return corename[17];
|
||||
return corename[NUM_CORETYPES];
|
||||
}
|
||||
|
||||
|
@ -241,6 +250,7 @@ static gotoblas_t *force_coretype(char *coretype) {
|
|||
case 14: return (&gotoblas_THUNDERX3T110);
|
||||
case 15: return (&gotoblas_CORTEXA55);
|
||||
case 16: return (&gotoblas_ARMV8SVE);
|
||||
case 17: return (&gotoblas_A64FX);
|
||||
}
|
||||
snprintf(message, 128, "Core not found: %s\n", coretype);
|
||||
openblas_warning(1, message);
|
||||
|
@ -346,6 +356,15 @@ static gotoblas_t *get_coretype(void) {
|
|||
return &gotoblas_THUNDERX3T110;
|
||||
}
|
||||
break;
|
||||
case 0x46: // Fujitsu
|
||||
switch (part)
|
||||
{
|
||||
#ifndef NO_SVE
|
||||
case 0x001: // A64FX
|
||||
return &gotoblas_A64FX;
|
||||
#endif
|
||||
}
|
||||
break;
|
||||
case 0x48: // HiSilicon
|
||||
switch (part)
|
||||
{
|
||||
|
|
|
@ -0,0 +1,269 @@
|
|||
/*****************************************************************************
|
||||
Copyright (c) 2024, The OpenBLAS Project
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
3. Neither the name of the OpenBLAS project nor the names of
|
||||
its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
**********************************************************************************/
|
||||
|
||||
#include <stdbool.h>
|
||||
|
||||
#include "common.h"
|
||||
|
||||
/*
|
||||
* OpenBLAS contains some kernels that are optimised for RVV 1.0. Before we
|
||||
* can use these kernels we need to determine whether the device supports
|
||||
* RVV 1.0 and what the device's VLEN is. Our strategy will be as follows.
|
||||
*
|
||||
* First we'll invoke the hwprobe syscall to detect RVV 1.0. In an ideal world,
|
||||
* this is all we should need to do. If the syscall is not implemented we
|
||||
* should be able to deduce that RVV 1.0 is not supported (as it was added to
|
||||
* Linux after hwprobe) and if the syscall is implemented we can use it to
|
||||
* determine whether RVV 1.0 is supported. However, there are some riscv64
|
||||
* boards out there that implement RVV 1.0 but ship with a Linux kernel that
|
||||
* predates RVV vector support and hwprobe support. These kernels contain
|
||||
* the backported RVV patches but not the hwprobe patches and so they
|
||||
* advertise support for RVV via hwcap. To cater for these boards we need
|
||||
* to fall back to hwcap if hwprobe is not supported. Unfortunately, some
|
||||
* boards indicate support for RVV via hwcap even though they only support
|
||||
* RVV 0.7.1, which is incompatible with RVV 1.0. So an additional check is
|
||||
* required to test if the devices advertising support for RVV via hwcap really
|
||||
* support RVV 1.0. This test works by executing a vsetvli instruction that
|
||||
* sets the tail agnostic and mask agnostic bits in the vtype register.
|
||||
* These bits are not supported prior to RVV 0.9 so will cause the VIL bit to
|
||||
* be set on the VTYPE register in CPUs supporting 0.7.1. If this bit is set
|
||||
* we can determine that RVV 1.0 is not supported.
|
||||
*
|
||||
* This approach is borrowed from
|
||||
* VideoLan dav1d:
|
||||
* (https://code.videolan.org/videolan/dav1d/-/merge_requests/1629).
|
||||
*
|
||||
* We assume that if a kernel reports the presence of RVV via hwcap that
|
||||
* the device supports the vsetvli instruction.
|
||||
*
|
||||
* For now we're just going to invoke the hwprobe syscall directly, rather than
|
||||
* invoking it through glibc. Support for hwprobe has been added to glibc but
|
||||
* at the time of writing this support has not yet been included in a glibc
|
||||
* release. Once it has, it will be better to invoke hwprobe via glibc as doing
|
||||
* so should take advantage of the vdso entry and be more efficient.
|
||||
*/
|
||||
|
||||
/*
|
||||
* This should work on Android as well but I have no way of testing.
|
||||
*/
|
||||
|
||||
#if defined(OS_LINUX)
|
||||
#include <unistd.h>
|
||||
#include <sys/syscall.h>
|
||||
#include <stdint.h>
|
||||
#include <sys/auxv.h>
|
||||
|
||||
#define DETECT_RISCV64_HWCAP_ISA_V (1 << ('V' - 'A'))
|
||||
|
||||
struct riscv_hwprobe {
|
||||
int64_t key;
|
||||
uint64_t value;
|
||||
};
|
||||
|
||||
/* The constants below are copied from
|
||||
* /usr/include/riscv64-linux-gnu/asm/hwprobe.h. We duplicate the
|
||||
* constants as the header file from which they are copied will only
|
||||
* be present if we're building on a device with Linux 6.5 or greater.
|
||||
*/
|
||||
|
||||
#define RISCV_HWPROBE_KEY_IMA_EXT_0 4
|
||||
#define RISCV_HWPROBE_IMA_V (1 << 2)
|
||||
|
||||
#ifndef NR_riscv_hwprobe
|
||||
#ifndef NR_arch_specific_syscall
|
||||
#define NR_arch_specific_syscall 244
|
||||
#endif
|
||||
#define NR_riscv_hwprobe (NR_arch_specific_syscall + 14)
|
||||
#endif
|
||||
#endif // defined(OS_LINUX)
|
||||
|
||||
unsigned detect_riscv64_get_vlenb(void);
|
||||
uint64_t detect_riscv64_rvv100(void);
|
||||
|
||||
extern gotoblas_t gotoblas_RISCV64_GENERIC;
|
||||
#if !defined(DYNAMIC_LIST) || defined(DYN_RISCV64_ZVL256B)
|
||||
extern gotoblas_t gotoblas_RISCV64_ZVL256B;
|
||||
#endif
|
||||
#if !defined(DYNAMIC_LIST) || defined(DYN_RISCV64_ZVL128B)
|
||||
extern gotoblas_t gotoblas_RISCV64_ZVL128B;
|
||||
#endif
|
||||
|
||||
#define CPU_GENERIC 0
|
||||
#define CPU_RISCV64_ZVL256B 1
|
||||
#define CPU_RISCV64_ZVL128B 2
|
||||
|
||||
static char *cpuname[] = {
|
||||
"riscv64_generic",
|
||||
"riscv64_zvl256b",
|
||||
"riscv64_zvl128b"
|
||||
};
|
||||
#define NUM_CORETYPES (sizeof(cpuname)/sizeof(char*))
|
||||
|
||||
extern int openblas_verbose(void);
|
||||
extern void openblas_warning(int verbose, const char* msg);
|
||||
|
||||
char* gotoblas_corename(void) {
|
||||
#if !defined(DYNAMIC_LIST) || defined(DYN_RISCV64_ZVL256B)
|
||||
if (gotoblas == &gotoblas_RISCV64_ZVL256B)
|
||||
return cpuname[CPU_RISCV64_ZVL256B];
|
||||
#endif
|
||||
#if !defined(DYNAMIC_LIST) || defined(DYN_RISCV64_ZVL128B)
|
||||
if (gotoblas == &gotoblas_RISCV64_ZVL128B)
|
||||
return cpuname[CPU_RISCV64_ZVL128B];
|
||||
#endif
|
||||
if (gotoblas == &gotoblas_RISCV64_GENERIC)
|
||||
return cpuname[CPU_GENERIC];
|
||||
|
||||
return "unknown";
|
||||
}
|
||||
|
||||
/*
 * Probe the hardware and select the best-matching kernel table.
 *
 * Returns NULL when RVV 1.0 support cannot be established (the caller
 * then falls back to the generic riscv64 core).  Detection strategy
 * (see the comment block at the top of this file): try the hwprobe
 * syscall first; if it is unavailable, fall back to hwcap plus a
 * vsetvli-based runtime test that distinguishes RVV 1.0 from 0.7.1.
 */
static gotoblas_t* get_coretype(void) {
  unsigned vlenb = 0;

#if !defined(OS_LINUX)
  /* hwprobe/hwcap are Linux interfaces; no detection elsewhere. */
  return NULL;
#else

  /*
   * See the hwprobe documentation
   *
   * ( https://docs.kernel.org/arch/riscv/hwprobe.html )
   * for more details.
   */

  struct riscv_hwprobe pairs[] = {
    { .key = RISCV_HWPROBE_KEY_IMA_EXT_0, },
  };
  int ret = syscall(NR_riscv_hwprobe, pairs, 1, 0, NULL, 0);
  if (ret == 0) {
    /* hwprobe succeeded: its answer is authoritative. */
    if (!(pairs[0].value & RISCV_HWPROBE_IMA_V))
      return NULL;
  } else {
    /* hwprobe not implemented (older kernel): fall back to hwcap. */
    if (!(getauxval(AT_HWCAP) & DETECT_RISCV64_HWCAP_ISA_V))
      return NULL;

    /* hwcap may also be set on RVV 0.7.1 boards; verify 1.0 runtime
     * semantics with the vsetvli probe. */
    if (!detect_riscv64_rvv100())
      return NULL;
  }

  /*
   * RVV 1.0 is supported. We now just need to determine the coretype
   * based on the VLEN.
   */

  vlenb = detect_riscv64_get_vlenb();

  if (vlenb < 16)   /* vlenb is bytes: < 16 means VLEN < 128 bits */
    return NULL;
#if !defined(DYNAMIC_LIST) || defined(DYN_RISCV64_ZVL256B)
  if (vlenb >= 32)  /* VLEN >= 256 bits */
    return &gotoblas_RISCV64_ZVL256B;
#endif

#if !defined(DYNAMIC_LIST) || defined(DYN_RISCV64_ZVL128B)
  return &gotoblas_RISCV64_ZVL128B;
#else
  return NULL;
#endif

#endif // !defined(OS_LINUX)
}
|
||||
|
||||
static gotoblas_t* force_coretype(char* coretype) {
|
||||
size_t i;
|
||||
char message[128];
|
||||
|
||||
for (i = 0; i < NUM_CORETYPES && strcasecmp(coretype, cpuname[i]); i++);
|
||||
|
||||
if (i == CPU_GENERIC)
|
||||
return &gotoblas_RISCV64_GENERIC;
|
||||
|
||||
if (i == CPU_RISCV64_ZVL256B) {
|
||||
#if !defined(DYNAMIC_LIST) || defined(DYN_RISCV64_ZVL256B)
|
||||
return &gotoblas_RISCV64_ZVL256B;
|
||||
#else
|
||||
openblas_warning(1,
|
||||
"riscv64_zvl256b support not compiled in\n");
|
||||
return NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
if (i == CPU_RISCV64_ZVL128B) {
|
||||
#if !defined(DYNAMIC_LIST) || defined(DYN_RISCV64_ZVL128B)
|
||||
return &gotoblas_RISCV64_ZVL128B;
|
||||
#else
|
||||
openblas_warning(1,
|
||||
"riscv64_zvl128b support not compiled in\n");
|
||||
return NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
snprintf(message, sizeof(message), "Core not found: %s\n", coretype);
|
||||
openblas_warning(1, message);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void gotoblas_dynamic_init(void) {
|
||||
|
||||
char coremsg[128];
|
||||
char* p;
|
||||
|
||||
if (gotoblas) return;
|
||||
|
||||
p = getenv("OPENBLAS_CORETYPE");
|
||||
if (p)
|
||||
gotoblas = force_coretype(p);
|
||||
else
|
||||
gotoblas = get_coretype();
|
||||
|
||||
if (!gotoblas) {
|
||||
snprintf(coremsg, sizeof(coremsg), "Falling back to generic riscv64 core\n");
|
||||
openblas_warning(1, coremsg);
|
||||
gotoblas = &gotoblas_RISCV64_GENERIC;
|
||||
}
|
||||
|
||||
if (gotoblas->init) {
|
||||
snprintf(coremsg, sizeof(coremsg), "Core: %s\n",
|
||||
gotoblas_corename());
|
||||
openblas_warning(2, coremsg);
|
||||
gotoblas->init();
|
||||
return;
|
||||
}
|
||||
|
||||
openblas_warning(0, "OpenBLAS : Architecture Initialization failed. No initialization function found.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/*
 * Tear down dynamic dispatch: clearing the table pointer makes the next
 * gotoblas_dynamic_init() call re-run core detection.
 */
void gotoblas_dynamic_quit(void) {
  gotoblas = NULL;
}
|
|
@ -964,7 +964,9 @@ static void *alloc_shm(void *address){
|
|||
return map_address;
|
||||
}
|
||||
|
||||
#if defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS
|
||||
#endif
|
||||
|
||||
#if ((defined ALLOC_HUGETLB) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS))
|
||||
|
||||
static void alloc_hugetlb_free(struct alloc_t *alloc_info){
|
||||
|
||||
|
@ -1066,7 +1068,8 @@ static void *alloc_hugetlb(void *address){
|
|||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
#ifdef ALLOC_HUGETLBFILE
|
||||
|
||||
|
@ -1165,11 +1168,10 @@ void *blas_memory_alloc(int procpos){
|
|||
#ifdef ALLOC_DEVICEDRIVER
|
||||
alloc_devicedirver,
|
||||
#endif
|
||||
/* Hugetlb implicitly assumes ALLOC_SHM */
|
||||
#ifdef ALLOC_SHM
|
||||
#ifdef ALLOC_SHM && !defined(ALLOC_HUGETLB)
|
||||
alloc_shm,
|
||||
#endif
|
||||
#if ((defined ALLOC_SHM) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS))
|
||||
#if ((defined ALLOC_HUGETLB) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS))
|
||||
alloc_hugetlb,
|
||||
#endif
|
||||
#ifdef ALLOC_MMAP
|
||||
|
@ -1190,7 +1192,6 @@ void *blas_memory_alloc(int procpos){
|
|||
struct alloc_t * alloc_info;
|
||||
struct alloc_t ** alloc_table;
|
||||
|
||||
|
||||
#if defined(SMP) && !defined(USE_OPENMP)
|
||||
int mi;
|
||||
LOCK_COMMAND(&alloc_lock);
|
||||
|
@ -1219,7 +1220,7 @@ UNLOCK_COMMAND(&alloc_lock);
|
|||
if (!blas_num_threads) blas_cpu_number = blas_get_cpu_number();
|
||||
#endif
|
||||
|
||||
#if defined(ARCH_X86) || defined(ARCH_X86_64) || defined(ARCH_IA64) || defined(ARCH_MIPS64) || defined(ARCH_ARM64)
|
||||
#if defined(ARCH_X86) || defined(ARCH_X86_64) || defined(ARCH_IA64) || defined(ARCH_MIPS64) || defined(ARCH_ARM64) || defined(ARCH_LOONGARCH64)
|
||||
#ifndef DYNAMIC_ARCH
|
||||
blas_set_parameter();
|
||||
#endif
|
||||
|
@ -1282,7 +1283,7 @@ UNLOCK_COMMAND(&alloc_lock);
|
|||
}
|
||||
#endif
|
||||
|
||||
#if (defined ALLOC_SHM) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS)
|
||||
#if (defined ALLOC_HUGETLB) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS)
|
||||
if ((*func == alloc_hugetlb) && (map_address != (void *)-1)) hugetlb_allocated = 1;
|
||||
#endif
|
||||
|
||||
|
@ -2494,7 +2495,7 @@ static void *alloc_devicedirver(void *address){
|
|||
|
||||
#endif
|
||||
|
||||
#ifdef ALLOC_SHM
|
||||
#if defined(ALLOC_SHM) && !defined(ALLOC_HUGETLB)
|
||||
|
||||
static void alloc_shm_free(struct release_t *release){
|
||||
|
||||
|
@ -2506,7 +2507,9 @@ static void alloc_shm_free(struct release_t *release){
|
|||
static void *alloc_shm(void *address){
|
||||
void *map_address;
|
||||
int shmid;
|
||||
|
||||
#ifdef DEBUG
|
||||
fprintf(stderr,"alloc_shm got called\n");
|
||||
#endif
|
||||
shmid = shmget(IPC_PRIVATE, BUFFER_SIZE,IPC_CREAT | 0600);
|
||||
|
||||
map_address = (void *)shmat(shmid, address, 0);
|
||||
|
@ -2533,6 +2536,7 @@ static void *alloc_shm(void *address){
|
|||
|
||||
return map_address;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS
|
||||
|
||||
|
@ -2562,6 +2566,10 @@ static void *alloc_hugetlb(void *address){
|
|||
|
||||
void *map_address = (void *)-1;
|
||||
|
||||
#ifdef DEBUG
|
||||
fprintf(stderr,"alloc_hugetlb got called\n");
|
||||
#endif
|
||||
|
||||
#if defined(OS_LINUX) || defined(OS_AIX)
|
||||
int shmid;
|
||||
|
||||
|
@ -2583,7 +2591,7 @@ static void *alloc_hugetlb(void *address){
|
|||
|
||||
if (map_address != (void *)-1){
|
||||
shmctl(shmid, IPC_RMID, 0);
|
||||
}
|
||||
}else printf("alloc_hugetlb failed\n");
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -2645,7 +2653,6 @@ static void *alloc_hugetlb(void *address){
|
|||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef ALLOC_HUGETLBFILE
|
||||
|
||||
|
@ -2739,7 +2746,7 @@ struct newmemstruct
|
|||
};
|
||||
static volatile struct newmemstruct *newmemory;
|
||||
|
||||
static int memory_initialized = 0;
|
||||
static volatile int memory_initialized = 0;
|
||||
static int memory_overflowed = 0;
|
||||
/* Memory allocation routine */
|
||||
/* procpos ... indicates where it comes from */
|
||||
|
@ -2762,11 +2769,10 @@ void *blas_memory_alloc(int procpos){
|
|||
#ifdef ALLOC_DEVICEDRIVER
|
||||
alloc_devicedirver,
|
||||
#endif
|
||||
/* Hugetlb implicitly assumes ALLOC_SHM */
|
||||
#ifdef ALLOC_SHM
|
||||
#if defined(ALLOC_SHM) && !defined(ALLOC_HUGETLB)
|
||||
alloc_shm,
|
||||
#endif
|
||||
#if ((defined ALLOC_SHM) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS))
|
||||
#if ((defined ALLOC_HUGETLB) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS))
|
||||
alloc_hugetlb,
|
||||
#endif
|
||||
#ifdef ALLOC_MMAP
|
||||
|
@ -2785,14 +2791,12 @@ void *blas_memory_alloc(int procpos){
|
|||
};
|
||||
void *(**func)(void *address);
|
||||
|
||||
#if defined(USE_OPENMP)
|
||||
if (!memory_initialized) {
|
||||
#if defined(SMP) && !defined(USE_OPENMP)
|
||||
LOCK_COMMAND(&alloc_lock);
|
||||
if (!memory_initialized) {
|
||||
#endif
|
||||
|
||||
LOCK_COMMAND(&alloc_lock);
|
||||
|
||||
if (!memory_initialized) {
|
||||
|
||||
#if defined(WHEREAMI) && !defined(USE_OPENMP)
|
||||
for (position = 0; position < NUM_BUFFERS; position ++){
|
||||
memory[position].addr = (void *)0;
|
||||
|
@ -2814,19 +2818,19 @@ void *blas_memory_alloc(int procpos){
|
|||
if (!blas_num_threads) blas_cpu_number = blas_get_cpu_number();
|
||||
#endif
|
||||
|
||||
#if defined(ARCH_X86) || defined(ARCH_X86_64) || defined(ARCH_IA64) || defined(ARCH_MIPS64) || defined(ARCH_ARM64)
|
||||
#if defined(ARCH_X86) || defined(ARCH_X86_64) || defined(ARCH_IA64) || defined(ARCH_MIPS64) || defined(ARCH_ARM64) || defined(ARCH_LOONGARCH64)
|
||||
#ifndef DYNAMIC_ARCH
|
||||
blas_set_parameter();
|
||||
#endif
|
||||
#endif
|
||||
|
||||
memory_initialized = 1;
|
||||
|
||||
WMB;
|
||||
#if defined(SMP) && !defined(USE_OPENMP)
|
||||
}
|
||||
UNLOCK_COMMAND(&alloc_lock);
|
||||
#if defined(USE_OPENMP)
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
printf("Alloc Start ...\n");
|
||||
|
@ -2945,8 +2949,22 @@ void *blas_memory_alloc(int procpos){
|
|||
}
|
||||
#endif
|
||||
|
||||
#if (defined ALLOC_SHM) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS)
|
||||
#if (defined ALLOC_HUGETLB) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS)
|
||||
if ((*func == alloc_hugetlb) && (map_address != (void *)-1)) hugetlb_allocated = 1;
|
||||
#ifdef DEBUG
|
||||
if (hugetlb_allocated) printf("allocating via shared memory with large page support (hugetlb)\n");
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if (defined ALLOC_SHM) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS)
|
||||
#ifdef DEBUG
|
||||
printf("allocating via shared memory\n");
|
||||
#endif
|
||||
if ((*func == alloc_shm) && (map_address == (void *)-1)) {
|
||||
#ifndef OS_WINDOWS
|
||||
fprintf(stderr, "OpenBLAS Warning ... shared memory allocation was failed.\n");
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
func ++;
|
||||
|
@ -3061,10 +3079,23 @@ allocation2:
|
|||
}
|
||||
#endif
|
||||
|
||||
#if (defined ALLOC_SHM) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS)
|
||||
#if (defined ALLOC_HUGETLB) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS)
|
||||
#ifdef DEBUG
|
||||
fprintf(stderr,"OpenBLAS: allocating via shared memory with large page support (hugetlb)\n");
|
||||
#endif
|
||||
if ((*func == alloc_hugetlb) && (map_address != (void *)-1)) hugetlb_allocated = 1;
|
||||
#endif
|
||||
|
||||
#if (defined ALLOC_SHM) && (defined OS_LINUX || defined OS_AIX || defined __sun__ || defined OS_WINDOWS)
|
||||
#ifdef DEBUG
|
||||
fprintf(stderr,"allocating via shared memory\n");
|
||||
#endif
|
||||
if ((*func == alloc_shm) && (map_address == (void *)-1)) {
|
||||
#ifndef OS_WINDOWS
|
||||
fprintf(stderr, "OpenBLAS Warning ... shared memory allocation was failed.\n");
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
func ++;
|
||||
}
|
||||
|
||||
|
|
|
@ -63,6 +63,9 @@ static char* openblas_config_str=""
|
|||
#ifdef USE_TLS
|
||||
"USE_TLS "
|
||||
#endif
|
||||
#ifdef USE_LOCKING
|
||||
"USE_LOCKING "
|
||||
#endif
|
||||
#ifndef DYNAMIC_ARCH
|
||||
CHAR_CORENAME
|
||||
#endif
|
||||
|
|
|
@ -739,6 +739,100 @@ void blas_set_parameter(void){
|
|||
}
|
||||
#endif
|
||||
|
||||
#if defined(ARCH_LOONGARCH64)
|
||||
/*
 * Query the L3 cache size, in MB, via the LoongArch CPUCFG instruction
 * with configuration word 0x14 (cache description).
 *
 * The result combines the three bit fields of the CPUCFG word
 * (sets, and two power-of-two factors) and scales down to megabytes.
 * NOTE(review): field layout assumed to match the LoongArch reference
 * manual's CPUCFG.0x14 encoding -- confirm against the ISA manual.
 *
 * Fix: declare the parameter list as (void); an empty () in C (pre-C23)
 * declares a function with unspecified parameters, not a prototype.
 */
int get_L3_size(void) {
  int ret = 0, id = 0x14;
  __asm__ volatile (
    "cpucfg %[ret], %[id]"
    : [ret]"=r"(ret)
    : [id]"r"(id)
    : "memory"
  );
  /* (sets + 1) * 2^ways_field * 2^linesize_field, converted to MB */
  return ((ret & 0xffff) + 1) * pow(2, ((ret >> 16) & 0xff)) * pow(2, ((ret >> 24) & 0x7f)) / 1024 / 1024; // MB
}
|
||||
|
||||
/*
 * Tune the GEMM blocking parameters (P/Q/R per precision) for the
 * LoongArch LOONGSON3R5 target, keyed off the detected L3 cache size.
 *
 * An L3 of 32 MB identifies the 3C5000/3D5000 parts; any other size is
 * treated as 3A5000/3C5000L.  With SMP builds, separate (smaller-R)
 * parameter sets are used when running multi-threaded, since each
 * thread gets a smaller effective share of the shared L3.
 * The specific P/Q/R values are empirically tuned constants.
 */
void blas_set_parameter(void){
#if defined(LOONGSON3R5)
  int L3_size = get_L3_size();
#ifdef SMP
  if(blas_num_threads == 1){
#endif
    //single thread
    if (L3_size == 32){ // 3C5000 and 3D5000
      sgemm_p = 256;
      sgemm_q = 384;
      sgemm_r = 8192;

      dgemm_p = 112;
      dgemm_q = 289;
      dgemm_r = 4096;

      cgemm_p = 128;
      cgemm_q = 256;
      cgemm_r = 4096;

      zgemm_p = 128;
      zgemm_q = 128;
      zgemm_r = 2048;
    } else { // 3A5000 and 3C5000L
      sgemm_p = 256;
      sgemm_q = 384;
      sgemm_r = 4096;

      dgemm_p = 112;
      dgemm_q = 300;
      dgemm_r = 3024;

      cgemm_p = 128;
      cgemm_q = 256;
      cgemm_r = 2048;

      zgemm_p = 128;
      zgemm_q = 128;
      zgemm_r = 1024;
    }
#ifdef SMP
  }else{
    //multi thread
    if (L3_size == 32){ // 3C5000 and 3D5000
      sgemm_p = 256;
      sgemm_q = 384;
      sgemm_r = 1024;

      dgemm_p = 112;
      dgemm_q = 289;
      dgemm_r = 342;

      cgemm_p = 128;
      cgemm_q = 256;
      cgemm_r = 512;

      zgemm_p = 128;
      zgemm_q = 128;
      zgemm_r = 512;
    } else { // 3A5000 and 3C5000L
      sgemm_p = 256;
      sgemm_q = 384;
      sgemm_r = 2048;

      dgemm_p = 112;
      dgemm_q = 300;
      dgemm_r = 738;

      cgemm_p = 128;
      cgemm_q = 256;
      cgemm_r = 1024;

      zgemm_p = 128;
      zgemm_q = 128;
      zgemm_r = 1024;
    }
  }
#endif
#endif
}
|
||||
#endif
|
||||
|
||||
#if defined(ARCH_ARM64)
|
||||
|
||||
void blas_set_parameter(void)
|
||||
|
|
|
@ -137,7 +137,7 @@ libgoto_hpl.def : $(GENSYM)
|
|||
|
||||
ifeq ($(OSNAME), Darwin)
|
||||
ifeq ($(FIXED_LIBNAME),1)
|
||||
INTERNALNAME = $(LIBPREFIX)$(LIBNAMESUFFIX).dylib
|
||||
INTERNALNAME = $(LIBPREFIX).dylib
|
||||
else
|
||||
INTERNALNAME = $(LIBPREFIX).$(MAJOR_VERSION).dylib
|
||||
endif
|
||||
|
@ -178,7 +178,7 @@ FEXTRALIB += -lm
|
|||
EXTRALIB += -lm
|
||||
else
|
||||
ifeq ($(FIXED_LIBNAME),1)
|
||||
INTERNALNAME = $(LIBPREFIX)$(LIBNAMESUFFIX).so
|
||||
INTERNALNAME = $(LIBPREFIX).so
|
||||
else
|
||||
INTERNALNAME = $(LIBPREFIX).so.$(MAJOR_VERSION)
|
||||
endif
|
||||
|
@ -315,11 +315,6 @@ test : linktest.c
|
|||
|
||||
linktest.c : $(GENSYM) ../Makefile.system ../getarch.c
|
||||
./$(GENSYM) linktest $(ARCH) "$(BU)" $(EXPRECISION) $(NO_CBLAS) $(NO_LAPACK) $(NO_LAPACKE) $(NEED2UNDERSCORES) $(ONLY_CBLAS) "$(SYMBOLPREFIX)" "$(SYMBOLSUFFIX)" $(BUILD_LAPACK_DEPRECATED) $(BUILD_BFLOAT16) $(BUILD_SINGLE) $(BUILD_DOUBLE) $(BUILD_COMPLEX) $(BUILD_COMPLEX16) > linktest.c
|
||||
ifeq ($(F_COMPILER), IBM)
|
||||
mv linktest.c linktest.c.FIRST
|
||||
egrep -v 'second_|dsecnd_' linktest.c.FIRST > linktest.c
|
||||
rm linktest.c.FIRST
|
||||
endif
|
||||
|
||||
clean ::
|
||||
@rm -f *.def *.dylib __.SYMDEF* *.renamed
|
||||
|
|
|
@ -60,7 +60,7 @@ cblasobjsc="
|
|||
cblas_ctbsv cblas_ctpmv cblas_ctpsv cblas_ctrmm cblas_ctrmv cblas_ctrsm cblas_ctrsv
|
||||
cblas_scnrm2 cblas_scasum cblas_cgemmt
|
||||
cblas_icamax cblas_icamin cblas_icmin cblas_icmax cblas_scsum cblas_cimatcopy cblas_comatcopy
|
||||
cblas_caxpyc cblas_crotg cblas_csrot cblas_scamax cblas_scamin
|
||||
cblas_caxpyc cblas_crotg cblas_csrot cblas_scamax cblas_scamin cblas_cgemm_batch
|
||||
"
|
||||
cblasobjsd="
|
||||
cblas_dasum cblas_daxpy cblas_dcopy cblas_ddot
|
||||
|
@ -70,7 +70,7 @@ cblasobjsd="
|
|||
cblas_dsyr2k cblas_dsyr cblas_dsyrk cblas_dtbmv cblas_dtbsv cblas_dtpmv cblas_dtpsv
|
||||
cblas_dtrmm cblas_dtrmv cblas_dtrsm cblas_dtrsv cblas_daxpby cblas_dgeadd cblas_dgemmt
|
||||
cblas_idamax cblas_idamin cblas_idmin cblas_idmax cblas_dsum cblas_dimatcopy cblas_domatcopy
|
||||
cblas_damax cblas_damin
|
||||
cblas_damax cblas_damin cblas_dgemm_batch
|
||||
"
|
||||
|
||||
cblasobjss="
|
||||
|
@ -82,7 +82,7 @@ cblasobjss="
|
|||
cblas_stbmv cblas_stbsv cblas_stpmv cblas_stpsv cblas_strmm cblas_strmv cblas_strsm
|
||||
cblas_strsv cblas_sgeadd cblas_sgemmt
|
||||
cblas_isamax cblas_isamin cblas_ismin cblas_ismax cblas_ssum cblas_simatcopy cblas_somatcopy
|
||||
cblas_samax cblas_samin
|
||||
cblas_samax cblas_samin cblas_sgemm_batch
|
||||
"
|
||||
|
||||
cblasobjsz="
|
||||
|
@ -94,12 +94,12 @@ cblasobjsz="
|
|||
cblas_ztrsv cblas_cdotc_sub cblas_cdotu_sub cblas_zdotc_sub cblas_zdotu_sub
|
||||
cblas_zaxpby cblas_zgeadd cblas_zgemmt
|
||||
cblas_izamax cblas_izamin cblas_izmin cblas_izmax cblas_dzsum cblas_zimatcopy cblas_zomatcopy
|
||||
cblas_zaxpyc cblas_zdrot cblas_zrotg cblas_dzamax cblas_dzamin
|
||||
cblas_zaxpyc cblas_zdrot cblas_zrotg cblas_dzamax cblas_dzamin cblas_zgemm_batch
|
||||
"
|
||||
|
||||
cblasobjs="cblas_xerbla"
|
||||
|
||||
bfcblasobjs="cblas_sbgemm cblas_sbgemv cblas_sbdot cblas_sbstobf16 cblas_sbdtobf16 cblas_sbf16tos cblas_dbf16tod"
|
||||
bfcblasobjs="cblas_sbgemm cblas_sbgemv cblas_sbdot cblas_sbstobf16 cblas_sbdtobf16 cblas_sbf16tos cblas_dbf16tod cblas_sbgemm_batch"
|
||||
|
||||
exblasobjs="
|
||||
qamax qamin qasum qaxpy qcabs1 qcopy qdot qgbmv qgemm
|
||||
|
|
2
f_check
2
f_check
|
@ -86,7 +86,7 @@ else
|
|||
vendor=CRAY
|
||||
openmp='-fopenmp'
|
||||
;;
|
||||
*Arm\ F90*)
|
||||
*Arm\ F90*|*F90\ Flang*)
|
||||
vendor=FLANG
|
||||
openmp='-fopenmp'
|
||||
;;
|
||||
|
|
|
@ -97,6 +97,9 @@ foreach (CBLAS_FLAG ${CBLAS_FLAGS})
|
|||
#sdsdot, dsdot
|
||||
if (BUILD_SINGLE OR BUILD_DOUBLE)
|
||||
GenerateNamedObjects("sdsdot.c" "" "sdsdot" ${CBLAS_FLAG} "" "" true "SINGLE")
|
||||
if(CBLAS_FLAG EQUAL 1)
|
||||
GenerateNamedObjects("gemm_batch.c" "" "gemm_batch" ${CBLAS_FLAG} "" "" false)
|
||||
endif ()
|
||||
endif ()
|
||||
if (BUILD_DOUBLE)
|
||||
GenerateNamedObjects("dsdot.c" "" "dsdot" ${CBLAS_FLAG} "" "" true "SINGLE")
|
||||
|
@ -125,13 +128,16 @@ if (BUILD_BFLOAT16)
|
|||
GenerateNamedObjects("tobf16.c" "DOUBLE_PREC" "sbdtobf16" ${CBLAS_FLAG} "" "" true "BFLOAT16")
|
||||
GenerateNamedObjects("bf16to.c" "SINGLE_PREC" "sbf16tos" ${CBLAS_FLAG} "" "" true "BFLOAT16")
|
||||
GenerateNamedObjects("bf16to.c" "DOUBLE_PREC" "dbf16tod" ${CBLAS_FLAG} "" "" true "BFLOAT16")
|
||||
if(CBLAS_FLAG EQUAL 1)
|
||||
GenerateNamedObjects("gemm_batch.c" "" "sbgemm_batch" ${CBLAS_FLAG} "" "" true "BFLOAT16")
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
# complex-specific sources
|
||||
foreach (float_type ${FLOAT_TYPES})
|
||||
|
||||
if (${float_type} STREQUAL "COMPLEX" OR ${float_type} STREQUAL "ZCOMPLEX")
|
||||
GenerateNamedObjects("zaxpy.c" "" "axpyc" ${CBLAS_FLAG} "" "" false ${float_type})
|
||||
GenerateNamedObjects("zaxpy.c" "CONJ" "axpyc" ${CBLAS_FLAG} "" "" false ${float_type})
|
||||
|
||||
GenerateNamedObjects("zger.c" "" "geru" ${CBLAS_FLAG} "" "" false ${float_type})
|
||||
GenerateNamedObjects("zger.c" "CONJ" "gerc" ${CBLAS_FLAG} "" "" false ${float_type})
|
||||
|
@ -154,6 +160,9 @@ foreach (float_type ${FLOAT_TYPES})
|
|||
GenerateNamedObjects("max.c" "USE_ABS" "scamax" ${CBLAS_FLAG} "" "" true "COMPLEX")
|
||||
GenerateNamedObjects("asum.c" "" "scasum" ${CBLAS_FLAG} "" "" true "COMPLEX")
|
||||
GenerateNamedObjects("sum.c" "" "scsum" ${CBLAS_FLAG} "" "" true "COMPLEX")
|
||||
if(CBLAS_FLAG EQUAL 1)
|
||||
GenerateNamedObjects("gemm_batch.c" "" "cgemm_batch" ${CBLAS_FLAG} "" "" true "COMPLEX")
|
||||
endif ()
|
||||
endif ()
|
||||
if (${float_type} STREQUAL "ZCOMPLEX")
|
||||
GenerateNamedObjects("zscal.c" "SSCAL" "dscal" ${CBLAS_FLAG} "" "" false "ZCOMPLEX")
|
||||
|
@ -163,6 +172,9 @@ foreach (float_type ${FLOAT_TYPES})
|
|||
GenerateNamedObjects("max.c" "USE_ABS" "dzamax" ${CBLAS_FLAG} "" "" true "ZCOMPLEX")
|
||||
GenerateNamedObjects("asum.c" "" "dzasum" ${CBLAS_FLAG} "" "" true "ZCOMPLEX")
|
||||
GenerateNamedObjects("sum.c" "" "dzsum" ${CBLAS_FLAG} "" "" true "ZCOMPLEX")
|
||||
if(CBLAS_FLAG EQUAL 1)
|
||||
GenerateNamedObjects("gemm_batch.c" "" "zgemm_batch" ${CBLAS_FLAG} "" "" true "ZCOMPLEX")
|
||||
endif ()
|
||||
endif ()
|
||||
endforeach ()
|
||||
|
||||
|
@ -212,6 +224,7 @@ if ( BUILD_COMPLEX AND NOT BUILD_SINGLE)
|
|||
GenerateNamedObjects("nrm2.c" "" "nrm2" 0 "" "" false "SINGLE")
|
||||
GenerateNamedObjects("gemv.c" "" "gemv" 0 "" "" false "SINGLE")
|
||||
GenerateNamedObjects("gemm.c" "" "gemm" 0 "" "" false "SINGLE")
|
||||
GenerateNamedObjects("gemm_batch.c" "" "gemm_batch" 1 "" "" false "SINGLE")
|
||||
GenerateNamedObjects("asum.c" "" "asum" 0 "" "" false "SINGLE")
|
||||
GenerateNamedObjects("swap.c" "" "swap" 0 "" "" false "SINGLE")
|
||||
GenerateNamedObjects("axpy.c" "" "axpy" 0 "" "" false "SINGLE")
|
||||
|
@ -225,6 +238,7 @@ if ( BUILD_COMPLEX16 AND NOT BUILD_DOUBLE)
|
|||
GenerateNamedObjects("nrm2.c" "" "nrm2" 0 "" "" false "DOUBLE")
|
||||
GenerateNamedObjects("gemv.c" "" "gemv" 0 "" "" false "DOUBLE")
|
||||
GenerateNamedObjects("gemm.c" "" "gemm" 0 "" "" false "DOUBLE")
|
||||
GenerateNamedObjects("gemm_batch.c" "" "gemm_batch" 1 "" "" false "DOUBLE")
|
||||
GenerateNamedObjects("asum.c" "" "asum" 0 "" "" false "DOUBLE")
|
||||
GenerateNamedObjects("swap.c" "" "swap" 0 "" "" false "DOUBLE")
|
||||
GenerateNamedObjects("axpy.c" "" "axpy" 0 "" "" false "DOUBLE")
|
||||
|
|
|
@ -282,12 +282,12 @@ CSBLAS2OBJS = \
|
|||
CSBLAS3OBJS = \
|
||||
cblas_sgemm.$(SUFFIX) cblas_ssymm.$(SUFFIX) cblas_strmm.$(SUFFIX) cblas_strsm.$(SUFFIX) \
|
||||
cblas_ssyrk.$(SUFFIX) cblas_ssyr2k.$(SUFFIX) cblas_somatcopy.$(SUFFIX) cblas_simatcopy.$(SUFFIX)\
|
||||
cblas_sgeadd.$(SUFFIX) cblas_sgemmt.$(SUFFIX)
|
||||
cblas_sgeadd.$(SUFFIX) cblas_sgemmt.$(SUFFIX) cblas_sgemm_batch.$(SUFFIX)
|
||||
|
||||
ifeq ($(BUILD_BFLOAT16),1)
|
||||
CSBBLAS1OBJS = cblas_sbdot.$(SUFFIX)
|
||||
CSBBLAS2OBJS = cblas_sbgemv.$(SUFFIX)
|
||||
CSBBLAS3OBJS = cblas_sbgemm.$(SUFFIX) cblas_sbgemmt.$(SUFFIX)
|
||||
CSBBLAS3OBJS = cblas_sbgemm.$(SUFFIX) cblas_sbgemmt.$(SUFFIX) cblas_sbgemm_batch.$(SUFFIX)
|
||||
CSBEXTOBJS = cblas_sbstobf16.$(SUFFIX) cblas_sbdtobf16.$(SUFFIX) cblas_sbf16tos.$(SUFFIX) cblas_dbf16tod.$(SUFFIX)
|
||||
endif
|
||||
|
||||
|
@ -308,7 +308,7 @@ CDBLAS2OBJS = \
|
|||
CDBLAS3OBJS += \
|
||||
cblas_dgemm.$(SUFFIX) cblas_dsymm.$(SUFFIX) cblas_dtrmm.$(SUFFIX) cblas_dtrsm.$(SUFFIX) \
|
||||
cblas_dsyrk.$(SUFFIX) cblas_dsyr2k.$(SUFFIX) cblas_domatcopy.$(SUFFIX) cblas_dimatcopy.$(SUFFIX) \
|
||||
cblas_dgeadd.$(SUFFIX) cblas_dgemmt.$(SUFFIX)
|
||||
cblas_dgeadd.$(SUFFIX) cblas_dgemmt.$(SUFFIX) cblas_dgemm_batch.$(SUFFIX)
|
||||
|
||||
CCBLAS1OBJS = \
|
||||
cblas_icamax.$(SUFFIX) cblas_icamin.$(SUFFIX) cblas_scasum.$(SUFFIX) cblas_caxpy.$(SUFFIX) \
|
||||
|
@ -333,7 +333,7 @@ CCBLAS3OBJS = \
|
|||
cblas_csyrk.$(SUFFIX) cblas_csyr2k.$(SUFFIX) \
|
||||
cblas_chemm.$(SUFFIX) cblas_cherk.$(SUFFIX) cblas_cher2k.$(SUFFIX) \
|
||||
cblas_comatcopy.$(SUFFIX) cblas_cimatcopy.$(SUFFIX)\
|
||||
cblas_cgeadd.$(SUFFIX) cblas_cgemmt.$(SUFFIX)
|
||||
cblas_cgeadd.$(SUFFIX) cblas_cgemmt.$(SUFFIX) cblas_cgemm_batch.$(SUFFIX)
|
||||
|
||||
CXERBLAOBJ = \
|
||||
cblas_xerbla.$(SUFFIX)
|
||||
|
@ -364,7 +364,7 @@ CZBLAS3OBJS = \
|
|||
cblas_zsyrk.$(SUFFIX) cblas_zsyr2k.$(SUFFIX) \
|
||||
cblas_zhemm.$(SUFFIX) cblas_zherk.$(SUFFIX) cblas_zher2k.$(SUFFIX)\
|
||||
cblas_zomatcopy.$(SUFFIX) cblas_zimatcopy.$(SUFFIX) \
|
||||
cblas_zgeadd.$(SUFFIX) cblas_zgemmt.$(SUFFIX)
|
||||
cblas_zgeadd.$(SUFFIX) cblas_zgemmt.$(SUFFIX) cblas_zgemm_batch.$(SUFFIX)
|
||||
|
||||
|
||||
ifeq ($(SUPPORT_GEMM3M), 1)
|
||||
|
@ -2419,3 +2419,17 @@ cblas_zgeadd.$(SUFFIX) cblas_zgeadd.$(PSUFFIX) : zgeadd.c
|
|||
cblas_xerbla.$(SUFFIX) cblas_xerbla.$(PSUFFIX) : xerbla.c
|
||||
$(CC) -c $(CFLAGS) -DCBLAS $< -o $(@F)
|
||||
|
||||
cblas_sbgemm_batch.$(SUFFIX) cblas_sbgemm_batch.$(PSUFFIX) : gemm_batch.c ../param.h
|
||||
$(CC) -c $(CFLAGS) -DCBLAS $< -o $(@F)
|
||||
|
||||
cblas_sgemm_batch.$(SUFFIX) cblas_sgemm_batch.$(PSUFFIX) : gemm_batch.c ../param.h
|
||||
$(CC) -c $(CFLAGS) -DCBLAS $< -o $(@F)
|
||||
|
||||
cblas_dgemm_batch.$(SUFFIX) cblas_dgemm_batch.$(PSUFFIX) : gemm_batch.c ../param.h
|
||||
$(CC) -c $(CFLAGS) -DCBLAS $< -o $(@F)
|
||||
|
||||
cblas_cgemm_batch.$(SUFFIX) cblas_cgemm_batch.$(PSUFFIX) : gemm_batch.c ../param.h
|
||||
$(CC) -c $(CFLAGS) -DCBLAS $< -o $(@F)
|
||||
|
||||
cblas_zgemm_batch.$(SUFFIX) cblas_zgemm_batch.$(PSUFFIX) : gemm_batch.c ../param.h
|
||||
$(CC) -c $(CFLAGS) -DCBLAS $< -o $(@F)
|
||||
|
|
|
@ -227,6 +227,9 @@ void CNAME(enum CBLAS_ORDER order,
|
|||
buffer = (FLOAT *)blas_memory_alloc(1);
|
||||
|
||||
#ifdef SMP
|
||||
if (m * n < 250000 || kl+ku < 15 )
|
||||
nthreads = 1;
|
||||
else
|
||||
nthreads = num_cpu_avail(2);
|
||||
|
||||
if (nthreads == 1) {
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
/*********************************************************************/
|
||||
/* Copyright 2024 The OpenBLAS Project */
|
||||
/* Copyright 2009, 2010 The University of Texas at Austin. */
|
||||
/* All rights reserved. */
|
||||
/* */
|
||||
|
@ -47,12 +48,16 @@
|
|||
#define SMP_THRESHOLD_MIN 65536.0
|
||||
#ifdef XDOUBLE
|
||||
#define ERROR_NAME "QGEMM "
|
||||
#define GEMV BLASFUNC(qgemv)
|
||||
#elif defined(DOUBLE)
|
||||
#define ERROR_NAME "DGEMM "
|
||||
#define GEMV BLASFUNC(dgemv)
|
||||
#elif defined(BFLOAT16)
|
||||
#define ERROR_NAME "SBGEMM "
|
||||
#define GEMV BLASFUNC(sbgemv)
|
||||
#else
|
||||
#define ERROR_NAME "SGEMM "
|
||||
#define GEMV BLASFUNC(sgemv)
|
||||
#endif
|
||||
#else
|
||||
#define SMP_THRESHOLD_MIN 8192.0
|
||||
|
@ -493,6 +498,52 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS
|
|||
args.m, args.n, args.k, args.lda, args.ldb, args.ldc);
|
||||
#endif
|
||||
|
||||
#if defined(GEMM_GEMV_FORWARD) && !defined(GEMM3M) && !defined(COMPLEX) && !defined(BFLOAT16)
|
||||
// Check if we can convert GEMM -> GEMV
|
||||
if (args.k != 0) {
|
||||
if (args.n == 1) {
|
||||
blasint inc_x = 1;
|
||||
blasint inc_y = 1;
|
||||
// These were passed in as blasint, but the struct translates them to blaslong
|
||||
blasint m = args.m;
|
||||
blasint n = args.k;
|
||||
blasint lda = args.lda;
|
||||
// Create new transpose parameters
|
||||
char NT = 'N';
|
||||
if (transa & 1) {
|
||||
NT = 'T';
|
||||
m = args.k;
|
||||
n = args.m;
|
||||
}
|
||||
if (transb & 1) {
|
||||
inc_x = args.ldb;
|
||||
}
|
||||
GEMV(&NT, &m, &n, args.alpha, args.a, &lda, args.b, &inc_x, args.beta, args.c, &inc_y);
|
||||
return;
|
||||
}
|
||||
if (args.m == 1) {
|
||||
blasint inc_x = args.lda;
|
||||
blasint inc_y = args.ldc;
|
||||
// These were passed in as blasint, but the struct translates them to blaslong
|
||||
blasint m = args.k;
|
||||
blasint n = args.n;
|
||||
blasint ldb = args.ldb;
|
||||
// Create new transpose parameters
|
||||
char NT = 'T';
|
||||
if (transa & 1) {
|
||||
inc_x = 1;
|
||||
}
|
||||
if (transb & 1) {
|
||||
NT = 'N';
|
||||
m = args.n;
|
||||
n = args.k;
|
||||
}
|
||||
GEMV(&NT, &m, &n, args.alpha, args.b, &ldb, args.a, &inc_x, args.beta, args.c, &inc_y);
|
||||
return;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
IDEBUG_START;
|
||||
|
||||
FUNCTION_PROFILE_START();
|
||||
|
@ -521,7 +572,13 @@ void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANS
|
|||
|
||||
buffer = (XFLOAT *)blas_memory_alloc(0);
|
||||
|
||||
//For target LOONGSON3R5, applying an offset to the buffer is essential
|
||||
//for minimizing cache conflicts and optimizing performance.
|
||||
#if defined(ARCH_LOONGARCH64) && !defined(NO_AFFINITY)
|
||||
sa = (XFLOAT *)((BLASLONG)buffer + (WhereAmI() & 0xf) * GEMM_OFFSET_A);
|
||||
#else
|
||||
sa = (XFLOAT *)((BLASLONG)buffer +GEMM_OFFSET_A);
|
||||
#endif
|
||||
sb = (XFLOAT *)(((BLASLONG)sa + ((GEMM_P * GEMM_Q * COMPSIZE * SIZE + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
|
||||
|
||||
#ifdef SMP
|
||||
|
|
|
@ -0,0 +1,372 @@
|
|||
/*****************************************************************************
|
||||
Copyright (c) 2020, The OpenBLAS Project
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
3. Neither the name of the OpenBLAS project nor the names of
|
||||
its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
**********************************************************************************/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include "common.h"
|
||||
|
||||
void openblas_warning(int verbose, const char * msg);
|
||||
|
||||
#ifndef COMPLEX
|
||||
#ifdef XDOUBLE
|
||||
#define ERROR_NAME "QGEMM_BATCH "
|
||||
#elif defined(DOUBLE)
|
||||
#define ERROR_NAME "DGEMM_BATCH "
|
||||
#define GEMM_BATCH_THREAD dgemm_batch_thread
|
||||
#else
|
||||
#define ERROR_NAME "SGEMM_BATCH "
|
||||
#define GEMM_BATCH_THREAD sgemm_batch_thread
|
||||
#endif
|
||||
#else
|
||||
#ifdef XDOUBLE
|
||||
#define ERROR_NAME "XGEMM_BATCH "
|
||||
#elif defined(DOUBLE)
|
||||
#define ERROR_NAME "ZGEMM_BATCH "
|
||||
#define GEMM_BATCH_THREAD zgemm_batch_thread
|
||||
#else
|
||||
#define ERROR_NAME "CGEMM_BATCH "
|
||||
#define GEMM_BATCH_THREAD cgemm_batch_thread
|
||||
#endif
|
||||
#endif
|
||||
static int (*gemm[])(blas_arg_t *, BLASLONG *, BLASLONG *, IFLOAT *, IFLOAT *, BLASLONG) = {
|
||||
GEMM_NN, GEMM_TN, GEMM_RN, GEMM_CN,
|
||||
GEMM_NT, GEMM_TT, GEMM_RT, GEMM_CT,
|
||||
GEMM_NR, GEMM_TR, GEMM_RR, GEMM_CR,
|
||||
GEMM_NC, GEMM_TC, GEMM_RC, GEMM_CC,
|
||||
};
|
||||
|
||||
#if defined(SMALL_MATRIX_OPT) && !defined(GEMM3M) && !defined(XDOUBLE)
|
||||
#define USE_SMALL_MATRIX_OPT 1
|
||||
#else
|
||||
#define USE_SMALL_MATRIX_OPT 0
|
||||
#endif
|
||||
|
||||
#if USE_SMALL_MATRIX_OPT
|
||||
#ifndef DYNAMIC_ARCH
|
||||
#define SMALL_KERNEL_ADDR(table, idx) ((void *)(table[idx]))
|
||||
#else
|
||||
#define SMALL_KERNEL_ADDR(table, idx) ((void *)(*(uintptr_t *)((char *)gotoblas + (size_t)(table[idx]))))
|
||||
#endif
|
||||
|
||||
|
||||
#ifndef COMPLEX
|
||||
static size_t gemm_small_kernel[] = {
|
||||
GEMM_SMALL_KERNEL_NN, GEMM_SMALL_KERNEL_TN, 0, 0,
|
||||
GEMM_SMALL_KERNEL_NT, GEMM_SMALL_KERNEL_TT, 0, 0,
|
||||
};
|
||||
|
||||
|
||||
static size_t gemm_small_kernel_b0[] = {
|
||||
GEMM_SMALL_KERNEL_B0_NN, GEMM_SMALL_KERNEL_B0_TN, 0, 0,
|
||||
GEMM_SMALL_KERNEL_B0_NT, GEMM_SMALL_KERNEL_B0_TT, 0, 0,
|
||||
};
|
||||
|
||||
#define GEMM_SMALL_KERNEL_B0(idx) (int (*)(BLASLONG, BLASLONG, BLASLONG, IFLOAT *, BLASLONG, FLOAT, IFLOAT *, BLASLONG, FLOAT *, BLASLONG)) SMALL_KERNEL_ADDR(gemm_small_kernel_b0, (idx))
|
||||
#define GEMM_SMALL_KERNEL(idx) (int (*)(BLASLONG, BLASLONG, BLASLONG, IFLOAT *, BLASLONG, FLOAT, IFLOAT *, BLASLONG, FLOAT, FLOAT *, BLASLONG)) SMALL_KERNEL_ADDR(gemm_small_kernel, (idx))
|
||||
#else
|
||||
|
||||
static size_t zgemm_small_kernel[] = {
|
||||
GEMM_SMALL_KERNEL_NN, GEMM_SMALL_KERNEL_TN, GEMM_SMALL_KERNEL_RN, GEMM_SMALL_KERNEL_CN,
|
||||
GEMM_SMALL_KERNEL_NT, GEMM_SMALL_KERNEL_TT, GEMM_SMALL_KERNEL_RT, GEMM_SMALL_KERNEL_CT,
|
||||
GEMM_SMALL_KERNEL_NR, GEMM_SMALL_KERNEL_TR, GEMM_SMALL_KERNEL_RR, GEMM_SMALL_KERNEL_CR,
|
||||
GEMM_SMALL_KERNEL_NC, GEMM_SMALL_KERNEL_TC, GEMM_SMALL_KERNEL_RC, GEMM_SMALL_KERNEL_CC,
|
||||
};
|
||||
|
||||
static size_t zgemm_small_kernel_b0[] = {
|
||||
GEMM_SMALL_KERNEL_B0_NN, GEMM_SMALL_KERNEL_B0_TN, GEMM_SMALL_KERNEL_B0_RN, GEMM_SMALL_KERNEL_B0_CN,
|
||||
GEMM_SMALL_KERNEL_B0_NT, GEMM_SMALL_KERNEL_B0_TT, GEMM_SMALL_KERNEL_B0_RT, GEMM_SMALL_KERNEL_B0_CT,
|
||||
GEMM_SMALL_KERNEL_B0_NR, GEMM_SMALL_KERNEL_B0_TR, GEMM_SMALL_KERNEL_B0_RR, GEMM_SMALL_KERNEL_B0_CR,
|
||||
GEMM_SMALL_KERNEL_B0_NC, GEMM_SMALL_KERNEL_B0_TC, GEMM_SMALL_KERNEL_B0_RC, GEMM_SMALL_KERNEL_B0_CC,
|
||||
};
|
||||
|
||||
#define ZGEMM_SMALL_KERNEL(idx) (int (*)(BLASLONG, BLASLONG, BLASLONG, FLOAT *, BLASLONG, FLOAT , FLOAT, FLOAT *, BLASLONG, FLOAT , FLOAT, FLOAT *, BLASLONG)) SMALL_KERNEL_ADDR(zgemm_small_kernel, (idx))
|
||||
#define ZGEMM_SMALL_KERNEL_B0(idx) (int (*)(BLASLONG, BLASLONG, BLASLONG, FLOAT *, BLASLONG, FLOAT , FLOAT, FLOAT *, BLASLONG, FLOAT *, BLASLONG)) SMALL_KERNEL_ADDR(zgemm_small_kernel_b0, (idx))
|
||||
#endif
|
||||
#endif
|
||||
|
||||
void CNAME(enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE * transa_array, enum CBLAS_TRANSPOSE * transb_array,
|
||||
blasint * m_array, blasint * n_array, blasint * k_array,
|
||||
#ifndef COMPLEX
|
||||
FLOAT * alpha_array,
|
||||
IFLOAT ** a_array, blasint * lda_array,
|
||||
IFLOAT ** b_array, blasint * ldb_array,
|
||||
FLOAT * beta_array,
|
||||
FLOAT ** c_array, blasint * ldc_array, blasint group_count, blasint * group_size) {
|
||||
#else
|
||||
void * valpha_array,
|
||||
void ** va_array, blasint * lda_array,
|
||||
void ** vb_array, blasint * ldb_array,
|
||||
void * vbeta_array,
|
||||
void ** vc_array, blasint * ldc_array, blasint group_count, blasint * group_size) {
|
||||
|
||||
FLOAT * alpha_array=(FLOAT *)valpha_array;
|
||||
FLOAT * beta_array=(FLOAT *)vbeta_array;
|
||||
FLOAT ** a_array=(FLOAT**)va_array;
|
||||
FLOAT ** b_array=(FLOAT**)vb_array;
|
||||
FLOAT ** c_array=(FLOAT**)vc_array;
|
||||
|
||||
#endif
|
||||
blas_arg_t * args_array=NULL;
|
||||
|
||||
int mode=0, group_mode=0;
|
||||
blasint total_num=0;
|
||||
|
||||
blasint i=0, j=0, matrix_idx=0, count=0;
|
||||
|
||||
int group_transa, group_transb;
|
||||
BLASLONG group_nrowa, group_nrowb;
|
||||
blasint info;
|
||||
|
||||
void * group_alpha, * group_beta;
|
||||
BLASLONG group_m, group_n, group_k;
|
||||
BLASLONG group_lda, group_ldb, group_ldc;
|
||||
void * group_routine=NULL;
|
||||
#ifdef SMALL_MATRIX_OPT
|
||||
void * group_small_matrix_opt_routine=NULL;
|
||||
#endif
|
||||
|
||||
#if defined (SMP) || defined(SMALL_MATRIX_OPT)
|
||||
double MNK;
|
||||
#endif
|
||||
|
||||
PRINT_DEBUG_CNAME;
|
||||
|
||||
for(i=0; i<group_count; i++){
|
||||
total_num+=group_size[i];
|
||||
}
|
||||
|
||||
args_array=(blas_arg_t *)malloc(total_num * sizeof(blas_arg_t));
|
||||
|
||||
if(args_array == NULL){
|
||||
openblas_warning(0, "memory alloc failed!\n");
|
||||
return;
|
||||
}
|
||||
|
||||
#ifdef SMP
|
||||
#ifndef COMPLEX
|
||||
#ifdef XDOUBLE
|
||||
mode = BLAS_XDOUBLE | BLAS_REAL;
|
||||
#elif defined(DOUBLE)
|
||||
mode = BLAS_DOUBLE | BLAS_REAL;
|
||||
#else
|
||||
mode = BLAS_SINGLE | BLAS_REAL;
|
||||
#endif
|
||||
#else
|
||||
#ifdef XDOUBLE
|
||||
mode = BLAS_XDOUBLE | BLAS_COMPLEX;
|
||||
#elif defined(DOUBLE)
|
||||
mode = BLAS_DOUBLE | BLAS_COMPLEX;
|
||||
#else
|
||||
mode = BLAS_SINGLE | BLAS_COMPLEX;
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
|
||||
for(i=0; i<group_count; matrix_idx+=group_size[i], i++){
|
||||
group_alpha = (void *)&alpha_array[i * COMPSIZE];
|
||||
group_beta = (void *)&beta_array[i * COMPSIZE];
|
||||
|
||||
group_m = group_n = group_k = 0;
|
||||
group_lda = group_ldb = group_ldc = 0;
|
||||
group_transa = -1;
|
||||
group_transb = -1;
|
||||
info = 0;
|
||||
|
||||
if (order == CblasColMajor) {
|
||||
group_m = m_array[i];
|
||||
group_n = n_array[i];
|
||||
group_k = k_array[i];
|
||||
|
||||
group_lda = lda_array[i];
|
||||
group_ldb = ldb_array[i];
|
||||
group_ldc = ldc_array[i];
|
||||
|
||||
if (transa_array[i] == CblasNoTrans) group_transa = 0;
|
||||
if (transa_array[i] == CblasTrans) group_transa = 1;
|
||||
#ifndef COMPLEX
|
||||
if (transa_array[i] == CblasConjNoTrans) group_transa = 0;
|
||||
if (transa_array[i] == CblasConjTrans) group_transa = 1;
|
||||
#else
|
||||
if (transa_array[i] == CblasConjNoTrans) group_transa = 2;
|
||||
if (transa_array[i] == CblasConjTrans) group_transa = 3;
|
||||
#endif
|
||||
if (transb_array[i] == CblasNoTrans) group_transb = 0;
|
||||
if (transb_array[i] == CblasTrans) group_transb = 1;
|
||||
#ifndef COMPLEX
|
||||
if (transb_array[i] == CblasConjNoTrans) group_transb = 0;
|
||||
if (transb_array[i] == CblasConjTrans) group_transb = 1;
|
||||
#else
|
||||
if (transb_array[i] == CblasConjNoTrans) group_transb = 2;
|
||||
if (transb_array[i] == CblasConjTrans) group_transb = 3;
|
||||
#endif
|
||||
group_nrowa = group_m;
|
||||
if (group_transa & 1) group_nrowa = group_k;
|
||||
group_nrowb = group_k;
|
||||
if (group_transb & 1) group_nrowb = group_n;
|
||||
|
||||
info=-1;
|
||||
|
||||
if (group_ldc < group_m) info = 13;
|
||||
if (group_ldb < group_nrowb) info = 10;
|
||||
if (group_lda < group_nrowa) info = 8;
|
||||
if (group_k < 0) info = 5;
|
||||
if (group_n < 0) info = 4;
|
||||
if (group_m < 0) info = 3;
|
||||
if (group_transb < 0) info = 2;
|
||||
if (group_transa < 0) info = 1;
|
||||
|
||||
}else if (order == CblasRowMajor) {
|
||||
|
||||
group_m = n_array[i];
|
||||
group_n = m_array[i];
|
||||
group_k = k_array[i];
|
||||
|
||||
group_lda = ldb_array[i];
|
||||
group_ldb = lda_array[i];
|
||||
group_ldc = ldc_array[i];
|
||||
|
||||
if (transb_array[i] == CblasNoTrans) group_transa = 0;
|
||||
if (transb_array[i] == CblasTrans) group_transa = 1;
|
||||
#ifndef COMPLEX
|
||||
if (transb_array[i] == CblasConjNoTrans) group_transa = 0;
|
||||
if (transb_array[i] == CblasConjTrans) group_transa = 1;
|
||||
#else
|
||||
if (transb_array[i] == CblasConjNoTrans) group_transa = 2;
|
||||
if (transb_array[i] == CblasConjTrans) group_transa = 3;
|
||||
#endif
|
||||
if (transa_array[i] == CblasNoTrans) group_transb = 0;
|
||||
if (transa_array[i] == CblasTrans) group_transb = 1;
|
||||
#ifndef COMPLEX
|
||||
if (transa_array[i] == CblasConjNoTrans) group_transb = 0;
|
||||
if (transa_array[i] == CblasConjTrans) group_transb = 1;
|
||||
#else
|
||||
if (transa_array[i] == CblasConjNoTrans) group_transb = 2;
|
||||
if (transa_array[i] == CblasConjTrans) group_transb = 3;
|
||||
#endif
|
||||
group_nrowa = group_m;
|
||||
if (group_transa & 1) group_nrowa = group_k;
|
||||
group_nrowb = group_k;
|
||||
if (group_transb & 1) group_nrowb = group_n;
|
||||
|
||||
info=-1;
|
||||
|
||||
if (group_ldc < group_m) info = 13;
|
||||
if (group_ldb < group_nrowb) info = 10;
|
||||
if (group_lda < group_nrowa) info = 8;
|
||||
if (group_k < 0) info = 5;
|
||||
if (group_n < 0) info = 4;
|
||||
if (group_m < 0) info = 3;
|
||||
if (group_transb < 0) info = 2;
|
||||
if (group_transa < 0) info = 1;
|
||||
}
|
||||
|
||||
if (info >= 0) {
|
||||
BLASFUNC(xerbla)(ERROR_NAME, &info, sizeof(ERROR_NAME));
|
||||
free(args_array);
|
||||
return;
|
||||
}
|
||||
|
||||
if (group_m == 0 || group_n == 0) continue;
|
||||
|
||||
group_mode=mode;
|
||||
|
||||
#if defined(SMP) || defined(SMALL_MATRIX_OPT)
|
||||
MNK = (double) group_m * (double) group_n * (double) group_k;
|
||||
#endif
|
||||
|
||||
#ifdef SMALL_MATRIX_OPT
|
||||
if (MNK <= 100.0*100.0*100.0){
|
||||
group_routine=NULL;
|
||||
#if !defined(COMPLEX)
|
||||
if(*(FLOAT *)(group_beta) == 0.0){
|
||||
group_mode=mode | BLAS_SMALL_B0_OPT;
|
||||
group_small_matrix_opt_routine=(void *)(gemm_small_kernel_b0[(group_transb<<2)|group_transa]);
|
||||
}else{
|
||||
group_mode=mode | BLAS_SMALL_OPT;
|
||||
group_small_matrix_opt_routine=(void *)(gemm_small_kernel[(group_transb<<2)|group_transa]);
|
||||
}
|
||||
#else
|
||||
if(((FLOAT *)(group_beta))[0] == 0.0 && ((FLOAT *)(group_beta))[1] == 0.0){
|
||||
group_mode=mode | BLAS_SMALL_B0_OPT;
|
||||
group_small_matrix_opt_routine=(void *)(zgemm_small_kernel_b0[(group_transb<<2)|group_transa]);
|
||||
}else{
|
||||
group_mode=mode | BLAS_SMALL_OPT;
|
||||
group_small_matrix_opt_routine=(void *)(zgemm_small_kernel[(group_transb<<2)|group_transa]);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
}else{
|
||||
#endif
|
||||
group_routine=(void*)(gemm[(group_transb<<2)|group_transa]);
|
||||
#ifdef SMALL_MATRIX_OPT
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
for(j=0; j<group_size[i]; j++){
|
||||
args_array[count].m=group_m;
|
||||
args_array[count].n=group_n;
|
||||
args_array[count].k=group_k;
|
||||
args_array[count].lda=group_lda;
|
||||
args_array[count].ldb=group_ldb;
|
||||
args_array[count].ldc=group_ldc;
|
||||
args_array[count].alpha=group_alpha;
|
||||
args_array[count].beta=group_beta;
|
||||
|
||||
if (order == CblasColMajor) {
|
||||
args_array[count].a=(a_array[matrix_idx+j]);
|
||||
args_array[count].b=(b_array[matrix_idx+j]);
|
||||
}else if(order == CblasRowMajor){
|
||||
args_array[count].a=(b_array[matrix_idx+j]);
|
||||
args_array[count].b=(a_array[matrix_idx+j]);
|
||||
}
|
||||
|
||||
args_array[count].c=(c_array[matrix_idx+j]);
|
||||
|
||||
args_array[count].routine_mode=group_mode;
|
||||
args_array[count].routine=group_routine;
|
||||
#ifdef SMALL_MATRIX_OPT
|
||||
if (!group_routine)
|
||||
args_array[count].routine=group_small_matrix_opt_routine;
|
||||
#endif
|
||||
count++;
|
||||
}
|
||||
}
|
||||
|
||||
if(count>0){
|
||||
GEMM_BATCH_THREAD(args_array,count);
|
||||
}
|
||||
|
||||
free(args_array);
|
||||
}
|
|
@ -85,7 +85,7 @@ void CNAME(blasint n, FLOAT alpha, FLOAT *x, blasint incx){
|
|||
if (nthreads == 1) {
|
||||
#endif
|
||||
|
||||
SCAL_K(n, 0, 0, alpha, x, incx, NULL, 0, NULL, 0);
|
||||
SCAL_K(n, 0, 0, alpha, x, incx, NULL, 0, NULL, 1);
|
||||
|
||||
#ifdef SMP
|
||||
} else {
|
||||
|
@ -102,7 +102,7 @@ void CNAME(blasint n, FLOAT alpha, FLOAT *x, blasint incx){
|
|||
#else
|
||||
&alpha,
|
||||
#endif
|
||||
x, incx, NULL, 0, NULL, 0, (int (*)(void))SCAL_K, nthreads);
|
||||
x, incx, NULL, 0, NULL, 1, (int (*)(void))SCAL_K, nthreads);
|
||||
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -251,6 +251,9 @@ void CNAME(enum CBLAS_ORDER order,
|
|||
buffer = (FLOAT *)blas_memory_alloc(1);
|
||||
|
||||
#ifdef SMP
|
||||
if (m * n < 125000 || ku + kl < 15)
|
||||
nthreads = 1;
|
||||
else
|
||||
nthreads = num_cpu_avail(2);
|
||||
|
||||
if (nthreads == 1) {
|
||||
|
|
|
@ -17,6 +17,7 @@ ifeq ($(ARCH), ia64)
|
|||
USE_GEMM3M = 1
|
||||
endif
|
||||
|
||||
|
||||
ifeq ($(ARCH), arm)
|
||||
USE_TRMM = 1
|
||||
endif
|
||||
|
|
|
@ -43,7 +43,7 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS
|
|||
if ( (n <= 0) || (inc_x <= 0))
|
||||
return(0);
|
||||
|
||||
|
||||
if (dummy2 == 0) {
|
||||
while(j < n)
|
||||
{
|
||||
|
||||
|
@ -54,7 +54,25 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLAS
|
|||
|
||||
i += inc_x ;
|
||||
j++;
|
||||
}
|
||||
} else {
|
||||
|
||||
while(j < n)
|
||||
{
|
||||
|
||||
if ( da == 0.0 )
|
||||
if (!isnan(x[i]) && !isinf(x[i])) {
|
||||
x[i]=0.0;
|
||||
} else {
|
||||
x[i]=NAN;
|
||||
}
|
||||
else
|
||||
x[i] = da * x[i] ;
|
||||
|
||||
i += inc_x ;
|
||||
j++;
|
||||
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -61,7 +61,9 @@ int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r,FLOAT da_i, F
|
|||
{
|
||||
temp = - da_i * x[ip+1] ;
|
||||
if (isnan(x[ip]) || isinf(x[ip])) temp = NAN;
|
||||
if (!isinf(x[ip+1]))
|
||||
x[ip+1] = da_i * x[ip] ;
|
||||
else x[ip+1] = NAN;
|
||||
}
|
||||
}
|
||||
else
|
||||
|
|
|
@ -1 +1,6 @@
|
|||
include $(KERNELDIR)/KERNEL.ARMV8SVE
|
||||
|
||||
SGEMVNKERNEL = gemv_n_sve.c
|
||||
DGEMVNKERNEL = gemv_n_sve.c
|
||||
SGEMVTKERNEL = gemv_t_sve.c
|
||||
DGEMVTKERNEL = gemv_t_sve.c
|
||||
|
|
|
@ -131,6 +131,16 @@ SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX)
|
|||
SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX)
|
||||
SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX)
|
||||
|
||||
SGEMM_SMALL_M_PERMIT = gemm_small_kernel_permit_sve.c
|
||||
SGEMM_SMALL_K_NT = sgemm_small_kernel_nt_sve.c
|
||||
SGEMM_SMALL_K_B0_NT = sgemm_small_kernel_nt_sve.c
|
||||
SGEMM_SMALL_K_NN = sgemm_small_kernel_nn_sve.c
|
||||
SGEMM_SMALL_K_B0_NN = sgemm_small_kernel_nn_sve.c
|
||||
SGEMM_SMALL_K_TT = sgemm_small_kernel_tt_sve.c
|
||||
SGEMM_SMALL_K_B0_TT = sgemm_small_kernel_tt_sve.c
|
||||
SGEMM_SMALL_K_TN = sgemm_small_kernel_tn_sve.c
|
||||
SGEMM_SMALL_K_B0_TN = sgemm_small_kernel_tn_sve.c
|
||||
|
||||
STRMMUNCOPY_M = trmm_uncopy_sve_v1.c
|
||||
STRMMLNCOPY_M = trmm_lncopy_sve_v1.c
|
||||
STRMMUTCOPY_M = trmm_utcopy_sve_v1.c
|
||||
|
@ -152,6 +162,16 @@ DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX)
|
|||
DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX)
|
||||
DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX)
|
||||
|
||||
DGEMM_SMALL_M_PERMIT = gemm_small_kernel_permit_sve.c
|
||||
DGEMM_SMALL_K_NT = dgemm_small_kernel_nt_sve.c
|
||||
DGEMM_SMALL_K_B0_NT = dgemm_small_kernel_nt_sve.c
|
||||
DGEMM_SMALL_K_NN = dgemm_small_kernel_nn_sve.c
|
||||
DGEMM_SMALL_K_B0_NN = dgemm_small_kernel_nn_sve.c
|
||||
DGEMM_SMALL_K_TT = dgemm_small_kernel_tt_sve.c
|
||||
DGEMM_SMALL_K_B0_TT = dgemm_small_kernel_tt_sve.c
|
||||
DGEMM_SMALL_K_TN = dgemm_small_kernel_tn_sve.c
|
||||
DGEMM_SMALL_K_B0_TN = dgemm_small_kernel_tn_sve.c
|
||||
|
||||
DTRMMUNCOPY_M = trmm_uncopy_sve_v1.c
|
||||
DTRMMLNCOPY_M = trmm_lncopy_sve_v1.c
|
||||
DTRMMUTCOPY_M = trmm_utcopy_sve_v1.c
|
||||
|
|
|
@ -93,8 +93,8 @@ IZAMAXKERNEL = izamax_thunderx2t99.c
|
|||
|
||||
SNRM2KERNEL = nrm2.S
|
||||
DNRM2KERNEL = nrm2.S
|
||||
CNRM2KERNEL = scnrm2_thunderx2t99.c
|
||||
ZNRM2KERNEL = dznrm2_thunderx2t99.c
|
||||
CNRM2KERNEL = znrm2.S
|
||||
ZNRM2KERNEL = znrm2.S
|
||||
|
||||
DDOTKERNEL = dot.c
|
||||
SDOTKERNEL = dot.c
|
||||
|
|
|
@ -1 +1,4 @@
|
|||
include $(KERNELDIR)/KERNEL.ARMV8SVE
|
||||
|
||||
SGEMVTKERNEL = gemv_t_sve.c
|
||||
DGEMVTKERNEL = gemv_t_sve.c
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
include $(KERNELDIR)/KERNEL.ARMV8SVE
|
|
@ -0,0 +1,742 @@
|
|||
/***************************************************************************
|
||||
Copyright (c) 2024, The OpenBLAS Project
|
||||
All rights reserved.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
3. Neither the name of the OpenBLAS project nor the names of
|
||||
its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written permission.
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
||||
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
|
||||
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*****************************************************************************/
|
||||
|
||||
#include "common.h"
|
||||
|
||||
#include <arm_neon.h>
|
||||
#include <arm_sve.h>
|
||||
#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \
|
||||
__has_include(<arm_neon_sve_bridge.h>)
|
||||
#include <arm_neon_sve_bridge.h>
|
||||
#else
|
||||
#define svdup_neonq_f32(fixed_reg) \
|
||||
({ \
|
||||
svfloat32_t scalable_reg; \
|
||||
asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \
|
||||
scalable_reg; \
|
||||
})
|
||||
#define svdup_neonq_f64(fixed_reg) \
|
||||
({ \
|
||||
svfloat64_t scalable_reg; \
|
||||
asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \
|
||||
scalable_reg; \
|
||||
})
|
||||
#endif
|
||||
|
||||
#define RESET_A_POINTER() a_offset = A;
|
||||
|
||||
#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale;
|
||||
#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale;
|
||||
#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k) * lda)
|
||||
#define A_ELEMENT(m) A_ELEMENT_K(m, 0)
|
||||
|
||||
#define RESET_B_POINTER() b_offset = B;
|
||||
|
||||
#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale * ldb;
|
||||
#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale * ldb;
|
||||
#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k))
|
||||
#define B_ELEMENT(n) B_ELEMENT_K(n, 0)
|
||||
|
||||
#define CREATE_C_POINTER(n, scale) FLOAT* c_offset##n = c_offset + scale * ldc;
|
||||
#define INCR_C_POINTER(m, incr) // c_offset ## m += incr;
|
||||
#define UPDATE_C_POINTER(scale) c_offset = c_offset + scale * ldc;
|
||||
#define C_ELEMENT(m, n) *(c_offset##n + ((m * v_size) + i))
|
||||
|
||||
// #undef C_ELEMENT
|
||||
// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc]
|
||||
|
||||
#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 4 + n]
|
||||
#define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0)
|
||||
|
||||
// ASIMD
|
||||
#define DECLARE_RESULT_VECTOR2(m, n) \
|
||||
float64x2_t result##m##n = vdupq_n_f64(0.0);
|
||||
#define DECLARE_RESULT(m, n) float64_t result##m##n = 0.0;
|
||||
#define BROADCAST_LOAD_A2(m, offset_k) \
|
||||
float64x2_t a##m##_k##offset_k = vld1q_dup_f64(&A_ELEMENT_K(m, offset_k));
|
||||
#define LOAD_A1(m, offset_k) \
|
||||
float64_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k);
|
||||
#define VECTOR_LOAD_B_K2(n, offset_k) \
|
||||
float64x2_t b##k##n##_k##offset_k = vld1q_f64(&B_ELEMENT_K(n, offset_k));
|
||||
#define TRANSPOSE_B2_K2(n0, n1, offset_k0, offset_k1) \
|
||||
float64x2_t b##n0##_k##offset_k0 = \
|
||||
vzip1q_f64(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0); \
|
||||
float64x2_t b##n0##_k##offset_k1 = \
|
||||
vzip2q_f64(b##k##n0##_k##offset_k0, b##k##n1##_k##offset_k0);
|
||||
|
||||
#define SCALE_B2_K2(n0, offset_k0, offset_k1) \
|
||||
svfloat64_t b##s##n0##_k##offset_k0 = svdup_neonq_f64(b##n0##_k##offset_k0); \
|
||||
svfloat64_t b##s##n0##_k##offset_k1 = svdup_neonq_f64(b##n0##_k##offset_k1);
|
||||
#define GATHER_LOAD_B2(n, offset_k) \
|
||||
float64x2_t b##n##_k##offset_k = vdupq_n_f64(B_ELEMENT_K(n, offset_k)); \
|
||||
b##n##_k##offset_k = \
|
||||
vsetq_lane_f64(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1);
|
||||
#define VECTOR_UNPACK_B2(n, offset_k) \
|
||||
float64x2_t b##n##_k##offset_k = vld1q_f64(&PACK_ELEMENT_K(n, offset_k));
|
||||
#define VECTOR_PACK_B2(n, offset_k) \
|
||||
vst1q_f64(&PACK_ELEMENT_K(n, offset_k), b##n##_k##offset_k);
|
||||
#define PACK_B0(n, offset_k) \
|
||||
PACK_ELEMENT_K(n, offset_k) = vget_lane_f64(b##n##_k##offset_k, 0);
|
||||
#define UPDATE_RESULT_VECTOR2(m, n, offset_k) \
|
||||
result##m##n = \
|
||||
vfmaq_f64(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k);
|
||||
#define UPDATE_RESULT(m, n, offset_k) \
|
||||
result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k;
|
||||
#ifdef B0
|
||||
#define SCATTER_STORE2(m, n) \
|
||||
result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \
|
||||
C_ELEMENT(m, n + 0) = vgetq_lane_f64(result##m##n, 0); \
|
||||
C_ELEMENT(m, n + 1) = vgetq_lane_f64(result##m##n, 1);
|
||||
#else
|
||||
#define SCATTER_STORE2(m, n) \
|
||||
result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \
|
||||
C_ELEMENT(m, n + 0) = \
|
||||
C_ELEMENT(m, n + 0) * beta + vgetq_lane_f64(result##m##n, 0); \
|
||||
C_ELEMENT(m, n + 1) = \
|
||||
C_ELEMENT(m, n + 1) * beta + vgetq_lane_f64(result##m##n, 1);
|
||||
#endif
|
||||
|
||||
// SVE
|
||||
#define DECLARE_RESULT_VECTOR(m, n) svfloat64_t result##m##n = svdup_f64(0.0);
|
||||
#define BROADCAST_LOAD_A(m, offset_k) \
|
||||
svfloat64_t a##s##m##_k##offset_k = svdup_f64(A_ELEMENT_K(m, offset_k));
|
||||
#define BROADCAST_LOAD_B(n, offset_k) \
|
||||
svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k));
|
||||
#define VECTOR_LOAD_A(pg, m, offset_k) \
|
||||
svfloat64_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k));
|
||||
#define QUADWORD_LOAD_B(n, offset_k) \
|
||||
svfloat64_t b##s##n##_k##offset_k = \
|
||||
svld1rq(pg_true, &B_ELEMENT_K(n, offset_k));
|
||||
#define PACK_B(n, offset_k) \
|
||||
svst1(pg_first, &PACK_ELEMENT_K(n, offset_k), b##s##n##_k##offset_k);
|
||||
#define VECTOR_PACK_B(n, offset_k) \
|
||||
svst1(pg_true, &PACK_ELEMENT_K(n* v_size, offset_k), b##s##n##_k##offset_k);
|
||||
#define QUADWORD_PACK_B(n, offset_k) \
|
||||
svst1(pg_quad, &PACK_ELEMENT_K(n, offset_k), b##s##n##_k##offset_k);
|
||||
#define UNPACK_VECTOR_B(n, offset_k) \
|
||||
svfloat64_t b##s##n##_k##offset_k = \
|
||||
svld1(pg_true, &PACK_ELEMENT_K(n * v_size, offset_k));
|
||||
#define UNPACK_BROADCAST_B(n, offset_k) \
|
||||
svfloat64_t b##s##n##_k##offset_k = svdup_f64(PACK_ELEMENT_K(n, offset_k));
|
||||
#define UNPACK_QUADWORD_B(n, offset_k) \
|
||||
svfloat64_t b##s##n##_k##offset_k = \
|
||||
svld1rq(pg_true, &PACK_ELEMENT_K(n, offset_k));
|
||||
#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \
|
||||
result##m##n = \
|
||||
svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k);
|
||||
#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \
|
||||
result##m##n = svmla_lane( \
|
||||
result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane);
|
||||
#ifdef B0
|
||||
#define VECTOR_STORE(pg, m, n) \
|
||||
result##m##n = svmul_m(pg, result##m##n, alpha_vec); \
|
||||
svst1(pg, &C_ELEMENT(m, n), result##m##n);
|
||||
#define SCATTER_STORE(pg, m, n) \
|
||||
result##m##n = svmul_m(pg, result##m##n, alpha_vec); \
|
||||
svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n);
|
||||
#else
|
||||
#define VECTOR_STORE(pg, m, n) \
|
||||
result##m##n = svmul_m(pg, result##m##n, alpha_vec); \
|
||||
result##m##n = \
|
||||
svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \
|
||||
svst1(pg, &C_ELEMENT(m, n), result##m##n);
|
||||
#define SCATTER_STORE(pg, m, n) \
|
||||
result##m##n = svmul_m(pg, result##m##n, alpha_vec); \
|
||||
result##m##n = svmla_m(pg, \
|
||||
result##m##n, \
|
||||
svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \
|
||||
beta_vec); \
|
||||
svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n);
|
||||
#endif
|
||||
|
||||
#ifndef LIKELY
|
||||
#ifdef __GNUC__
|
||||
#define LIKELY(x) __builtin_expect(!!(x), 1)
|
||||
#else
|
||||
#define LIKELY(x) (x)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/*
 * Small-matrix DGEMM kernel (SVE, 64-bit lanes) with optional on-the-fly
 * packing of B.
 *
 *   B0 defined:   C = alpha * A * B
 *   B0 undefined: C = alpha * A * B + beta * C
 *
 * Columns of B are processed 4 / 2 / 1 at a time (j loops); rows of A are
 * processed 2*VL / VL / tail at a time (i loops), VL = svcntd() doubles.
 * When the problem is large enough (pack_b), the first row-block iteration
 * of each 4-column panel packs alpha-scaled B values into packed_b so that
 * subsequent row blocks can reload them with quadword loads.
 *
 * Fixes vs. previous revision:
 *  - packed_b was assigned inside its own initializer
 *    ("FLOAT* packed_b = (pack_b) ? packed_b = malloc(...) : NULL;"),
 *    i.e. the object was written while still being initialized; the
 *    redundant inner assignment is removed.
 *  - free(packed_b) is called unconditionally: free(NULL) is a no-op, so
 *    the "if (pack_b)" guard was redundant (and the unconditional form is
 *    also correct if malloc failed while pack_b was set).
 */
#ifdef B0
int
CNAME(BLASLONG M,
      BLASLONG N,
      BLASLONG K,
      IFLOAT* A,
      BLASLONG lda,
      FLOAT alpha,
      IFLOAT* B,
      BLASLONG ldb,
      FLOAT* C,
      BLASLONG ldc)
#else
int
CNAME(BLASLONG M,
      BLASLONG N,
      BLASLONG K,
      IFLOAT* A,
      BLASLONG lda,
      FLOAT alpha,
      IFLOAT* B,
      BLASLONG ldb,
      FLOAT beta,
      FLOAT* C,
      BLASLONG ldc)
#endif
{
  const uint64_t v_size = svcntd();      /* SVE vector length in doubles */
  const uint64_t v_size2 = v_size * 2;
  const svbool_t pg_true = svptrue_b64();
  /* pg_quad / pg_first may be referenced by the B-load/transpose/pack
   * macros defined above this function — keep them declared. */
  const svbool_t pg_quad = svwhilelt_b64(0, 2);
  const svbool_t pg_first = svwhilelt_b64(0, 1);
  const svfloat64_t alpha_vec = svdup_f64(alpha);
#ifndef B0
  const svfloat64_t beta_vec = svdup_f64(beta);
#endif
  /* Round N/M/K down to multiples of the blocking factors. */
  const BLASLONG n4 = N & -4;
  const BLASLONG n2 = N & -2;
  const BLASLONG v_m2 = M & -v_size2;
  const BLASLONG v_m1 = M & -v_size;
  const BLASLONG k2 = K & -2;

  /* Packing B pays off only when the 4-column panel is reused by at least
   * two row blocks and the panel is reasonably large. */
  const int pack_b = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0;
  /* FIX: no self-assignment inside the initializer. */
  FLOAT* packed_b = pack_b ? (FLOAT*)malloc(K * 4 * sizeof(FLOAT)) : NULL;

  FLOAT* b_offset = B;
  FLOAT* a_offset = A;
  FLOAT* c_offset = C;

  BLASLONG j = 0;
  /* ---- 4-column panels of B/C ---- */
  for (; j < n4; j += 4) {

    CREATE_C_POINTER(0, 0);
    CREATE_C_POINTER(1, 1);
    CREATE_C_POINTER(2, 2);
    CREATE_C_POINTER(3, 3);
    CREATE_B_POINTER(0, 0);
    CREATE_B_POINTER(1, 1);
    CREATE_B_POINTER(2, 2);
    CREATE_B_POINTER(3, 3);

    BLASLONG i = 0;
    /* ---- 2*VL rows of A at a time ---- */
    for (; i < v_m2; i += v_size2) {

      CREATE_A_POINTER(0, 0);
      CREATE_A_POINTER(1, v_size);
      UPDATE_A_POINTER(v_size2);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);
      DECLARE_RESULT_VECTOR(0, 1);
      DECLARE_RESULT_VECTOR(0, 2);
      DECLARE_RESULT_VECTOR(0, 3);
      DECLARE_RESULT_VECTOR(1, 0);
      DECLARE_RESULT_VECTOR(1, 1);
      DECLARE_RESULT_VECTOR(1, 2);
      DECLARE_RESULT_VECTOR(1, 3);

      /* packed_b == NULL either because pack_b is 0 or malloc failed;
       * both fall back to the unpacked path below. */
      if (LIKELY(packed_b != NULL)) {
        if (i == 0) {
          /* First row block: compute AND fill packed_b. */
          for (; k < k2; k += 2) {

            VECTOR_LOAD_B_K2(0, 0);
            VECTOR_LOAD_B_K2(1, 0);
            TRANSPOSE_B2_K2(0, 1, 0, 1);
            SCALE_B2_K2(0, 0, 1);
            VECTOR_PACK_B2(0, 0);
            VECTOR_PACK_B2(0, 1);
            VECTOR_LOAD_A(pg_true, 0, 0);
            UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0);
            UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0);
            VECTOR_LOAD_A(pg_true, 0, 1);
            UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1);
            UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1);
            VECTOR_LOAD_B_K2(2, 0);
            VECTOR_LOAD_B_K2(3, 0);
            TRANSPOSE_B2_K2(2, 3, 0, 1);
            SCALE_B2_K2(2, 0, 1);
            VECTOR_PACK_B2(2, 0);
            VECTOR_PACK_B2(2, 1);
            UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0);
            UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0);
            UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1);
            UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1);
            VECTOR_LOAD_A(pg_true, 1, 0);
            UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0);
            UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0);
            UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0);
            UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0);
            VECTOR_LOAD_A(pg_true, 1, 1);
            UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1);
            UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1);
            UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1);
            UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1);
          }
          /* K remainder (odd K): scalar-broadcast B, still packing. */
          for (; k < K; k++) {

            BROADCAST_LOAD_B(0, 0);
            PACK_B(0, 0);
            VECTOR_LOAD_A(pg_true, 0, 0);
            UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
            BROADCAST_LOAD_B(1, 0);
            PACK_B(1, 0);
            UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0);
            VECTOR_LOAD_A(pg_true, 1, 0);
            UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0);
            UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0);
            BROADCAST_LOAD_B(2, 0);
            PACK_B(2, 0);
            UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0);
            UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0);
            BROADCAST_LOAD_B(3, 0);
            PACK_B(3, 0);
            UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0);
            UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0);
          }
        } else {
          /* Subsequent row blocks: consume packed (pre-scaled) B. */
          for (; k < K; k++) {

            UNPACK_QUADWORD_B(0, 0);
            VECTOR_LOAD_A(pg_true, 0, 0);
            UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0);
            UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0);
            UNPACK_QUADWORD_B(2, 0);
            UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0);
            UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0);
            VECTOR_LOAD_A(pg_true, 1, 0);
            UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0);
            UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0);
            UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0);
            UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0);
          }
        }
      } else {
        /* No packing: same math, B loaded straight from memory. */
        for (; k < k2; k += 2) {

          VECTOR_LOAD_B_K2(0, 0);
          VECTOR_LOAD_B_K2(1, 0);
          TRANSPOSE_B2_K2(0, 1, 0, 1);
          SCALE_B2_K2(0, 0, 1);
          VECTOR_LOAD_A(pg_true, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0);
          VECTOR_LOAD_A(pg_true, 0, 1);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1);
          VECTOR_LOAD_B_K2(2, 0);
          VECTOR_LOAD_B_K2(3, 0);
          TRANSPOSE_B2_K2(2, 3, 0, 1);
          SCALE_B2_K2(2, 0, 1);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1);
          VECTOR_LOAD_A(pg_true, 1, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0);
          VECTOR_LOAD_A(pg_true, 1, 1);
          UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1);
          UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1);
          UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 1);
          UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 1);
        }
        for (; k < K; k++) {

          BROADCAST_LOAD_B(0, 0);
          VECTOR_LOAD_A(pg_true, 0, 0);
          UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
          BROADCAST_LOAD_B(1, 0);
          UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0);
          VECTOR_LOAD_A(pg_true, 1, 0);
          UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0);
          UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0);
          BROADCAST_LOAD_B(2, 0);
          UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0);
          UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0);
          BROADCAST_LOAD_B(3, 0);
          UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0);
          UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0);
        }
      }
      VECTOR_STORE(pg_true, 0, 0);
      VECTOR_STORE(pg_true, 0, 1);
      VECTOR_STORE(pg_true, 0, 2);
      VECTOR_STORE(pg_true, 0, 3);
      VECTOR_STORE(pg_true, 1, 0);
      VECTOR_STORE(pg_true, 1, 1);
      VECTOR_STORE(pg_true, 1, 2);
      VECTOR_STORE(pg_true, 1, 3);
      INCR_C_POINTER(0, v_size2);
      INCR_C_POINTER(1, v_size2);
      INCR_C_POINTER(2, v_size2);
      INCR_C_POINTER(3, v_size2);
    }
    /* ---- one VL of rows at a time (packed B is quadword-reloaded) ---- */
    for (; i < v_m1; i += v_size) {

      CREATE_A_POINTER(0, 0);
      UPDATE_A_POINTER(v_size);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);
      DECLARE_RESULT_VECTOR(0, 1);
      DECLARE_RESULT_VECTOR(0, 2);
      DECLARE_RESULT_VECTOR(0, 3);

      if (LIKELY(packed_b != NULL)) {
        for (; k < K; k++) {

          UNPACK_QUADWORD_B(0, 0);
          VECTOR_LOAD_A(pg_true, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0);
          UNPACK_QUADWORD_B(2, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0);
        }
      } else {
        for (; k < k2; k += 2) {

          VECTOR_LOAD_B_K2(0, 0);
          VECTOR_LOAD_B_K2(1, 0);
          TRANSPOSE_B2_K2(0, 1, 0, 1);
          SCALE_B2_K2(0, 0, 1);
          VECTOR_LOAD_A(pg_true, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0);
          VECTOR_LOAD_A(pg_true, 0, 1);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1);
          VECTOR_LOAD_B_K2(2, 0);
          VECTOR_LOAD_B_K2(3, 0);
          TRANSPOSE_B2_K2(2, 3, 0, 1);
          SCALE_B2_K2(2, 0, 1);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1);
        }
        for (; k < K; k++) {

          BROADCAST_LOAD_B(0, 0);
          VECTOR_LOAD_A(pg_true, 0, 0);
          UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
          BROADCAST_LOAD_B(1, 0);
          UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0);
          BROADCAST_LOAD_B(2, 0);
          UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0);
          BROADCAST_LOAD_B(3, 0);
          UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0);
        }
      }
      VECTOR_STORE(pg_true, 0, 0);
      VECTOR_STORE(pg_true, 0, 1);
      VECTOR_STORE(pg_true, 0, 2);
      VECTOR_STORE(pg_true, 0, 3);
      INCR_C_POINTER(0, v_size);
      INCR_C_POINTER(1, v_size);
      INCR_C_POINTER(2, v_size);
      INCR_C_POINTER(3, v_size);
    }
    /* ---- row tail, predicated with pg_tail ---- */
    for (; i < M; i += v_size) {
      const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M));
      CREATE_A_POINTER(0, 0);
      UPDATE_A_POINTER(0);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);
      DECLARE_RESULT_VECTOR(0, 1);
      DECLARE_RESULT_VECTOR(0, 2);
      DECLARE_RESULT_VECTOR(0, 3);

      if (LIKELY(packed_b != NULL)) {
        for (; k < K; k++) {

          UNPACK_QUADWORD_B(0, 0);
          VECTOR_LOAD_A(pg_tail, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0);
          UNPACK_QUADWORD_B(2, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0);
        }
      } else {
        for (; k < k2; k += 2) {

          VECTOR_LOAD_B_K2(0, 0);
          VECTOR_LOAD_B_K2(1, 0);
          TRANSPOSE_B2_K2(0, 1, 0, 1);
          SCALE_B2_K2(0, 0, 1);
          VECTOR_LOAD_A(pg_tail, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0);
          VECTOR_LOAD_A(pg_tail, 0, 1);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1);
          VECTOR_LOAD_B_K2(2, 0);
          VECTOR_LOAD_B_K2(3, 0);
          TRANSPOSE_B2_K2(2, 3, 0, 1);
          SCALE_B2_K2(2, 0, 1);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 1);
          UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 1);
        }
        for (; k < K; k++) {

          BROADCAST_LOAD_B(0, 0);
          VECTOR_LOAD_A(pg_tail, 0, 0);
          UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0);
          BROADCAST_LOAD_B(1, 0);
          UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0);
          BROADCAST_LOAD_B(2, 0);
          UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0);
          BROADCAST_LOAD_B(3, 0);
          UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0);
        }
      }
      VECTOR_STORE(pg_tail, 0, 0);
      VECTOR_STORE(pg_tail, 0, 1);
      VECTOR_STORE(pg_tail, 0, 2);
      VECTOR_STORE(pg_tail, 0, 3);
      INCR_C_POINTER(0, 0);
      INCR_C_POINTER(1, 0);
      INCR_C_POINTER(2, 0);
      INCR_C_POINTER(3, 0);
    }

    UPDATE_B_POINTER(4);
    RESET_A_POINTER();
    UPDATE_C_POINTER(4);
  }
  /* ---- 2-column panels (no packing path here) ---- */
  for (; j < n2; j += 2) {

    CREATE_C_POINTER(0, 0);
    CREATE_C_POINTER(1, 1);
    CREATE_B_POINTER(0, 0);
    CREATE_B_POINTER(1, 1);

    BLASLONG i = 0;
    for (; i < v_m2; i += v_size2) {

      CREATE_A_POINTER(0, 0);
      CREATE_A_POINTER(1, v_size);
      UPDATE_A_POINTER(v_size2);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);
      DECLARE_RESULT_VECTOR(0, 1);
      DECLARE_RESULT_VECTOR(1, 0);
      DECLARE_RESULT_VECTOR(1, 1);

      for (; k < k2; k += 2) {

        VECTOR_LOAD_B_K2(0, 0);
        VECTOR_LOAD_B_K2(1, 0);
        TRANSPOSE_B2_K2(0, 1, 0, 1);
        SCALE_B2_K2(0, 0, 1);
        VECTOR_LOAD_A(pg_true, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0);
        VECTOR_LOAD_A(pg_true, 0, 1);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1);
        VECTOR_LOAD_A(pg_true, 1, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0);
        VECTOR_LOAD_A(pg_true, 1, 1);
        UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 1);
        UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 1);
      }
      for (; k < K; k++) {

        BROADCAST_LOAD_B(0, 0);
        VECTOR_LOAD_A(pg_true, 0, 0);
        UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
        BROADCAST_LOAD_B(1, 0);
        UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0);
        VECTOR_LOAD_A(pg_true, 1, 0);
        UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0);
        UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0);
      }
      VECTOR_STORE(pg_true, 0, 0);
      VECTOR_STORE(pg_true, 0, 1);
      VECTOR_STORE(pg_true, 1, 0);
      VECTOR_STORE(pg_true, 1, 1);
      INCR_C_POINTER(0, v_size2);
      INCR_C_POINTER(1, v_size2);
    }
    for (; i < v_m1; i += v_size) {

      CREATE_A_POINTER(0, 0);
      UPDATE_A_POINTER(v_size);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);
      DECLARE_RESULT_VECTOR(0, 1);

      for (; k < k2; k += 2) {

        VECTOR_LOAD_B_K2(0, 0);
        VECTOR_LOAD_B_K2(1, 0);
        TRANSPOSE_B2_K2(0, 1, 0, 1);
        SCALE_B2_K2(0, 0, 1);
        VECTOR_LOAD_A(pg_true, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0);
        VECTOR_LOAD_A(pg_true, 0, 1);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1);
      }
      for (; k < K; k++) {

        BROADCAST_LOAD_B(0, 0);
        VECTOR_LOAD_A(pg_true, 0, 0);
        UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
        BROADCAST_LOAD_B(1, 0);
        UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0);
      }
      VECTOR_STORE(pg_true, 0, 0);
      VECTOR_STORE(pg_true, 0, 1);
      INCR_C_POINTER(0, v_size);
      INCR_C_POINTER(1, v_size);
    }
    for (; i < M; i += v_size) {
      const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M));
      CREATE_A_POINTER(0, 0);
      UPDATE_A_POINTER(0);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);
      DECLARE_RESULT_VECTOR(0, 1);

      for (; k < k2; k += 2) {

        VECTOR_LOAD_B_K2(0, 0);
        VECTOR_LOAD_B_K2(1, 0);
        TRANSPOSE_B2_K2(0, 1, 0, 1);
        SCALE_B2_K2(0, 0, 1);
        VECTOR_LOAD_A(pg_tail, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0);
        VECTOR_LOAD_A(pg_tail, 0, 1);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 1);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 1);
      }
      for (; k < K; k++) {

        BROADCAST_LOAD_B(0, 0);
        VECTOR_LOAD_A(pg_tail, 0, 0);
        UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0);
        BROADCAST_LOAD_B(1, 0);
        UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0);
      }
      VECTOR_STORE(pg_tail, 0, 0);
      VECTOR_STORE(pg_tail, 0, 1);
      INCR_C_POINTER(0, 0);
      INCR_C_POINTER(1, 0);
    }

    UPDATE_B_POINTER(2);
    RESET_A_POINTER();
    UPDATE_C_POINTER(2);
  }
  /* ---- single remaining column ---- */
  for (; j < N; j++) {

    CREATE_C_POINTER(0, 0);
    CREATE_B_POINTER(0, 0);

    BLASLONG i = 0;
    for (; i < v_m2; i += v_size2) {

      CREATE_A_POINTER(0, 0);
      CREATE_A_POINTER(1, v_size);
      UPDATE_A_POINTER(v_size2);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);
      DECLARE_RESULT_VECTOR(1, 0);

      for (; k < K; k++) {

        BROADCAST_LOAD_B(0, 0);
        VECTOR_LOAD_A(pg_true, 0, 0);
        UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
        VECTOR_LOAD_A(pg_true, 1, 0);
        UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0);
      }
      VECTOR_STORE(pg_true, 0, 0);
      VECTOR_STORE(pg_true, 1, 0);
      INCR_C_POINTER(0, v_size2);
    }
    for (; i < v_m1; i += v_size) {

      CREATE_A_POINTER(0, 0);
      UPDATE_A_POINTER(v_size);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);

      for (; k < K; k++) {

        BROADCAST_LOAD_B(0, 0);
        VECTOR_LOAD_A(pg_true, 0, 0);
        UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
      }
      VECTOR_STORE(pg_true, 0, 0);
      INCR_C_POINTER(0, v_size);
    }
    for (; i < M; i += v_size) {
      const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M));
      CREATE_A_POINTER(0, 0);
      UPDATE_A_POINTER(0);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);

      for (; k < K; k++) {

        BROADCAST_LOAD_B(0, 0);
        VECTOR_LOAD_A(pg_tail, 0, 0);
        UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0);
      }
      VECTOR_STORE(pg_tail, 0, 0);
      INCR_C_POINTER(0, 0);
    }

    UPDATE_B_POINTER(1);
    RESET_A_POINTER();
    UPDATE_C_POINTER(1);
  }

  /* free(NULL) is a no-op — no pack_b guard required. */
  free(packed_b);

  return 0;
}
|
|
@ -0,0 +1,474 @@
|
|||
/***************************************************************************
|
||||
Copyright (c) 2024, The OpenBLAS Project
|
||||
All rights reserved.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
3. Neither the name of the OpenBLAS project nor the names of
|
||||
its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written permission.
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
||||
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
|
||||
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*****************************************************************************/
|
||||
|
||||
#include "common.h"

#include <arm_neon.h>
#include <arm_sve.h>
/* Use the official NEON<->SVE bridge header when available; otherwise fall
 * back to an inline-asm "mov z.q, q" that broadcasts a fixed NEON register
 * into the low quadword of a scalable register. */
#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \
  __has_include(<arm_neon_sve_bridge.h>)
#include <arm_neon_sve_bridge.h>
#else
#define svdup_neonq_f32(fixed_reg) \
  ({ \
    svfloat32_t scalable_reg; \
    asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \
    scalable_reg; \
  })
#define svdup_neonq_f64(fixed_reg) \
  ({ \
    svfloat64_t scalable_reg; \
    asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :); \
    scalable_reg; \
  })
#endif

/* --- A-matrix addressing helpers (expect locals a_offset/k/lda in scope) --- */
#define RESET_A_POINTER() a_offset = A;

#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale;
#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale;
#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k) * lda)
#define A_ELEMENT(m) A_ELEMENT_K(m, 0)

/* --- B-matrix addressing helpers --- */
#define RESET_B_POINTER() b_offset = B;

#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale;
#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale;
#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k) * ldb)
#define B_ELEMENT(n) B_ELEMENT_K(n, 0)

/* --- C-matrix addressing helpers; INCR_C_POINTER is intentionally a no-op
 * because C_ELEMENT indexes with (m * v_size + i) directly. --- */
#define CREATE_C_POINTER(n, scale) FLOAT* c_offset##n = c_offset + scale * ldc;
#define INCR_C_POINTER(m, incr) // c_offset ## m += incr;
#define UPDATE_C_POINTER(scale) c_offset = c_offset + scale * ldc;
#define C_ELEMENT(m, n) *(c_offset##n + ((m * v_size) + i))

// #undef C_ELEMENT
// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc]

/* Packed-B buffer layout: 4 consecutive column values per k step. */
#define PACK_ELEMENT_K(n, offset_k) packed_b[(k + offset_k) * 4 + n]
#define PACK_ELEMENT(n) PACK_ELEMENT_K(n, 0)

// ASIMD
#define DECLARE_RESULT_VECTOR2(m, n) \
  float64x2_t result##m##n = vdupq_n_f64(0.0);
#define DECLARE_RESULT(m, n) float64_t result##m##n = 0.0;
#define BROADCAST_LOAD_A2(m, offset_k) \
  float64x2_t a##m##_k##offset_k = vld1q_dup_f64(&A_ELEMENT_K(m, offset_k));
#define LOAD_A1(m, offset_k) \
  float64_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k);
#define VECTOR_LOAD_B2(n, offset_k) \
  float64x2_t b##n##_k##offset_k = vld1q_f64(&B_ELEMENT_K(n, offset_k));
#define GATHER_LOAD_B2(n, offset_k) \
  float64x2_t b##n##_k##offset_k = vdupq_n_f64(B_ELEMENT_K(n, offset_k)); \
  b##n##_k##offset_k = \
    vsetq_lane_f64(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1);
#define UPDATE_RESULT_VECTOR2(m, n, offset_k) \
  result##m##n = \
    vfmaq_f64(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k);
#define UPDATE_RESULT(m, n, offset_k) \
  result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k;
/* B0 variant ignores the existing contents of C (beta == 0). */
#ifdef B0
#define SCATTER_STORE2(m, n) \
  result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \
  C_ELEMENT(m, n + 0) = vgetq_lane_f64(result##m##n, 0); \
  C_ELEMENT(m, n + 1) = vgetq_lane_f64(result##m##n, 1);
#else
#define SCATTER_STORE2(m, n) \
  result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha)); \
  C_ELEMENT(m, n + 0) = \
    C_ELEMENT(m, n + 0) * beta + vgetq_lane_f64(result##m##n, 0); \
  C_ELEMENT(m, n + 1) = \
    C_ELEMENT(m, n + 1) * beta + vgetq_lane_f64(result##m##n, 1);
#endif

// SVE
#define DECLARE_RESULT_VECTOR(m, n) svfloat64_t result##m##n = svdup_f64(0.0);
#define BROADCAST_LOAD_A(m, offset_k) \
  svfloat64_t a##s##m##_k##offset_k = svdup_f64(A_ELEMENT_K(m, offset_k));
#define BROADCAST_LOAD_B(n, offset_k) \
  svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k));
#define VECTOR_LOAD_A(pg, m, offset_k) \
  svfloat64_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k));
/* Loads 128 bits of B and replicates them across the vector (svld1rq),
 * enabling per-lane FMLA via svmla_lane below. */
#define QUADWORD_LOAD_B(n, offset_k) \
  svfloat64_t b##s##n##_k##offset_k = \
    svld1rq(pg_true, &B_ELEMENT_K(n, offset_k));
#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k) \
  result##m##n = \
    svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k);
#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k) \
  result##m##n = svmla_lane( \
    result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane);
/* Stores apply the alpha scale (and, without B0, the beta accumulation). */
#ifdef B0
#define VECTOR_STORE(pg, m, n) \
  result##m##n = svmul_m(pg, result##m##n, alpha_vec); \
  svst1(pg, &C_ELEMENT(m, n), result##m##n);
#define SCATTER_STORE(pg, m, n) \
  result##m##n = svmul_m(pg, result##m##n, alpha_vec); \
  svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n);
#else
#define VECTOR_STORE(pg, m, n) \
  result##m##n = svmul_m(pg, result##m##n, alpha_vec); \
  result##m##n = \
    svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec); \
  svst1(pg, &C_ELEMENT(m, n), result##m##n);
#define SCATTER_STORE(pg, m, n) \
  result##m##n = svmul_m(pg, result##m##n, alpha_vec); \
  result##m##n = svmla_m(pg, \
                         result##m##n, \
                         svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec), \
                         beta_vec); \
  svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n);
#endif

/* Branch-prediction hint used around the packed-B fast paths. */
#ifndef LIKELY
#ifdef __GNUC__
#define LIKELY(x) __builtin_expect(!!(x), 1)
#else
#define LIKELY(x) (x)
#endif
#endif
|
||||
|
||||
/*
 * Small-matrix DGEMM kernel (SVE, 64-bit lanes) without B packing.
 *
 *   B0 defined:   C = alpha * A * B
 *   B0 undefined: C = alpha * A * B + beta * C
 *
 * Columns of B/C are processed 4 / 2 / 1 at a time (j loops); rows of A are
 * processed 2*VL / VL / predicated tail at a time (i loops), where
 * VL = svcntd() doubles.  Four-wide column panels load B 128 bits at a time
 * with QUADWORD_LOAD_B (svld1rq) and multiply per-lane via svmla_lane.
 *
 * Change vs. previous revision: removed the unused local `pg_quad`
 * (svwhilelt_b64(0, 2)) — no statement or macro invoked by this function
 * references it.
 */
#ifdef B0
int
CNAME(BLASLONG M,
      BLASLONG N,
      BLASLONG K,
      IFLOAT* A,
      BLASLONG lda,
      FLOAT alpha,
      IFLOAT* B,
      BLASLONG ldb,
      FLOAT* C,
      BLASLONG ldc)
#else
int
CNAME(BLASLONG M,
      BLASLONG N,
      BLASLONG K,
      IFLOAT* A,
      BLASLONG lda,
      FLOAT alpha,
      IFLOAT* B,
      BLASLONG ldb,
      FLOAT beta,
      FLOAT* C,
      BLASLONG ldc)
#endif
{
  const uint64_t v_size = svcntd();      /* SVE vector length in doubles */
  const uint64_t v_size2 = v_size * 2;
  const svbool_t pg_true = svptrue_b64();
  const svfloat64_t alpha_vec = svdup_f64(alpha);
#ifndef B0
  const svfloat64_t beta_vec = svdup_f64(beta);
#endif
  /* Round N/M down to multiples of the blocking factors. */
  const BLASLONG n4 = N & -4;
  const BLASLONG n2 = N & -2;
  const BLASLONG v_m2 = M & -v_size2;
  const BLASLONG v_m1 = M & -v_size;

  FLOAT* b_offset = B;
  FLOAT* a_offset = A;
  FLOAT* c_offset = C;

  BLASLONG j = 0;
  /* ---- 4-column panels ---- */
  for (; j < n4; j += 4) {

    CREATE_C_POINTER(0, 0);
    CREATE_C_POINTER(1, 1);
    CREATE_C_POINTER(2, 2);
    CREATE_C_POINTER(3, 3);
    CREATE_B_POINTER(0, 0);
    CREATE_B_POINTER(1, 1);
    CREATE_B_POINTER(2, 2);
    CREATE_B_POINTER(3, 3);

    BLASLONG i = 0;
    /* 2*VL rows at a time */
    for (; i < v_m2; i += v_size2) {

      CREATE_A_POINTER(0, 0);
      CREATE_A_POINTER(1, v_size);
      UPDATE_A_POINTER(v_size2);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);
      DECLARE_RESULT_VECTOR(0, 1);
      DECLARE_RESULT_VECTOR(0, 2);
      DECLARE_RESULT_VECTOR(0, 3);
      DECLARE_RESULT_VECTOR(1, 0);
      DECLARE_RESULT_VECTOR(1, 1);
      DECLARE_RESULT_VECTOR(1, 2);
      DECLARE_RESULT_VECTOR(1, 3);

      for (; k < K; k++) {

        QUADWORD_LOAD_B(0, 0);
        VECTOR_LOAD_A(pg_true, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0);
        QUADWORD_LOAD_B(2, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0);
        VECTOR_LOAD_A(pg_true, 1, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(1, 2, 2, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(1, 3, 2, 1, 0);
      }
      VECTOR_STORE(pg_true, 0, 0);
      VECTOR_STORE(pg_true, 0, 1);
      VECTOR_STORE(pg_true, 0, 2);
      VECTOR_STORE(pg_true, 0, 3);
      VECTOR_STORE(pg_true, 1, 0);
      VECTOR_STORE(pg_true, 1, 1);
      VECTOR_STORE(pg_true, 1, 2);
      VECTOR_STORE(pg_true, 1, 3);
      INCR_C_POINTER(0, v_size2);
      INCR_C_POINTER(1, v_size2);
      INCR_C_POINTER(2, v_size2);
      INCR_C_POINTER(3, v_size2);
    }
    /* one VL of rows at a time */
    for (; i < v_m1; i += v_size) {

      CREATE_A_POINTER(0, 0);
      UPDATE_A_POINTER(v_size);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);
      DECLARE_RESULT_VECTOR(0, 1);
      DECLARE_RESULT_VECTOR(0, 2);
      DECLARE_RESULT_VECTOR(0, 3);

      for (; k < K; k++) {

        QUADWORD_LOAD_B(0, 0);
        VECTOR_LOAD_A(pg_true, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0);
        QUADWORD_LOAD_B(2, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0);
      }
      VECTOR_STORE(pg_true, 0, 0);
      VECTOR_STORE(pg_true, 0, 1);
      VECTOR_STORE(pg_true, 0, 2);
      VECTOR_STORE(pg_true, 0, 3);
      INCR_C_POINTER(0, v_size);
      INCR_C_POINTER(1, v_size);
      INCR_C_POINTER(2, v_size);
      INCR_C_POINTER(3, v_size);
    }
    /* predicated row tail */
    for (; i < M; i += v_size) {
      const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M));
      CREATE_A_POINTER(0, 0);
      UPDATE_A_POINTER(0);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);
      DECLARE_RESULT_VECTOR(0, 1);
      DECLARE_RESULT_VECTOR(0, 2);
      DECLARE_RESULT_VECTOR(0, 3);

      for (; k < K; k++) {

        QUADWORD_LOAD_B(0, 0);
        VECTOR_LOAD_A(pg_tail, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0);
        QUADWORD_LOAD_B(2, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 2, 2, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 3, 2, 1, 0);
      }
      VECTOR_STORE(pg_tail, 0, 0);
      VECTOR_STORE(pg_tail, 0, 1);
      VECTOR_STORE(pg_tail, 0, 2);
      VECTOR_STORE(pg_tail, 0, 3);
      INCR_C_POINTER(0, 0);
      INCR_C_POINTER(1, 0);
      INCR_C_POINTER(2, 0);
      INCR_C_POINTER(3, 0);
    }

    UPDATE_B_POINTER(4);
    RESET_A_POINTER();
    UPDATE_C_POINTER(4);
  }
  /* ---- 2-column panels ---- */
  for (; j < n2; j += 2) {

    CREATE_C_POINTER(0, 0);
    CREATE_C_POINTER(1, 1);
    CREATE_B_POINTER(0, 0);
    CREATE_B_POINTER(1, 1);

    BLASLONG i = 0;
    for (; i < v_m2; i += v_size2) {

      CREATE_A_POINTER(0, 0);
      CREATE_A_POINTER(1, v_size);
      UPDATE_A_POINTER(v_size2);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);
      DECLARE_RESULT_VECTOR(0, 1);
      DECLARE_RESULT_VECTOR(1, 0);
      DECLARE_RESULT_VECTOR(1, 1);

      for (; k < K; k++) {

        QUADWORD_LOAD_B(0, 0);
        VECTOR_LOAD_A(pg_true, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0);
        VECTOR_LOAD_A(pg_true, 1, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(1, 0, 0, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(1, 1, 0, 1, 0);
      }
      VECTOR_STORE(pg_true, 0, 0);
      VECTOR_STORE(pg_true, 0, 1);
      VECTOR_STORE(pg_true, 1, 0);
      VECTOR_STORE(pg_true, 1, 1);
      INCR_C_POINTER(0, v_size2);
      INCR_C_POINTER(1, v_size2);
    }
    for (; i < v_m1; i += v_size) {

      CREATE_A_POINTER(0, 0);
      UPDATE_A_POINTER(v_size);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);
      DECLARE_RESULT_VECTOR(0, 1);

      for (; k < K; k++) {

        QUADWORD_LOAD_B(0, 0);
        VECTOR_LOAD_A(pg_true, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0);
      }
      VECTOR_STORE(pg_true, 0, 0);
      VECTOR_STORE(pg_true, 0, 1);
      INCR_C_POINTER(0, v_size);
      INCR_C_POINTER(1, v_size);
    }
    for (; i < M; i += v_size) {
      const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M));
      CREATE_A_POINTER(0, 0);
      UPDATE_A_POINTER(0);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);
      DECLARE_RESULT_VECTOR(0, 1);

      for (; k < K; k++) {

        QUADWORD_LOAD_B(0, 0);
        VECTOR_LOAD_A(pg_tail, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 0, 0, 0, 0);
        UPDATE_RESULT_VECTOR_QUADWORD(0, 1, 0, 1, 0);
      }
      VECTOR_STORE(pg_tail, 0, 0);
      VECTOR_STORE(pg_tail, 0, 1);
      INCR_C_POINTER(0, 0);
      INCR_C_POINTER(1, 0);
    }

    UPDATE_B_POINTER(2);
    RESET_A_POINTER();
    UPDATE_C_POINTER(2);
  }
  /* ---- single remaining column ---- */
  for (; j < N; j++) {

    CREATE_C_POINTER(0, 0);
    CREATE_B_POINTER(0, 0);

    BLASLONG i = 0;
    for (; i < v_m2; i += v_size2) {

      CREATE_A_POINTER(0, 0);
      CREATE_A_POINTER(1, v_size);
      UPDATE_A_POINTER(v_size2);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);
      DECLARE_RESULT_VECTOR(1, 0);

      for (; k < K; k++) {

        BROADCAST_LOAD_B(0, 0);
        VECTOR_LOAD_A(pg_true, 0, 0);
        UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
        VECTOR_LOAD_A(pg_true, 1, 0);
        UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0);
      }
      VECTOR_STORE(pg_true, 0, 0);
      VECTOR_STORE(pg_true, 1, 0);
      INCR_C_POINTER(0, v_size2);
    }
    for (; i < v_m1; i += v_size) {

      CREATE_A_POINTER(0, 0);
      UPDATE_A_POINTER(v_size);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);

      for (; k < K; k++) {

        BROADCAST_LOAD_B(0, 0);
        VECTOR_LOAD_A(pg_true, 0, 0);
        UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
      }
      VECTOR_STORE(pg_true, 0, 0);
      INCR_C_POINTER(0, v_size);
    }
    for (; i < M; i += v_size) {
      const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M));
      CREATE_A_POINTER(0, 0);
      UPDATE_A_POINTER(0);

      BLASLONG k = 0;
      DECLARE_RESULT_VECTOR(0, 0);

      for (; k < K; k++) {

        BROADCAST_LOAD_B(0, 0);
        VECTOR_LOAD_A(pg_tail, 0, 0);
        UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0);
      }
      VECTOR_STORE(pg_tail, 0, 0);
      INCR_C_POINTER(0, 0);
    }

    UPDATE_B_POINTER(1);
    RESET_A_POINTER();
    UPDATE_C_POINTER(1);
  }

  return 0;
}
|
|
@ -0,0 +1,571 @@
|
|||
/***************************************************************************
|
||||
Copyright (c) 2024, The OpenBLAS Project
|
||||
All rights reserved.
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
3. Neither the name of the OpenBLAS project nor the names of
|
||||
its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written permission.
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
||||
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
|
||||
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*****************************************************************************/
|
||||
|
||||
#include "common.h"

#include <arm_neon.h>
#include <arm_sve.h>
/* Use the official NEON<->SVE bridge header when available; otherwise fall
 * back to an inline-asm substitute for svdup_neonq_* that moves a fixed
 * 128-bit NEON register into a scalable SVE register. */
#if defined(__ARM_NEON_SVE_BRIDGE) && defined(__has_include) && \
  __has_include(<arm_neon_sve_bridge.h>)
#include <arm_neon_sve_bridge.h>
#else
#define svdup_neonq_f32(fixed_reg)                                             \
  ({                                                                           \
    svfloat32_t scalable_reg;                                                  \
    asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :);              \
    scalable_reg;                                                              \
  })
#define svdup_neonq_f64(fixed_reg)                                             \
  ({                                                                           \
    svfloat64_t scalable_reg;                                                  \
    asm("mov %0.q, %q1" : "=w"(scalable_reg) : "w"(fixed_reg) :);              \
    scalable_reg;                                                              \
  })
#endif

/* ---- A-matrix access: a running base pointer plus per-panel pointers.
 * These macros expand against locals of the kernel below (a_offset, lda, k). */
#define RESET_A_POINTER() a_offset = A;

#define CREATE_A_POINTER(m, scale) FLOAT* a_offset##m = a_offset + scale * lda;
#define UPDATE_A_POINTER(scale) a_offset = a_offset + scale * lda;
#define A_ELEMENT_K(m, offset_k) *(a_offset##m + (k + offset_k))
#define A_ELEMENT(m) A_ELEMENT_K(m, 0)

/* ---- B-matrix access: same scheme, one pointer per column strip. */
#define RESET_B_POINTER() b_offset = B;

#define CREATE_B_POINTER(n, scale) FLOAT* b_offset##n = b_offset + scale * ldb;
#define UPDATE_B_POINTER(scale) b_offset = b_offset + scale * ldb;
#define B_ELEMENT_K(n, offset_k) *(b_offset##n + (k + offset_k))
#define B_ELEMENT(n) B_ELEMENT_K(n, 0)

/* ---- C-matrix access.  INCR_C_POINTER is deliberately a no-op in this
 * variant (C is indexed through C_ELEMENT via j and ldc instead). */
#define CREATE_C_POINTER(m, scale) FLOAT* c_offset##m = c_offset + scale;
#define INCR_C_POINTER(m, incr) // c_offset ## m += incr * ldc;
#define UPDATE_C_POINTER(scale) c_offset += scale;
#define C_ELEMENT(m, n) \
  *(c_offset##m + ((j + n) * ldc)) // C[(i+(m))+(j+(n))*ldc]

// #undef C_ELEMENT
// #define C_ELEMENT(m, n) C[(i+(m))+(j+(n))*ldc]

/* Packed-A scratch buffer: K rows of v_size2 contiguous doubles. */
#define PACK_ELEMENT_K(m, offset_k) packed_a[(k + offset_k) * v_size2 + m]
#define PACK_ELEMENT(m) PACK_ELEMENT_K(m, 0)

// ASIMD
#define DECLARE_RESULT_VECTOR2(m, n)                                           \
  float64x2_t result##m##n = vdupq_n_f64(0.0);
#define DECLARE_RESULT(m, n) float64_t result##m##n = 0.0;
#define BROADCAST_LOAD_A2(m, offset_k)                                         \
  float64x2_t a##m##_k##offset_k = vld1q_dup_f64(&A_ELEMENT_K(m, offset_k));
#define LOAD_A1(m, offset_k)                                                   \
  float64_t a##m##_k##offset_k = A_ELEMENT_K(m, offset_k);
#define GATHER_LOAD_B2(n, offset_k)                                            \
  float64x2_t b##n##_k##offset_k = vdupq_n_f64(B_ELEMENT_K(n, offset_k));      \
  b##n##_k##offset_k =                                                         \
    vsetq_lane_f64(B_ELEMENT_K(n + 1, offset_k), b##n##_k##offset_k, 1);
#define VECTOR_UNPACK_B2(n, offset_k)                                          \
  float64x2_t b##n##_k##offset_k = vld1q_f64(&PACK_ELEMENT_K(n, offset_k));
/* NOTE(review): b##n##_k##offset_k is a float64x2_t, but vget_lane_f64 takes
 * a float64x1_t (vgetq_lane_f64 would be the q-register form).  This macro
 * appears unused in this file; if instantiated it would not compile — verify. */
#define PACK_B0(n, offset_k)                                                   \
  PACK_ELEMENT_K(n, offset_k) = vget_lane_f64(b##n##_k##offset_k, 0);
#define UPDATE_RESULT_VECTOR2(m, n, offset_k)                                  \
  result##m##n =                                                               \
    vfmaq_f64(result##m##n, a##m##_k##offset_k, b##n##_k##offset_k);
#define UPDATE_RESULT(m, n, offset_k)                                          \
  result##m##n = result##m##n + a##m##_k##offset_k * b##n##_k##offset_k;
/* B0 variant: C = alpha*result; otherwise C = beta*C + alpha*result. */
#ifdef B0
#define SCATTER_STORE2(m, n)                                                   \
  result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha));                  \
  C_ELEMENT(m, n + 0) = vgetq_lane_f64(result##m##n, 0);                       \
  C_ELEMENT(m, n + 1) = vgetq_lane_f64(result##m##n, 1);
#else
#define SCATTER_STORE2(m, n)                                                   \
  result##m##n = vmulq_f64(result##m##n, vdupq_n_f64(alpha));                  \
  C_ELEMENT(m, n + 0) =                                                        \
    C_ELEMENT(m, n + 0) * beta + vgetq_lane_f64(result##m##n, 0);              \
  C_ELEMENT(m, n + 1) =                                                        \
    C_ELEMENT(m, n + 1) * beta + vgetq_lane_f64(result##m##n, 1);
#endif

// SVE
#define DECLARE_RESULT_VECTOR(m, n) svfloat64_t result##m##n = svdup_f64(0.0);
#define BROADCAST_LOAD_A(m, offset_k)                                          \
  svfloat64_t a##s##m##_k##offset_k = svdup_f64(A_ELEMENT_K(m, offset_k));
#define BROADCAST_LOAD_B(n, offset_k)                                          \
  svfloat64_t b##s##n##_k##offset_k = svdup_f64(B_ELEMENT_K(n, offset_k));
#define VECTOR_LOAD_A(pg, m, offset_k)                                         \
  svfloat64_t a##s##m##_k##offset_k = svld1(pg, &A_ELEMENT_K(m, offset_k));
/* Strided (lda apart) gather of one vector of A elements. */
#define GATHER_LOAD_A(pg, m, offset_k)                                         \
  svfloat64_t a##s##m##_k##offset_k =                                          \
    svld1_gather_index(pg, &A_ELEMENT_K(m, offset_k), lda_vec);
#define PACK_A(m, offset_k)                                                    \
  svst1(pg_first, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k);
/* Store a freshly gathered A vector into the contiguous pack buffer. */
#define VECTOR_PACK_A(m, offset_k)                                             \
  svst1(pg_true, &PACK_ELEMENT_K(m* v_size, offset_k), a##s##m##_k##offset_k);
#define QUADWORD_PACK_A(m, offset_k)                                           \
  svst1(pg_quad, &PACK_ELEMENT_K(m, offset_k), a##s##m##_k##offset_k);
/* Re-read a previously packed A vector with a plain contiguous load. */
#define UNPACK_VECTOR_A(m, offset_k)                                           \
  svfloat64_t a##s##m##_k##offset_k =                                          \
    svld1(pg_true, &PACK_ELEMENT_K(m * v_size, offset_k));
#define UNPACK_BROADCAST_A(m, offset_k)                                        \
  svfloat64_t a##s##m##_k##offset_k = svdup_f64(PACK_ELEMENT_K(m, offset_k));
#define UNPACK_QUADWORD_A(m, offset_k)                                         \
  svfloat64_t a##s##m##_k##offset_k =                                          \
    svld1rq(pg_true, &PACK_ELEMENT_K(m, offset_k));
#define UPDATE_RESULT_VECTOR(pg, m, n, offset_k)                               \
  result##m##n =                                                               \
    svmla_m(pg, result##m##n, a##s##m##_k##offset_k, b##s##n##_k##offset_k);
#define UPDATE_RESULT_VECTOR_QUADWORD(m, n, outer, lane, offset_k)             \
  result##m##n = svmla_lane(                                                   \
    result##m##n, a##s##m##_k##offset_k, b##s##outer##_k##offset_k, lane);
/* B0 variant: C = alpha*result; otherwise C = alpha*result + beta*C. */
#ifdef B0
#define VECTOR_STORE(pg, m, n)                                                 \
  result##m##n = svmul_m(pg, result##m##n, alpha_vec);                         \
  svst1(pg, &C_ELEMENT(m, n), result##m##n);
#define SCATTER_STORE(pg, m, n)                                                \
  result##m##n = svmul_m(pg, result##m##n, alpha_vec);                         \
  svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n);
#else
#define VECTOR_STORE(pg, m, n)                                                 \
  result##m##n = svmul_m(pg, result##m##n, alpha_vec);                         \
  result##m##n =                                                               \
    svmla_m(pg, result##m##n, svld1(pg, &C_ELEMENT(m, n)), beta_vec);          \
  svst1(pg, &C_ELEMENT(m, n), result##m##n);
#define SCATTER_STORE(pg, m, n)                                                \
  result##m##n = svmul_m(pg, result##m##n, alpha_vec);                         \
  result##m##n = svmla_m(pg,                                                   \
                         result##m##n,                                         \
                         svld1_gather_index(pg, &C_ELEMENT(m, n), ldc_vec),    \
                         beta_vec);                                            \
  svst1_scatter_index(pg, &C_ELEMENT(m, n), ldc_vec, result##m##n);
#endif

#ifndef LIKELY
#ifdef __GNUC__
#define LIKELY(x) __builtin_expect(!!(x), 1)
#else
#define LIKELY(x) (x)
#endif
#endif
|
||||
|
||||
#ifdef B0
|
||||
int
|
||||
CNAME(BLASLONG M,
|
||||
BLASLONG N,
|
||||
BLASLONG K,
|
||||
IFLOAT* A,
|
||||
BLASLONG lda,
|
||||
FLOAT alpha,
|
||||
IFLOAT* B,
|
||||
BLASLONG ldb,
|
||||
FLOAT* C,
|
||||
BLASLONG ldc)
|
||||
#else
|
||||
int
|
||||
CNAME(BLASLONG M,
|
||||
BLASLONG N,
|
||||
BLASLONG K,
|
||||
IFLOAT* A,
|
||||
BLASLONG lda,
|
||||
FLOAT alpha,
|
||||
IFLOAT* B,
|
||||
BLASLONG ldb,
|
||||
FLOAT beta,
|
||||
FLOAT* C,
|
||||
BLASLONG ldc)
|
||||
#endif
|
||||
{
|
||||
const uint64_t v_size = svcntd();
|
||||
const uint64_t v_size2 = v_size * 2;
|
||||
const svbool_t pg_true = svptrue_b64();
|
||||
const svbool_t pg_quad = svwhilelt_b64(0, 2);
|
||||
const svbool_t pg_first = svwhilelt_b64(0, 1);
|
||||
const svfloat64_t alpha_vec = svdup_f64(alpha);
|
||||
#ifndef B0
|
||||
const svfloat64_t beta_vec = svdup_f64(beta);
|
||||
#endif
|
||||
const svuint64_t lda_vec = svindex_u64(0LL, lda);
|
||||
|
||||
const BLASLONG v_m2 = M & -v_size2;
|
||||
const BLASLONG v_m1 = M & -v_size;
|
||||
const BLASLONG n4 = N & -4;
|
||||
const BLASLONG n2 = N & -2;
|
||||
|
||||
const int pack_a = M >= v_size2 && N >= 8 && K >= 8 ? 1 : 0;
|
||||
FLOAT* packed_a =
|
||||
(pack_a) ? packed_a = (FLOAT*)malloc(K * v_size2 * sizeof(FLOAT)) : NULL;
|
||||
|
||||
FLOAT* a_offset = A;
|
||||
FLOAT* b_offset = B;
|
||||
FLOAT* c_offset = C;
|
||||
|
||||
BLASLONG i = 0;
|
||||
for (; i < v_m2; i += v_size2) {
|
||||
|
||||
CREATE_C_POINTER(0, 0);
|
||||
CREATE_C_POINTER(1, v_size);
|
||||
CREATE_A_POINTER(0, 0);
|
||||
CREATE_A_POINTER(1, v_size);
|
||||
|
||||
BLASLONG j = 0;
|
||||
for (; j < n4; j += 4) {
|
||||
|
||||
CREATE_B_POINTER(0, 0);
|
||||
CREATE_B_POINTER(1, 1);
|
||||
CREATE_B_POINTER(2, 2);
|
||||
CREATE_B_POINTER(3, 3);
|
||||
UPDATE_B_POINTER(4);
|
||||
|
||||
BLASLONG k = 0;
|
||||
DECLARE_RESULT_VECTOR(0, 0);
|
||||
DECLARE_RESULT_VECTOR(0, 1);
|
||||
DECLARE_RESULT_VECTOR(0, 2);
|
||||
DECLARE_RESULT_VECTOR(0, 3);
|
||||
DECLARE_RESULT_VECTOR(1, 0);
|
||||
DECLARE_RESULT_VECTOR(1, 1);
|
||||
DECLARE_RESULT_VECTOR(1, 2);
|
||||
DECLARE_RESULT_VECTOR(1, 3);
|
||||
|
||||
if (LIKELY(packed_a != NULL)) {
|
||||
if (j == 0) {
|
||||
for (; k < K; k++) {
|
||||
|
||||
BROADCAST_LOAD_B(0, 0);
|
||||
GATHER_LOAD_A(pg_true, 0, 0);
|
||||
VECTOR_PACK_A(0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
|
||||
BROADCAST_LOAD_B(1, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0);
|
||||
GATHER_LOAD_A(pg_true, 1, 0);
|
||||
VECTOR_PACK_A(1, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0);
|
||||
BROADCAST_LOAD_B(2, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0);
|
||||
BROADCAST_LOAD_B(3, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0);
|
||||
}
|
||||
} else {
|
||||
for (; k < K; k++) {
|
||||
|
||||
BROADCAST_LOAD_B(0, 0);
|
||||
UNPACK_VECTOR_A(0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
|
||||
BROADCAST_LOAD_B(1, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0);
|
||||
UNPACK_VECTOR_A(1, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0);
|
||||
BROADCAST_LOAD_B(2, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0);
|
||||
BROADCAST_LOAD_B(3, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (; k < K; k++) {
|
||||
|
||||
BROADCAST_LOAD_B(0, 0);
|
||||
GATHER_LOAD_A(pg_true, 0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
|
||||
BROADCAST_LOAD_B(1, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0);
|
||||
GATHER_LOAD_A(pg_true, 1, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0);
|
||||
BROADCAST_LOAD_B(2, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 2, 0);
|
||||
BROADCAST_LOAD_B(3, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 3, 0);
|
||||
}
|
||||
}
|
||||
VECTOR_STORE(pg_true, 0, 0);
|
||||
VECTOR_STORE(pg_true, 0, 1);
|
||||
VECTOR_STORE(pg_true, 0, 2);
|
||||
VECTOR_STORE(pg_true, 0, 3);
|
||||
VECTOR_STORE(pg_true, 1, 0);
|
||||
VECTOR_STORE(pg_true, 1, 1);
|
||||
VECTOR_STORE(pg_true, 1, 2);
|
||||
VECTOR_STORE(pg_true, 1, 3);
|
||||
INCR_C_POINTER(0, 4);
|
||||
INCR_C_POINTER(1, 4);
|
||||
}
|
||||
for (; j < n2; j += 2) {
|
||||
|
||||
CREATE_B_POINTER(0, 0);
|
||||
CREATE_B_POINTER(1, 1);
|
||||
UPDATE_B_POINTER(2);
|
||||
|
||||
BLASLONG k = 0;
|
||||
DECLARE_RESULT_VECTOR(0, 0);
|
||||
DECLARE_RESULT_VECTOR(0, 1);
|
||||
DECLARE_RESULT_VECTOR(1, 0);
|
||||
DECLARE_RESULT_VECTOR(1, 1);
|
||||
|
||||
if (LIKELY(packed_a != NULL)) {
|
||||
for (; k < K; k++) {
|
||||
|
||||
BROADCAST_LOAD_B(0, 0);
|
||||
UNPACK_VECTOR_A(0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
|
||||
BROADCAST_LOAD_B(1, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0);
|
||||
UNPACK_VECTOR_A(1, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0);
|
||||
}
|
||||
} else {
|
||||
for (; k < K; k++) {
|
||||
|
||||
BROADCAST_LOAD_B(0, 0);
|
||||
GATHER_LOAD_A(pg_true, 0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
|
||||
BROADCAST_LOAD_B(1, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0);
|
||||
GATHER_LOAD_A(pg_true, 1, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 1, 0);
|
||||
}
|
||||
}
|
||||
VECTOR_STORE(pg_true, 0, 0);
|
||||
VECTOR_STORE(pg_true, 0, 1);
|
||||
VECTOR_STORE(pg_true, 1, 0);
|
||||
VECTOR_STORE(pg_true, 1, 1);
|
||||
INCR_C_POINTER(0, 2);
|
||||
INCR_C_POINTER(1, 2);
|
||||
}
|
||||
for (; j < N; j++) {
|
||||
|
||||
CREATE_B_POINTER(0, 0);
|
||||
UPDATE_B_POINTER(1);
|
||||
|
||||
BLASLONG k = 0;
|
||||
DECLARE_RESULT_VECTOR(0, 0);
|
||||
DECLARE_RESULT_VECTOR(1, 0);
|
||||
|
||||
if (LIKELY(packed_a != NULL)) {
|
||||
for (; k < K; k++) {
|
||||
|
||||
BROADCAST_LOAD_B(0, 0);
|
||||
UNPACK_VECTOR_A(0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
|
||||
UNPACK_VECTOR_A(1, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0);
|
||||
}
|
||||
} else {
|
||||
for (; k < K; k++) {
|
||||
|
||||
BROADCAST_LOAD_B(0, 0);
|
||||
GATHER_LOAD_A(pg_true, 0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
|
||||
GATHER_LOAD_A(pg_true, 1, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 1, 0, 0);
|
||||
}
|
||||
}
|
||||
VECTOR_STORE(pg_true, 0, 0);
|
||||
VECTOR_STORE(pg_true, 1, 0);
|
||||
INCR_C_POINTER(0, 1);
|
||||
INCR_C_POINTER(1, 1);
|
||||
}
|
||||
|
||||
UPDATE_A_POINTER(v_size2);
|
||||
RESET_B_POINTER();
|
||||
UPDATE_C_POINTER(v_size2);
|
||||
}
|
||||
for (; i < v_m1; i += v_size) {
|
||||
|
||||
CREATE_C_POINTER(0, 0);
|
||||
CREATE_A_POINTER(0, 0);
|
||||
|
||||
BLASLONG j = 0;
|
||||
for (; j < n4; j += 4) {
|
||||
|
||||
CREATE_B_POINTER(0, 0);
|
||||
CREATE_B_POINTER(1, 1);
|
||||
CREATE_B_POINTER(2, 2);
|
||||
CREATE_B_POINTER(3, 3);
|
||||
UPDATE_B_POINTER(4);
|
||||
|
||||
BLASLONG k = 0;
|
||||
DECLARE_RESULT_VECTOR(0, 0);
|
||||
DECLARE_RESULT_VECTOR(0, 1);
|
||||
DECLARE_RESULT_VECTOR(0, 2);
|
||||
DECLARE_RESULT_VECTOR(0, 3);
|
||||
|
||||
for (; k < K; k++) {
|
||||
|
||||
BROADCAST_LOAD_B(0, 0);
|
||||
GATHER_LOAD_A(pg_true, 0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
|
||||
BROADCAST_LOAD_B(1, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0);
|
||||
BROADCAST_LOAD_B(2, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 2, 0);
|
||||
BROADCAST_LOAD_B(3, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 3, 0);
|
||||
}
|
||||
VECTOR_STORE(pg_true, 0, 0);
|
||||
VECTOR_STORE(pg_true, 0, 1);
|
||||
VECTOR_STORE(pg_true, 0, 2);
|
||||
VECTOR_STORE(pg_true, 0, 3);
|
||||
INCR_C_POINTER(0, 4);
|
||||
}
|
||||
for (; j < n2; j += 2) {
|
||||
|
||||
CREATE_B_POINTER(0, 0);
|
||||
CREATE_B_POINTER(1, 1);
|
||||
UPDATE_B_POINTER(2);
|
||||
|
||||
BLASLONG k = 0;
|
||||
DECLARE_RESULT_VECTOR(0, 0);
|
||||
DECLARE_RESULT_VECTOR(0, 1);
|
||||
|
||||
for (; k < K; k++) {
|
||||
|
||||
BROADCAST_LOAD_B(0, 0);
|
||||
GATHER_LOAD_A(pg_true, 0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
|
||||
BROADCAST_LOAD_B(1, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 1, 0);
|
||||
}
|
||||
VECTOR_STORE(pg_true, 0, 0);
|
||||
VECTOR_STORE(pg_true, 0, 1);
|
||||
INCR_C_POINTER(0, 2);
|
||||
}
|
||||
for (; j < N; j++) {
|
||||
|
||||
CREATE_B_POINTER(0, 0);
|
||||
UPDATE_B_POINTER(1);
|
||||
|
||||
BLASLONG k = 0;
|
||||
DECLARE_RESULT_VECTOR(0, 0);
|
||||
|
||||
for (; k < K; k++) {
|
||||
|
||||
BROADCAST_LOAD_B(0, 0);
|
||||
GATHER_LOAD_A(pg_true, 0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_true, 0, 0, 0);
|
||||
}
|
||||
VECTOR_STORE(pg_true, 0, 0);
|
||||
INCR_C_POINTER(0, 1);
|
||||
}
|
||||
|
||||
UPDATE_A_POINTER(v_size);
|
||||
RESET_B_POINTER();
|
||||
UPDATE_C_POINTER(v_size);
|
||||
}
|
||||
for (; i < M; i += v_size) {
|
||||
const svbool_t pg_tail = svwhilelt_b64((uint64_t)i, (uint64_t)(M));
|
||||
CREATE_C_POINTER(0, 0);
|
||||
CREATE_A_POINTER(0, 0);
|
||||
|
||||
BLASLONG j = 0;
|
||||
for (; j < n4; j += 4) {
|
||||
|
||||
CREATE_B_POINTER(0, 0);
|
||||
CREATE_B_POINTER(1, 1);
|
||||
CREATE_B_POINTER(2, 2);
|
||||
CREATE_B_POINTER(3, 3);
|
||||
UPDATE_B_POINTER(4);
|
||||
|
||||
BLASLONG k = 0;
|
||||
DECLARE_RESULT_VECTOR(0, 0);
|
||||
DECLARE_RESULT_VECTOR(0, 1);
|
||||
DECLARE_RESULT_VECTOR(0, 2);
|
||||
DECLARE_RESULT_VECTOR(0, 3);
|
||||
|
||||
for (; k < K; k++) {
|
||||
|
||||
BROADCAST_LOAD_B(0, 0);
|
||||
GATHER_LOAD_A(pg_tail, 0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0);
|
||||
BROADCAST_LOAD_B(1, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0);
|
||||
BROADCAST_LOAD_B(2, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_tail, 0, 2, 0);
|
||||
BROADCAST_LOAD_B(3, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_tail, 0, 3, 0);
|
||||
}
|
||||
VECTOR_STORE(pg_tail, 0, 0);
|
||||
VECTOR_STORE(pg_tail, 0, 1);
|
||||
VECTOR_STORE(pg_tail, 0, 2);
|
||||
VECTOR_STORE(pg_tail, 0, 3);
|
||||
INCR_C_POINTER(0, 4);
|
||||
}
|
||||
for (; j < n2; j += 2) {
|
||||
|
||||
CREATE_B_POINTER(0, 0);
|
||||
CREATE_B_POINTER(1, 1);
|
||||
UPDATE_B_POINTER(2);
|
||||
|
||||
BLASLONG k = 0;
|
||||
DECLARE_RESULT_VECTOR(0, 0);
|
||||
DECLARE_RESULT_VECTOR(0, 1);
|
||||
|
||||
for (; k < K; k++) {
|
||||
|
||||
BROADCAST_LOAD_B(0, 0);
|
||||
GATHER_LOAD_A(pg_tail, 0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0);
|
||||
BROADCAST_LOAD_B(1, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_tail, 0, 1, 0);
|
||||
}
|
||||
VECTOR_STORE(pg_tail, 0, 0);
|
||||
VECTOR_STORE(pg_tail, 0, 1);
|
||||
INCR_C_POINTER(0, 2);
|
||||
}
|
||||
for (; j < N; j++) {
|
||||
|
||||
CREATE_B_POINTER(0, 0);
|
||||
UPDATE_B_POINTER(1);
|
||||
|
||||
BLASLONG k = 0;
|
||||
DECLARE_RESULT_VECTOR(0, 0);
|
||||
|
||||
for (; k < K; k++) {
|
||||
|
||||
BROADCAST_LOAD_B(0, 0);
|
||||
GATHER_LOAD_A(pg_tail, 0, 0);
|
||||
UPDATE_RESULT_VECTOR(pg_tail, 0, 0, 0);
|
||||
}
|
||||
VECTOR_STORE(pg_tail, 0, 0);
|
||||
INCR_C_POINTER(0, 1);
|
||||
}
|
||||
|
||||
UPDATE_A_POINTER(0);
|
||||
RESET_B_POINTER();
|
||||
UPDATE_C_POINTER(0);
|
||||
}
|
||||
|
||||
if (pack_a)
|
||||
free(packed_a);
|
||||
|
||||
return 0;
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue