Merge branch 'OpenMathLib:develop' into azurewincl
This commit is contained in:
commit
2fefdfa2b8
|
@ -139,6 +139,13 @@ jobs:
|
|||
cd build/openblas_wrap
|
||||
python -c'import _flapack; print(dir(_flapack))'
|
||||
|
||||
- name: Run benchmarks under pytest-benchmark
|
||||
run: |
|
||||
cd benchmark/pybench
|
||||
pip install pytest-benchmark
|
||||
export PYTHONPATH=$PWD/build-install/lib/python${{matrix.pyver}}/site-packages/
|
||||
OPENBLAS_NUM_THREADS=1 pytest benchmarks/bench_blas.py -k 'gesdd'
|
||||
|
||||
- name: Run benchmarks
|
||||
uses: CodSpeedHQ/action@v2
|
||||
with:
|
||||
|
|
|
@ -1,8 +1,13 @@
|
|||
name: Publish docs via GitHub Pages
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- develop
|
||||
pull_request:
|
||||
branches:
|
||||
- develop
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Deploy docs
|
||||
|
@ -10,12 +15,22 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.10"
|
||||
- run: pip install mkdocs mkdocs-material
|
||||
# mkdocs gh-deploy command only builds to the top-level, hence building then deploying ourselves
|
||||
- run: mkdocs build
|
||||
|
||||
- name: Install MkDocs and doc theme packages
|
||||
run: pip install mkdocs mkdocs-material mkdocs-git-revision-date-localized-plugin
|
||||
|
||||
- name: Build docs site
|
||||
run: mkdocs build
|
||||
|
||||
# mkdocs gh-deploy command only builds to the top-level, hence deploying
|
||||
# with this action instead.
|
||||
# Deploys to http://www.openmathlib.org/OpenBLAS/docs/
|
||||
- name: Deploy docs
|
||||
uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
|
||||
if: ${{ github.ref == 'refs/heads/develop' }}
|
||||
|
|
|
@ -141,21 +141,21 @@ jobs:
|
|||
|
||||
- job: OSX_OpenMP
|
||||
pool:
|
||||
vmImage: 'macOS-11'
|
||||
vmImage: 'macOS-12'
|
||||
steps:
|
||||
- script: |
|
||||
brew update
|
||||
make TARGET=CORE2 DYNAMIC_ARCH=1 USE_OPENMP=1 INTERFACE64=1 CC=gcc-10 FC=gfortran-10
|
||||
make TARGET=CORE2 DYNAMIC_ARCH=1 USE_OPENMP=1 INTERFACE64=1 CC=gcc-10 FC=gfortran-10 PREFIX=../blasinst install
|
||||
make TARGET=CORE2 DYNAMIC_ARCH=1 USE_OPENMP=1 INTERFACE64=1 CC=gcc-13 FC=gfortran-13
|
||||
make TARGET=CORE2 DYNAMIC_ARCH=1 USE_OPENMP=1 INTERFACE64=1 CC=gcc-13 FC=gfortran-13 PREFIX=../blasinst install
|
||||
ls -lR ../blasinst
|
||||
|
||||
- job: OSX_GCC_Nothreads
|
||||
pool:
|
||||
vmImage: 'macOS-11'
|
||||
vmImage: 'macOS-12'
|
||||
steps:
|
||||
- script: |
|
||||
brew update
|
||||
make USE_THREADS=0 CC=gcc-10 FC=gfortran-10
|
||||
make USE_THREADS=0 CC=gcc-13 FC=gfortran-13
|
||||
|
||||
- job: OSX_GCC12
|
||||
pool:
|
||||
|
@ -195,7 +195,7 @@ jobs:
|
|||
|
||||
- job: OSX_dynarch_cmake
|
||||
pool:
|
||||
vmImage: 'macOS-11'
|
||||
vmImage: 'macOS-12'
|
||||
variables:
|
||||
LD_LIBRARY_PATH: /usr/local/opt/llvm/lib
|
||||
LIBRARY_PATH: /usr/local/opt/llvm/lib
|
||||
|
@ -203,7 +203,7 @@ jobs:
|
|||
- script: |
|
||||
mkdir build
|
||||
cd build
|
||||
cmake -DTARGET=CORE2 -DDYNAMIC_ARCH=1 -DDYNAMIC_LIST='NEHALEM HASWELL SKYLAKEX' -DCMAKE_C_COMPILER=gcc-10 -DCMAKE_Fortran_COMPILER=gfortran-10 -DBUILD_SHARED_LIBS=ON ..
|
||||
cmake -DTARGET=CORE2 -DDYNAMIC_ARCH=1 -DDYNAMIC_LIST='NEHALEM HASWELL SKYLAKEX' -DCMAKE_C_COMPILER=gcc-13 -DCMAKE_Fortran_COMPILER=gfortran-13 -DBUILD_SHARED_LIBS=ON ..
|
||||
cmake --build .
|
||||
ctest
|
||||
|
||||
|
@ -242,7 +242,7 @@ jobs:
|
|||
|
||||
- job: OSX_NDK_ARMV7
|
||||
pool:
|
||||
vmImage: 'macOS-11'
|
||||
vmImage: 'macOS-12'
|
||||
steps:
|
||||
- script: |
|
||||
brew update
|
||||
|
@ -252,35 +252,35 @@ jobs:
|
|||
|
||||
- job: OSX_IOS_ARMV8
|
||||
pool:
|
||||
vmImage: 'macOS-11'
|
||||
vmImage: 'macOS-12'
|
||||
variables:
|
||||
CC: /Applications/Xcode_12.4.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
CFLAGS: -O2 -Wno-macro-redefined -isysroot /Applications/Xcode_12.4.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS14.4.sdk -arch arm64 -miphoneos-version-min=10.0
|
||||
CC: /Applications/Xcode_14.2.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
CFLAGS: -O2 -Wno-macro-redefined -isysroot /Applications/Xcode_14.2.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS16.2.sdk -arch arm64 -miphoneos-version-min=10.0
|
||||
steps:
|
||||
- script: |
|
||||
make TARGET=ARMV8 DYNAMIC_ARCH=1 NUM_THREADS=32 HOSTCC=clang NOFORTRAN=1
|
||||
|
||||
- job: OSX_IOS_ARMV7
|
||||
pool:
|
||||
vmImage: 'macOS-11'
|
||||
vmImage: 'macOS-12'
|
||||
variables:
|
||||
CC: /Applications/Xcode_12.4.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
CFLAGS: -O2 -mno-thumb -Wno-macro-redefined -isysroot /Applications/Xcode_12.4.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS14.4.sdk -arch armv7 -miphoneos-version-min=5.1
|
||||
CC: /Applications/Xcode_14.2.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
CFLAGS: -O2 -mno-thumb -Wno-macro-redefined -isysroot /Applications/Xcode_14.2.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS16.2.sdk -arch armv7 -miphoneos-version-min=5.1
|
||||
steps:
|
||||
- script: |
|
||||
make TARGET=ARMV7 DYNAMIC_ARCH=1 NUM_THREADS=32 HOSTCC=clang NOFORTRAN=1
|
||||
|
||||
- job: OSX_xbuild_DYNAMIC_ARM64
|
||||
pool:
|
||||
vmImage: 'macOS-11'
|
||||
vmImage: 'macOS-12'
|
||||
variables:
|
||||
CC: /Applications/Xcode_12.5.1.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
CFLAGS: -O2 -Wno-macro-redefined -isysroot /Applications/Xcode_12.5.1.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX11.3.sdk -arch arm64
|
||||
CC: /Applications/Xcode_14.2.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang
|
||||
CFLAGS: -O2 -Wno-macro-redefined -isysroot /Applications/Xcode_14.2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX13.1.sdk -arch arm64
|
||||
steps:
|
||||
- script: |
|
||||
ls /Applications/Xcode_12.5.1.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs
|
||||
/Applications/Xcode_12.5.1.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang -arch arm64 --print-supported-cpus
|
||||
/Applications/Xcode_11.7.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang --version
|
||||
ls /Applications/Xcode_14.2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs
|
||||
/Applications/Xcode_12.2.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang -arch arm64 --print-supported-cpus
|
||||
/Applications/Xcode_14.2.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang --version
|
||||
make TARGET=ARMV8 DYNAMIC_ARCH=1 NUM_THREADS=32 HOSTCC=clang NOFORTRAN=1
|
||||
|
||||
- job: ALPINE_MUSL
|
||||
|
|
|
@ -234,11 +234,14 @@ def test_gesdd(benchmark, mn, variant):
|
|||
gesdd = ow.get_func('gesdd', variant)
|
||||
u, s, vt, info = benchmark(run_gesdd, a, lwork, gesdd)
|
||||
|
||||
assert info == 0
|
||||
if variant != 's':
|
||||
# On entry to SLASCL parameter number 4 had an illegal value
|
||||
# under codspeed (cannot repro locally or on CI w/o codspeed)
|
||||
# https://github.com/OpenMathLib/OpenBLAS/issues/4776
|
||||
assert info == 0
|
||||
|
||||
atol = {'s': 1e-5, 'd': 1e-13}
|
||||
|
||||
np.testing.assert_allclose(u @ np.diag(s) @ vt, a, atol=atol[variant])
|
||||
atol = {'s': 1e-5, 'd': 1e-13}
|
||||
np.testing.assert_allclose(u @ np.diag(s) @ vt, a, atol=atol[variant])
|
||||
|
||||
|
||||
# linalg.eigh
|
||||
|
|
|
@ -1309,6 +1309,15 @@ endif ()
|
|||
"#define DTB_DEFAULT_ENTRIES 128\n"
|
||||
"#define DTB_SIZE 4096\n"
|
||||
"#define L2_ASSOCIATIVE 8\n")
|
||||
elseif ("${TCORE}" STREQUAL "RISCV64_GENERIC")
|
||||
file(APPEND ${TARGET_CONF_TEMP}
|
||||
"#define L1_DATA_SIZE 32768\n"
|
||||
"#define L1_DATA_LINESIZE 32\n"
|
||||
"#define L2_SIZE 1048576\n"
|
||||
"#define L2_LINESIZE 32 \n"
|
||||
"#define DTB_DEFAULT_ENTRIES 128\n"
|
||||
"#define DTB_SIZE 4096\n"
|
||||
"#define L2_ASSOCIATIVE 4\n")
|
||||
endif()
|
||||
set(SBGEMM_UNROLL_M 8)
|
||||
set(SBGEMM_UNROLL_N 4)
|
||||
|
|
|
@ -615,7 +615,7 @@ if (${CMAKE_SYSTEM_NAME} STREQUAL "Windows")
|
|||
endif ()
|
||||
|
||||
if (CMAKE_Fortran_COMPILER)
|
||||
if (${F_COMPILER} STREQUAL "NAG" OR ${F_COMPILER} STREQUAL "CRAY" OR CMAKE_Fortran_COMPILER_ID MATCHES "LLVMFlang.*")
|
||||
if ("${F_COMPILER}" STREQUAL "NAG" OR "${F_COMPILER}" STREQUAL "CRAY" OR CMAKE_Fortran_COMPILER_ID MATCHES "LLVMFlang.*")
|
||||
set(FILTER_FLAGS "-msse3;-mssse3;-msse4.1;-mavx;-mavx2,-mskylake-avx512")
|
||||
if (CMAKE_Fortran_COMPILER_ID MATCHES "LLVMFlang.*")
|
||||
message(STATUS "removing fortran flags")
|
||||
|
|
|
@ -253,7 +253,7 @@ static __inline unsigned int blas_quickdivide(unsigned int x, unsigned int y){
|
|||
#ifndef BUFFERSIZE
|
||||
#define BUFFER_SIZE (32 << 22)
|
||||
#else
|
||||
#define BUFFER_SIZE (32 << BUFFERSIZE)
|
||||
#define BUFFER_SIZE (32UL << BUFFERSIZE)
|
||||
#endif
|
||||
|
||||
#define SEEK_ADDRESS
|
||||
|
|
|
@ -1529,6 +1529,7 @@ int get_cpuname(void){
|
|||
switch (model) {
|
||||
case 5: // Comet Lake H and S
|
||||
case 6: // Comet Lake U
|
||||
case 10: // Meteor Lake
|
||||
if(support_avx2())
|
||||
return CPUTYPE_HASWELL;
|
||||
if(support_avx())
|
||||
|
@ -2391,10 +2392,10 @@ int get_coretype(void){
|
|||
else
|
||||
return CORE_NEHALEM;
|
||||
}
|
||||
case 15:
|
||||
if (model <= 0x2) return CORE_NORTHWOOD;
|
||||
else return CORE_PRESCOTT;
|
||||
}
|
||||
case 15:
|
||||
if (model <= 0x2) return CORE_NORTHWOOD;
|
||||
else return CORE_PRESCOTT;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -2,25 +2,45 @@
|
|||
|
||||
We have a [GitHub discussions](https://github.com/OpenMathLib/OpenBLAS/discussions/) forum to discuss usage and development of OpenBLAS. We also have a [Google group for *users*](https://groups.google.com/forum/#!forum/openblas-users) and a [Google group for *development of*](https://groups.google.com/forum/#!forum/openblas-dev) OpenBLAS.
|
||||
|
||||
## Donations
|
||||
|
||||
You can read OpenBLAS statement of receipts and disbursement and cash balance on [google doc](https://docs.google.com/spreadsheet/ccc?key=0AghkTjXe2lDndE1UZml0dGpaUzJmZGhvenBZd1F2R1E&usp=sharing). A backer list is available [on GitHub](https://github.com/OpenMathLib/OpenBLAS/blob/develop/BACKERS.md).
|
||||
|
||||
We welcome the hardware donation, including the latest CPU and boards.
|
||||
|
||||
## Acknowledgements
|
||||
|
||||
This work is partially supported by
|
||||
This work was or is partially supported by the following grants, contracts and institutions:
|
||||
|
||||
* Research and Development of Compiler System and Toolchain for Domestic CPU, National S&T Major Projects: Core Electronic Devices, High-end General Chips and Fundamental Software (No.2009ZX01036-001-002)
|
||||
* National High-tech R&D Program of China (Grant No.2012AA010903)
|
||||
* [PerfXLab](http://www.perfxlab.com/)
|
||||
* Chan Zuckerberg Initiative's Essential Open Source Software for Science program:
|
||||
* Cycle 1 grant: [Strengthening NumPy's foundations - growing beyond code](https://figshare.com/articles/journal_contribution/Proposal_NumPy_OpenBLAS_for_Chan_Zuckerberg_Initiative_EOSS_2019_round_1/10302167) (2019-2020)
|
||||
* Cycle 3 grant: [Improving usability and sustainability for NumPy and OpenBLAS](https://chanzuckerberg.com/eoss/proposals/improving-usability-and-sustainability-for-numpy-and-openblas/) (2020-2021)
|
||||
* Sovereign Tech Fund funding: [Keeping high performance linear algebra computation accessible and open for all](https://www.sovereigntechfund.de/tech/openblas) (2023-2024)
|
||||
|
||||
## Users of OpenBLAS
|
||||
Over the course of OpenBLAS development, a number of donations were received.
|
||||
You can read OpenBLAS's statement of receipts and disbursement and cash balance in
|
||||
[this Google doc](https://docs.google.com/spreadsheet/ccc?key=0AghkTjXe2lDndE1UZml0dGpaUzJmZGhvenBZd1F2R1E&usp=sharing) (covers 2013-2016).
|
||||
A list of backers is available [in BACKERS.md](https://github.com/OpenMathLib/OpenBLAS/blob/develop/BACKERS.md) in the main repo.
|
||||
|
||||
* <a href='http://julialang.org/'>Julia - a high-level, high-performance dynamic programming language for technical computing</a><br />
|
||||
* Ceemple v1.0.3 (C++ technical computing environment), including OpenBLAS, Qt, Boost, OpenCV and others. The only solution with immediate-recompilation of C++ code. Available from <a href='http://www.ceemple.com'>Ceemple C++ Technical Computing</a>.
|
||||
* [netlib-java](https://github.com/fommil/netlib-java) and various upstream libraries, allowing OpenBLAS to be used from languages on the Java Virtual Machine.
|
||||
### Donations
|
||||
|
||||
We welcome hardware donations, including the latest CPUs and motherboards.
|
||||
|
||||
|
||||
## Open source users of OpenBLAS
|
||||
|
||||
Prominent open source users of OpenBLAS include:
|
||||
|
||||
* [Julia](https://julialang.org) - a high-level, high-performance dynamic programming language for technical computing
|
||||
* [NumPy](https://numpy.org) - the fundamental package for scientific computing with Python
|
||||
* [SciPy](https://scipy.org) - fundamental algorithms for scientific computing in Python
|
||||
* [R](https://www.r-project.org/) - a free software environment for statistical computing and graphics
|
||||
* [OpenCV](https://opencv.org/) - the world's biggest computer vision library
|
||||
|
||||
OpenBLAS is packaged in most major Linux distros, as well as general and
|
||||
numerical computing-focused packaging ecosystems like Nix, Homebrew, Spack and
|
||||
conda-forge.
|
||||
|
||||
OpenBLAS is used directly by libraries written in C, C++ and Fortran (and
|
||||
probably other languages), and directly by end users in those languages.
|
||||
|
||||
<!-- TODO: academia users, industry users, hpc centers deployed openblas, etc. -->
|
||||
|
||||
## Publications
|
||||
|
||||
|
|
|
@ -1,3 +1,8 @@
|
|||
This page describes the Make-based build, which is the default/authoritative
|
||||
build method. Note that the OpenBLAS repository also supports building with
|
||||
CMake (not described here) - that generally works and is tested, however there
|
||||
may be small differences between the Make and CMake builds.
|
||||
|
||||
!!! warning
|
||||
This page is made by someone who is not the developer and should not be considered as an official documentation of the build system. For getting the full picture, it is best to read the Makefiles and understand them yourself.
|
||||
|
||||
|
@ -95,10 +100,21 @@ NUM_PARALLEL - define this to the number of OpenMP instances that your code m
|
|||
```
|
||||
|
||||
|
||||
OpenBLAS uses a fixed set of memory buffers internally, used for communicating and compiling partial results from individual threads.
|
||||
For efficiency, the management array structure for these buffers is sized at build time - this makes it necessary to know in advance how
|
||||
many threads need to be supported on the target system(s).
|
||||
With OpenMP, there is an additional level of complexity as there may be calls originating from a parallel region in the calling program. If OpenBLAS gets called from a single parallel region, it runs single-threaded automatically to avoid overloading the system by fanning out its own set of threads.
|
||||
In the case that an OpenMP program makes multiple calls from independent regions or instances in parallel, this default serialization is not
|
||||
sufficient as the additional caller(s) would compete for the original set of buffers already in use by the first call.
|
||||
So if multiple OpenMP runtimes call into OpenBLAS at the same time, then only one of them will be able to make progress while all the rest of them spin-wait for the one available buffer. Setting NUM_PARALLEL to the upper bound on the number of OpenMP runtimes that you can have in a process ensures that there are a sufficient number of buffer sets available
|
||||
OpenBLAS uses a fixed set of memory buffers internally, used for communicating
|
||||
and compiling partial results from individual threads. For efficiency, the
|
||||
management array structure for these buffers is sized at build time - this
|
||||
makes it necessary to know in advance how many threads need to be supported on
|
||||
the target system(s).
|
||||
|
||||
With OpenMP, there is an additional level of complexity as there may be calls
|
||||
originating from a parallel region in the calling program. If OpenBLAS gets
|
||||
called from a single parallel region, it runs single-threaded automatically to
|
||||
avoid overloading the system by fanning out its own set of threads. In the case
|
||||
that an OpenMP program makes multiple calls from independent regions or
|
||||
instances in parallel, this default serialization is not sufficient as the
|
||||
additional caller(s) would compete for the original set of buffers already in
|
||||
use by the first call. So if multiple OpenMP runtimes call into OpenBLAS at the
|
||||
same time, then only one of them will be able to make progress while all the
|
||||
rest of them spin-wait for the one available buffer. Setting `NUM_PARALLEL` to
|
||||
the upper bound on the number of OpenMP runtimes that you can have in a process
|
||||
ensures that there are a sufficient number of buffer sets available.
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# Developer manual
|
||||
|
||||
## Source codes Layout
|
||||
## Source code layout
|
||||
|
||||
```
|
||||
OpenBLAS/
|
||||
|
@ -51,8 +51,7 @@ OpenBLAS/
|
|||
|
||||
```
|
||||
|
||||
A call tree for `dgemm` is as following.
|
||||
|
||||
A call tree for `dgemm` looks as follows:
|
||||
```
|
||||
interface/gemm.c
|
||||
│
|
||||
|
@ -61,10 +60,9 @@ driver/level3/level3.c
|
|||
gemm assembly kernels at kernel/
|
||||
```
|
||||
|
||||
To find the kernel currently used for a particular supported cpu, please check the corresponding `kernel/$(ARCH)/KERNEL.$(CPU)` file.
|
||||
|
||||
Here is an example for `kernel/x86_64/KERNEL.HASWELL`
|
||||
To find the kernel currently used for a particular supported CPU, please check the corresponding `kernel/$(ARCH)/KERNEL.$(CPU)` file.
|
||||
|
||||
Here is an example for `kernel/x86_64/KERNEL.HASWELL`:
|
||||
```
|
||||
...
|
||||
DTRMMKERNEL = dtrmm_kernel_4x8_haswell.c
|
||||
|
@ -73,71 +71,122 @@ DGEMMKERNEL = dgemm_kernel_4x8_haswell.S
|
|||
```
|
||||
According to the above `KERNEL.HASWELL`, OpenBLAS Haswell dgemm kernel file is `dgemm_kernel_4x8_haswell.S`.
|
||||
|
||||
|
||||
## Optimizing GEMM for a given hardware
|
||||
|
||||
Read the Goto paper to understand the algorithm.
|
||||
!!! abstract "Read the Goto paper to understand the algorithm"
|
||||
|
||||
Goto, Kazushige; van de Geijn, Robert A. (2008). ["Anatomy of High-Performance Matrix Multiplication"](http://delivery.acm.org/10.1145/1360000/1356053/a12-goto.pdf?ip=155.68.162.54&id=1356053&acc=ACTIVE%20SERVICE&key=A79D83B43E50B5B8%2EF070BBE7E45C3F17%2E4D4702B0C3E38B35%2E4D4702B0C3E38B35&__acm__=1517932837_edfe766f1e295d9a7830812371e1d173). ACM Transactions on Mathematical Software 34 (3): Article 12
|
||||
(The above link is available only to ACM members, but this and many related papers is also available on the pages
|
||||
of van de Geijn's FLAME project, http://www.cs.utexas.edu/~flame/web/FLAMEPublications.html )
|
||||
Goto, Kazushige; van de Geijn, Robert A. (2008).
|
||||
["Anatomy of High-Performance Matrix Multiplication"](http://delivery.acm.org/10.1145/1360000/1356053/a12-goto.pdf?ip=155.68.162.54&id=1356053&acc=ACTIVE%20SERVICE&key=A79D83B43E50B5B8%2EF070BBE7E45C3F17%2E4D4702B0C3E38B35%2E4D4702B0C3E38B35&__acm__=1517932837_edfe766f1e295d9a7830812371e1d173).
|
||||
ACM Transactions on Mathematical Software 34 (3): Article 12
|
||||
|
||||
The `driver/level3/level3.c` is the implementation of Goto's algorithm. Meanwhile, you can look at `kernel/generic/gemmkernel_2x2.c`, which is a naive `2x2` register blocking gemm kernel in C.
|
||||
(The above link is available only to ACM members, but this and many related
|
||||
papers is also available on [the pages of van de Geijn's FLAME project](http://www.cs.utexas.edu/~flame/web/FLAMEPublications.html))
|
||||
|
||||
Then,
|
||||
* Write optimized assembly kernels. consider instruction pipeline, available registers, memory/cache accessing
|
||||
* Tuning cache block size, `Mc`, `Kc`, and `Nc`
|
||||
The `driver/level3/level3.c` is the implementation of Goto's algorithm.
|
||||
Meanwhile, you can look at `kernel/generic/gemmkernel_2x2.c`, which is a naive
|
||||
`2x2` register blocking `gemm` kernel in C. Then:
|
||||
|
||||
Note that not all of the cpu-specific parameters in param.h are actively used in algorithms. DNUMOPT only appears as a scale factor in profiling output of the level3 syrk interface code, while its counterpart SNUMOPT (aliased as NUMOPT in common.h) is not used anywhere at all.
|
||||
SYMV_P is only used in the generic kernels for the symv and chemv/zhemv functions - at least some of those are usually overridden by cpu-specific implementations, so if you start by cloning the existing implementation for a related cpu you need to check its KERNEL file to see if tuning SYMV_P would have any effect at all.
|
||||
GEMV_UNROLL is only used by some older x86_64 kernels, so not all sections in param.h define it.
|
||||
Similarly, not all of the cpu parameters like L2 or L3 cache sizes are necessarily used in current kernels for a given model - by all indications the cpu identification code was imported from some other project originally.
|
||||
* Write optimized assembly kernels. Consider instruction pipeline, available registers, memory/cache access.
|
||||
* Tune cache block sizes (`Mc`, `Kc`, and `Nc`)
|
||||
|
||||
## Run OpenBLAS Test
|
||||
Note that not all of the CPU-specific parameters in `param.h` are actively used in algorithms.
|
||||
`DNUMOPT` only appears as a scale factor in profiling output of the level3 `syrk` interface code,
|
||||
while its counterpart `SNUMOPT` (aliased as `NUMOPT` in `common.h`) is not used anywhere at all.
|
||||
|
||||
We use netlib blas test, cblas test, and LAPACK test. Meanwhile, we use [BLAS-Tester](https://github.com/xianyi/BLAS-Tester), a modified test tool from ATLAS.
|
||||
`SYMV_P` is only used in the generic kernels for the `symv` and `chemv`/`zhemv` functions -
|
||||
at least some of those are usually overridden by CPU-specific implementations, so if you start
|
||||
by cloning the existing implementation for a related CPU you need to check its `KERNEL` file
|
||||
to see if tuning `SYMV_P` would have any effect at all.
|
||||
|
||||
* Run `test` and `ctest` at OpenBLAS. e.g. `make test` or `make ctest`.
|
||||
* Run regression test `utest` at OpenBLAS.
|
||||
* Run LAPACK test. e.g. `make lapack-test`.
|
||||
* Clone [BLAS-Tester](https://github.com/xianyi/BLAS-Tester), which can compare the OpenBLAS result with netlib reference BLAS.
|
||||
`GEMV_UNROLL` is only used by some older x86-64 kernels, so not all sections in `param.h` define it.
|
||||
Similarly, not all of the CPU parameters like L2 or L3 cache sizes are necessarily used in current
|
||||
kernels for a given model - by all indications the CPU identification code was imported from some
|
||||
other project originally.
|
||||
|
||||
|
||||
## Running OpenBLAS tests
|
||||
|
||||
We use tests for Netlib BLAS, CBLAS, and LAPACK. In addition, we use
|
||||
OpenBLAS-specific regression tests. They can be run with Make:
|
||||
|
||||
* `make -C test` for BLAS tests
|
||||
* `make -C ctest` for CBLAS tests
|
||||
* `make -C utest` for OpenBLAS regression tests
|
||||
* `make lapack-test` for LAPACK tests
|
||||
|
||||
We also use the [BLAS-Tester](https://github.com/xianyi/BLAS-Tester) tests for regression testing.
|
||||
It is basically the ATLAS test suite adapted for building with OpenBLAS.
|
||||
|
||||
The project makes use of several Continuous Integration (CI) services
|
||||
conveniently interfaced with GitHub to automatically run tests on a number of
|
||||
platforms and build configurations.
|
||||
|
||||
Also note that the test suites included with "numerically heavy" projects like
|
||||
Julia, NumPy, SciPy, Octave or QuantumEspresso can be used for regression
|
||||
testing, when those projects are built such that they use OpenBLAS.
|
||||
|
||||
The project makes use of several Continuous Integration (CI) services conveniently interfaced with github to automatically check compilability on a number of platforms.
|
||||
Lastly, the testsuites included with "numerically heavy" projects like Julia, NumPy, Octave or QuantumEspresso can be used for regression testing.
|
||||
|
||||
## Benchmarking
|
||||
|
||||
Several simple C benchmarks for performance testing individual BLAS functions are available in the `benchmark` folder, and its `scripts` subdirectory contains corresponding versions for Python, Octave and R.
|
||||
Other options include
|
||||
A number of benchmarking methods are used by OpenBLAS:
|
||||
|
||||
* https://github.com/RoyiAvital/MatlabJuliaMatrixOperationsBenchmark (various matrix operations in Julia and Matlab)
|
||||
* https://github.com/mmperf/mmperf/ (single-core matrix multiplication)
|
||||
- Several simple C benchmarks for performance testing individual BLAS functions
|
||||
are available in the `benchmark` folder. They can be run locally through the
|
||||
`Makefile` in that directory. And the `benchmark/scripts` subdirectory
|
||||
contains similar benchmarks that use OpenBLAS via NumPy, SciPy, Octave and R.
|
||||
- On pull requests, a representative set of functions is tested for performance
|
||||
regressions with Codspeed; results can be viewed at
|
||||
[https://codspeed.io/OpenMathLib/OpenBLAS](https://codspeed.io/OpenMathLib/OpenBLAS).
|
||||
- The [OpenMathLib/BLAS-Benchmarks](https://github.com/OpenMathLib/BLAS-Benchmarks) repository
|
||||
contains an [Airspeed Velocity](https://github.com/airspeed-velocity/asv/)-based benchmark
|
||||
suite which is run on several CPU architectures in cron jobs. Results are published
|
||||
to a dashboard: [http://www.openmathlib.org/BLAS-Benchmarks/](http://www.openmathlib.org/BLAS-Benchmarks/).
|
||||
|
||||
## Adding autodetection support for a new revision or variant of a supported cpu
|
||||
Benchmarking code for BLAS libraries, and specific performance analysis results, can be found
|
||||
in a number of places. For example:
|
||||
|
||||
Especially relevant for x86_64, a new cpu model may be a "refresh" (die shrink and/or different number of cores) within an existing
|
||||
model family without significant changes to its instruction set. (e.g. Intel Skylake, Kaby Lake etc. still are fundamentally Haswell,
|
||||
low end Goldmont etc. are Nehalem). In this case, compilation with the appropriate older TARGET will already lead to a satisfactory build.
|
||||
* [MatlabJuliaMatrixOperationsBenchmark](https://github.com/RoyiAvital/MatlabJuliaMatrixOperationsBenchmark)
|
||||
(various matrix operations in Julia and Matlab)
|
||||
* [mmperf/mmperf](https://github.com/mmperf/mmperf/) (single-core matrix multiplication)
|
||||
|
||||
|
||||
## Adding autodetection support for a new revision or variant of a supported CPU
|
||||
|
||||
Especially relevant for x86-64, a new CPU model may be a "refresh" (die shrink and/or different number of cores) within an existing
|
||||
model family without significant changes to its instruction set (e.g., Intel Skylake and Kaby Lake still are fundamentally the same architecture as Haswell,
|
||||
low end Goldmont etc. are Nehalem). In this case, compilation with the appropriate older `TARGET` will already lead to a satisfactory build.
|
||||
|
||||
To achieve autodetection of the new model, its CPUID (or an equivalent identifier) needs to be added in the `cpuid_<architecture>.c`
|
||||
relevant for its general architecture, with the returned name for the new type set appropriately. For x86 which has the most complex
|
||||
cpuid file, there are two functions that need to be edited - get_cpuname() to return e.g. CPUTYPE_HASWELL and get_corename() for the (broader)
|
||||
core family returning e.g. CORE_HASWELL. (This information ends up in the Makefile.conf and config.h files generated by `getarch`. Failure to
|
||||
set either will typically lead to a missing definition of the GEMM_UNROLL parameters later in the build, as `getarch_2nd` will be unable to
|
||||
find a matching parameter section in param.h.)
|
||||
relevant for its general architecture, with the returned name for the new type set appropriately. For x86, which has the most complex
|
||||
`cpuid` file, there are two functions that need to be edited: `get_cpuname()` to return, e.g., `CPUTYPE_HASWELL` and `get_corename()` for the (broader)
|
||||
core family returning, e.g., `CORE_HASWELL`.[^1]
|
||||
|
||||
For architectures where "DYNAMIC_ARCH" builds are supported, a similar but simpler code section for the corresponding runtime detection of the cpu exists in `driver/others/dynamic.c` (for x86) and `driver/others/dynamic_<arch>.c` for other architectures.
|
||||
[^1]:
|
||||
This information ends up in the `Makefile.conf` and `config.h` files generated by `getarch`. Failure to
|
||||
set either will typically lead to a missing definition of the `GEMM_UNROLL` parameters later in the build,
|
||||
as `getarch_2nd` will be unable to find a matching parameter section in `param.h`.
|
||||
|
||||
For architectures where `DYNAMIC_ARCH` builds are supported, a similar but simpler code section for the corresponding
|
||||
runtime detection of the CPU exists in `driver/others/dynamic.c` (for x86), and `driver/others/dynamic_<arch>.c` for other architectures.
|
||||
Note that for x86 the CPUID is compared after splitting it into its family, extended family, model and extended model parts, so the single decimal
|
||||
number returned by Linux in /proc/cpuinfo for the model has to be converted back to hexadecimal before splitting into its constituent
|
||||
digits, e.g. 142 = 8E , translates to extended model 8, model 14.
|
||||
number returned by Linux in `/proc/cpuinfo` for the model has to be converted back to hexadecimal before splitting into its constituent
|
||||
digits. For example, `142 == 8E` translates to extended model 8, model 14.
|
||||
|
||||
## Adding dedicated support for a new cpu model
|
||||
|
||||
Usually it will be possible to start from an existing model, clone its KERNEL configuration file to the new name to use for this TARGET and eventually replace individual kernels with versions better suited for peculiarities of the new cpu model. In addition, it is necessary to add
|
||||
(or clone at first) the corresponding section of GEMM_UNROLL parameters in the toplevel param.h, and possibly to add definitions such as USE_TRMM
|
||||
(governing whether TRMM functions use the respective GEMM kernel or a separate source file) to the Makefiles (and CMakeLists.txt) in the kernel
|
||||
directory. The new cpu name needs to be added to TargetLists.txt and the cpu autodetection code used by the `getarch` helper program - contained in
|
||||
## Adding dedicated support for a new CPU model
|
||||
|
||||
Usually it will be possible to start from an existing model, clone its `KERNEL` configuration file to the new name to use for this
|
||||
`TARGET` and eventually replace individual kernels with versions better suited for peculiarities of the new CPU model.
|
||||
In addition, it is necessary to add (or clone at first) the corresponding section of `GEMM_UNROLL` parameters in the top-level `param.h`,
|
||||
and possibly to add definitions such as `USE_TRMM` (governing whether `TRMM` functions use the respective `GEMM` kernel or a separate source file)
|
||||
to the `Makefile`s (and `CMakeLists.txt`) in the kernel directory. The new CPU name needs to be added to `TargetList.txt`,
|
||||
and the CPU auto-detection code used by the `getarch` helper program - contained in
|
||||
the `cpuid_<architecture>.c` file amended to include the CPUID (or equivalent) information processing required (see preceding section).
|
||||
|
||||
|
||||
## Adding support for an entirely new architecture
|
||||
|
||||
This endeavour is best started by cloning the entire support structure for 32bit ARM, and within that the ARMV5 cpu in particular as this is implemented through plain C kernels only. An example providing a convenient "shopping list" can be seen in pull request #1526.
|
||||
This endeavour is best started by cloning the entire support structure for 32-bit ARM, and within that the ARMv5 CPU in particular,
|
||||
as this is implemented through plain C kernels only. An example providing a convenient "shopping list" can be seen in pull request
|
||||
[#1526](https://github.com/OpenMathLib/OpenBLAS/pull/1526).
|
||||
|
|
|
@ -49,7 +49,7 @@ settings):
|
|||
to provide an ILP64 interface build as well, use a symbol suffix to avoid
|
||||
symbol name clashes (see the next section).
|
||||
|
||||
[^1] All major distributions do include LAPACK as of mid 2023 as far as we
|
||||
[^1]: All major distributions do include LAPACK as of mid 2023 as far as we
|
||||
know. Older versions of Arch Linux did not, and that was known to cause
|
||||
problems.
|
||||
|
||||
|
|
|
@ -1,4 +1,9 @@
|
|||
* BLAS-like extensions
|
||||
OpenBLAS for the most part contains implementations of the reference (Netlib)
|
||||
BLAS, CBLAS, LAPACK and LAPACKE interfaces. A few OpenBLAS-specific functions
|
||||
are also provided however, which mostly can be seen as "BLAS extensions".
|
||||
This page documents those non-standard APIs.
|
||||
|
||||
## BLAS-like extensions
|
||||
|
||||
| Routine | Data Types | Description |
|
||||
| ------------- |:------------- | :---------------|
|
||||
|
@ -9,20 +14,26 @@
|
|||
| ?geadd | s,d,c,z | matrix add |
|
||||
| ?gemmt | s,d,c,z | gemm but only a triangular part updated|
|
||||
|
||||
* BLAS-like and Conversion functions for bfloat16 (available when OpenBLAS was compiled with BUILD_BFLOAT16=1)
|
||||
* `void cblas_sbstobf16` converts a float array to an array of bfloat16 values by rounding
|
||||
* `void cblas_sbdtobf16` converts a double array to an array of bfloat16 values by rounding
|
||||
* `void cblas_sbf16tos` converts a bfloat16 array to an array of floats
|
||||
* `void cblas_dbf16tod` converts a bfloat16 array to an array of doubles
|
||||
* `float cblas_sbdot` computes the dot product of two bfloat16 arrays
|
||||
* `void cblas_sbgemv` performs the matrix-vector operations of GEMV with the input matrix and X vector as bfloat16
|
||||
* `void cblas_sbgemm` performs the matrix-matrix operations of GEMM with both input arrays containing bfloat16
|
||||
|
||||
* Utility functions
|
||||
* openblas_get_num_threads
|
||||
* openblas_set_num_threads
|
||||
* `int openblas_get_num_procs(void)` returns the number of processors available on the system (may include "hyperthreading cores")
|
||||
* `int openblas_get_parallel(void)` returns 0 for sequential use, 1 for platform-based threading and 2 for OpenMP-based threading
|
||||
* `char * openblas_get_config()` returns the options OpenBLAS was built with, something like `NO_LAPACKE DYNAMIC_ARCH NO_AFFINITY Haswell`
|
||||
* `int openblas_set_affinity(int thread_index, size_t cpusetsize, cpu_set_t *cpuset)` sets the cpu affinity mask of the given thread to the provided cpuset. (Only available under Linux, with semantics identical to pthread_setaffinity_np)
|
||||
## bfloat16 functionality
|
||||
|
||||
BLAS-like and conversion functions for `bfloat16` (available when OpenBLAS was compiled with `BUILD_BFLOAT16=1`):
|
||||
|
||||
* `void cblas_sbstobf16` converts a float array to an array of bfloat16 values by rounding
|
||||
* `void cblas_sbdtobf16` converts a double array to an array of bfloat16 values by rounding
|
||||
* `void cblas_sbf16tos` converts a bfloat16 array to an array of floats
|
||||
* `void cblas_dbf16tod` converts a bfloat16 array to an array of doubles
|
||||
* `float cblas_sbdot` computes the dot product of two bfloat16 arrays
|
||||
* `void cblas_sbgemv` performs the matrix-vector operations of GEMV with the input matrix and X vector as bfloat16
|
||||
* `void cblas_sbgemm` performs the matrix-matrix operations of GEMM with both input arrays containing bfloat16
|
||||
|
||||
## Utility functions
|
||||
|
||||
* `openblas_get_num_threads`
|
||||
* `openblas_set_num_threads`
|
||||
* `int openblas_get_num_procs(void)` returns the number of processors available on the system (may include "hyperthreading cores")
|
||||
* `int openblas_get_parallel(void)` returns 0 for sequential use, 1 for platform-based threading and 2 for OpenMP-based threading
|
||||
* `char * openblas_get_config()` returns the options OpenBLAS was built with, something like `NO_LAPACKE DYNAMIC_ARCH NO_AFFINITY Haswell`
|
||||
* `int openblas_set_affinity(int thread_index, size_t cpusetsize, cpu_set_t *cpuset)` sets the CPU affinity mask of the given thread
|
||||
to the provided cpuset. Only available on Linux, with semantics identical to `pthread_setaffinity_np`.
|
||||
|
||||
|
|
1022
docs/install.md
1022
docs/install.md
File diff suppressed because it is too large
Load Diff
|
@ -1,70 +1,174 @@
|
|||
## Compile the library
|
||||
|
||||
This user manual covers compiling OpenBLAS itself, linking your code to OpenBLAS,
|
||||
example code to use the C (CBLAS) and Fortran (BLAS) APIs, and some troubleshooting
|
||||
tips. Compiling OpenBLAS is optional, since you may be able to install with a
|
||||
package manager.
|
||||
|
||||
!!! Note BLAS API reference documentation
|
||||
|
||||
The OpenBLAS documentation does not contain API reference documentation for
|
||||
BLAS or LAPACK, since these are standardized APIs, the documentation for
|
||||
which can be found in other places. If you want to understand every BLAS
|
||||
and LAPACK function and definition, we recommend reading the
|
||||
[Netlib BLAS ](http://netlib.org/blas/) and [Netlib LAPACK](http://netlib.org/lapack/)
|
||||
documentation.
|
||||
|
||||
OpenBLAS does contain a limited number of functions that are non-standard,
|
||||
these are documented at [OpenBLAS extension functions](extensions.md).
|
||||
|
||||
|
||||
## Compiling OpenBLAS
|
||||
|
||||
### Normal compile
|
||||
* type `make` to detect the CPU automatically.
|
||||
or
|
||||
* type `make TARGET=xxx` to set target CPU, e.g. `make TARGET=NEHALEM`. The full target list is in file TargetList.txt.
|
||||
|
||||
The default way to build and install OpenBLAS from source is with Make:
|
||||
```
|
||||
make # add `-j4` to compile in parallel with 4 processes
|
||||
make install
|
||||
```
|
||||
|
||||
By default, the CPU architecture is detected automatically when invoking
|
||||
`make`, and the build is optimized for the detected CPU. To override the
|
||||
autodetection, use the `TARGET` flag:
|
||||
|
||||
```
|
||||
# `make TARGET=xxx` sets target CPU: e.g. for an Intel Nehalem CPU:
|
||||
make TARGET=NEHALEM
|
||||
```
|
||||
The full list of known target CPU architectures can be found in
|
||||
`TargetList.txt` in the root of the repository.
|
||||
|
||||
### Cross compile
|
||||
Please set `CC` and `FC` with the cross toolchains. Then, set `HOSTCC` with your host C compiler. At last, set `TARGET` explicitly.
|
||||
|
||||
Examples:
|
||||
For a basic cross-compilation with Make, three steps need to be taken:
|
||||
|
||||
* On x86 box, compile the library for ARM Cortex-A9 linux.
|
||||
- Set the `CC` and `FC` environment variables to select the cross toolchains
|
||||
for C and Fortran.
|
||||
- Set the `HOSTCC` environment variable to select the host C compiler (i.e. the
|
||||
regular C compiler for the machine on which you are invoking the build).
|
||||
- Set `TARGET` explicitly to the CPU architecture on which the produced
|
||||
OpenBLAS binaries will be used.
|
||||
|
||||
Install only gnueabihf versions. Please check https://github.com/xianyi/OpenBLAS/issues/936#issuecomment-237596847
|
||||
#### Cross-compilation examples
|
||||
|
||||
make CC=arm-linux-gnueabihf-gcc FC=arm-linux-gnueabihf-gfortran HOSTCC=gcc TARGET=CORTEXA9
|
||||
|
||||
* On X86 box, compile this library for loongson3a CPU.
|
||||
Compile the library for ARM Cortex-A9 linux on an x86-64 machine
|
||||
_(note: install only `gnueabihf` versions of the cross toolchain - see
|
||||
[this issue comment](https://github.com/OpenMathLib/OpenBLAS/issues/936#issuecomment-237596847)
|
||||
for why_):
|
||||
```
|
||||
make CC=arm-linux-gnueabihf-gcc FC=arm-linux-gnueabihf-gfortran HOSTCC=gcc TARGET=CORTEXA9
|
||||
```
|
||||
|
||||
Compile OpenBLAS for a loongson3a CPU on an x86-64 machine:
|
||||
```
|
||||
make BINARY=64 CC=mips64el-unknown-linux-gnu-gcc FC=mips64el-unknown-linux-gnu-gfortran HOSTCC=gcc TARGET=LOONGSON3A
|
||||
```
|
||||
|
||||
* On X86 box, compile this library for loongson3a CPU with loongcc (based on Open64) compiler.
|
||||
|
||||
Compile OpenBLAS for loongson3a CPU with the `loongcc` (based on Open64) compiler on an x86-64 machine:
|
||||
```
|
||||
make CC=loongcc FC=loongf95 HOSTCC=gcc TARGET=LOONGSON3A CROSS=1 CROSS_SUFFIX=mips64el-st-linux-gnu- NO_LAPACKE=1 NO_SHARED=1 BINARY=32
|
||||
```
|
||||
|
||||
### Debug version
|
||||
### Building a debug version
|
||||
|
||||
make DEBUG=1
|
||||
Add `DEBUG=1` to your build command, e.g.:
|
||||
```
|
||||
make DEBUG=1
|
||||
```
|
||||
|
||||
### Install to the directory (optional)
|
||||
### Install to a specific directory
|
||||
|
||||
Example:
|
||||
!!! note
|
||||
|
||||
make install PREFIX=your_installation_directory
|
||||
Installing to a directory is optional; it is also possible to use the shared or static
|
||||
libraries directly from the build directory.
|
||||
|
||||
The default directory is /opt/OpenBLAS. Note that any flags passed to `make` during build should also be passed to `make install` to circumvent any install errors, i.e. some headers not being copied over correctly.
|
||||
Use `make install` with the `PREFIX` flag to install to a specific directory:
|
||||
|
||||
For more information, please read [Installation Guide](install.md).
|
||||
```
|
||||
make install PREFIX=/path/to/installation/directory
|
||||
```
|
||||
|
||||
## Link the library
|
||||
The default directory is `/opt/OpenBLAS`.
|
||||
|
||||
* Link shared library
|
||||
!!! important
|
||||
|
||||
Note that any flags passed to `make` during build should also be passed to
|
||||
`make install` to circumvent any install errors, i.e. some headers not
|
||||
being copied over correctly.
|
||||
|
||||
For more detailed information on building/installing from source, please read
|
||||
the [Installation Guide](install.md).
|
||||
|
||||
|
||||
## Linking to OpenBLAS
|
||||
|
||||
OpenBLAS can be used as a shared or a static library.
|
||||
|
||||
### Link a shared library
|
||||
|
||||
The shared library is normally called `libopenblas.so`, but note that the name
|
||||
may be different as a result of build flags used or naming choices by a distro
|
||||
packager (see [distributing.md](distributing.md) for details). To link a shared library named
|
||||
`libopenblas.so`, the flag `-lopenblas` is needed. To find the OpenBLAS headers,
|
||||
a `-I/path/to/includedir` flag is needed. Unless the library is installed in a
|
||||
directory that the linker searches by default, also `-L` and `-Wl,-rpath` flags
|
||||
are needed. For a source file `test.c` (e.g., the example code under _Call
|
||||
CBLAS interface_ further down), the shared library can then be linked with:
|
||||
```
|
||||
gcc -o test test.c -I/your_path/OpenBLAS/include/ -L/your_path/OpenBLAS/lib -Wl,-rpath,/your_path/OpenBLAS/lib -lopenblas
|
||||
```
|
||||
|
||||
The `-Wl,-rpath,/your_path/OpenBLAS/lib` option to linker can be omitted if you ran `ldconfig` to update linker cache, put `/your_path/OpenBLAS/lib` in `/etc/ld.so.conf` or a file in `/etc/ld.so.conf.d`, or installed OpenBLAS in a location that is part of the `ld.so` default search path (usually /lib,/usr/lib and /usr/local/lib). Alternatively, you can set the environment variable LD_LIBRARY_PATH to point to the folder that contains libopenblas.so. Otherwise, linking at runtime will fail with a message like `cannot open shared object file: no such file or directory`
|
||||
The `-Wl,-rpath,/your_path/OpenBLAS/lib` linker flag can be omitted if you
|
||||
ran `ldconfig` to update linker cache, put `/your_path/OpenBLAS/lib` in
|
||||
`/etc/ld.so.conf` or a file in `/etc/ld.so.conf.d`, or installed OpenBLAS in a
|
||||
location that is part of the `ld.so` default search path (usually `/lib`,
|
||||
`/usr/lib` and `/usr/local/lib`). Alternatively, you can set the environment
|
||||
variable `LD_LIBRARY_PATH` to point to the folder that contains `libopenblas.so`.
|
||||
Otherwise, the build may succeed but at runtime loading the library will fail
|
||||
with a message like:
|
||||
```
|
||||
cannot open shared object file: no such file or directory
|
||||
```
|
||||
|
||||
If the library is multithreaded, please add `-lpthread`. If the library contains LAPACK functions, please add `-lgfortran` or other Fortran libs, although if you only make calls to LAPACKE routines, i.e. your code has `#include "lapacke.h"` and makes calls to methods like `LAPACKE_dgeqrf`, `-lgfortran` is not needed.
|
||||
More flags may be needed, depending on how OpenBLAS was built:
|
||||
|
||||
* Link static library
|
||||
- If `libopenblas` is multi-threaded, please add `-lpthread`.
|
||||
- If the library contains LAPACK functions (usually also true), please add
|
||||
`-lgfortran` (other Fortran libraries may also be needed, e.g. `-lquadmath`).
|
||||
Note that if you only make calls to LAPACKE routines, i.e. your code has
|
||||
`#include "lapacke.h"` and makes calls to methods like `LAPACKE_dgeqrf`,
|
||||
then `-lgfortran` is not needed.
|
||||
|
||||
!!! tip Use pkg-config
|
||||
|
||||
Usually a pkg-config file (e.g., `openblas.pc`) is installed together
|
||||
with a `libopenblas` shared library. pkg-config is a tool that will
|
||||
tell you the exact flags needed for linking. For example:
|
||||
|
||||
```
|
||||
$ pkg-config --cflags openblas
|
||||
-I/usr/local/include
|
||||
$ pkg-config --libs openblas
|
||||
-L/usr/local/lib -lopenblas
|
||||
```
|
||||
|
||||
### Link a static library
|
||||
|
||||
Linking a static library is simpler - add the path to the static OpenBLAS
|
||||
library to the compile command:
|
||||
```
|
||||
gcc -o test test.c /your/path/libopenblas.a
|
||||
```
|
||||
|
||||
You can download `test.c` from https://gist.github.com/xianyi/5780018
|
||||
|
||||
## Code examples
|
||||
|
||||
### Call CBLAS interface
|
||||
This example shows calling cblas_dgemm in C. https://gist.github.com/xianyi/6930656
|
||||
|
||||
This example shows calling `cblas_dgemm` in C:
|
||||
|
||||
<!-- Source: https://gist.github.com/xianyi/6930656 -->
|
||||
```c
|
||||
#include <cblas.h>
|
||||
#include <stdio.h>
|
||||
|
@ -83,14 +187,17 @@ void main()
|
|||
}
|
||||
```
|
||||
|
||||
To compile this file, save it as `test_cblas_dgemm.c` and then run:
|
||||
```
|
||||
gcc -o test_cblas_open test_cblas_dgemm.c -I /your_path/OpenBLAS/include/ -L/your_path/OpenBLAS/lib -lopenblas -lpthread -lgfortran
|
||||
gcc -o test_cblas_open test_cblas_dgemm.c -I/your_path/OpenBLAS/include/ -L/your_path/OpenBLAS/lib -lopenblas -lpthread -lgfortran
|
||||
```
|
||||
will result in a `test_cblas_open` executable.
|
||||
|
||||
### Call BLAS Fortran interface
|
||||
|
||||
This example shows calling dgemm Fortran interface in C. https://gist.github.com/xianyi/5780018
|
||||
This example shows calling the `dgemm` Fortran interface in C:
|
||||
|
||||
<!-- Source: https://gist.github.com/xianyi/5780018 -->
|
||||
```c
|
||||
#include "stdio.h"
|
||||
#include "stdlib.h"
|
||||
|
@ -158,22 +265,41 @@ int main(int argc, char* argv[])
|
|||
}
|
||||
```
|
||||
|
||||
To compile this file, save it as `time_dgemm.c` and then run:
|
||||
```
|
||||
gcc -o time_dgemm time_dgemm.c /your/path/libopenblas.a -lpthread
|
||||
./time_dgemm <m> <n> <k>
|
||||
```
|
||||
You can then run it as: `./time_dgemm <m> <n> <k>`, with `m`, `n`, and `k` input
|
||||
parameters to the `time_dgemm` executable.
|
||||
|
||||
!!! note
|
||||
|
||||
When calling the Fortran interface from C, you have to deal with symbol name
|
||||
differences caused by compiler conventions. That is why the `dgemm_` function
|
||||
call in the example above has a trailing underscore. This is what it looks like
|
||||
when using `gcc`/`gfortran`, however such details may change for different
|
||||
compilers. Hence it requires extra support code. The CBLAS interface may be
|
||||
more portable when writing C code.
|
||||
|
||||
When writing code that needs to be portable and work across different
|
||||
platforms and compilers, the above code example is not recommended for
|
||||
usage. Instead, we advise looking at how OpenBLAS (or BLAS in general, since
|
||||
this problem isn't specific to OpenBLAS) functions are called in widely
|
||||
used projects like Julia, SciPy, or R.
|
||||
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
* Please read [Faq](faq.md) at first.
|
||||
* Please use gcc version 4.6 and above to compile Sandy Bridge AVX kernels on Linux/MingW/BSD.
|
||||
* Please use Clang version 3.1 and above to compile the library on Sandy Bridge microarchitecture. The Clang 3.0 will generate the wrong AVX binary code.
|
||||
* The number of CPUs/Cores should less than or equal to 256. On Linux x86_64(amd64), there is experimental support for up to 1024 CPUs/Cores and 128 numa nodes if you build the library with BIGNUMA=1.
|
||||
* OpenBLAS does not set processor affinity by default. On Linux, you can enable processor affinity by commenting the line NO_AFFINITY=1 in Makefile.rule. But this may cause [the conflict with R parallel](https://stat.ethz.ch/pipermail/r-sig-hpc/2012-April/001348.html).
|
||||
* On Loongson 3A. make test would be failed because of pthread_create error. The error code is EAGAIN. However, it will be OK when you run the same testcase on shell.
|
||||
|
||||
## BLAS reference manual
|
||||
|
||||
If you want to understand every BLAS function and definition, please read [Intel MKL reference manual](https://software.intel.com/en-us/intel-mkl/documentation) or [netlib.org](http://netlib.org/blas/)
|
||||
|
||||
Here are [OpenBLAS extension functions](extensions.md)
|
||||
* Please read the [FAQ](faq.md) first, your problem may be described there.
|
||||
* Please ensure you are using a recent enough compiler, that supports the
|
||||
features your CPU provides (example: GCC versions before 4.6 were known to
|
||||
not support AVX kernels, and before 6.1 AVX512CD kernels).
|
||||
* The number of CPU cores supported by default is <=256. On Linux x86-64, there
|
||||
is experimental support for up to 1024 cores and 128 NUMA nodes if you build
|
||||
the library with `BIGNUMA=1`.
|
||||
* OpenBLAS does not set processor affinity by default. On Linux, you can enable
|
||||
processor affinity by commenting out the line `NO_AFFINITY=1` in
|
||||
`Makefile.rule`.
|
||||
* On Loongson 3A, `make test` is known to fail with a `pthread_create` error
|
||||
and an `EAGAIN` error code. However, it will be OK when you run the same
|
||||
testcase in a shell.
|
||||
|
|
36
mkdocs.yml
36
mkdocs.yml
|
@ -1,17 +1,44 @@
|
|||
site_name: OpenBLAS
|
||||
site_url: https://openblas.net/docs/
|
||||
repo_url: https://github.com/OpenMathLib/OpenBLAS
|
||||
copyright: Copyright © 2012- OpenBLAS contributors
|
||||
|
||||
theme:
|
||||
name: material
|
||||
logo: logo.svg
|
||||
favicon: logo.svg
|
||||
features:
|
||||
- header.autohide
|
||||
palette:
|
||||
primary: grey
|
||||
# Palette toggle for dark mode
|
||||
- scheme: slate
|
||||
primary: blue grey
|
||||
toggle:
|
||||
icon: material/brightness-4
|
||||
name: Switch to light mode
|
||||
|
||||
# Palette toggle for light mode
|
||||
- scheme: default
|
||||
primary: blue grey
|
||||
toggle:
|
||||
icon: material/brightness-7
|
||||
name: Switch to dark mode
|
||||
|
||||
plugins:
|
||||
- search
|
||||
- git-revision-date-localized:
|
||||
enable_creation_date: true
|
||||
|
||||
markdown_extensions:
|
||||
- admonition
|
||||
- pymdownx.details
|
||||
- pymdownx.superfences
|
||||
- footnotes
|
||||
- pymdownx.tabbed:
|
||||
alternate_style: true
|
||||
- toc:
|
||||
toc_depth: 4
|
||||
|
||||
nav:
|
||||
- index.md
|
||||
- install.md
|
||||
|
@ -23,3 +50,10 @@ nav:
|
|||
- ci.md
|
||||
- about.md
|
||||
- faq.md
|
||||
|
||||
extra:
|
||||
social:
|
||||
- icon: fontawesome/brands/github
|
||||
link: https://github.com/OpenMathLib/OpenBLAS
|
||||
- icon: material/license
|
||||
link: https://github.com/OpenMathLib/OpenBLAS/LICENSE
|
||||
|
|
Loading…
Reference in New Issue