Merge branch 'develop' into z14

This commit is contained in:
maamountki
2019-01-31 19:36:41 +02:00
committed by GitHub
236 changed files with 13108 additions and 2301 deletions

View File

@@ -125,10 +125,13 @@ function (build_core TARGET_CORE KDIR TSUFFIX KERNEL_DEFINITIONS)
set(USE_TRMM true)
endif ()
foreach (float_type ${FLOAT_TYPES})
foreach (float_type SINGLE DOUBLE)
string(SUBSTRING ${float_type} 0 1 float_char)
GenerateNamedObjects("${KERNELDIR}/${${float_char}GEMMKERNEL}" "" "gemm_kernel" false "" "" false ${float_type})
endforeach()
foreach (float_type ${FLOAT_TYPES})
string(SUBSTRING ${float_type} 0 1 float_char)
if (${float_char}GEMMINCOPY)
GenerateNamedObjects("${KERNELDIR}/${${float_char}GEMMINCOPY}" "${float_type}" "${${float_char}GEMMINCOPYOBJ}" false "" "" true ${float_type})
endif ()

View File

@@ -5,8 +5,43 @@ endif
TOPDIR = ..
include $(TOPDIR)/Makefile.system
AVX2OPT =
ifeq ($(C_COMPILER), GCC)
# AVX2 support was added in 4.7.0
GCCVERSIONGTEQ4 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \>= 4)
GCCMINORVERSIONGTEQ7 := $(shell expr `$(CC) -dumpversion | cut -f2 -d.` \>= 7)
ifeq ($(GCCVERSIONGTEQ4)$(GCCMINORVERSIONGTEQ7), 11)
AVX2OPT = -mavx2
endif
endif
ifeq ($(C_COMPILER), CLANG)
# Any clang posing as gcc 4.2 should be new enough (3.4 or later)
GCCVERSIONGTEQ4 := $(shell expr `$(CC) -dumpversion | cut -f1 -d.` \>= 4)
GCCMINORVERSIONGTEQ2 := $(shell expr `$(CC) -dumpversion | cut -f2 -d.` \>= 2)
ifeq ($(GCCVERSIONGTEQ4)$(GCCMINORVERSIONGTEQ2), 11)
AVX2OPT = -mavx2
endif
endif
ifdef NO_AVX2
AVX2OPT=
endif
ifdef TARGET_CORE
override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE)
ifeq ($(TARGET_CORE), SKYLAKEX)
override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE) -march=skylake-avx512
ifeq ($(OSNAME), CYGWIN_NT)
override CFLAGS += -fno-asynchronous-unwind-tables
endif
ifeq ($(OSNAME), WINNT)
ifeq ($(C_COMPILER), GCC)
override CFLAGS += -fno-asynchronous-unwind-tables
endif
endif
else ifeq ($(TARGET_CORE), HASWELL)
override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE) $(AVX2OPT)
else
override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE)
endif
BUILD_KERNEL = 1
KDIR =
TSUFFIX = _$(TARGET_CORE)
@@ -88,7 +123,11 @@ lsame.$(SUFFIX): $(KERNELDIR)/$(LSAME_KERNEL)
$(CC) -c $(CFLAGS) -DF_INTERFACE $< -o $(@F)
setparam$(TSUFFIX).$(SUFFIX): setparam$(TSUFFIX).c kernel$(TSUFFIX).h
ifeq ($(USE_GEMM3M), 1)
$(CC) -c $(CFLAGS) -DUSE_GEMM3M $< -o $@
else
$(CC) -c $(CFLAGS) $< -o $@
endif
setparam$(TSUFFIX).c : setparam-ref.c
sed 's/TS/$(TSUFFIX)/g' $< > $(@F)

View File

@@ -44,7 +44,7 @@ ifeq ($(CORE), POWER8)
USE_TRMM = 1
endif
ifeq ($(CORE), Z13)
ifeq ($(ARCH), zarch)
USE_TRMM = 1
endif

View File

@@ -58,11 +58,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F4
pld [ X, #X_PRE ]
fldmiad X!, { d4 - d5 }
vldmia.f64 X!, { d4 - d5 }
vabs.f64 d4, d4
vadd.f64 d0 , d0, d4
vabs.f64 d5, d5
fldmiad X!, { d6 - d7 }
vldmia.f64 X!, { d6 - d7 }
vabs.f64 d6, d6
vadd.f64 d1 , d1, d5
vabs.f64 d7, d7
@@ -73,7 +73,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmiad X!, { d4 }
vldmia.f64 X!, { d4 }
vabs.f64 d4, d4
vadd.f64 d0 , d0, d4
@@ -82,22 +82,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S4
fldmiad X, { d4 }
vldmia.f64 X, { d4 }
vabs.f64 d4, d4
vadd.f64 d0 , d0, d4
add X, X, INC_X
fldmiad X, { d4 }
vldmia.f64 X, { d4 }
vabs.f64 d4, d4
vadd.f64 d0 , d0, d4
add X, X, INC_X
fldmiad X, { d4 }
vldmia.f64 X, { d4 }
vabs.f64 d4, d4
vadd.f64 d0 , d0, d4
add X, X, INC_X
fldmiad X, { d4 }
vldmia.f64 X, { d4 }
vabs.f64 d4, d4
vadd.f64 d0 , d0, d4
add X, X, INC_X
@@ -107,7 +107,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1
fldmiad X, { d4 }
vldmia.f64 X, { d4 }
vabs.f64 d4, d4
vadd.f64 d0 , d0, d4
add X, X, INC_X
@@ -118,11 +118,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F4
fldmias X!, { s4 - s5 }
vldmia.f32 X!, { s4 - s5 }
vabs.f32 s4, s4
vadd.f32 s0 , s0, s4
vabs.f32 s5, s5
fldmias X!, { s6 - s7 }
vldmia.f32 X!, { s6 - s7 }
vabs.f32 s6, s6
vadd.f32 s1 , s1, s5
vabs.f32 s7, s7
@@ -133,7 +133,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmias X!, { s4 }
vldmia.f32 X!, { s4 }
vabs.f32 s4, s4
vadd.f32 s0 , s0, s4
@@ -142,22 +142,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S4
fldmias X, { s4 }
vldmia.f32 X, { s4 }
vabs.f32 s4, s4
vadd.f32 s0 , s0, s4
add X, X, INC_X
fldmias X, { s4 }
vldmia.f32 X, { s4 }
vabs.f32 s4, s4
vadd.f32 s0 , s0, s4
add X, X, INC_X
fldmias X, { s4 }
vldmia.f32 X, { s4 }
vabs.f32 s4, s4
vadd.f32 s0 , s0, s4
add X, X, INC_X
fldmias X, { s4 }
vldmia.f32 X, { s4 }
vabs.f32 s4, s4
vadd.f32 s0 , s0, s4
add X, X, INC_X
@@ -167,7 +167,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1
fldmias X, { s4 }
vldmia.f32 X, { s4 }
vabs.f32 s4, s4
vadd.f32 s0 , s0, s4
add X, X, INC_X
@@ -184,11 +184,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F4
pld [ X, #X_PRE ]
fldmiad X!, { d4 - d5 }
vldmia.f64 X!, { d4 - d5 }
vabs.f64 d4, d4
vadd.f64 d0 , d0, d4
vabs.f64 d5, d5
fldmiad X!, { d6 - d7 }
vldmia.f64 X!, { d6 - d7 }
vabs.f64 d6, d6
vadd.f64 d1 , d1, d5
vabs.f64 d7, d7
@@ -196,11 +196,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vadd.f64 d1 , d1, d7
pld [ X, #X_PRE ]
fldmiad X!, { d4 - d5 }
vldmia.f64 X!, { d4 - d5 }
vabs.f64 d4, d4
vadd.f64 d0 , d0, d4
vabs.f64 d5, d5
fldmiad X!, { d6 - d7 }
vldmia.f64 X!, { d6 - d7 }
vabs.f64 d6, d6
vadd.f64 d1 , d1, d5
vabs.f64 d7, d7
@@ -212,11 +212,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmiad X!, { d4 }
vldmia.f64 X!, { d4 }
vabs.f64 d4, d4
vadd.f64 d0 , d0, d4
fldmiad X!, { d4 }
vldmia.f64 X!, { d4 }
vabs.f64 d4, d4
vadd.f64 d0 , d0, d4
@@ -226,28 +226,28 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S4
fldmiad X, { d4 -d5 }
vldmia.f64 X, { d4 -d5 }
vabs.f64 d4, d4
vadd.f64 d0 , d0, d4
vabs.f64 d5, d5
vadd.f64 d0 , d0, d5
add X, X, INC_X
fldmiad X, { d4 -d5 }
vldmia.f64 X, { d4 -d5 }
vabs.f64 d4, d4
vadd.f64 d0 , d0, d4
vabs.f64 d5, d5
vadd.f64 d0 , d0, d5
add X, X, INC_X
fldmiad X, { d4 -d5 }
vldmia.f64 X, { d4 -d5 }
vabs.f64 d4, d4
vadd.f64 d0 , d0, d4
vabs.f64 d5, d5
vadd.f64 d0 , d0, d5
add X, X, INC_X
fldmiad X, { d4 -d5 }
vldmia.f64 X, { d4 -d5 }
vabs.f64 d4, d4
vadd.f64 d0 , d0, d4
vabs.f64 d5, d5
@@ -259,7 +259,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1
fldmiad X, { d4 -d5 }
vldmia.f64 X, { d4 -d5 }
vabs.f64 d4, d4
vadd.f64 d0 , d0, d4
vabs.f64 d5, d5
@@ -273,22 +273,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F4
pld [ X, #X_PRE ]
fldmias X!, { s4 - s5 }
vldmia.f32 X!, { s4 - s5 }
vabs.f32 s4, s4
vadd.f32 s0 , s0, s4
vabs.f32 s5, s5
fldmias X!, { s6 - s7 }
vldmia.f32 X!, { s6 - s7 }
vabs.f32 s6, s6
vadd.f32 s1 , s1, s5
vabs.f32 s7, s7
vadd.f32 s0 , s0, s6
vadd.f32 s1 , s1, s7
fldmias X!, { s4 - s5 }
vldmia.f32 X!, { s4 - s5 }
vabs.f32 s4, s4
vadd.f32 s0 , s0, s4
vabs.f32 s5, s5
fldmias X!, { s6 - s7 }
vldmia.f32 X!, { s6 - s7 }
vabs.f32 s6, s6
vadd.f32 s1 , s1, s5
vabs.f32 s7, s7
@@ -300,11 +300,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmias X!, { s4 }
vldmia.f32 X!, { s4 }
vabs.f32 s4, s4
vadd.f32 s0 , s0, s4
fldmias X!, { s4 }
vldmia.f32 X!, { s4 }
vabs.f32 s4, s4
vadd.f32 s0 , s0, s4
@@ -313,28 +313,28 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S4
fldmias X, { s4 -s5 }
vldmia.f32 X, { s4 -s5 }
vabs.f32 s4, s4
vadd.f32 s0 , s0, s4
vabs.f32 s5, s5
vadd.f32 s0 , s0, s5
add X, X, INC_X
fldmias X, { s4 -s5 }
vldmia.f32 X, { s4 -s5 }
vabs.f32 s4, s4
vadd.f32 s0 , s0, s4
vabs.f32 s5, s5
vadd.f32 s0 , s0, s5
add X, X, INC_X
fldmias X, { s4 -s5 }
vldmia.f32 X, { s4 -s5 }
vabs.f32 s4, s4
vadd.f32 s0 , s0, s4
vabs.f32 s5, s5
vadd.f32 s0 , s0, s5
add X, X, INC_X
fldmias X, { s4 -s5 }
vldmia.f32 X, { s4 -s5 }
vabs.f32 s4, s4
vadd.f32 s0 , s0, s4
vabs.f32 s5, s5
@@ -346,7 +346,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1
fldmias X, { s4 -s5 }
vldmia.f32 X, { s4 -s5 }
vabs.f32 s4, s4
vadd.f32 s0 , s0, s4
vabs.f32 s5, s5

View File

@@ -146,17 +146,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F4
pld [ X, #X_PRE ]
fldmiad X!, { d4 - d7 }
vldmia.f64 X!, { d4 - d7 }
pld [ Y, #X_PRE ]
fldmiad Y , { d8 - d11 }
vldmia.f64 Y , { d8 - d11 }
fmacd d8 , d0, d4
fstmiad Y!, { d8 }
vstmia.f64 Y!, { d8 }
fmacd d9 , d0, d5
fstmiad Y!, { d9 }
vstmia.f64 Y!, { d9 }
fmacd d10, d0, d6
fstmiad Y!, { d10 }
vstmia.f64 Y!, { d10 }
fmacd d11, d0, d7
fstmiad Y!, { d11 }
vstmia.f64 Y!, { d11 }
.endm
@@ -164,19 +164,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmiad X!, { d4 }
fldmiad Y , { d8 }
vldmia.f64 X!, { d4 }
vldmia.f64 Y , { d8 }
fmacd d8 , d0, d4
fstmiad Y!, { d8 }
vstmia.f64 Y!, { d8 }
.endm
.macro KERNEL_S1
fldmiad X , { d4 }
fldmiad Y , { d8 }
vldmia.f64 X , { d4 }
vldmia.f64 Y , { d8 }
fmacd d8 , d0, d4
fstmiad Y , { d8 }
vstmia.f64 Y , { d8 }
add X, X, INC_X
add Y, Y, INC_Y
@@ -186,16 +186,16 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F4
fldmias X!, { s4 - s7 }
fldmias Y , { s8 - s11 }
vldmia.f32 X!, { s4 - s7 }
vldmia.f32 Y , { s8 - s11 }
fmacs s8 , s0, s4
fstmias Y!, { s8 }
vstmia.f32 Y!, { s8 }
fmacs s9 , s0, s5
fstmias Y!, { s9 }
vstmia.f32 Y!, { s9 }
fmacs s10, s0, s6
fstmias Y!, { s10 }
vstmia.f32 Y!, { s10 }
fmacs s11, s0, s7
fstmias Y!, { s11 }
vstmia.f32 Y!, { s11 }
.endm
@@ -203,19 +203,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmias X!, { s4 }
fldmias Y , { s8 }
vldmia.f32 X!, { s4 }
vldmia.f32 Y , { s8 }
fmacs s8 , s0, s4
fstmias Y!, { s8 }
vstmia.f32 Y!, { s8 }
.endm
.macro KERNEL_S1
fldmias X , { s4 }
fldmias Y , { s8 }
vldmia.f32 X , { s4 }
vldmia.f32 Y , { s8 }
fmacs s8 , s0, s4
fstmias Y , { s8 }
vstmia.f32 Y , { s8 }
add X, X, INC_X
add Y, Y, INC_Y
@@ -231,42 +231,42 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F4
pld [ X, #X_PRE ]
fldmiad X!, { d4 - d7 }
vldmia.f64 X!, { d4 - d7 }
pld [ Y, #X_PRE ]
fldmiad Y , { d8 - d11 }
vldmia.f64 Y , { d8 - d11 }
FMAC_R1 d8 , d0, d4
FMAC_R2 d8 , d1, d5
FMAC_I1 d9 , d0, d5
FMAC_I2 d9 , d1, d4
fstmiad Y!, { d8 }
fstmiad Y!, { d9 }
vstmia.f64 Y!, { d8 }
vstmia.f64 Y!, { d9 }
FMAC_R1 d10, d0, d6
FMAC_R2 d10, d1, d7
FMAC_I1 d11, d0, d7
FMAC_I2 d11, d1, d6
fstmiad Y!, { d10 }
fstmiad Y!, { d11 }
vstmia.f64 Y!, { d10 }
vstmia.f64 Y!, { d11 }
pld [ X, #X_PRE ]
fldmiad X!, { d4 - d7 }
vldmia.f64 X!, { d4 - d7 }
pld [ Y, #X_PRE ]
fldmiad Y , { d8 - d11 }
vldmia.f64 Y , { d8 - d11 }
FMAC_R1 d8 , d0, d4
FMAC_R2 d8 , d1, d5
FMAC_I1 d9 , d0, d5
FMAC_I2 d9 , d1, d4
fstmiad Y!, { d8 }
fstmiad Y!, { d9 }
vstmia.f64 Y!, { d8 }
vstmia.f64 Y!, { d9 }
FMAC_R1 d10, d0, d6
FMAC_R2 d10, d1, d7
FMAC_I1 d11, d0, d7
FMAC_I2 d11, d1, d6
fstmiad Y!, { d10 }
fstmiad Y!, { d11 }
vstmia.f64 Y!, { d10 }
vstmia.f64 Y!, { d11 }
@@ -277,15 +277,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmiad X!, { d4 - d5 }
fldmiad Y , { d8 - d9 }
vldmia.f64 X!, { d4 - d5 }
vldmia.f64 Y , { d8 - d9 }
FMAC_R1 d8 , d0, d4
FMAC_R2 d8 , d1, d5
FMAC_I1 d9 , d0, d5
FMAC_I2 d9 , d1, d4
fstmiad Y!, { d8 }
fstmiad Y!, { d9 }
vstmia.f64 Y!, { d8 }
vstmia.f64 Y!, { d9 }
@@ -293,14 +293,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1
fldmiad X , { d4 - d5 }
fldmiad Y , { d8 - d9 }
vldmia.f64 X , { d4 - d5 }
vldmia.f64 Y , { d8 - d9 }
FMAC_R1 d8 , d0, d4
FMAC_R2 d8 , d1, d5
FMAC_I1 d9 , d0, d5
FMAC_I2 d9 , d1, d4
fstmiad Y , { d8 - d9 }
vstmia.f64 Y , { d8 - d9 }
add X, X, INC_X
add Y, Y, INC_Y
@@ -314,40 +314,40 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F4
pld [ X, #X_PRE ]
fldmias X!, { s4 - s7 }
vldmia.f32 X!, { s4 - s7 }
pld [ Y, #X_PRE ]
fldmias Y , { s8 - s11 }
vldmia.f32 Y , { s8 - s11 }
FMAC_R1 s8 , s0, s4
FMAC_R2 s8 , s1, s5
FMAC_I1 s9 , s0, s5
FMAC_I2 s9 , s1, s4
fstmias Y!, { s8 }
fstmias Y!, { s9 }
vstmia.f32 Y!, { s8 }
vstmia.f32 Y!, { s9 }
FMAC_R1 s10, s0, s6
FMAC_R2 s10, s1, s7
FMAC_I1 s11, s0, s7
FMAC_I2 s11, s1, s6
fstmias Y!, { s10 }
fstmias Y!, { s11 }
vstmia.f32 Y!, { s10 }
vstmia.f32 Y!, { s11 }
fldmias X!, { s4 - s7 }
fldmias Y , { s8 - s11 }
vldmia.f32 X!, { s4 - s7 }
vldmia.f32 Y , { s8 - s11 }
FMAC_R1 s8 , s0, s4
FMAC_R2 s8 , s1, s5
FMAC_I1 s9 , s0, s5
FMAC_I2 s9 , s1, s4
fstmias Y!, { s8 }
fstmias Y!, { s9 }
vstmia.f32 Y!, { s8 }
vstmia.f32 Y!, { s9 }
FMAC_R1 s10, s0, s6
FMAC_R2 s10, s1, s7
FMAC_I1 s11, s0, s7
FMAC_I2 s11, s1, s6
fstmias Y!, { s10 }
fstmias Y!, { s11 }
vstmia.f32 Y!, { s10 }
vstmia.f32 Y!, { s11 }
@@ -358,15 +358,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmias X!, { s4 - s5 }
fldmias Y , { s8 - s9 }
vldmia.f32 X!, { s4 - s5 }
vldmia.f32 Y , { s8 - s9 }
FMAC_R1 s8 , s0, s4
FMAC_R2 s8 , s1, s5
FMAC_I1 s9 , s0, s5
FMAC_I2 s9 , s1, s4
fstmias Y!, { s8 }
fstmias Y!, { s9 }
vstmia.f32 Y!, { s8 }
vstmia.f32 Y!, { s9 }
@@ -374,14 +374,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1
fldmias X , { s4 - s5 }
fldmias Y , { s8 - s9 }
vldmia.f32 X , { s4 - s5 }
vldmia.f32 Y , { s8 - s9 }
FMAC_R1 s8 , s0, s4
FMAC_R2 s8 , s1, s5
FMAC_I1 s9 , s0, s5
FMAC_I2 s9 , s1, s4
fstmias Y , { s8 - s9 }
vstmia.f32 Y , { s8 - s9 }
add X, X, INC_X
add Y, Y, INC_Y

View File

@@ -65,15 +65,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY_F4
pld [ X, #X_PRE ]
fldmias X!, { s0 - s7 }
fstmias Y!, { s0 - s7 }
vldmia.f32 X!, { s0 - s7 }
vstmia.f32 Y!, { s0 - s7 }
.endm
.macro COPY_F1
fldmias X!, { s0 - s1 }
fstmias Y!, { s0 - s1 }
vldmia.f32 X!, { s0 - s1 }
vstmia.f32 Y!, { s0 - s1 }
.endm
@@ -83,23 +83,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY_S4
nop
fldmias X, { s0 - s1 }
fstmias Y, { s0 - s1 }
vldmia.f32 X, { s0 - s1 }
vstmia.f32 Y, { s0 - s1 }
add X, X, INC_X
add Y, Y, INC_Y
fldmias X, { s2 - s3 }
fstmias Y, { s2 - s3 }
vldmia.f32 X, { s2 - s3 }
vstmia.f32 Y, { s2 - s3 }
add X, X, INC_X
add Y, Y, INC_Y
fldmias X, { s0 - s1 }
fstmias Y, { s0 - s1 }
vldmia.f32 X, { s0 - s1 }
vstmia.f32 Y, { s0 - s1 }
add X, X, INC_X
add Y, Y, INC_Y
fldmias X, { s2 - s3 }
fstmias Y, { s2 - s3 }
vldmia.f32 X, { s2 - s3 }
vstmia.f32 Y, { s2 - s3 }
add X, X, INC_X
add Y, Y, INC_Y
@@ -108,8 +108,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY_S1
fldmias X, { s0 - s1 }
fstmias Y, { s0 - s1 }
vldmia.f32 X, { s0 - s1 }
vstmia.f32 Y, { s0 - s1 }
add X, X, INC_X
add Y, Y, INC_Y

View File

@@ -76,30 +76,30 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ X, #X_PRE ]
pld [ Y, #X_PRE ]
fldmias X!, { s4 - s5 }
fldmias Y!, { s8 - s9 }
vldmia.f32 X!, { s4 - s5 }
vldmia.f32 Y!, { s8 - s9 }
fmacs s0 , s4, s8
fmacs s1 , s4, s9
fldmias X!, { s6 - s7 }
vldmia.f32 X!, { s6 - s7 }
fmacs s2 , s5, s9
fmacs s3 , s5, s8
fldmias Y!, { s10 - s11 }
vldmia.f32 Y!, { s10 - s11 }
fmacs s0 , s6, s10
fmacs s1 , s6, s11
fmacs s2 , s7, s11
fmacs s3 , s7, s10
fldmias X!, { s4 - s5 }
fldmias Y!, { s8 - s9 }
vldmia.f32 X!, { s4 - s5 }
vldmia.f32 Y!, { s8 - s9 }
fmacs s0 , s4, s8
fmacs s1 , s4, s9
fldmias X!, { s6 - s7 }
vldmia.f32 X!, { s6 - s7 }
fmacs s2 , s5, s9
fmacs s3 , s5, s8
fldmias Y!, { s10 - s11 }
vldmia.f32 Y!, { s10 - s11 }
fmacs s0 , s6, s10
fmacs s1 , s6, s11
fmacs s2 , s7, s11
@@ -109,8 +109,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmias X!, { s4 - s5 }
fldmias Y!, { s8 - s9 }
vldmia.f32 X!, { s4 - s5 }
vldmia.f32 Y!, { s8 - s9 }
fmacs s0 , s4, s8
fmacs s1 , s4, s9
fmacs s2 , s5, s9
@@ -125,8 +125,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
nop
fldmias X, { s4 - s5 }
fldmias Y, { s8 - s9 }
vldmia.f32 X, { s4 - s5 }
vldmia.f32 Y, { s8 - s9 }
fmacs s0 , s4, s8
fmacs s1 , s4, s9
fmacs s2 , s5, s9
@@ -134,8 +134,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
add X, X, INC_X
add Y, Y, INC_Y
fldmias X, { s4 - s5 }
fldmias Y, { s8 - s9 }
vldmia.f32 X, { s4 - s5 }
vldmia.f32 Y, { s8 - s9 }
fmacs s0 , s4, s8
fmacs s1 , s4, s9
fmacs s2 , s5, s9
@@ -143,8 +143,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
add X, X, INC_X
add Y, Y, INC_Y
fldmias X, { s4 - s5 }
fldmias Y, { s8 - s9 }
vldmia.f32 X, { s4 - s5 }
vldmia.f32 Y, { s8 - s9 }
fmacs s0 , s4, s8
fmacs s1 , s4, s9
fmacs s2 , s5, s9
@@ -152,8 +152,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
add X, X, INC_X
add Y, Y, INC_Y
fldmias X, { s4 - s5 }
fldmias Y, { s8 - s9 }
vldmia.f32 X, { s4 - s5 }
vldmia.f32 Y, { s8 - s9 }
fmacs s0 , s4, s8
fmacs s1 , s4, s9
fmacs s2 , s5, s9
@@ -166,8 +166,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1
fldmias X, { s4 - s5 }
fldmias Y, { s8 - s9 }
vldmia.f32 X, { s4 - s5 }
vldmia.f32 Y, { s8 - s9 }
fmacs s0 , s4, s8
fmacs s1 , s4, s9
fmacs s2 , s5, s9

View File

@@ -165,9 +165,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL2x2_I
pld [ AO, #A_PRE ]
fldmias AO!, { s0 - s3 }
vldmia.f32 AO!, { s0 - s3 }
pld [ BO, #B_PRE ]
fldmias BO!, { s4 - s7 }
vldmia.f32 BO!, { s4 - s7 }
fmuls s8 , s0, s4
@@ -197,9 +197,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL2x2_M1
pld [ AO, #A_PRE ]
fldmias AO!, { s0 - s3 }
vldmia.f32 AO!, { s0 - s3 }
pld [ BO, #B_PRE ]
fldmias BO!, { s4 - s7 }
vldmia.f32 BO!, { s4 - s7 }
fmacs s8 , s0, s4
fmacs s9 , s0, s5
@@ -225,8 +225,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL2x2_M2
fldmias AO!, { s0 - s3 }
fldmias BO!, { s4 - s7 }
vldmia.f32 AO!, { s0 - s3 }
vldmia.f32 BO!, { s4 - s7 }
fmacs s8 , s0, s4
fmacs s9 , s0, s5
@@ -254,8 +254,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL2x2_E
fldmias AO!, { s0 - s3 }
fldmias BO!, { s4 - s7 }
vldmia.f32 AO!, { s0 - s3 }
vldmia.f32 BO!, { s4 - s7 }
fmacs s8 , s0, s4
fmacs s9 , s0, s5
@@ -282,8 +282,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL2x2_SUB
fldmias AO!, { s0 - s3 }
fldmias BO!, { s4 - s7 }
vldmia.f32 AO!, { s0 - s3 }
vldmia.f32 BO!, { s4 - s7 }
fmacs s8 , s0, s4
fmacs s9 , s0, s5
@@ -317,7 +317,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0, ALPHA_R
flds s1, ALPHA_I
fldmias CO1, { s4 - s7 }
vldmia.f32 CO1, { s4 - s7 }
FMAC_R1 s4 , s0 , s8
FMAC_I1 s5 , s0 , s9
@@ -329,9 +329,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s6 , s1 , s11
FMAC_I2 s7 , s1 , s10
fstmias CO1, { s4 - s7 }
vstmia.f32 CO1, { s4 - s7 }
fldmias CO2, { s4 - s7 }
vldmia.f32 CO2, { s4 - s7 }
FMAC_R1 s4 , s0 , s12
FMAC_I1 s5 , s0 , s13
@@ -343,7 +343,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s6 , s1 , s15
FMAC_I2 s7 , s1 , s14
fstmias CO2, { s4 - s7 }
vstmia.f32 CO2, { s4 - s7 }
add CO1, CO1, #16
@@ -500,23 +500,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0, ALPHA_R
flds s1, ALPHA_I
fldmias CO1, { s4 - s5 }
vldmia.f32 CO1, { s4 - s5 }
FMAC_R1 s4 , s0 , s8
FMAC_I1 s5 , s0 , s9
FMAC_R2 s4 , s1 , s9
FMAC_I2 s5 , s1 , s8
fstmias CO1, { s4 - s5 }
vstmia.f32 CO1, { s4 - s5 }
fldmias CO2, { s4 - s5 }
vldmia.f32 CO2, { s4 - s5 }
FMAC_R1 s4 , s0 , s12
FMAC_I1 s5 , s0 , s13
FMAC_R2 s4 , s1 , s13
FMAC_I2 s5 , s1 , s12
fstmias CO2, { s4 - s5 }
vstmia.f32 CO2, { s4 - s5 }
add CO1, CO1, #8
@@ -671,7 +671,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0, ALPHA_R
flds s1, ALPHA_I
fldmias CO1, { s4 - s7 }
vldmia.f32 CO1, { s4 - s7 }
FMAC_R1 s4 , s0 , s8
FMAC_I1 s5 , s0 , s9
@@ -683,7 +683,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s6 , s1 , s11
FMAC_I2 s7 , s1 , s10
fstmias CO1, { s4 - s7 }
vstmia.f32 CO1, { s4 - s7 }
add CO1, CO1, #16
@@ -800,14 +800,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0, ALPHA_R
flds s1, ALPHA_I
fldmias CO1, { s4 - s5 }
vldmia.f32 CO1, { s4 - s5 }
FMAC_R1 s4 , s0 , s8
FMAC_I1 s5 , s0 , s9
FMAC_R2 s4 , s1 , s9
FMAC_I2 s5 , s1 , s8
fstmias CO1, { s4 - s5 }
vstmia.f32 CO1, { s4 - s5 }
add CO1, CO1, #8

View File

@@ -182,30 +182,30 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL2x2_I
pld [ AO , #A_PRE ]
pld [ BO , #B_PRE ]
fldmias AO!, { s0 - s1 }
fldmias BO!, { s8 - s9 }
vldmia.f32 AO!, { s0 - s1 }
vldmia.f32 BO!, { s8 - s9 }
fmuls s16 , s0, s8
fmuls s24 , s1, s9
fldmias AO!, { s2 - s3 }
vldmia.f32 AO!, { s2 - s3 }
fmuls s17 , s0, s9
fmuls s25 , s1, s8
fldmias BO!, { s10 - s11 }
vldmia.f32 BO!, { s10 - s11 }
fmuls s18 , s2, s8
fmuls s26 , s3, s9
fldmias AO!, { s4 - s5 }
vldmia.f32 AO!, { s4 - s5 }
fmuls s19 , s2, s9
fmuls s27 , s3, s8
fldmias BO!, { s12 - s13 }
vldmia.f32 BO!, { s12 - s13 }
fmuls s20 , s0, s10
fmuls s28 , s1, s11
fldmias AO!, { s6 - s7 }
vldmia.f32 AO!, { s6 - s7 }
fmuls s21 , s0, s11
fmuls s29 , s1, s10
fldmias BO!, { s14 - s15 }
vldmia.f32 BO!, { s14 - s15 }
fmuls s22 , s2, s10
fmuls s30 , s3, s11
fmuls s23 , s2, s11
@@ -218,17 +218,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL2x2_M1
fmacs s16 , s0, s8
fldmias AO!, { s4 - s5 }
vldmia.f32 AO!, { s4 - s5 }
fmacs s24 , s1, s9
fmacs s17 , s0, s9
fldmias BO!, { s12 - s13 }
vldmia.f32 BO!, { s12 - s13 }
fmacs s25 , s1, s8
fmacs s18 , s2, s8
fldmias AO!, { s6 - s7 }
vldmia.f32 AO!, { s6 - s7 }
fmacs s26 , s3, s9
fmacs s19 , s2, s9
fldmias BO!, { s14 - s15 }
vldmia.f32 BO!, { s14 - s15 }
fmacs s27 , s3, s8
fmacs s20 , s0, s10
@@ -250,19 +250,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ BO , #B_PRE ]
fmacs s24 , s5, s13
fmacs s17 , s4, s13
fldmias AO!, { s0 - s1 }
vldmia.f32 AO!, { s0 - s1 }
fmacs s25 , s5, s12
fmacs s18 , s6, s12
fmacs s26 , s7, s13
fldmias BO!, { s8 - s9 }
vldmia.f32 BO!, { s8 - s9 }
fmacs s19 , s6, s13
fmacs s27 , s7, s12
fldmias AO!, { s2 - s3 }
vldmia.f32 AO!, { s2 - s3 }
fmacs s20 , s4, s14
fmacs s28 , s5, s15
fldmias BO!, { s10 - s11 }
vldmia.f32 BO!, { s10 - s11 }
fmacs s21 , s4, s15
fmacs s29 , s5, s14
@@ -300,16 +300,16 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL2x2_SUB
fldmias AO!, { s0 - s1 }
fldmias BO!, { s8 - s9 }
vldmia.f32 AO!, { s0 - s1 }
vldmia.f32 BO!, { s8 - s9 }
fmacs s16 , s0, s8
fmacs s24 , s1, s9
fldmias AO!, { s2 - s3 }
vldmia.f32 AO!, { s2 - s3 }
fmacs s17 , s0, s9
fmacs s25 , s1, s8
fldmias BO!, { s10 - s11 }
vldmia.f32 BO!, { s10 - s11 }
fmacs s18 , s2, s8
fmacs s26 , s3, s9
fmacs s19 , s2, s9
@@ -338,8 +338,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0, ALPHA_R
flds s1, ALPHA_I
fldmias CO1, { s4 - s7 }
fldmias CO2, { s8 - s11 }
vldmia.f32 CO1, { s4 - s7 }
vldmia.f32 CO2, { s8 - s11 }
FADD_R s16, s24 , s16
FADD_I s17, s25 , s17
@@ -370,8 +370,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s10, s1 , s23
FMAC_I2 s11, s1 , s22
fstmias CO1, { s4 - s7 }
fstmias CO2, { s8 - s11 }
vstmia.f32 CO1, { s4 - s7 }
vstmia.f32 CO2, { s8 - s11 }
add CO1, CO1, #16
@@ -534,8 +534,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0, ALPHA_R
flds s1, ALPHA_I
fldmias CO1, { s4 - s5 }
fldmias CO2, { s8 - s9 }
vldmia.f32 CO1, { s4 - s5 }
vldmia.f32 CO2, { s8 - s9 }
FADD_R s16, s24 , s16
FADD_I s17, s25 , s17
@@ -552,8 +552,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s8 , s1 , s21
FMAC_I2 s9 , s1 , s20
fstmias CO1, { s4 - s5 }
fstmias CO2, { s8 - s9 }
vstmia.f32 CO1, { s4 - s5 }
vstmia.f32 CO2, { s8 - s9 }
add CO1, CO1, #8
@@ -716,7 +716,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0, ALPHA_R
flds s1, ALPHA_I
fldmias CO1, { s4 - s7 }
vldmia.f32 CO1, { s4 - s7 }
FADD_R s16, s24 , s16
FADD_I s17, s25 , s17
@@ -733,7 +733,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s6 , s1 , s19
FMAC_I2 s7 , s1 , s18
fstmias CO1, { s4 - s7 }
vstmia.f32 CO1, { s4 - s7 }
add CO1, CO1, #16
@@ -851,7 +851,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0, ALPHA_R
flds s1, ALPHA_I
fldmias CO1, { s4 - s5 }
vldmia.f32 CO1, { s4 - s5 }
FADD_R s16, s24 , s16
FADD_I s17, s25 , s17
@@ -861,7 +861,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s4 , s1 , s17
FMAC_I2 s5 , s1 , s16
fstmias CO1, { s4 - s5 }
vstmia.f32 CO1, { s4 - s5 }
add CO1, CO1, #8

View File

@@ -85,7 +85,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s6 , [ AO2, #8 ]
flds s7 , [ AO2, #12 ]
fstmias BO!, { s0 - s7 }
vstmia.f32 BO!, { s0 - s7 }
add AO2, AO2, #16
.endm
@@ -99,7 +99,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s3 , [ AO2, #4 ]
add AO1, AO1, #8
fstmias BO!, { s0 - s3 }
vstmia.f32 BO!, { s0 - s3 }
add AO2, AO2, #8
.endm
@@ -111,7 +111,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s2 , [ AO1, #8 ]
flds s3 , [ AO1, #12 ]
fstmias BO!, { s0 - s3 }
vstmia.f32 BO!, { s0 - s3 }
add AO1, AO1, #16
.endm
@@ -122,7 +122,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0 , [ AO1, #0 ]
flds s1 , [ AO1, #4 ]
fstmias BO!, { s0 - s1 }
vstmia.f32 BO!, { s0 - s1 }
add AO1, AO1, #8
.endm

View File

@@ -73,12 +73,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**************************************************************************************/
.macro COPY2x2
fldmias AO1, { s0 - s3 }
vldmia.f32 AO1, { s0 - s3 }
add r3, AO1, LDA
fldmias r3, { s4 - s7 }
vldmia.f32 r3, { s4 - s7 }
fstmias BO1, { s0 - s7 }
vstmia.f32 BO1, { s0 - s7 }
add AO1, AO1, #16
add BO1, BO1, M4
@@ -86,12 +86,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY1x2
fldmias AO1, { s0 -s1 }
vldmia.f32 AO1, { s0 -s1 }
add r3, AO1, LDA
fldmias r3, { s2 - s3 }
vldmia.f32 r3, { s2 - s3 }
fstmias BO2, { s0 - s3 }
vstmia.f32 BO2, { s0 - s3 }
add AO1, AO1, #8
add BO2, BO2, #16
@@ -100,9 +100,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*************************************************************************************************************************/
.macro COPY2x1
fldmias AO1, { s0 - s3 }
vldmia.f32 AO1, { s0 - s3 }
fstmias BO1, { s0 - s3 }
vstmia.f32 BO1, { s0 - s3 }
add AO1, AO1, #16
add BO1, BO1, M4
@@ -110,9 +110,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY1x1
fldmias AO1, { s0 - s1 }
vldmia.f32 AO1, { s0 - s1 }
fstmias BO2, { s0 - s1 }
vstmia.f32 BO2, { s0 - s1 }
add AO1, AO1, #8
add BO2, BO2, #8

View File

@@ -201,7 +201,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0, ALPHA_R
flds s1, ALPHA_I
fldmias YO, { s4 - s7 }
vldmia.f32 YO, { s4 - s7 }
FMAC_R1 s4 , s0 , s8
FMAC_I1 s5 , s0 , s9
@@ -213,9 +213,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s6 , s1 , s11
FMAC_I2 s7 , s1 , s10
fstmias YO!, { s4 - s7 }
vstmia.f32 YO!, { s4 - s7 }
fldmias YO, { s4 - s7 }
vldmia.f32 YO, { s4 - s7 }
FMAC_R1 s4 , s0 , s12
FMAC_I1 s5 , s0 , s13
@@ -227,7 +227,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s6 , s1 , s15
FMAC_I2 s7 , s1 , s14
fstmias YO!, { s4 - s7 }
vstmia.f32 YO!, { s4 - s7 }
.endm
@@ -266,14 +266,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0, ALPHA_R
flds s1, ALPHA_I
fldmias YO, { s4 - s5 }
vldmia.f32 YO, { s4 - s5 }
FMAC_R1 s4 , s0 , s8
FMAC_I1 s5 , s0 , s9
FMAC_R2 s4 , s1 , s9
FMAC_I2 s5 , s1 , s8
fstmias YO, { s4 - s5 }
vstmia.f32 YO, { s4 - s5 }
add YO, YO, #8
@@ -349,47 +349,47 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0, ALPHA_R
flds s1, ALPHA_I
fldmias YO, { s4 - s5 }
vldmia.f32 YO, { s4 - s5 }
FMAC_R1 s4 , s0 , s8
FMAC_I1 s5 , s0 , s9
FMAC_R2 s4 , s1 , s9
FMAC_I2 s5 , s1 , s8
fstmias YO, { s4 - s5 }
vstmia.f32 YO, { s4 - s5 }
add YO, YO, INC_Y
fldmias YO, { s6 - s7 }
vldmia.f32 YO, { s6 - s7 }
FMAC_R1 s6 , s0 , s10
FMAC_I1 s7 , s0 , s11
FMAC_R2 s6 , s1 , s11
FMAC_I2 s7 , s1 , s10
fstmias YO, { s6 - s7 }
vstmia.f32 YO, { s6 - s7 }
add YO, YO, INC_Y
fldmias YO, { s4 - s5 }
vldmia.f32 YO, { s4 - s5 }
FMAC_R1 s4 , s0 , s12
FMAC_I1 s5 , s0 , s13
FMAC_R2 s4 , s1 , s13
FMAC_I2 s5 , s1 , s12
fstmias YO, { s4 - s5 }
vstmia.f32 YO, { s4 - s5 }
add YO, YO, INC_Y
fldmias YO, { s6 - s7 }
vldmia.f32 YO, { s6 - s7 }
FMAC_R1 s6 , s0 , s14
FMAC_I1 s7 , s0 , s15
FMAC_R2 s6 , s1 , s15
FMAC_I2 s7 , s1 , s14
fstmias YO, { s6 - s7 }
vstmia.f32 YO, { s6 - s7 }
add YO, YO, INC_Y
@@ -430,14 +430,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0, ALPHA_R
flds s1, ALPHA_I
fldmias YO, { s4 - s5 }
vldmia.f32 YO, { s4 - s5 }
FMAC_R1 s4 , s0 , s8
FMAC_I1 s5 , s0 , s9
FMAC_R2 s4 , s1 , s9
FMAC_I2 s5 , s1 , s8
fstmias YO, { s4 - s5 }
vstmia.f32 YO, { s4 - s5 }
add YO, YO, INC_Y

View File

@@ -150,9 +150,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F2X1
fldmias XO! , { s2 - s3 }
fldmias AO1!, { s4 - s5 }
fldmias AO2!, { s8 - s9 }
vldmia.f32 XO! , { s2 - s3 }
vldmia.f32 AO1!, { s4 - s5 }
vldmia.f32 AO2!, { s8 - s9 }
fmacs s12 , s4 , s2
fmacs s13 , s4 , s3
@@ -168,7 +168,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_F2
fldmias YO, { s4 - s7 }
vldmia.f32 YO, { s4 - s7 }
FMAC_R1 s4 , s0 , s12
FMAC_I1 s5 , s0 , s13
@@ -180,7 +180,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s6 , s1 , s15
FMAC_I2 s7 , s1 , s14
fstmias YO!, { s4 - s7 }
vstmia.f32 YO!, { s4 - s7 }
.endm
@@ -204,8 +204,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1X1
fldmias XO! , { s2 - s3 }
fldmias AO1!, { s4 - s5 }
vldmia.f32 XO! , { s2 - s3 }
vldmia.f32 AO1!, { s4 - s5 }
fmacs s12 , s4 , s2
fmacs s13 , s4 , s3
@@ -216,14 +216,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_F1
fldmias YO, { s4 - s5 }
vldmia.f32 YO, { s4 - s5 }
FMAC_R1 s4 , s0 , s12
FMAC_I1 s5 , s0 , s13
FMAC_R2 s4 , s1 , s13
FMAC_I2 s5 , s1 , s12
fstmias YO!, { s4 - s5 }
vstmia.f32 YO!, { s4 - s5 }
.endm
@@ -249,9 +249,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S2X1
fldmias XO , { s2 - s3 }
fldmias AO1!, { s4 - s5 }
fldmias AO2!, { s8 - s9 }
vldmia.f32 XO , { s2 - s3 }
vldmia.f32 AO1!, { s4 - s5 }
vldmia.f32 AO2!, { s8 - s9 }
fmacs s12 , s4 , s2
fmacs s13 , s4 , s3
@@ -269,25 +269,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S2
fldmias YO, { s4 - s5 }
vldmia.f32 YO, { s4 - s5 }
FMAC_R1 s4 , s0 , s12
FMAC_I1 s5 , s0 , s13
FMAC_R2 s4 , s1 , s13
FMAC_I2 s5 , s1 , s12
fstmias YO, { s4 - s5 }
vstmia.f32 YO, { s4 - s5 }
add YO, YO, INC_Y
fldmias YO, { s6 - s7 }
vldmia.f32 YO, { s6 - s7 }
FMAC_R1 s6 , s0 , s14
FMAC_I1 s7 , s0 , s15
FMAC_R2 s6 , s1 , s15
FMAC_I2 s7 , s1 , s14
fstmias YO, { s6 - s7 }
vstmia.f32 YO, { s6 - s7 }
add YO, YO, INC_Y
@@ -313,8 +313,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1X1
fldmias XO , { s2 - s3 }
fldmias AO1!, { s4 - s5 }
vldmia.f32 XO , { s2 - s3 }
vldmia.f32 AO1!, { s4 - s5 }
fmacs s12 , s4 , s2
fmacs s13 , s4 , s3
@@ -327,14 +327,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S1
fldmias YO, { s4 - s5 }
vldmia.f32 YO, { s4 - s5 }
FMAC_R1 s4 , s0 , s12
FMAC_I1 s5 , s0 , s13
FMAC_R2 s4 , s1 , s13
FMAC_I2 s5 , s1 , s12
fstmias YO, { s4 - s5 }
vstmia.f32 YO, { s4 - s5 }
add YO, YO, INC_Y

View File

@@ -165,9 +165,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL2x2_I
pld [ AO, #A_PRE ]
fldmias AO!, { s0 - s3 }
vldmia.f32 AO!, { s0 - s3 }
pld [ BO, #B_PRE ]
fldmias BO!, { s4 - s7 }
vldmia.f32 BO!, { s4 - s7 }
fmuls s8 , s0, s4
@@ -197,9 +197,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL2x2_M1
pld [ AO, #A_PRE ]
fldmias AO!, { s0 - s3 }
vldmia.f32 AO!, { s0 - s3 }
pld [ BO, #B_PRE ]
fldmias BO!, { s4 - s7 }
vldmia.f32 BO!, { s4 - s7 }
fmacs s8 , s0, s4
fmacs s9 , s0, s5
@@ -225,8 +225,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL2x2_M2
fldmias AO!, { s0 - s3 }
fldmias BO!, { s4 - s7 }
vldmia.f32 AO!, { s0 - s3 }
vldmia.f32 BO!, { s4 - s7 }
fmacs s8 , s0, s4
fmacs s9 , s0, s5
@@ -254,8 +254,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL2x2_E
fldmias AO!, { s0 - s3 }
fldmias BO!, { s4 - s7 }
vldmia.f32 AO!, { s0 - s3 }
vldmia.f32 BO!, { s4 - s7 }
fmacs s8 , s0, s4
fmacs s9 , s0, s5
@@ -282,8 +282,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL2x2_SUB
fldmias AO!, { s0 - s3 }
fldmias BO!, { s4 - s7 }
vldmia.f32 AO!, { s0 - s3 }
vldmia.f32 BO!, { s4 - s7 }
fmacs s8 , s0, s4
fmacs s9 , s0, s5
@@ -331,7 +331,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s6 , s1 , s11
FMAC_I2 s7 , s1 , s10
fstmias CO1, { s4 - s7 }
vstmia.f32 CO1, { s4 - s7 }
flds s4, FP_ZERO
vmov.f32 s5, s4
@@ -348,7 +348,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s6 , s1 , s15
FMAC_I2 s7 , s1 , s14
fstmias CO2, { s4 - s7 }
vstmia.f32 CO2, { s4 - s7 }
add CO1, CO1, #16
@@ -513,7 +513,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s4 , s1 , s9
FMAC_I2 s5 , s1 , s8
fstmias CO1, { s4 - s5 }
vstmia.f32 CO1, { s4 - s5 }
flds s4, FP_ZERO
vmov.f32 s5, s4
@@ -523,7 +523,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s4 , s1 , s13
FMAC_I2 s5 , s1 , s12
fstmias CO2, { s4 - s5 }
vstmia.f32 CO2, { s4 - s5 }
add CO1, CO1, #8
@@ -693,7 +693,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s6 , s1 , s11
FMAC_I2 s7 , s1 , s10
fstmias CO1, { s4 - s7 }
vstmia.f32 CO1, { s4 - s7 }
add CO1, CO1, #16
@@ -818,7 +818,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s4 , s1 , s9
FMAC_I2 s5 , s1 , s8
fstmias CO1, { s4 - s5 }
vstmia.f32 CO1, { s4 - s5 }
add CO1, CO1, #8

View File

@@ -170,30 +170,30 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL2x2_I
pld [ AO , #A_PRE ]
pld [ BO , #B_PRE ]
fldmias AO!, { s0 - s1 }
fldmias BO!, { s8 - s9 }
vldmia.f32 AO!, { s0 - s1 }
vldmia.f32 BO!, { s8 - s9 }
fmuls s16 , s0, s8
fmuls s24 , s1, s9
fldmias AO!, { s2 - s3 }
vldmia.f32 AO!, { s2 - s3 }
fmuls s17 , s0, s9
fmuls s25 , s1, s8
fldmias BO!, { s10 - s11 }
vldmia.f32 BO!, { s10 - s11 }
fmuls s18 , s2, s8
fmuls s26 , s3, s9
fldmias AO!, { s4 - s5 }
vldmia.f32 AO!, { s4 - s5 }
fmuls s19 , s2, s9
fmuls s27 , s3, s8
fldmias BO!, { s12 - s13 }
vldmia.f32 BO!, { s12 - s13 }
fmuls s20 , s0, s10
fmuls s28 , s1, s11
fldmias AO!, { s6 - s7 }
vldmia.f32 AO!, { s6 - s7 }
fmuls s21 , s0, s11
fmuls s29 , s1, s10
fldmias BO!, { s14 - s15 }
vldmia.f32 BO!, { s14 - s15 }
fmuls s22 , s2, s10
fmuls s30 , s3, s11
fmuls s23 , s2, s11
@@ -206,17 +206,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL2x2_M1
fmacs s16 , s0, s8
fldmias AO!, { s4 - s5 }
vldmia.f32 AO!, { s4 - s5 }
fmacs s24 , s1, s9
fmacs s17 , s0, s9
fldmias BO!, { s12 - s13 }
vldmia.f32 BO!, { s12 - s13 }
fmacs s25 , s1, s8
fmacs s18 , s2, s8
fldmias AO!, { s6 - s7 }
vldmia.f32 AO!, { s6 - s7 }
fmacs s26 , s3, s9
fmacs s19 , s2, s9
fldmias BO!, { s14 - s15 }
vldmia.f32 BO!, { s14 - s15 }
fmacs s27 , s3, s8
fmacs s20 , s0, s10
@@ -238,19 +238,19 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ BO , #B_PRE ]
fmacs s24 , s5, s13
fmacs s17 , s4, s13
fldmias AO!, { s0 - s1 }
vldmia.f32 AO!, { s0 - s1 }
fmacs s25 , s5, s12
fmacs s18 , s6, s12
fmacs s26 , s7, s13
fldmias BO!, { s8 - s9 }
vldmia.f32 BO!, { s8 - s9 }
fmacs s19 , s6, s13
fmacs s27 , s7, s12
fldmias AO!, { s2 - s3 }
vldmia.f32 AO!, { s2 - s3 }
fmacs s20 , s4, s14
fmacs s28 , s5, s15
fldmias BO!, { s10 - s11 }
vldmia.f32 BO!, { s10 - s11 }
fmacs s21 , s4, s15
fmacs s29 , s5, s14
@@ -288,16 +288,16 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL2x2_SUB
fldmias AO!, { s0 - s1 }
fldmias BO!, { s8 - s9 }
vldmia.f32 AO!, { s0 - s1 }
vldmia.f32 BO!, { s8 - s9 }
fmacs s16 , s0, s8
fmacs s24 , s1, s9
fldmias AO!, { s2 - s3 }
vldmia.f32 AO!, { s2 - s3 }
fmacs s17 , s0, s9
fmacs s25 , s1, s8
fldmias BO!, { s10 - s11 }
vldmia.f32 BO!, { s10 - s11 }
fmacs s18 , s2, s8
fmacs s26 , s3, s9
fmacs s19 , s2, s9
@@ -354,8 +354,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s10, s1 , s23
FMAC_I2 s11, s1 , s22
fstmias CO1, { s4 - s7 }
fstmias CO2, { s8 - s11 }
vstmia.f32 CO1, { s4 - s7 }
vstmia.f32 CO2, { s8 - s11 }
add CO1, CO1, #16
@@ -532,8 +532,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s8 , s1 , s21
FMAC_I2 s9 , s1 , s20
fstmias CO1, { s4 - s5 }
fstmias CO2, { s8 - s9 }
vstmia.f32 CO1, { s4 - s5 }
vstmia.f32 CO2, { s8 - s9 }
add CO1, CO1, #8
@@ -710,7 +710,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s6 , s1 , s19
FMAC_I2 s7 , s1 , s18
fstmias CO1, { s4 - s7 }
vstmia.f32 CO1, { s4 - s7 }
add CO1, CO1, #16
@@ -835,7 +835,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 s4 , s1 , s17
FMAC_I2 s5 , s1 , s16
fstmias CO1, { s4 - s5 }
vstmia.f32 CO1, { s4 - s5 }
add CO1, CO1, #8

View File

@@ -65,15 +65,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY_F4
pld [ X, #X_PRE ]
fldmiad X!, { d0 - d3 }
fstmiad Y!, { d0 - d3 }
vldmia.f64 X!, { d0 - d3 }
vstmia.f64 Y!, { d0 - d3 }
.endm
.macro COPY_F1
fldmiad X!, { d0 }
fstmiad Y!, { d0 }
vldmia.f64 X!, { d0 }
vstmia.f64 Y!, { d0 }
.endm
@@ -83,23 +83,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY_S4
nop
fldmiad X, { d0 }
fstmiad Y, { d0 }
vldmia.f64 X, { d0 }
vstmia.f64 Y, { d0 }
add X, X, INC_X
add Y, Y, INC_Y
fldmiad X, { d1 }
fstmiad Y, { d1 }
vldmia.f64 X, { d1 }
vstmia.f64 Y, { d1 }
add X, X, INC_X
add Y, Y, INC_Y
fldmiad X, { d0 }
fstmiad Y, { d0 }
vldmia.f64 X, { d0 }
vstmia.f64 Y, { d0 }
add X, X, INC_X
add Y, Y, INC_Y
fldmiad X, { d1 }
fstmiad Y, { d1 }
vldmia.f64 X, { d1 }
vstmia.f64 Y, { d1 }
add X, X, INC_X
add Y, Y, INC_Y
@@ -108,8 +108,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY_S1
fldmiad X, { d0 }
fstmiad Y, { d0 }
vldmia.f64 X, { d0 }
vstmia.f64 Y, { d0 }
add X, X, INC_X
add Y, Y, INC_Y

View File

@@ -67,26 +67,26 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F4
pld [ X, #X_PRE ]
fldmiad X!, { d8 }
vldmia.f64 X!, { d8 }
pld [ Y, #X_PRE ]
fldmiad Y!, { d4 }
fldmiad Y!, { d5 }
vldmia.f64 Y!, { d4 }
vldmia.f64 Y!, { d5 }
fmacd d0 , d4, d8
fldmiad X!, { d9 }
fldmiad Y!, { d6 }
vldmia.f64 X!, { d9 }
vldmia.f64 Y!, { d6 }
fmacd d1 , d5, d9
fldmiad X!, { d10 }
fldmiad X!, { d11 }
vldmia.f64 X!, { d10 }
vldmia.f64 X!, { d11 }
fmacd d0 , d6, d10
fldmiad Y!, { d7 }
vldmia.f64 Y!, { d7 }
fmacd d1 , d7, d11
.endm
.macro KERNEL_F1
fldmiad X!, { d4 }
fldmiad Y!, { d8 }
vldmia.f64 X!, { d4 }
vldmia.f64 Y!, { d8 }
fmacd d0 , d4, d8
.endm
@@ -97,26 +97,26 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S4
nop
fldmiad X, { d4 }
fldmiad Y, { d8 }
vldmia.f64 X, { d4 }
vldmia.f64 Y, { d8 }
add X, X, INC_X
add Y, Y, INC_Y
fmacd d0 , d4, d8
fldmiad X, { d5 }
fldmiad Y, { d9 }
vldmia.f64 X, { d5 }
vldmia.f64 Y, { d9 }
add X, X, INC_X
add Y, Y, INC_Y
fmacd d1 , d5, d9
fldmiad X, { d6 }
fldmiad Y, { d10 }
vldmia.f64 X, { d6 }
vldmia.f64 Y, { d10 }
add X, X, INC_X
add Y, Y, INC_Y
fmacd d0 , d6, d10
fldmiad X, { d7 }
fldmiad Y, { d11 }
vldmia.f64 X, { d7 }
vldmia.f64 Y, { d11 }
add X, X, INC_X
add Y, Y, INC_Y
fmacd d1 , d7, d11
@@ -126,8 +126,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1
fldmiad X, { d4 }
fldmiad Y, { d8 }
vldmia.f64 X, { d4 }
vldmia.f64 Y, { d8 }
add X, X, INC_X
fmacd d0 , d4, d8
add Y, Y, INC_Y

View File

@@ -331,7 +331,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
add r4 , CO2, r3
pld [ CO2 , #C_PRE ]
fldmiad CO1, { d8 - d11 }
vldmia.f64 CO1, { d8 - d11 }
pld [ r4 , #C_PRE ]
fmacd d8 , d0 , d16
@@ -352,7 +352,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fmacd d15, d0 , d23
fstd d11, [CO1, #24 ]
fldmiad r4, { d8 - d11 }
vldmia.f64 r4, { d8 - d11 }
fmacd d8 , d0 , d24
fstd d12, [CO2]
@@ -367,7 +367,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ CO2 , #C_PRE ]
fldmiad CO2, { d12 - d15 }
vldmia.f64 CO2, { d12 - d15 }
fstd d8 , [r4 ]
fmacd d12, d0 , d28
@@ -378,7 +378,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fstd d11, [r4 , #24 ]
fmacd d15, d0 , d31
fstmiad CO2, { d12 - d15 }
vstmia.f64 CO2, { d12 - d15 }
add CO1, CO1, #32

View File

@@ -73,7 +73,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d3 , [ AO2, #8 ]
add AO1, AO1, #16
fstmiad BO!, { d0 - d3 }
vstmia.f64 BO!, { d0 - d3 }
add AO2, AO2, #16
.endm
@@ -85,7 +85,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d1 , [ AO2, #0 ]
add AO1, AO1, #8
fstmiad BO!, { d0 - d1 }
vstmia.f64 BO!, { d0 - d1 }
add AO2, AO2, #8
.endm
@@ -95,7 +95,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d0 , [ AO1, #0 ]
fldd d1 , [ AO1, #8 ]
fstmiad BO!, { d0 - d1 }
vstmia.f64 BO!, { d0 - d1 }
add AO1, AO1, #16
.endm
@@ -105,7 +105,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d0 , [ AO1, #0 ]
fstmiad BO!, { d0 }
vstmia.f64 BO!, { d0 }
add AO1, AO1, #8
.endm

View File

@@ -105,10 +105,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d11, [ AO4, #16 ]
fldd d15, [ AO4, #24 ]
fstmiad BO!, { d0 - d3 }
vstmia.f64 BO!, { d0 - d3 }
add AO4, AO4, #32
fstmiad BO!, { d4 - d7 }
fstmiad BO!, { d8 - d15 }
vstmia.f64 BO!, { d4 - d7 }
vstmia.f64 BO!, { d8 - d15 }
.endm
@@ -122,7 +122,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d3 , [ AO4, #0 ]
add AO3, AO3, #8
fstmiad BO!, { d0 - d3 }
vstmia.f64 BO!, { d0 - d3 }
add AO4, AO4, #8
.endm
@@ -140,7 +140,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d5 , [ AO2, #16 ]
fldd d7 , [ AO2, #24 ]
fstmiad BO!, { d0 - d7 }
vstmia.f64 BO!, { d0 - d7 }
add AO2, AO2, #32
.endm
@@ -152,7 +152,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d1 , [ AO2, #0 ]
add AO1, AO1, #8
fstmiad BO!, { d0 - d1 }
vstmia.f64 BO!, { d0 - d1 }
add AO2, AO2, #8
.endm
@@ -164,7 +164,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d2 , [ AO1, #16 ]
fldd d3 , [ AO1, #24 ]
fstmiad BO!, { d0 - d3 }
vstmia.f64 BO!, { d0 - d3 }
add AO1, AO1, #32
.endm
@@ -174,7 +174,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d0 , [ AO1, #0 ]
fstmiad BO!, { d0 }
vstmia.f64 BO!, { d0 }
add AO1, AO1, #8
.endm

View File

@@ -76,21 +76,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY4x4
pld [ AO1, #A_PRE ]
fldmiad AO1, { d0 - d3 }
vldmia.f64 AO1, { d0 - d3 }
add r3, AO1, LDA
pld [ r3, #A_PRE ]
fldmiad r3, { d4 - d7 }
vldmia.f64 r3, { d4 - d7 }
add r3, r3, LDA
pld [ r3, #A_PRE ]
fldmiad r3, { d8 - d11 }
vldmia.f64 r3, { d8 - d11 }
add r3, r3, LDA
pld [ r3, #A_PRE ]
fldmiad r3, { d12 - d15 }
vldmia.f64 r3, { d12 - d15 }
fstmiad BO1, { d0 - d15 }
vstmia.f64 BO1, { d0 - d15 }
add AO1, AO1, #32
add BO1, BO1, M4
@@ -98,18 +98,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY2x4
fldmiad AO1, { d0 - d1 }
vldmia.f64 AO1, { d0 - d1 }
add r3, AO1, LDA
fldmiad r3, { d2 - d3 }
vldmia.f64 r3, { d2 - d3 }
add r3, r3, LDA
fldmiad r3, { d4 - d5 }
vldmia.f64 r3, { d4 - d5 }
add r3, r3, LDA
fldmiad r3, { d6 - d7 }
vldmia.f64 r3, { d6 - d7 }
fstmiad BO2, { d0 - d7 }
vstmia.f64 BO2, { d0 - d7 }
add AO1, AO1, #16
add BO2, BO2, #64
@@ -117,18 +117,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY1x4
fldmiad AO1, { d0 }
vldmia.f64 AO1, { d0 }
add r3, AO1, LDA
fldmiad r3, { d1 }
vldmia.f64 r3, { d1 }
add r3, r3, LDA
fldmiad r3, { d2 }
vldmia.f64 r3, { d2 }
add r3, r3, LDA
fldmiad r3, { d3 }
vldmia.f64 r3, { d3 }
fstmiad BO3, { d0 - d3 }
vstmia.f64 BO3, { d0 - d3 }
add AO1, AO1, #8
add BO3, BO3, #32
@@ -139,13 +139,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY4x2
pld [ AO1, #A_PRE ]
fldmiad AO1, { d0 - d3 }
vldmia.f64 AO1, { d0 - d3 }
add r3, AO1, LDA
pld [ r3, #A_PRE ]
fldmiad r3, { d4 - d7 }
vldmia.f64 r3, { d4 - d7 }
fstmiad BO1, { d0 - d7 }
vstmia.f64 BO1, { d0 - d7 }
add AO1, AO1, #32
add BO1, BO1, M4
@@ -153,12 +153,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY2x2
fldmiad AO1, { d0 - d1 }
vldmia.f64 AO1, { d0 - d1 }
add r3, AO1, LDA
fldmiad r3, { d2 - d3 }
vldmia.f64 r3, { d2 - d3 }
fstmiad BO2, { d0 - d3 }
vstmia.f64 BO2, { d0 - d3 }
add AO1, AO1, #16
add BO2, BO2, #32
@@ -166,12 +166,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY1x2
fldmiad AO1, { d0 }
vldmia.f64 AO1, { d0 }
add r3, AO1, LDA
fldmiad r3, { d1 }
vldmia.f64 r3, { d1 }
fstmiad BO3, { d0 - d1 }
vstmia.f64 BO3, { d0 - d1 }
add AO1, AO1, #8
add BO3, BO3, #16
@@ -182,9 +182,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY4x1
pld [ AO1, #A_PRE ]
fldmiad AO1, { d0 - d3 }
vldmia.f64 AO1, { d0 - d3 }
fstmiad BO1, { d0 - d3 }
vstmia.f64 BO1, { d0 - d3 }
add AO1, AO1, #32
add BO1, BO1, M4
@@ -192,9 +192,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY2x1
fldmiad AO1, { d0 - d1 }
vldmia.f64 AO1, { d0 - d1 }
fstmiad BO2, { d0 - d1 }
vstmia.f64 BO2, { d0 - d1 }
add AO1, AO1, #16
add BO2, BO2, #16
@@ -202,9 +202,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY1x1
fldmiad AO1, { d0 }
vldmia.f64 AO1, { d0 }
fstmiad BO3, { d0 }
vstmia.f64 BO3, { d0 }
add AO1, AO1, #8
add BO3, BO3, #8

View File

@@ -128,10 +128,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d8 , [ BO ]
pld [ AO , #A_PRE ]
fldmiad AO!, { d0 - d1}
vldmia.f64 AO!, { d0 - d1}
fmuld d16 , d0, d8
fldmiad AO!, { d2 - d3}
vldmia.f64 AO!, { d2 - d3}
fmuld d17 , d1, d8
fldd d9 , [ BO, #8 ]
fmuld d18 , d2, d8
@@ -148,10 +148,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fmuld d23 , d3, d9
fmuld d24 , d0, d10
fldmiad AO!, { d4 - d5 }
vldmia.f64 AO!, { d4 - d5 }
fmuld d25 , d1, d10
fmuld d26 , d2, d10
fldmiad AO!, { d6 - d7 }
vldmia.f64 AO!, { d6 - d7 }
fmuld d27 , d3, d10
fldd d13, [ BO, #8 ]
@@ -173,10 +173,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d8 , [ BO ]
pld [ AO , #A_PRE ]
fldmiad AO!, { d0 - d1}
vldmia.f64 AO!, { d0 - d1}
fmacd d16 , d0, d8
fldmiad AO!, { d2 - d3}
vldmia.f64 AO!, { d2 - d3}
fmacd d17 , d1, d8
fldd d9 , [ BO, #8 ]
fmacd d18 , d2, d8
@@ -193,10 +193,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fmacd d23 , d3, d9
fmacd d24 , d0, d10
fldmiad AO!, { d4 - d5 }
vldmia.f64 AO!, { d4 - d5 }
fmacd d25 , d1, d10
fmacd d26 , d2, d10
fldmiad AO!, { d6 - d7 }
vldmia.f64 AO!, { d6 - d7 }
fmacd d27 , d3, d10
fldd d13, [ BO, #8 ]
@@ -225,11 +225,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d8 , [ BO ]
fmacd d21 , d5, d13
fmacd d22 , d6, d13
fldmiad AO!, { d0 - d1 }
vldmia.f64 AO!, { d0 - d1 }
fmacd d23 , d7, d13
fmacd d24 , d4, d14
fldmiad AO!, { d2 - d3 }
vldmia.f64 AO!, { d2 - d3 }
fmacd d25 , d5, d14
fldd d9 , [ BO, #8 ]
fmacd d26 , d6, d14
@@ -257,10 +257,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fmacd d19 , d3, d8
fmacd d20 , d0, d9
fldmiad AO!, { d4 - d5 }
vldmia.f64 AO!, { d4 - d5 }
fmacd d21 , d1, d9
fmacd d22 , d2, d9
fldmiad AO!, { d6 - d7 }
vldmia.f64 AO!, { d6 - d7 }
fmacd d23 , d3, d9
fmacd d24 , d0, d10
@@ -390,7 +390,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fstd d11, [r4 , #24 ]
fmuld d15, d0 , d31
fstmiad CO2, { d12 - d15 }
vstmia.f64 CO2, { d12 - d15 }
add CO1, CO1, #32

View File

@@ -139,8 +139,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F8X1
pld [ AO2 , #A_PRE ]
fldmiad XO! , { d2 }
fldmiad AO1 , { d4 - d7 }
vldmia.f64 XO! , { d2 }
vldmia.f64 AO1 , { d4 - d7 }
vmla.f64 d8 , d2 , d4
pld [ AO2 , #4*SIZE ]
@@ -150,7 +150,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vmla.f64 d11 , d2 , d7
fldmiad r3 , { d4 - d7 }
vldmia.f64 r3 , { d4 - d7 }
vmla.f64 d12 , d2 , d4
vmla.f64 d13 , d2 , d5
@@ -164,23 +164,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_F8
fldmiad YO, { d4 - d7 }
vldmia.f64 YO, { d4 - d7 }
vmla.f64 d4 , d0, d8
vmla.f64 d5 , d0, d9
vmla.f64 d6 , d0, d10
vmla.f64 d7 , d0, d11
fstmiad YO!, { d4 - d7 }
vstmia.f64 YO!, { d4 - d7 }
fldmiad YO, { d4 - d7 }
vldmia.f64 YO, { d4 - d7 }
vmla.f64 d4 , d0, d12
vmla.f64 d5 , d0, d13
vmla.f64 d6 , d0, d14
vmla.f64 d7 , d0, d15
fstmiad YO!, { d4 - d7 }
vstmia.f64 YO!, { d4 - d7 }
.endm
@@ -195,8 +195,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1X1
fldmiad XO! , { d2 }
fldmiad AO1 , { d8 }
vldmia.f64 XO! , { d2 }
vldmia.f64 AO1 , { d8 }
vmla.f64 d12 , d2 , d8
add AO1, AO1, LDA
@@ -204,9 +204,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_F1
fldmiad YO, { d4 }
vldmia.f64 YO, { d4 }
vmla.f64 d4, d0, d12
fstmiad YO!, { d4 }
vstmia.f64 YO!, { d4 }
.endm
@@ -234,8 +234,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S4X1
pld [ AO2 , #A_PRE ]
fldmiad XO , { d2 }
fldmiad AO1 , { d8 - d11 }
vldmia.f64 XO , { d2 }
vldmia.f64 AO1 , { d8 - d11 }
vmla.f64 d12 , d2 , d8
add AO1, AO1, LDA
@@ -249,24 +249,24 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S4
fldmiad YO, { d4 }
vldmia.f64 YO, { d4 }
vmla.f64 d4 , d0, d12
fstmiad YO, { d4 }
vstmia.f64 YO, { d4 }
add YO, YO, INC_Y
fldmiad YO, { d5 }
vldmia.f64 YO, { d5 }
vmla.f64 d5 , d0, d13
fstmiad YO, { d5 }
vstmia.f64 YO, { d5 }
add YO, YO, INC_Y
fldmiad YO, { d4 }
vldmia.f64 YO, { d4 }
vmla.f64 d4 , d0, d14
fstmiad YO, { d4 }
vstmia.f64 YO, { d4 }
add YO, YO, INC_Y
fldmiad YO, { d5 }
vldmia.f64 YO, { d5 }
vmla.f64 d5 , d0, d15
fstmiad YO, { d5 }
vstmia.f64 YO, { d5 }
add YO, YO, INC_Y
.endm
@@ -282,8 +282,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1X1
fldmiad XO , { d2 }
fldmiad AO1 , { d8 }
vldmia.f64 XO , { d2 }
vldmia.f64 AO1 , { d8 }
vmla.f64 d12 , d2 , d8
add AO1, AO1, LDA
add XO, XO , INC_X
@@ -292,9 +292,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S1
fldmiad YO, { d4 }
vldmia.f64 YO, { d4 }
vmla.f64 d4, d0, d12
fstmiad YO , { d4 }
vstmia.f64 YO , { d4 }
add YO, YO, INC_Y
.endm
@@ -338,8 +338,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F8X1
pld [ AO2, #A_PRE ]
fldmias XO! , { s2 }
fldmias AO1 , { s4 - s7 }
vldmia.f32 XO! , { s2 }
vldmia.f32 AO1 , { s4 - s7 }
vmla.f32 s8 , s2 , s4
vmla.f32 s9 , s2 , s5
@@ -348,7 +348,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
add r3, AO1, #4*SIZE
fldmias r3 , { s4 - s7 }
vldmia.f32 r3 , { s4 - s7 }
vmla.f32 s12 , s2 , s4
vmla.f32 s13 , s2 , s5
@@ -362,24 +362,24 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_F8
fldmias YO, { s4 - s7 }
vldmia.f32 YO, { s4 - s7 }
vmla.f32 s4 , s0, s8
vmla.f32 s5 , s0, s9
vmla.f32 s6 , s0, s10
vmla.f32 s7 , s0, s11
fstmias YO!, { s4 - s7 }
vstmia.f32 YO!, { s4 - s7 }
fldmias YO, { s4 - s7 }
vldmia.f32 YO, { s4 - s7 }
vmla.f32 s4 , s0, s12
vmla.f32 s5 , s0, s13
vmla.f32 s6 , s0, s14
vmla.f32 s7 , s0, s15
fstmias YO!, { s4 - s7 }
vstmia.f32 YO!, { s4 - s7 }
.endm
@@ -394,8 +394,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1X1
fldmias XO! , { s2 }
fldmias AO1 , { s8 }
vldmia.f32 XO! , { s2 }
vldmia.f32 AO1 , { s8 }
vmla.f32 s12 , s2 , s8
add AO1, AO1, LDA
@@ -403,9 +403,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_F1
fldmias YO, { s4 }
vldmia.f32 YO, { s4 }
vmla.f32 s4, s0, s12
fstmias YO!, { s4 }
vstmia.f32 YO!, { s4 }
.endm
@@ -434,8 +434,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S4X1
fldmias XO , { s2 }
fldmias AO1 , { s8 - s11 }
vldmia.f32 XO , { s2 }
vldmia.f32 AO1 , { s8 - s11 }
vmla.f32 s12 , s2 , s8
vmla.f32 s13 , s2 , s9
@@ -449,24 +449,24 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S4
fldmias YO, { s4 }
vldmia.f32 YO, { s4 }
vmla.f32 s4 , s0, s12
fstmias YO, { s4 }
vstmia.f32 YO, { s4 }
add YO, YO, INC_Y
fldmias YO, { s5 }
vldmia.f32 YO, { s5 }
vmla.f32 s5 , s0, s13
fstmias YO, { s5 }
vstmia.f32 YO, { s5 }
add YO, YO, INC_Y
fldmias YO, { s4 }
vldmia.f32 YO, { s4 }
vmla.f32 s4 , s0, s14
fstmias YO, { s4 }
vstmia.f32 YO, { s4 }
add YO, YO, INC_Y
fldmias YO, { s5 }
vldmia.f32 YO, { s5 }
vmla.f32 s5 , s0, s15
fstmias YO, { s5 }
vstmia.f32 YO, { s5 }
add YO, YO, INC_Y
.endm
@@ -482,8 +482,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1X1
fldmias XO , { s2 }
fldmias AO1 , { s8 }
vldmia.f32 XO , { s2 }
vldmia.f32 AO1 , { s8 }
vmla.f32 s12 , s2 , s8
add AO1, AO1, LDA
add XO, XO , INC_X
@@ -492,9 +492,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S1
fldmias YO, { s4 }
vldmia.f32 YO, { s4 }
vmla.f32 s4, s0, s12
fstmias YO , { s4 }
vstmia.f32 YO , { s4 }
add YO, YO, INC_Y
.endm

View File

@@ -138,8 +138,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F8X1
fldmiad XO! , { d4 }
fldmiad AO1 , { d8 - d15 }
vldmia.f64 XO! , { d4 }
vldmia.f64 AO1 , { d8 - d15 }
vmla.f64 d24 , d4 , d8
pld [ AO2 , #A_PRE ]
@@ -158,7 +158,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_F8
fldmiad YO, { d16 - d23 }
vldmia.f64 YO, { d16 - d23 }
vmla.f64 d16, d0, d24
vmla.f64 d17, d0, d25
@@ -169,7 +169,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vmla.f64 d22, d0, d30
vmla.f64 d23, d0, d31
fstmiad YO!, { d16 - d23 }
vstmia.f64 YO!, { d16 - d23 }
.endm
@@ -184,8 +184,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1X1
fldmiad XO! , { d4 }
fldmiad AO1 , { d8 }
vldmia.f64 XO! , { d4 }
vldmia.f64 AO1 , { d8 }
vmla.f64 d24 , d4 , d8
add AO1, AO1, LDA
@@ -193,9 +193,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_F1
fldmiad YO, { d16 }
vldmia.f64 YO, { d16 }
vmla.f64 d16, d0, d24
fstmiad YO!, { d16 }
vstmia.f64 YO!, { d16 }
.endm
@@ -234,8 +234,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ AO2 , #A_PRE ]
pld [ AO2 , #A_PRE+32 ]
fldmiad XO , { d4 }
fldmiad AO1 , { d8 - d15 }
vldmia.f64 XO , { d4 }
vldmia.f64 AO1 , { d8 - d15 }
vmla.f64 d24 , d4 , d8
vmla.f64 d25 , d4 , d9
@@ -253,44 +253,44 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S8
fldmiad YO, { d16 }
vldmia.f64 YO, { d16 }
vmla.f64 d16, d0, d24
fstmiad YO, { d16 }
vstmia.f64 YO, { d16 }
add YO, YO, INC_Y
fldmiad YO, { d17 }
vldmia.f64 YO, { d17 }
vmla.f64 d17, d0, d25
fstmiad YO, { d17 }
vstmia.f64 YO, { d17 }
add YO, YO, INC_Y
fldmiad YO, { d18 }
vldmia.f64 YO, { d18 }
vmla.f64 d18, d0, d26
fstmiad YO, { d18 }
vstmia.f64 YO, { d18 }
add YO, YO, INC_Y
fldmiad YO, { d19 }
vldmia.f64 YO, { d19 }
vmla.f64 d19, d0, d27
fstmiad YO, { d19 }
vstmia.f64 YO, { d19 }
add YO, YO, INC_Y
fldmiad YO, { d20 }
vldmia.f64 YO, { d20 }
vmla.f64 d20, d0, d28
fstmiad YO, { d20 }
vstmia.f64 YO, { d20 }
add YO, YO, INC_Y
fldmiad YO, { d21 }
vldmia.f64 YO, { d21 }
vmla.f64 d21, d0, d29
fstmiad YO, { d21 }
vstmia.f64 YO, { d21 }
add YO, YO, INC_Y
fldmiad YO, { d22 }
vldmia.f64 YO, { d22 }
vmla.f64 d22, d0, d30
fstmiad YO, { d22 }
vstmia.f64 YO, { d22 }
add YO, YO, INC_Y
fldmiad YO, { d23 }
vldmia.f64 YO, { d23 }
vmla.f64 d23, d0, d31
fstmiad YO, { d23 }
vstmia.f64 YO, { d23 }
add YO, YO, INC_Y
.endm
@@ -306,8 +306,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1X1
fldmiad XO , { d4 }
fldmiad AO1 , { d8 }
vldmia.f64 XO , { d4 }
vldmia.f64 AO1 , { d8 }
vmla.f64 d24 , d4 , d8
add AO1, AO1, LDA
add XO, XO, INC_X
@@ -316,9 +316,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S1
fldmiad YO, { d16 }
vldmia.f64 YO, { d16 }
vmla.f64 d16, d0, d24
fstmiad YO, { d16 }
vstmia.f64 YO, { d16 }
add YO, YO, INC_Y
.endm
@@ -361,8 +361,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F8X1
pld [ AO2 , #A_PRE ]
fldmias XO! , { s4 }
fldmias AO1 , { s8 - s15 }
vldmia.f32 XO! , { s4 }
vldmia.f32 AO1 , { s8 - s15 }
vmla.f32 s24 , s4 , s8
vmla.f32 s25 , s4 , s9
@@ -379,7 +379,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_F8
fldmias YO, { s16 - s23 }
vldmia.f32 YO, { s16 - s23 }
vmla.f32 s16, s0, s24
vmla.f32 s17, s0, s25
@@ -390,7 +390,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vmla.f32 s22, s0, s30
vmla.f32 s23, s0, s31
fstmias YO!, { s16 - s23 }
vstmia.f32 YO!, { s16 - s23 }
.endm
@@ -405,8 +405,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1X1
fldmias XO! , { s4 }
fldmias AO1 , { s8 }
vldmia.f32 XO! , { s4 }
vldmia.f32 AO1 , { s8 }
vmla.f32 s24 , s4 , s8
add AO1, AO1, LDA
@@ -414,9 +414,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_F1
fldmias YO, { s16 }
vldmia.f32 YO, { s16 }
vmla.f32 s16, s0, s24
fstmias YO!, { s16 }
vstmia.f32 YO!, { s16 }
.endm
@@ -454,8 +454,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S8X1
pld [ AO2 , #A_PRE ]
fldmias XO , { s4 }
fldmias AO1 , { s8 - s15 }
vldmia.f32 XO , { s4 }
vldmia.f32 AO1 , { s8 - s15 }
vmla.f32 s24 , s4 , s8
vmla.f32 s25 , s4 , s9
@@ -473,44 +473,44 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S8
fldmias YO, { s16 }
vldmia.f32 YO, { s16 }
vmla.f32 s16, s0, s24
fstmias YO, { s16 }
vstmia.f32 YO, { s16 }
add YO, YO, INC_Y
fldmias YO, { s17 }
vldmia.f32 YO, { s17 }
vmla.f32 s17, s0, s25
fstmias YO, { s17 }
vstmia.f32 YO, { s17 }
add YO, YO, INC_Y
fldmias YO, { s18 }
vldmia.f32 YO, { s18 }
vmla.f32 s18, s0, s26
fstmias YO, { s18 }
vstmia.f32 YO, { s18 }
add YO, YO, INC_Y
fldmias YO, { s19 }
vldmia.f32 YO, { s19 }
vmla.f32 s19, s0, s27
fstmias YO, { s19 }
vstmia.f32 YO, { s19 }
add YO, YO, INC_Y
fldmias YO, { s20 }
vldmia.f32 YO, { s20 }
vmla.f32 s20, s0, s28
fstmias YO, { s20 }
vstmia.f32 YO, { s20 }
add YO, YO, INC_Y
fldmias YO, { s21 }
vldmia.f32 YO, { s21 }
vmla.f32 s21, s0, s29
fstmias YO, { s21 }
vstmia.f32 YO, { s21 }
add YO, YO, INC_Y
fldmias YO, { s22 }
vldmia.f32 YO, { s22 }
vmla.f32 s22, s0, s30
fstmias YO, { s22 }
vstmia.f32 YO, { s22 }
add YO, YO, INC_Y
fldmias YO, { s23 }
vldmia.f32 YO, { s23 }
vmla.f32 s23, s0, s31
fstmias YO, { s23 }
vstmia.f32 YO, { s23 }
add YO, YO, INC_Y
.endm
@@ -526,8 +526,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1X1
fldmias XO , { s4 }
fldmias AO1 , { s8 }
vldmia.f32 XO , { s4 }
vldmia.f32 AO1 , { s8 }
vmla.f32 s24 , s4 , s8
add AO1, AO1, LDA
add XO, XO, INC_X
@@ -536,9 +536,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S1
fldmias YO, { s16 }
vldmia.f32 YO, { s16 }
vmla.f32 s16, s0, s24
fstmias YO, { s16 }
vstmia.f32 YO, { s16 }
add YO, YO, INC_Y
.endm

View File

@@ -112,13 +112,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F2X4
pld [ XO , #X_PRE ]
fldmiad XO! , { d12 - d15 }
vldmia.f64 XO! , { d12 - d15 }
pld [ AO1 , #A_PRE ]
fldmiad AO1!, { d8 - d9 }
vldmia.f64 AO1!, { d8 - d9 }
pld [ AO2 , #A_PRE ]
fldmiad AO2!, { d4 - d5 }
fldmiad AO1!, { d10 - d11 }
fldmiad AO2!, { d6 - d7 }
vldmia.f64 AO2!, { d4 - d5 }
vldmia.f64 AO1!, { d10 - d11 }
vldmia.f64 AO2!, { d6 - d7 }
vmla.f64 d2 , d12 , d8
vmla.f64 d3 , d12 , d4
@@ -133,9 +133,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F2X1
fldmiad XO! , { d1 }
fldmiad AO1!, { d8 }
fldmiad AO2!, { d4 }
vldmia.f64 XO! , { d1 }
vldmia.f64 AO1!, { d8 }
vldmia.f64 AO2!, { d4 }
vmla.f64 d2 , d1 , d8
vmla.f64 d3 , d1 , d4
@@ -143,10 +143,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_F2
fldmiad YO, { d4 - d5 }
vldmia.f64 YO, { d4 - d5 }
vmla.f64 d4, d0, d2
vmla.f64 d5, d0, d3
fstmiad YO!, { d4 - d5 }
vstmia.f64 YO!, { d4 - d5 }
.endm
@@ -160,10 +160,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1X4
pld [ XO , #X_PRE ]
fldmiad XO! , { d12 - d15 }
vldmia.f64 XO! , { d12 - d15 }
pld [ AO1 , #A_PRE ]
fldmiad AO1!, { d8 - d9 }
fldmiad AO1!, { d10 - d11 }
vldmia.f64 AO1!, { d8 - d9 }
vldmia.f64 AO1!, { d10 - d11 }
vmla.f64 d2 , d12 , d8
vmla.f64 d2 , d13 , d9
vmla.f64 d2 , d14, d10
@@ -173,17 +173,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1X1
fldmiad XO! , { d1 }
fldmiad AO1!, { d8 }
vldmia.f64 XO! , { d1 }
vldmia.f64 AO1!, { d8 }
vmla.f64 d2 , d1 , d8
.endm
.macro SAVE_F1
fldmiad YO, { d4 }
vldmia.f64 YO, { d4 }
vmla.f64 d4, d0, d2
fstmiad YO!, { d4 }
vstmia.f64 YO!, { d4 }
.endm
@@ -197,23 +197,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S2X4
fldmiad XO , { d12 }
vldmia.f64 XO , { d12 }
add XO, XO, INC_X
pld [ AO1 , #A_PRE ]
fldmiad AO1!, { d8 - d9 }
vldmia.f64 AO1!, { d8 - d9 }
pld [ AO2 , #A_PRE ]
fldmiad AO2!, { d4 - d5 }
vldmia.f64 AO2!, { d4 - d5 }
fldmiad XO , { d13 }
vldmia.f64 XO , { d13 }
add XO, XO, INC_X
fldmiad AO1!, { d10 - d11 }
fldmiad AO2!, { d6 - d7 }
vldmia.f64 AO1!, { d10 - d11 }
vldmia.f64 AO2!, { d6 - d7 }
fldmiad XO , { d14 }
vldmia.f64 XO , { d14 }
add XO, XO, INC_X
fldmiad XO , { d15 }
vldmia.f64 XO , { d15 }
add XO, XO, INC_X
vmla.f64 d2 , d12 , d8
@@ -229,9 +229,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S2X1
fldmiad XO , { d1 }
fldmiad AO1!, { d8 }
fldmiad AO2!, { d4 }
vldmia.f64 XO , { d1 }
vldmia.f64 AO1!, { d8 }
vldmia.f64 AO2!, { d4 }
vmla.f64 d2 , d1 , d8
add XO, XO, INC_X
vmla.f64 d3 , d1 , d4
@@ -240,14 +240,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S2
fldmiad YO, { d4 }
vldmia.f64 YO, { d4 }
vmla.f64 d4, d0, d2
fstmiad YO, { d4 }
vstmia.f64 YO, { d4 }
add YO, YO, INC_Y
fldmiad YO, { d5 }
vldmia.f64 YO, { d5 }
vmla.f64 d5, d0, d3
fstmiad YO, { d5 }
vstmia.f64 YO, { d5 }
add YO, YO, INC_Y
.endm
@@ -261,20 +261,20 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1X4
fldmiad XO , { d12 }
vldmia.f64 XO , { d12 }
add XO, XO, INC_X
pld [ AO1 , #A_PRE ]
fldmiad AO1!, { d8 - d9 }
vldmia.f64 AO1!, { d8 - d9 }
fldmiad XO , { d13 }
vldmia.f64 XO , { d13 }
add XO, XO, INC_X
fldmiad AO1!, { d10 - d11 }
vldmia.f64 AO1!, { d10 - d11 }
fldmiad XO , { d14 }
vldmia.f64 XO , { d14 }
add XO, XO, INC_X
fldmiad XO , { d15 }
vldmia.f64 XO , { d15 }
add XO, XO, INC_X
vmla.f64 d2 , d12 , d8
@@ -286,8 +286,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1X1
fldmiad XO , { d1 }
fldmiad AO1!, { d8 }
vldmia.f64 XO , { d1 }
vldmia.f64 AO1!, { d8 }
vmla.f64 d2 , d1 , d8
add XO, XO, INC_X
@@ -295,9 +295,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S1
fldmiad YO, { d4 }
vldmia.f64 YO, { d4 }
vmla.f64 d4, d0, d2
fstmiad YO, { d4 }
vstmia.f64 YO, { d4 }
add YO, YO, INC_Y
.endm
@@ -315,11 +315,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F2X4
fldmias XO! , { s12 - s15 }
fldmias AO1!, { s8 - s9 }
fldmias AO2!, { s4 - s5 }
fldmias AO1!, { s10 - s11 }
fldmias AO2!, { s6 - s7 }
vldmia.f32 XO! , { s12 - s15 }
vldmia.f32 AO1!, { s8 - s9 }
vldmia.f32 AO2!, { s4 - s5 }
vldmia.f32 AO1!, { s10 - s11 }
vldmia.f32 AO2!, { s6 - s7 }
vmla.f32 s2 , s12 , s8
vmla.f32 s3 , s12 , s4
@@ -334,9 +334,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F2X1
fldmias XO! , { s1 }
fldmias AO1!, { s8 }
fldmias AO2!, { s4 }
vldmia.f32 XO! , { s1 }
vldmia.f32 AO1!, { s8 }
vldmia.f32 AO2!, { s4 }
vmla.f32 s2 , s1 , s8
vmla.f32 s3 , s1 , s4
@@ -344,10 +344,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_F2
fldmias YO, { s4 - s5 }
vldmia.f32 YO, { s4 - s5 }
vmla.f32 s4, s0, s2
vmla.f32 s5, s0, s3
fstmias YO!, { s4 - s5 }
vstmia.f32 YO!, { s4 - s5 }
.endm
@@ -359,9 +359,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1X4
fldmias XO! , { s12 - s15 }
fldmias AO1!, { s8 - s9 }
fldmias AO1!, { s10 - s11 }
vldmia.f32 XO! , { s12 - s15 }
vldmia.f32 AO1!, { s8 - s9 }
vldmia.f32 AO1!, { s10 - s11 }
vmla.f32 s2 , s12 , s8
vmla.f32 s2 , s13 , s9
vmla.f32 s2 , s14, s10
@@ -371,17 +371,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1X1
fldmias XO! , { s1 }
fldmias AO1!, { s8 }
vldmia.f32 XO! , { s1 }
vldmia.f32 AO1!, { s8 }
vmla.f32 s2 , s1 , s8
.endm
.macro SAVE_F1
fldmias YO, { s4 }
vldmia.f32 YO, { s4 }
vmla.f32 s4, s0, s2
fstmias YO!, { s4 }
vstmia.f32 YO!, { s4 }
.endm
@@ -395,21 +395,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S2X4
fldmias XO , { s12 }
vldmia.f32 XO , { s12 }
add XO, XO, INC_X
fldmias AO1!, { s8 - s9 }
fldmias AO2!, { s4 - s5 }
vldmia.f32 AO1!, { s8 - s9 }
vldmia.f32 AO2!, { s4 - s5 }
fldmias XO , { s13 }
vldmia.f32 XO , { s13 }
add XO, XO, INC_X
fldmias AO1!, { s10 - s11 }
fldmias AO2!, { s6 - s7 }
vldmia.f32 AO1!, { s10 - s11 }
vldmia.f32 AO2!, { s6 - s7 }
fldmias XO , { s14 }
vldmia.f32 XO , { s14 }
add XO, XO, INC_X
fldmias XO , { s15 }
vldmia.f32 XO , { s15 }
add XO, XO, INC_X
vmla.f32 s2 , s12 , s8
@@ -425,9 +425,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S2X1
fldmias XO , { s1 }
fldmias AO1!, { s8 }
fldmias AO2!, { s4 }
vldmia.f32 XO , { s1 }
vldmia.f32 AO1!, { s8 }
vldmia.f32 AO2!, { s4 }
vmla.f32 s2 , s1 , s8
add XO, XO, INC_X
vmla.f32 s3 , s1 , s4
@@ -436,14 +436,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S2
fldmias YO, { s4 }
vldmia.f32 YO, { s4 }
vmla.f32 s4, s0, s2
fstmias YO, { s4 }
vstmia.f32 YO, { s4 }
add YO, YO, INC_Y
fldmias YO, { s5 }
vldmia.f32 YO, { s5 }
vmla.f32 s5, s0, s3
fstmias YO, { s5 }
vstmia.f32 YO, { s5 }
add YO, YO, INC_Y
.endm
@@ -456,20 +456,20 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1X4
fldmias XO , { s12 }
vldmia.f32 XO , { s12 }
add XO, XO, INC_X
pld [ AO1 , #A_PRE ]
fldmias AO1!, { s8 - s9 }
vldmia.f32 AO1!, { s8 - s9 }
fldmias XO , { s13 }
vldmia.f32 XO , { s13 }
add XO, XO, INC_X
fldmias AO1!, { s10 - s11 }
vldmia.f32 AO1!, { s10 - s11 }
fldmias XO , { s14 }
vldmia.f32 XO , { s14 }
add XO, XO, INC_X
fldmias XO , { s15 }
vldmia.f32 XO , { s15 }
add XO, XO, INC_X
vmla.f32 s2 , s12 , s8
@@ -481,8 +481,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1X1
fldmias XO , { s1 }
fldmias AO1!, { s8 }
vldmia.f32 XO , { s1 }
vldmia.f32 AO1!, { s8 }
vmla.f32 s2 , s1 , s8
add XO, XO, INC_X
@@ -490,9 +490,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S1
fldmias YO, { s4 }
vldmia.f32 YO, { s4 }
vmla.f32 s4, s0, s2
fstmias YO, { s4 }
vstmia.f32 YO, { s4 }
add YO, YO, INC_Y
.endm

View File

@@ -108,17 +108,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F2X4
pld [ XO , #X_PRE ]
fldmiad XO! , { d28 - d31 }
vldmia.f64 XO! , { d28 - d31 }
pld [ AO1 , #A_PRE ]
fldmiad AO1!, { d8 - d9 }
vldmia.f64 AO1!, { d8 - d9 }
pld [ AO2 , #A_PRE ]
fldmiad AO2!, { d16 - d17 }
vldmia.f64 AO2!, { d16 - d17 }
vmla.f64 d4 , d28 , d8
vmla.f64 d5 , d28 , d16
fldmiad AO1!, { d10 - d11 }
vldmia.f64 AO1!, { d10 - d11 }
vmla.f64 d4 , d29 , d9
vmla.f64 d5 , d29 , d17
fldmiad AO2!, { d18 - d19 }
vldmia.f64 AO2!, { d18 - d19 }
vmla.f64 d4 , d30, d10
vmla.f64 d5 , d30, d18
vmla.f64 d4 , d31, d11
@@ -129,9 +129,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F2X1
fldmiad XO! , { d2 }
fldmiad AO1!, { d8 }
fldmiad AO2!, { d16 }
vldmia.f64 XO! , { d2 }
vldmia.f64 AO1!, { d8 }
vldmia.f64 AO2!, { d16 }
vmla.f64 d4 , d2 , d8
vmla.f64 d5 , d2 , d16
@@ -139,10 +139,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_F2
fldmiad YO, { d24 - d25 }
vldmia.f64 YO, { d24 - d25 }
vmla.f64 d24, d0, d4
vmla.f64 d25, d0, d5
fstmiad YO!, { d24 - d25 }
vstmia.f64 YO!, { d24 - d25 }
.endm
@@ -156,23 +156,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S2X4
pld [ AO1 , #A_PRE ]
fldmiad XO , { d28 }
vldmia.f64 XO , { d28 }
add XO, XO, INC_X
fldmiad AO1!, { d8 - d9 }
vldmia.f64 AO1!, { d8 - d9 }
pld [ AO2 , #A_PRE ]
fldmiad AO2!, { d16 - d17 }
vldmia.f64 AO2!, { d16 - d17 }
vmla.f64 d4 , d28 , d8
fldmiad XO , { d29 }
vldmia.f64 XO , { d29 }
add XO, XO, INC_X
vmla.f64 d5 , d28 , d16
fldmiad AO1!, { d10 - d11 }
vldmia.f64 AO1!, { d10 - d11 }
vmla.f64 d4 , d29 , d9
fldmiad XO , { d30 }
vldmia.f64 XO , { d30 }
add XO, XO, INC_X
vmla.f64 d5 , d29 , d17
fldmiad AO2!, { d18 - d19 }
vldmia.f64 AO2!, { d18 - d19 }
vmla.f64 d4 , d30, d10
fldmiad XO , { d31 }
vldmia.f64 XO , { d31 }
add XO, XO, INC_X
vmla.f64 d5 , d30, d18
vmla.f64 d4 , d31, d11
@@ -183,10 +183,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S2X1
fldmiad XO , { d2 }
fldmiad AO1!, { d8 }
vldmia.f64 XO , { d2 }
vldmia.f64 AO1!, { d8 }
add XO, XO, INC_X
fldmiad AO2!, { d16 }
vldmia.f64 AO2!, { d16 }
vmla.f64 d4 , d2 , d8
vmla.f64 d5 , d2 , d16
@@ -194,14 +194,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S2
fldmiad YO, { d24 }
vldmia.f64 YO, { d24 }
vmla.f64 d24, d0, d4
fstmiad YO, { d24 }
vstmia.f64 YO, { d24 }
add YO, YO, INC_Y
fldmiad YO, { d24 }
vldmia.f64 YO, { d24 }
vmla.f64 d24, d0, d5
fstmiad YO, { d24 }
vstmia.f64 YO, { d24 }
add YO, YO, INC_Y
.endm
@@ -215,11 +215,11 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1X4
pld [ XO , #X_PRE ]
fldmiad XO! , { d28 - d31 }
vldmia.f64 XO! , { d28 - d31 }
pld [ AO1 , #A_PRE ]
fldmiad AO1!, { d8 - d9 }
vldmia.f64 AO1!, { d8 - d9 }
vmla.f64 d4 , d28 , d8
fldmiad AO1!, { d10 - d11 }
vldmia.f64 AO1!, { d10 - d11 }
vmla.f64 d4 , d29 , d9
vmla.f64 d4 , d30, d10
vmla.f64 d4 , d31, d11
@@ -229,17 +229,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1X1
fldmiad XO! , { d2 }
fldmiad AO1!, { d8 }
vldmia.f64 XO! , { d2 }
vldmia.f64 AO1!, { d8 }
vmla.f64 d4 , d2 , d8
.endm
.macro SAVE_F1
fldmiad YO, { d24 }
vldmia.f64 YO, { d24 }
vmla.f64 d24, d0, d4
fstmiad YO!, { d24 }
vstmia.f64 YO!, { d24 }
.endm
@@ -252,18 +252,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1X4
pld [ AO1 , #A_PRE ]
fldmiad XO , { d28 }
vldmia.f64 XO , { d28 }
add XO, XO, INC_X
fldmiad AO1!, { d8 - d9 }
vldmia.f64 AO1!, { d8 - d9 }
vmla.f64 d4 , d28 , d8
fldmiad XO , { d29 }
vldmia.f64 XO , { d29 }
add XO, XO, INC_X
fldmiad AO1!, { d10 - d11 }
vldmia.f64 AO1!, { d10 - d11 }
vmla.f64 d4 , d29 , d9
fldmiad XO , { d30 }
vldmia.f64 XO , { d30 }
add XO, XO, INC_X
vmla.f64 d4 , d30, d10
fldmiad XO , { d31 }
vldmia.f64 XO , { d31 }
add XO, XO, INC_X
vmla.f64 d4 , d31, d11
@@ -272,8 +272,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1X1
fldmiad XO , { d2 }
fldmiad AO1!, { d8 }
vldmia.f64 XO , { d2 }
vldmia.f64 AO1!, { d8 }
add XO, XO, INC_X
vmla.f64 d4 , d2 , d8
@@ -281,9 +281,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S1
fldmiad YO, { d24 }
vldmia.f64 YO, { d24 }
vmla.f64 d24, d0, d4
fstmiad YO, { d24 }
vstmia.f64 YO, { d24 }
add YO, YO, INC_Y
.endm
@@ -300,15 +300,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F2X4
fldmias XO! , { s28 - s31 }
fldmias AO1!, { s8 - s9 }
fldmias AO2!, { s16 - s17 }
vldmia.f32 XO! , { s28 - s31 }
vldmia.f32 AO1!, { s8 - s9 }
vldmia.f32 AO2!, { s16 - s17 }
vmla.f32 s4 , s28 , s8
vmla.f32 s5 , s28 , s16
fldmias AO1!, { s10 - s11 }
vldmia.f32 AO1!, { s10 - s11 }
vmla.f32 s4 , s29 , s9
vmla.f32 s5 , s29 , s17
fldmias AO2!, { s18 - s19 }
vldmia.f32 AO2!, { s18 - s19 }
vmla.f32 s4 , s30, s10
vmla.f32 s5 , s30, s18
vmla.f32 s4 , s31, s11
@@ -319,9 +319,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F2X1
fldmias XO! , { s2 }
fldmias AO1!, { s8 }
fldmias AO2!, { s16 }
vldmia.f32 XO! , { s2 }
vldmia.f32 AO1!, { s8 }
vldmia.f32 AO2!, { s16 }
vmla.f32 s4 , s2 , s8
vmla.f32 s5 , s2 , s16
@@ -329,10 +329,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_F2
fldmias YO, { s24 - s25 }
vldmia.f32 YO, { s24 - s25 }
vmla.f32 s24, s0, s4
vmla.f32 s25, s0, s5
fstmias YO!, { s24 - s25 }
vstmia.f32 YO!, { s24 - s25 }
.endm
@@ -345,22 +345,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S2X4
fldmias XO , { s28 }
vldmia.f32 XO , { s28 }
add XO, XO, INC_X
fldmias AO1!, { s8 - s9 }
fldmias AO2!, { s16 - s17 }
vldmia.f32 AO1!, { s8 - s9 }
vldmia.f32 AO2!, { s16 - s17 }
vmla.f32 s4 , s28 , s8
fldmias XO , { s29 }
vldmia.f32 XO , { s29 }
add XO, XO, INC_X
vmla.f32 s5 , s28 , s16
fldmias AO1!, { s10 - s11 }
vldmia.f32 AO1!, { s10 - s11 }
vmla.f32 s4 , s29 , s9
fldmias XO , { s30 }
vldmia.f32 XO , { s30 }
add XO, XO, INC_X
vmla.f32 s5 , s29 , s17
fldmias AO2!, { s18 - s19 }
vldmia.f32 AO2!, { s18 - s19 }
vmla.f32 s4 , s30, s10
fldmias XO , { s31 }
vldmia.f32 XO , { s31 }
add XO, XO, INC_X
vmla.f32 s5 , s30, s18
vmla.f32 s4 , s31, s11
@@ -371,10 +371,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S2X1
fldmias XO , { s2 }
fldmias AO1!, { s8 }
vldmia.f32 XO , { s2 }
vldmia.f32 AO1!, { s8 }
add XO, XO, INC_X
fldmias AO2!, { s16 }
vldmia.f32 AO2!, { s16 }
vmla.f32 s4 , s2 , s8
vmla.f32 s5 , s2 , s16
@@ -382,14 +382,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S2
fldmias YO, { s24 }
vldmia.f32 YO, { s24 }
vmla.f32 s24, s0, s4
fstmias YO, { s24 }
vstmia.f32 YO, { s24 }
add YO, YO, INC_Y
fldmias YO, { s24 }
vldmia.f32 YO, { s24 }
vmla.f32 s24, s0, s5
fstmias YO, { s24 }
vstmia.f32 YO, { s24 }
add YO, YO, INC_Y
.endm
@@ -402,10 +402,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1X4
fldmias XO! , { s28 - s31 }
fldmias AO1!, { s8 - s9 }
vldmia.f32 XO! , { s28 - s31 }
vldmia.f32 AO1!, { s8 - s9 }
vmla.f32 s4 , s28 , s8
fldmias AO1!, { s10 - s11 }
vldmia.f32 AO1!, { s10 - s11 }
vmla.f32 s4 , s29 , s9
vmla.f32 s4 , s30, s10
vmla.f32 s4 , s31, s11
@@ -415,17 +415,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1X1
fldmias XO! , { s2 }
fldmias AO1!, { s8 }
vldmia.f32 XO! , { s2 }
vldmia.f32 AO1!, { s8 }
vmla.f32 s4 , s2 , s8
.endm
.macro SAVE_F1
fldmias YO, { s24 }
vldmia.f32 YO, { s24 }
vmla.f32 s24, s0, s4
fstmias YO!, { s24 }
vstmia.f32 YO!, { s24 }
.endm
@@ -437,18 +437,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1X4
fldmias XO , { s28 }
vldmia.f32 XO , { s28 }
add XO, XO, INC_X
fldmias AO1!, { s8 - s9 }
vldmia.f32 AO1!, { s8 - s9 }
vmla.f32 s4 , s28 , s8
fldmias XO , { s29 }
vldmia.f32 XO , { s29 }
add XO, XO, INC_X
fldmias AO1!, { s10 - s11 }
vldmia.f32 AO1!, { s10 - s11 }
vmla.f32 s4 , s29 , s9
fldmias XO , { s30 }
vldmia.f32 XO , { s30 }
add XO, XO, INC_X
vmla.f32 s4 , s30, s10
fldmias XO , { s31 }
vldmia.f32 XO , { s31 }
add XO, XO, INC_X
vmla.f32 s4 , s31, s11
@@ -457,8 +457,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1X1
fldmias XO , { s2 }
fldmias AO1!, { s8 }
vldmia.f32 XO , { s2 }
vldmia.f32 AO1!, { s8 }
add XO, XO, INC_X
vmla.f32 s4 , s2 , s8
@@ -466,9 +466,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S1
fldmias YO, { s24 }
vldmia.f32 YO, { s24 }
vmla.f32 s24, s0, s4
fstmias YO, { s24 }
vstmia.f32 YO, { s24 }
add YO, YO, INC_Y
.endm

View File

@@ -114,7 +114,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro INIT_F
fldmiad X!, { d0 }
vldmia.f64 X!, { d0 }
VABS( d0, d0 )
mov Z, #1
mov INDEX, Z
@@ -123,7 +123,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmiad X!, { d4 }
vldmia.f64 X!, { d4 }
add Z, Z, #1
VABS( d4, d4 )
vcmpe.f64 d4, d0
@@ -135,7 +135,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro INIT_S
fldmiad X, { d0 }
vldmia.f64 X, { d0 }
VABS( d0, d0 )
mov Z, #1
mov INDEX, Z
@@ -146,7 +146,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1
fldmiad X, { d4 }
vldmia.f64 X, { d4 }
add Z, Z, #1
VABS( d4, d4 )
vcmpe.f64 d4, d0
@@ -161,7 +161,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro INIT_F
fldmias X!, { s0 }
vldmia.f32 X!, { s0 }
VABS( s0, s0 )
mov Z, #1
mov INDEX, Z
@@ -170,7 +170,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmias X!, { s4 }
vldmia.f32 X!, { s4 }
add Z, Z, #1
VABS( s4, s4 )
vcmpe.f32 s4, s0
@@ -182,7 +182,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro INIT_S
fldmias X, { s0 }
vldmia.f32 X, { s0 }
VABS( s0, s0 )
mov Z, #1
mov INDEX, Z
@@ -193,7 +193,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1
fldmias X, { s4 }
vldmia.f32 X, { s4 }
add Z, Z, #1
VABS( s4, s4 )
vcmpe.f32 s4, s0
@@ -215,7 +215,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro INIT_F
fldmiad X!, { d0 -d1 }
vldmia.f64 X!, { d0 -d1 }
vabs.f64 d0, d0
vabs.f64 d1, d1
vadd.f64 d0 , d0, d1
@@ -227,7 +227,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmiad X!, { d4 - d5 }
vldmia.f64 X!, { d4 - d5 }
add Z, Z, #1
vabs.f64 d4, d4
vabs.f64 d5, d5
@@ -241,7 +241,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro INIT_S
fldmiad X, { d0 -d1 }
vldmia.f64 X, { d0 -d1 }
vabs.f64 d0, d0
vabs.f64 d1, d1
vadd.f64 d0 , d0, d1
@@ -255,7 +255,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1
fldmiad X, { d4 - d5 }
vldmia.f64 X, { d4 - d5 }
add Z, Z, #1
vabs.f64 d4, d4
vabs.f64 d5, d5
@@ -272,7 +272,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro INIT_F
fldmias X!, { s0 -s1 }
vldmia.f32 X!, { s0 -s1 }
vabs.f32 s0, s0
vabs.f32 s1, s1
vadd.f32 s0 , s0, s1
@@ -284,7 +284,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmias X!, { s4 - s5 }
vldmia.f32 X!, { s4 - s5 }
add Z, Z, #1
vabs.f32 s4, s4
vabs.f32 s5, s5
@@ -298,7 +298,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro INIT_S
fldmias X, { s0 -s1 }
vldmia.f32 X, { s0 -s1 }
vabs.f32 s0, s0
vabs.f32 s1, s1
vadd.f32 s0 , s0, s1
@@ -312,7 +312,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1
fldmias X, { s4 - s5 }
vldmia.f32 X, { s4 - s5 }
add Z, Z, #1
vabs.f32 s4, s4
vabs.f32 s5, s5

View File

@@ -53,7 +53,7 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
while(i < n)
{
if( x[ix] > minf )
if( x[ix] < minf )
{
min = i;
minf = x[ix];

View File

@@ -58,7 +58,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmiad X!, { d4 }
vldmia.f64 X!, { d4 }
vcmpe.f64 d4, d6 // compare with 0.0
vmrs APSR_nzcv, fpscr
beq KERNEL_F1_NEXT_\@
@@ -95,7 +95,7 @@ KERNEL_F1_NEXT_\@:
.macro KERNEL_S1
fldmiad X, { d4 }
vldmia.f64 X, { d4 }
vcmpe.f64 d4, d6 // compare with 0.0
vmrs APSR_nzcv, fpscr
beq KERNEL_S1_NEXT
@@ -121,7 +121,7 @@ KERNEL_S1_NEXT:
.macro KERNEL_F1
fldmias X!, { s4 }
vldmia.f32 X!, { s4 }
vcmpe.f32 s4, s6 // compare with 0.0
vmrs APSR_nzcv, fpscr
beq KERNEL_F1_NEXT_\@
@@ -158,7 +158,7 @@ KERNEL_F1_NEXT_\@:
.macro KERNEL_S1
fldmias X, { s4 }
vldmia.f32 X, { s4 }
vcmpe.f32 s4, s6 // compare with 0.0
vmrs APSR_nzcv, fpscr
beq KERNEL_S1_NEXT
@@ -191,7 +191,7 @@ KERNEL_S1_NEXT:
.macro KERNEL_F1
fldmiad X!, { d4 - d5 }
vldmia.f64 X!, { d4 - d5 }
vcmpe.f64 d4, d6 // compare with 0.0
vmrs APSR_nzcv, fpscr
@@ -249,7 +249,7 @@ KERNEL_F1_END_\@:
.macro KERNEL_S1
fldmiad X, { d4 - d5 }
vldmia.f64 X, { d4 - d5 }
vcmpe.f64 d4, d6 // compare with 0.0
vmrs APSR_nzcv, fpscr
@@ -294,7 +294,7 @@ KERNEL_S1_END_\@:
.macro KERNEL_F1
fldmias X!, { s4 - s5 }
vldmia.f32 X!, { s4 - s5 }
vcmpe.f32 s4, s6 // compare with 0.0
vmrs APSR_nzcv, fpscr
@@ -350,7 +350,7 @@ KERNEL_F1_END_\@:
.macro KERNEL_S1
fldmias X, { s4 - s5 }
vldmia.f32 X, { s4 - s5 }
vcmpe.f32 s4, s6 // compare with 0.0
vmrs APSR_nzcv, fpscr

View File

@@ -58,7 +58,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmiad X!, { d4 }
vldmia.f64 X!, { d4 }
vcmpe.f64 d4, d6 // compare with 0.0
vmrs APSR_nzcv, fpscr
beq KERNEL_F1_NEXT_\@
@@ -95,7 +95,7 @@ KERNEL_F1_NEXT_\@:
.macro KERNEL_S1
fldmiad X, { d4 }
vldmia.f64 X, { d4 }
vcmpe.f64 d4, d6 // compare with 0.0
vmrs APSR_nzcv, fpscr
beq KERNEL_S1_NEXT
@@ -121,7 +121,7 @@ KERNEL_S1_NEXT:
.macro KERNEL_F1
fldmias X!, { s4 }
vldmia.f32 X!, { s4 }
vcmpe.f32 s4, s6 // compare with 0.0
vmrs APSR_nzcv, fpscr
beq KERNEL_F1_NEXT_\@
@@ -158,7 +158,7 @@ KERNEL_F1_NEXT_\@:
.macro KERNEL_S1
fldmias X, { s4 }
vldmia.f32 X, { s4 }
vcmpe.f32 s4, s6 // compare with 0.0
vmrs APSR_nzcv, fpscr
beq KERNEL_S1_NEXT
@@ -191,7 +191,7 @@ KERNEL_S1_NEXT:
.macro KERNEL_F1
fldmiad X!, { d4 - d5 }
vldmia.f64 X!, { d4 - d5 }
vcmpe.f64 d4, d6 // compare with 0.0
vmrs APSR_nzcv, fpscr
@@ -249,7 +249,7 @@ KERNEL_F1_END_\@:
.macro KERNEL_S1
fldmiad X, { d4 - d5 }
vldmia.f64 X, { d4 - d5 }
vcmpe.f64 d4, d6 // compare with 0.0
vmrs APSR_nzcv, fpscr
@@ -294,7 +294,7 @@ KERNEL_S1_END_\@:
.macro KERNEL_F1
fldmias X!, { s4 - s5 }
vldmia.f32 X!, { s4 - s5 }
vcmpe.f32 s4, s6 // compare with 0.0
vmrs APSR_nzcv, fpscr
@@ -350,7 +350,7 @@ KERNEL_F1_END_\@:
.macro KERNEL_S1
fldmias X, { s4 - s5 }
vldmia.f32 X, { s4 - s5 }
vcmpe.f32 s4, s6 // compare with 0.0
vmrs APSR_nzcv, fpscr

View File

@@ -77,68 +77,68 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ X, #X_PRE ]
pld [ Y, #X_PRE ]
fldmiad X, { d4 }
fldmiad Y, { d5 }
vldmia.f64 X, { d4 }
vldmia.f64 Y, { d5 }
vmul.f64 d2 , d0, d4
fmacd d2 , d1, d5
vmul.f64 d3 , d0, d5
vmls.f64 d3 , d1, d4
fstmiad X!, { d2 }
fstmiad Y!, { d3 }
vstmia.f64 X!, { d2 }
vstmia.f64 Y!, { d3 }
fldmiad X, { d4 }
fldmiad Y, { d5 }
vldmia.f64 X, { d4 }
vldmia.f64 Y, { d5 }
vmul.f64 d2 , d0, d4
fmacd d2 , d1, d5
vmul.f64 d3 , d0, d5
vmls.f64 d3 , d1, d4
fstmiad X!, { d2 }
fstmiad Y!, { d3 }
vstmia.f64 X!, { d2 }
vstmia.f64 Y!, { d3 }
fldmiad X, { d4 }
fldmiad Y, { d5 }
vldmia.f64 X, { d4 }
vldmia.f64 Y, { d5 }
vmul.f64 d2 , d0, d4
fmacd d2 , d1, d5
vmul.f64 d3 , d0, d5
vmls.f64 d3 , d1, d4
fstmiad X!, { d2 }
fstmiad Y!, { d3 }
vstmia.f64 X!, { d2 }
vstmia.f64 Y!, { d3 }
fldmiad X, { d4 }
fldmiad Y, { d5 }
vldmia.f64 X, { d4 }
vldmia.f64 Y, { d5 }
vmul.f64 d2 , d0, d4
fmacd d2 , d1, d5
vmul.f64 d3 , d0, d5
vmls.f64 d3 , d1, d4
fstmiad X!, { d2 }
fstmiad Y!, { d3 }
vstmia.f64 X!, { d2 }
vstmia.f64 Y!, { d3 }
.endm
.macro KERNEL_F1
fldmiad X, { d4 }
fldmiad Y, { d5 }
vldmia.f64 X, { d4 }
vldmia.f64 Y, { d5 }
vmul.f64 d2 , d0, d4
fmacd d2 , d1, d5
vmul.f64 d3 , d0, d5
vmls.f64 d3 , d1, d4
fstmiad X!, { d2 }
fstmiad Y!, { d3 }
vstmia.f64 X!, { d2 }
vstmia.f64 Y!, { d3 }
.endm
.macro KERNEL_S1
fldmiad X, { d4 }
fldmiad Y, { d5 }
vldmia.f64 X, { d4 }
vldmia.f64 Y, { d5 }
vmul.f64 d2 , d0, d4
fmacd d2 , d1, d5
vmul.f64 d3 , d0, d5
vmls.f64 d3 , d1, d4
fstmiad X, { d2 }
fstmiad Y, { d3 }
vstmia.f64 X, { d2 }
vstmia.f64 Y, { d3 }
add X, X, INC_X
add Y, Y, INC_Y
@@ -149,68 +149,68 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F4
fldmias X, { s4 }
fldmias Y, { s5 }
vldmia.f32 X, { s4 }
vldmia.f32 Y, { s5 }
vmul.f32 s2 , s0, s4
fmacs s2 , s1, s5
vmul.f32 s3 , s0, s5
vmls.f32 s3 , s1, s4
fstmias X!, { s2 }
fstmias Y!, { s3 }
vstmia.f32 X!, { s2 }
vstmia.f32 Y!, { s3 }
fldmias X, { s4 }
fldmias Y, { s5 }
vldmia.f32 X, { s4 }
vldmia.f32 Y, { s5 }
vmul.f32 s2 , s0, s4
fmacs s2 , s1, s5
vmul.f32 s3 , s0, s5
vmls.f32 s3 , s1, s4
fstmias X!, { s2 }
fstmias Y!, { s3 }
vstmia.f32 X!, { s2 }
vstmia.f32 Y!, { s3 }
fldmias X, { s4 }
fldmias Y, { s5 }
vldmia.f32 X, { s4 }
vldmia.f32 Y, { s5 }
vmul.f32 s2 , s0, s4
fmacs s2 , s1, s5
vmul.f32 s3 , s0, s5
vmls.f32 s3 , s1, s4
fstmias X!, { s2 }
fstmias Y!, { s3 }
vstmia.f32 X!, { s2 }
vstmia.f32 Y!, { s3 }
fldmias X, { s4 }
fldmias Y, { s5 }
vldmia.f32 X, { s4 }
vldmia.f32 Y, { s5 }
vmul.f32 s2 , s0, s4
fmacs s2 , s1, s5
vmul.f32 s3 , s0, s5
vmls.f32 s3 , s1, s4
fstmias X!, { s2 }
fstmias Y!, { s3 }
vstmia.f32 X!, { s2 }
vstmia.f32 Y!, { s3 }
.endm
.macro KERNEL_F1
fldmias X, { s4 }
fldmias Y, { s5 }
vldmia.f32 X, { s4 }
vldmia.f32 Y, { s5 }
vmul.f32 s2 , s0, s4
fmacs s2 , s1, s5
vmul.f32 s3 , s0, s5
vmls.f32 s3 , s1, s4
fstmias X!, { s2 }
fstmias Y!, { s3 }
vstmia.f32 X!, { s2 }
vstmia.f32 Y!, { s3 }
.endm
.macro KERNEL_S1
fldmias X, { s4 }
fldmias Y, { s5 }
vldmia.f32 X, { s4 }
vldmia.f32 Y, { s5 }
vmul.f32 s2 , s0, s4
fmacs s2 , s1, s5
vmul.f32 s3 , s0, s5
vmls.f32 s3 , s1, s4
fstmias X, { s2 }
fstmias Y, { s3 }
vstmia.f32 X, { s2 }
vstmia.f32 Y, { s3 }
add X, X, INC_X
add Y, Y, INC_Y
@@ -230,96 +230,96 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ X, #X_PRE ]
pld [ Y, #X_PRE ]
fldmiad X, { d4 - d5 }
fldmiad Y, { d6 - d7 }
vldmia.f64 X, { d4 - d5 }
vldmia.f64 Y, { d6 - d7 }
vmul.f64 d2 , d0, d4
fmacd d2 , d1, d6
vmul.f64 d3 , d0, d6
vmls.f64 d3 , d1, d4
fstmiad X!, { d2 }
fstmiad Y!, { d3 }
vstmia.f64 X!, { d2 }
vstmia.f64 Y!, { d3 }
vmul.f64 d2 , d0, d5
fmacd d2 , d1, d7
vmul.f64 d3 , d0, d7
vmls.f64 d3 , d1, d5
fstmiad X!, { d2 }
fstmiad Y!, { d3 }
vstmia.f64 X!, { d2 }
vstmia.f64 Y!, { d3 }
fldmiad X, { d4 - d5 }
fldmiad Y, { d6 - d7 }
vldmia.f64 X, { d4 - d5 }
vldmia.f64 Y, { d6 - d7 }
vmul.f64 d2 , d0, d4
fmacd d2 , d1, d6
vmul.f64 d3 , d0, d6
vmls.f64 d3 , d1, d4
fstmiad X!, { d2 }
fstmiad Y!, { d3 }
vstmia.f64 X!, { d2 }
vstmia.f64 Y!, { d3 }
vmul.f64 d2 , d0, d5
fmacd d2 , d1, d7
vmul.f64 d3 , d0, d7
vmls.f64 d3 , d1, d5
fstmiad X!, { d2 }
fstmiad Y!, { d3 }
vstmia.f64 X!, { d2 }
vstmia.f64 Y!, { d3 }
pld [ X, #X_PRE ]
pld [ Y, #X_PRE ]
fldmiad X, { d4 - d5 }
fldmiad Y, { d6 - d7 }
vldmia.f64 X, { d4 - d5 }
vldmia.f64 Y, { d6 - d7 }
vmul.f64 d2 , d0, d4
fmacd d2 , d1, d6
vmul.f64 d3 , d0, d6
vmls.f64 d3 , d1, d4
fstmiad X!, { d2 }
fstmiad Y!, { d3 }
vstmia.f64 X!, { d2 }
vstmia.f64 Y!, { d3 }
vmul.f64 d2 , d0, d5
fmacd d2 , d1, d7
vmul.f64 d3 , d0, d7
vmls.f64 d3 , d1, d5
fstmiad X!, { d2 }
fstmiad Y!, { d3 }
vstmia.f64 X!, { d2 }
vstmia.f64 Y!, { d3 }
fldmiad X, { d4 - d5 }
fldmiad Y, { d6 - d7 }
vldmia.f64 X, { d4 - d5 }
vldmia.f64 Y, { d6 - d7 }
vmul.f64 d2 , d0, d4
fmacd d2 , d1, d6
vmul.f64 d3 , d0, d6
vmls.f64 d3 , d1, d4
fstmiad X!, { d2 }
fstmiad Y!, { d3 }
vstmia.f64 X!, { d2 }
vstmia.f64 Y!, { d3 }
vmul.f64 d2 , d0, d5
fmacd d2 , d1, d7
vmul.f64 d3 , d0, d7
vmls.f64 d3 , d1, d5
fstmiad X!, { d2 }
fstmiad Y!, { d3 }
vstmia.f64 X!, { d2 }
vstmia.f64 Y!, { d3 }
.endm
.macro KERNEL_F1
fldmiad X, { d4 - d5 }
fldmiad Y, { d6 - d7 }
vldmia.f64 X, { d4 - d5 }
vldmia.f64 Y, { d6 - d7 }
vmul.f64 d2 , d0, d4
fmacd d2 , d1, d6
vmul.f64 d3 , d0, d6
vmls.f64 d3 , d1, d4
fstmiad X!, { d2 }
fstmiad Y!, { d3 }
vstmia.f64 X!, { d2 }
vstmia.f64 Y!, { d3 }
vmul.f64 d2 , d0, d5
fmacd d2 , d1, d7
vmul.f64 d3 , d0, d7
vmls.f64 d3 , d1, d5
fstmiad X!, { d2 }
fstmiad Y!, { d3 }
vstmia.f64 X!, { d2 }
vstmia.f64 Y!, { d3 }
.endm
.macro KERNEL_S1
fldmiad X, { d4 - d5 }
fldmiad Y, { d6 - d7 }
vldmia.f64 X, { d4 - d5 }
vldmia.f64 Y, { d6 - d7 }
vmul.f64 d2 , d0, d4
fmacd d2 , d1, d6
vmul.f64 d3 , d0, d6
@@ -347,96 +347,96 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ X, #X_PRE ]
pld [ Y, #X_PRE ]
fldmias X, { s4 - s5 }
fldmias Y, { s6 - s7 }
vldmia.f32 X, { s4 - s5 }
vldmia.f32 Y, { s6 - s7 }
vmul.f32 s2 , s0, s4
fmacs s2 , s1, s6
vmul.f32 s3 , s0, s6
vmls.f32 s3 , s1, s4
fstmias X!, { s2 }
fstmias Y!, { s3 }
vstmia.f32 X!, { s2 }
vstmia.f32 Y!, { s3 }
vmul.f32 s2 , s0, s5
fmacs s2 , s1, s7
vmul.f32 s3 , s0, s7
vmls.f32 s3 , s1, s5
fstmias X!, { s2 }
fstmias Y!, { s3 }
vstmia.f32 X!, { s2 }
vstmia.f32 Y!, { s3 }
fldmias X, { s4 - s5 }
fldmias Y, { s6 - s7 }
vldmia.f32 X, { s4 - s5 }
vldmia.f32 Y, { s6 - s7 }
vmul.f32 s2 , s0, s4
fmacs s2 , s1, s6
vmul.f32 s3 , s0, s6
vmls.f32 s3 , s1, s4
fstmias X!, { s2 }
fstmias Y!, { s3 }
vstmia.f32 X!, { s2 }
vstmia.f32 Y!, { s3 }
vmul.f32 s2 , s0, s5
fmacs s2 , s1, s7
vmul.f32 s3 , s0, s7
vmls.f32 s3 , s1, s5
fstmias X!, { s2 }
fstmias Y!, { s3 }
vstmia.f32 X!, { s2 }
vstmia.f32 Y!, { s3 }
pld [ X, #X_PRE ]
pld [ Y, #X_PRE ]
fldmias X, { s4 - s5 }
fldmias Y, { s6 - s7 }
vldmia.f32 X, { s4 - s5 }
vldmia.f32 Y, { s6 - s7 }
vmul.f32 s2 , s0, s4
fmacs s2 , s1, s6
vmul.f32 s3 , s0, s6
vmls.f32 s3 , s1, s4
fstmias X!, { s2 }
fstmias Y!, { s3 }
vstmia.f32 X!, { s2 }
vstmia.f32 Y!, { s3 }
vmul.f32 s2 , s0, s5
fmacs s2 , s1, s7
vmul.f32 s3 , s0, s7
vmls.f32 s3 , s1, s5
fstmias X!, { s2 }
fstmias Y!, { s3 }
vstmia.f32 X!, { s2 }
vstmia.f32 Y!, { s3 }
fldmias X, { s4 - s5 }
fldmias Y, { s6 - s7 }
vldmia.f32 X, { s4 - s5 }
vldmia.f32 Y, { s6 - s7 }
vmul.f32 s2 , s0, s4
fmacs s2 , s1, s6
vmul.f32 s3 , s0, s6
vmls.f32 s3 , s1, s4
fstmias X!, { s2 }
fstmias Y!, { s3 }
vstmia.f32 X!, { s2 }
vstmia.f32 Y!, { s3 }
vmul.f32 s2 , s0, s5
fmacs s2 , s1, s7
vmul.f32 s3 , s0, s7
vmls.f32 s3 , s1, s5
fstmias X!, { s2 }
fstmias Y!, { s3 }
vstmia.f32 X!, { s2 }
vstmia.f32 Y!, { s3 }
.endm
.macro KERNEL_F1
fldmias X, { s4 - s5 }
fldmias Y, { s6 - s7 }
vldmia.f32 X, { s4 - s5 }
vldmia.f32 Y, { s6 - s7 }
vmul.f32 s2 , s0, s4
fmacs s2 , s1, s6
vmul.f32 s3 , s0, s6
vmls.f32 s3 , s1, s4
fstmias X!, { s2 }
fstmias Y!, { s3 }
vstmia.f32 X!, { s2 }
vstmia.f32 Y!, { s3 }
vmul.f32 s2 , s0, s5
fmacs s2 , s1, s7
vmul.f32 s3 , s0, s7
vmls.f32 s3 , s1, s5
fstmias X!, { s2 }
fstmias Y!, { s3 }
vstmia.f32 X!, { s2 }
vstmia.f32 Y!, { s3 }
.endm
.macro KERNEL_S1
fldmias X, { s4 - s5 }
fldmias Y, { s6 - s7 }
vldmia.f32 X, { s4 - s5 }
vldmia.f32 Y, { s6 - s7 }
vmul.f32 s2 , s0, s4
fmacs s2 , s1, s6
vmul.f32 s3 , s0, s6

View File

@@ -64,30 +64,30 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F4
pld [ X, #X_PRE ]
fldmiad X, { d4 - d7 }
vldmia.f64 X, { d4 - d7 }
vmul.f64 d4, d4, d0
vmul.f64 d5, d5, d0
vmul.f64 d6, d6, d0
fstmiad X!, { d4 - d5 }
vstmia.f64 X!, { d4 - d5 }
vmul.f64 d7, d7, d0
fstmiad X!, { d6 - d7 }
vstmia.f64 X!, { d6 - d7 }
.endm
.macro KERNEL_F1
fldmiad X, { d4 }
vldmia.f64 X, { d4 }
vmul.f64 d4, d4, d0
fstmiad X!, { d4 }
vstmia.f64 X!, { d4 }
.endm
.macro KERNEL_S1
fldmiad X, { d4 }
vldmia.f64 X, { d4 }
vmul.f64 d4, d4, d0
fstmiad X, { d4 }
vstmia.f64 X, { d4 }
add X, X, INC_X
.endm
@@ -96,30 +96,30 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F4
fldmias X, { s4 - s7 }
vldmia.f32 X, { s4 - s7 }
vmul.f32 s4, s4, s0
vmul.f32 s5, s5, s0
vmul.f32 s6, s6, s0
fstmias X!, { s4 - s5 }
vstmia.f32 X!, { s4 - s5 }
vmul.f32 s7, s7, s0
fstmias X!, { s6 - s7 }
vstmia.f32 X!, { s6 - s7 }
.endm
.macro KERNEL_F1
fldmias X, { s4 }
vldmia.f32 X, { s4 }
vmul.f32 s4, s4, s0
fstmias X!, { s4 }
vstmia.f32 X!, { s4 }
.endm
.macro KERNEL_S1
fldmias X, { s4 }
vldmia.f32 X, { s4 }
vmul.f32 s4, s4, s0
fstmias X, { s4 }
vstmia.f32 X, { s4 }
add X, X, INC_X
.endm
@@ -136,58 +136,58 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ X, #X_PRE ]
fldmiad X, { d4 - d5 }
vldmia.f64 X, { d4 - d5 }
vmul.f64 d2, d0, d4
vmls.f64 d2, d1, d5
vmul.f64 d3, d0, d5
fmacd d3, d1, d4
fstmiad X!, { d2 - d3 }
vstmia.f64 X!, { d2 - d3 }
fldmiad X, { d4 - d5 }
vldmia.f64 X, { d4 - d5 }
vmul.f64 d2, d0, d4
vmls.f64 d2, d1, d5
vmul.f64 d3, d0, d5
fmacd d3, d1, d4
fstmiad X!, { d2 - d3 }
vstmia.f64 X!, { d2 - d3 }
pld [ X, #X_PRE ]
fldmiad X, { d4 - d5 }
vldmia.f64 X, { d4 - d5 }
vmul.f64 d2, d0, d4
vmls.f64 d2, d1, d5
vmul.f64 d3, d0, d5
fmacd d3, d1, d4
fstmiad X!, { d2 - d3 }
vstmia.f64 X!, { d2 - d3 }
fldmiad X, { d4 - d5 }
vldmia.f64 X, { d4 - d5 }
vmul.f64 d2, d0, d4
vmls.f64 d2, d1, d5
vmul.f64 d3, d0, d5
fmacd d3, d1, d4
fstmiad X!, { d2 - d3 }
vstmia.f64 X!, { d2 - d3 }
.endm
.macro KERNEL_F1
fldmiad X, { d4 - d5 }
vldmia.f64 X, { d4 - d5 }
vmul.f64 d2, d0, d4
vmls.f64 d2, d1, d5
vmul.f64 d3, d0, d5
fmacd d3, d1, d4
fstmiad X!, { d2 - d3 }
vstmia.f64 X!, { d2 - d3 }
.endm
.macro KERNEL_S1
fldmiad X, { d4 - d5 }
vldmia.f64 X, { d4 - d5 }
vmul.f64 d2, d0, d4
vmls.f64 d2, d1, d5
vmul.f64 d3, d0, d5
fmacd d3, d1, d4
fstmiad X, { d2 - d3 }
vstmia.f64 X, { d2 - d3 }
add X, X, INC_X
.endm
@@ -199,56 +199,56 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ X, #X_PRE ]
fldmias X, { s4 - s5 }
vldmia.f32 X, { s4 - s5 }
vmul.f32 s2, s0, s4
vmls.f32 s2, s1, s5
vmul.f32 s3, s0, s5
fmacs s3, s1, s4
fstmias X!, { s2 - s3 }
vstmia.f32 X!, { s2 - s3 }
fldmias X, { s4 - s5 }
vldmia.f32 X, { s4 - s5 }
vmul.f32 s2, s0, s4
vmls.f32 s2, s1, s5
vmul.f32 s3, s0, s5
fmacs s3, s1, s4
fstmias X!, { s2 - s3 }
vstmia.f32 X!, { s2 - s3 }
fldmias X, { s4 - s5 }
vldmia.f32 X, { s4 - s5 }
vmul.f32 s2, s0, s4
vmls.f32 s2, s1, s5
vmul.f32 s3, s0, s5
fmacs s3, s1, s4
fstmias X!, { s2 - s3 }
vstmia.f32 X!, { s2 - s3 }
fldmias X, { s4 - s5 }
vldmia.f32 X, { s4 - s5 }
vmul.f32 s2, s0, s4
vmls.f32 s2, s1, s5
vmul.f32 s3, s0, s5
fmacs s3, s1, s4
fstmias X!, { s2 - s3 }
vstmia.f32 X!, { s2 - s3 }
.endm
.macro KERNEL_F1
fldmias X, { s4 - s5 }
vldmia.f32 X, { s4 - s5 }
vmul.f32 s2, s0, s4
vmls.f32 s2, s1, s5
vmul.f32 s3, s0, s5
fmacs s3, s1, s4
fstmias X!, { s2 - s3 }
vstmia.f32 X!, { s2 - s3 }
.endm
.macro KERNEL_S1
fldmias X, { s4 - s5 }
vldmia.f32 X, { s4 - s5 }
vmul.f32 s2, s0, s4
vmls.f32 s2, s1, s5
vmul.f32 s3, s0, s5
fmacs s3, s1, s4
fstmias X, { s2 - s3 }
vstmia.f32 X, { s2 - s3 }
add X, X, INC_X
.endm

View File

@@ -65,17 +65,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY_F8
pld [ X, #X_PRE ]
fldmias X!, { s0 - s3 }
fldmias X!, { s4 - s7 }
fstmias Y!, { s0 - s3 }
fstmias Y!, { s4 - s7 }
vldmia.f32 X!, { s0 - s3 }
vldmia.f32 X!, { s4 - s7 }
vstmia.f32 Y!, { s0 - s3 }
vstmia.f32 Y!, { s4 - s7 }
.endm
.macro COPY_F1
fldmias X!, { s0 }
fstmias Y!, { s0 }
vldmia.f32 X!, { s0 }
vstmia.f32 Y!, { s0 }
.endm
@@ -85,23 +85,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY_S4
nop
fldmias X, { s0 }
fstmias Y, { s0 }
vldmia.f32 X, { s0 }
vstmia.f32 Y, { s0 }
add X, X, INC_X
add Y, Y, INC_Y
fldmias X, { s1 }
fstmias Y, { s1 }
vldmia.f32 X, { s1 }
vstmia.f32 Y, { s1 }
add X, X, INC_X
add Y, Y, INC_Y
fldmias X, { s0 }
fstmias Y, { s0 }
vldmia.f32 X, { s0 }
vstmia.f32 Y, { s0 }
add X, X, INC_X
add Y, Y, INC_Y
fldmias X, { s1 }
fstmias Y, { s1 }
vldmia.f32 X, { s1 }
vstmia.f32 Y, { s1 }
add X, X, INC_X
add Y, Y, INC_Y
@@ -110,8 +110,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY_S1
fldmias X, { s0 }
fstmias Y, { s0 }
vldmia.f32 X, { s0 }
vstmia.f32 Y, { s0 }
add X, X, INC_X
add Y, Y, INC_Y

View File

@@ -68,26 +68,26 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F4
fldmias X!, { s14 }
fldmias Y!, { s15 }
vldmia.f32 X!, { s14 }
vldmia.f32 Y!, { s15 }
vmul.f32 s15, s14, s15
vcvt.f64.f32 d4, s15
vadd.f64 d0 , d0, d4
fldmias X!, { s14 }
fldmias Y!, { s15 }
vldmia.f32 X!, { s14 }
vldmia.f32 Y!, { s15 }
vmul.f32 s15, s14, s15
vcvt.f64.f32 d4, s15
vadd.f64 d0 , d0, d4
fldmias X!, { s14 }
fldmias Y!, { s15 }
vldmia.f32 X!, { s14 }
vldmia.f32 Y!, { s15 }
vmul.f32 s15, s14, s15
vcvt.f64.f32 d4, s15
vadd.f64 d0 , d0, d4
fldmias X!, { s14 }
fldmias Y!, { s15 }
vldmia.f32 X!, { s14 }
vldmia.f32 Y!, { s15 }
vmul.f32 s15, s14, s15
vcvt.f64.f32 d4, s15
vadd.f64 d0 , d0, d4
@@ -96,8 +96,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmias X!, { s14 }
fldmias Y!, { s15 }
vldmia.f32 X!, { s14 }
vldmia.f32 Y!, { s15 }
vmul.f32 s15, s14, s15
vcvt.f64.f32 d4, s15
vadd.f64 d0 , d0, d4
@@ -109,32 +109,32 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
nop
fldmias X, { s14 }
fldmias Y, { s15 }
vldmia.f32 X, { s14 }
vldmia.f32 Y, { s15 }
vmul.f32 s15, s14, s15
vcvt.f64.f32 d4, s15
vadd.f64 d0 , d0, d4
add X, X, INC_X
add Y, Y, INC_Y
fldmias X, { s14 }
fldmias Y, { s15 }
vldmia.f32 X, { s14 }
vldmia.f32 Y, { s15 }
vmul.f32 s15, s14, s15
vcvt.f64.f32 d4, s15
vadd.f64 d0 , d0, d4
add X, X, INC_X
add Y, Y, INC_Y
fldmias X, { s14 }
fldmias Y, { s15 }
vldmia.f32 X, { s14 }
vldmia.f32 Y, { s15 }
vmul.f32 s15, s14, s15
vcvt.f64.f32 d4, s15
vadd.f64 d0 , d0, d4
add X, X, INC_X
add Y, Y, INC_Y
fldmias X, { s14 }
fldmias Y, { s15 }
vldmia.f32 X, { s14 }
vldmia.f32 Y, { s15 }
vmul.f32 s15, s14, s15
vcvt.f64.f32 d4, s15
vadd.f64 d0 , d0, d4
@@ -146,8 +146,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1
fldmias X, { s14 }
fldmias Y, { s15 }
vldmia.f32 X, { s14 }
vldmia.f32 Y, { s15 }
vmul.f32 s15, s14, s15
vcvt.f64.f32 d4, s15
vadd.f64 d0 , d0, d4
@@ -162,12 +162,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F4
fldmias X!, { s8 - s9 }
fldmias Y!, { s4 - s5}
vldmia.f32 X!, { s8 - s9 }
vldmia.f32 Y!, { s4 - s5}
fmacs s0 , s4, s8
fldmias X!, { s10 - s11 }
vldmia.f32 X!, { s10 - s11 }
fmacs s1 , s5, s9
fldmias Y!, { s6 - s7 }
vldmia.f32 Y!, { s6 - s7 }
fmacs s0 , s6, s10
fmacs s1 , s7, s11
@@ -175,8 +175,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmias X!, { s4 }
fldmias Y!, { s8 }
vldmia.f32 X!, { s4 }
vldmia.f32 Y!, { s8 }
fmacs s0 , s4, s8
.endm
@@ -185,26 +185,26 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S4
nop
fldmias X, { s4 }
fldmias Y, { s8 }
vldmia.f32 X, { s4 }
vldmia.f32 Y, { s8 }
add X, X, INC_X
add Y, Y, INC_Y
fmacs s0 , s4, s8
fldmias X, { s5 }
fldmias Y, { s9 }
vldmia.f32 X, { s5 }
vldmia.f32 Y, { s9 }
add X, X, INC_X
add Y, Y, INC_Y
fmacs s1 , s5, s9
fldmias X, { s6 }
fldmias Y, { s10 }
vldmia.f32 X, { s6 }
vldmia.f32 Y, { s10 }
add X, X, INC_X
add Y, Y, INC_Y
fmacs s0 , s6, s10
fldmias X, { s7 }
fldmias Y, { s11 }
vldmia.f32 X, { s7 }
vldmia.f32 Y, { s11 }
add X, X, INC_X
add Y, Y, INC_Y
fmacs s1 , s7, s11
@@ -214,8 +214,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1
fldmias X, { s4 }
fldmias Y, { s8 }
vldmia.f32 X, { s4 }
vldmia.f32 Y, { s8 }
add X, X, INC_X
fmacs s0 , s4, s8
add Y, Y, INC_Y

View File

@@ -112,8 +112,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL4x2_SUB
fldmias AO! , { s0 - s3 }
fldmias BO! , { s4 - s5 }
vldmia.f32 AO! , { s0 - s3 }
vldmia.f32 BO! , { s4 - s5 }
fmacs s8 , s0, s4
fmacs s9 , s1, s4

View File

@@ -136,29 +136,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL4x4_I
pld [ AO , #A_PRE ]
fldmias AO!, { s0 - s1 }
vldmia.f32 AO!, { s0 - s1 }
pld [ BO , #B_PRE ]
fldmias BO!, { s8 - s9 }
vldmia.f32 BO!, { s8 - s9 }
fmuls s16 , s0, s8
fldmias AO!, { s2 - s3 }
vldmia.f32 AO!, { s2 - s3 }
fmuls s17 , s1, s8
fmuls s18 , s2, s8
fldmias BO!, { s10 - s11 }
vldmia.f32 BO!, { s10 - s11 }
fmuls s19 , s3, s8
fmuls s20 , s0, s9
fldmias AO!, { s4 - s5 }
vldmia.f32 AO!, { s4 - s5 }
fmuls s21 , s1, s9
fmuls s22 , s2, s9
fldmias AO!, { s6 - s7 }
vldmia.f32 AO!, { s6 - s7 }
fmuls s23 , s3, s9
fmuls s24 , s0, s10
fldmias BO!, { s12 - s13 }
vldmia.f32 BO!, { s12 - s13 }
fmuls s25 , s1, s10
fmuls s26 , s2, s10
fldmias BO!, { s14 - s15 }
vldmia.f32 BO!, { s14 - s15 }
fmuls s27 , s3, s10
fmuls s28 , s0, s11
@@ -174,20 +174,20 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ AO , #A_PRE ]
fmacs s16 , s4, s12
fmacs s17 , s5, s12
fldmias AO!, { s0 - s3 }
vldmia.f32 AO!, { s0 - s3 }
fmacs s18 , s6, s12
pld [ BO , #B_PRE ]
fmacs s19 , s7, s12
fmacs s20 , s4, s13
fldmias BO!, { s8 - s11 }
vldmia.f32 BO!, { s8 - s11 }
fmacs s21 , s5, s13
fmacs s22 , s6, s13
//fldmias AO!, { s2 - s3 }
//vldmia.f32 AO!, { s2 - s3 }
fmacs s23 , s7, s13
fmacs s24 , s4, s14
//fldmias BO!, { s10 - s11 }
//vldmia.f32 BO!, { s10 - s11 }
fmacs s25 , s5, s14
fmacs s26 , s6, s14
fmacs s27 , s7, s14
@@ -203,17 +203,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL4x4_M1
fmacs s16 , s0, s8
fldmias AO!, { s4 - s7 }
vldmia.f32 AO!, { s4 - s7 }
fmacs s17 , s1, s8
fmacs s18 , s2, s8
fldmias BO!, { s12 - s15 }
//fldmias AO!, { s6 - s7 }
vldmia.f32 BO!, { s12 - s15 }
//vldmia.f32 AO!, { s6 - s7 }
fmacs s19 , s3, s8
fmacs s20 , s0, s9
fmacs s21 , s1, s9
fmacs s22 , s2, s9
//fldmias BO!, { s14 - s15 }
//vldmia.f32 BO!, { s14 - s15 }
fmacs s23 , s3, s9
fmacs s24 , s0, s10
@@ -300,7 +300,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0, ALPHA
add r4 , CO2, r3
fldmias CO1, { s8 - s11 }
vldmia.f32 CO1, { s8 - s11 }
fmacs s8 , s0 , s16
flds s12, [CO2]
@@ -322,7 +322,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ CO1 , #C_PRE ]
fldmias r4, { s8 - s11 }
vldmia.f32 r4, { s8 - s11 }
fmacs s8 , s0 , s24
fsts s12, [CO2]
@@ -338,7 +338,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
add CO2, r4 , r3
fldmias CO2, { s12 - s15 }
vldmia.f32 CO2, { s12 - s15 }
fsts s8 , [r4 ]
fmacs s12, s0 , s28
@@ -350,7 +350,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fmacs s15, s0 , s31
pld [ r4 , #C_PRE ]
fstmias CO2, { s12 - s15 }
vstmia.f32 CO2, { s12 - s15 }
pld [ CO2 , #C_PRE ]
add CO1, CO1, #16

View File

@@ -73,7 +73,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s3 , [ AO2, #4 ]
add AO1, AO1, #8
fstmias BO!, { s0 - s3 }
vstmia.f32 BO!, { s0 - s3 }
add AO2, AO2, #8
.endm
@@ -85,7 +85,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s1 , [ AO2, #0 ]
add AO1, AO1, #4
fstmias BO!, { s0 - s1 }
vstmia.f32 BO!, { s0 - s1 }
add AO2, AO2, #4
.endm
@@ -95,7 +95,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0 , [ AO1, #0 ]
flds s1 , [ AO1, #4 ]
fstmias BO!, { s0 - s1 }
vstmia.f32 BO!, { s0 - s1 }
add AO1, AO1, #8
.endm
@@ -105,7 +105,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0 , [ AO1, #0 ]
fstmias BO!, { s0 }
vstmia.f32 BO!, { s0 }
add AO1, AO1, #4
.endm

View File

@@ -100,10 +100,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s11, [ AO4, #8 ]
flds s15, [ AO4, #12 ]
fstmias BO!, { s0 - s3 }
vstmia.f32 BO!, { s0 - s3 }
add AO4, AO4, #16
fstmias BO!, { s4 - s7 }
fstmias BO!, { s8 - s15 }
vstmia.f32 BO!, { s4 - s7 }
vstmia.f32 BO!, { s8 - s15 }
.endm
@@ -117,7 +117,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s3 , [ AO4, #0 ]
add AO3, AO3, #4
fstmias BO!, { s0 - s3 }
vstmia.f32 BO!, { s0 - s3 }
add AO4, AO4, #4
.endm
@@ -135,7 +135,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s5 , [ AO2, #8 ]
flds s7 , [ AO2, #12 ]
fstmias BO!, { s0 - s7 }
vstmia.f32 BO!, { s0 - s7 }
add AO2, AO2, #16
.endm
@@ -147,7 +147,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s1 , [ AO2, #0 ]
add AO1, AO1, #4
fstmias BO!, { s0 - s1 }
vstmia.f32 BO!, { s0 - s1 }
add AO2, AO2, #4
.endm
@@ -159,7 +159,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s2 , [ AO1, #8 ]
flds s3 , [ AO1, #12 ]
fstmias BO!, { s0 - s3 }
vstmia.f32 BO!, { s0 - s3 }
add AO1, AO1, #16
.endm
@@ -169,7 +169,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
flds s0 , [ AO1, #0 ]
fstmias BO!, { s0 }
vstmia.f32 BO!, { s0 }
add AO1, AO1, #4
.endm

View File

@@ -76,21 +76,21 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY4x4_1
pld [ AO1, #A_PRE ]
fldmias AO1, { s0 - s3 }
vldmia.f32 AO1, { s0 - s3 }
add r3, AO1, LDA
pld [ r3, #A_PRE ]
fldmias r3, { s4 - s7 }
vldmia.f32 r3, { s4 - s7 }
add r3, r3, LDA
pld [ r3, #A_PRE ]
fldmias r3, { s8 - s11 }
vldmia.f32 r3, { s8 - s11 }
add r3, r3, LDA
pld [ r3, #A_PRE ]
fldmias r3, { s12 - s15 }
vldmia.f32 r3, { s12 - s15 }
fstmias BO1, { s0 - s15 }
vstmia.f32 BO1, { s0 - s15 }
add AO1, AO1, #16
add BO1, BO1, M4
@@ -98,18 +98,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY4x4_2
fldmias AO1, { s0 - s3 }
vldmia.f32 AO1, { s0 - s3 }
add r3, AO1, LDA
fldmias r3, { s4 - s7 }
vldmia.f32 r3, { s4 - s7 }
add r3, r3, LDA
fldmias r3, { s8 - s11 }
vldmia.f32 r3, { s8 - s11 }
add r3, r3, LDA
fldmias r3, { s12 - s15 }
vldmia.f32 r3, { s12 - s15 }
fstmias BO1, { s0 - s15 }
vstmia.f32 BO1, { s0 - s15 }
add AO1, AO1, #16
add BO1, BO1, M4
@@ -118,18 +118,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY2x4
fldmias AO1, { s0 - s1 }
vldmia.f32 AO1, { s0 - s1 }
add r3, AO1, LDA
fldmias r3, { s2 - s3 }
vldmia.f32 r3, { s2 - s3 }
add r3, r3, LDA
fldmias r3, { s4 - s5 }
vldmia.f32 r3, { s4 - s5 }
add r3, r3, LDA
fldmias r3, { s6 - s7 }
vldmia.f32 r3, { s6 - s7 }
fstmias BO2, { s0 - s7 }
vstmia.f32 BO2, { s0 - s7 }
add AO1, AO1, #8
add BO2, BO2, #32
@@ -137,18 +137,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY1x4
fldmias AO1, { s0 }
vldmia.f32 AO1, { s0 }
add r3, AO1, LDA
fldmias r3, { s1 }
vldmia.f32 r3, { s1 }
add r3, r3, LDA
fldmias r3, { s2 }
vldmia.f32 r3, { s2 }
add r3, r3, LDA
fldmias r3, { s3 }
vldmia.f32 r3, { s3 }
fstmias BO3, { s0 - s3 }
vstmia.f32 BO3, { s0 - s3 }
add AO1, AO1, #4
add BO3, BO3, #16
@@ -158,12 +158,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY4x2
fldmias AO1, { s0 - s3 }
vldmia.f32 AO1, { s0 - s3 }
add r3, AO1, LDA
fldmias r3, { s4 - s7 }
vldmia.f32 r3, { s4 - s7 }
fstmias BO1, { s0 - s7 }
vstmia.f32 BO1, { s0 - s7 }
add AO1, AO1, #16
add BO1, BO1, M4
@@ -171,12 +171,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY2x2
fldmias AO1, { s0 - s1 }
vldmia.f32 AO1, { s0 - s1 }
add r3, AO1, LDA
fldmias r3, { s2 - s3 }
vldmia.f32 r3, { s2 - s3 }
fstmias BO2, { s0 - s3 }
vstmia.f32 BO2, { s0 - s3 }
add AO1, AO1, #8
add BO2, BO2, #16
@@ -184,12 +184,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY1x2
fldmias AO1, { s0 }
vldmia.f32 AO1, { s0 }
add r3, AO1, LDA
fldmias r3, { s1 }
vldmia.f32 r3, { s1 }
fstmias BO3, { s0 - s1 }
vstmia.f32 BO3, { s0 - s1 }
add AO1, AO1, #4
add BO3, BO3, #8
@@ -199,9 +199,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY4x1
fldmias AO1, { s0 - s3 }
vldmia.f32 AO1, { s0 - s3 }
fstmias BO1, { s0 - s3 }
vstmia.f32 BO1, { s0 - s3 }
add AO1, AO1, #16
add BO1, BO1, M4
@@ -209,9 +209,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY2x1
fldmias AO1, { s0 - s1 }
vldmia.f32 AO1, { s0 - s1 }
fstmias BO2, { s0 - s1 }
vstmia.f32 BO2, { s0 - s1 }
add AO1, AO1, #8
add BO2, BO2, #8
@@ -219,9 +219,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY1x1
fldmias AO1, { s0 }
vldmia.f32 AO1, { s0 }
fstmias BO3, { s0 }
vstmia.f32 BO3, { s0 }
add AO1, AO1, #4
add BO3, BO3, #4

View File

@@ -118,8 +118,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL4x2_SUB
fldmias AO!, { s0 - s3 }
fldmias BO!, { s4 - s5 }
vldmia.f32 AO!, { s0 - s3 }
vldmia.f32 BO!, { s4 - s5 }
fmacs s8 , s0, s4
fmacs s9 , s1, s4

View File

@@ -122,30 +122,30 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL4x4_I
fldmias AO!, { s0 - s1 }
vldmia.f32 AO!, { s0 - s1 }
pld [ AO , #A_PRE-8 ]
fldmias BO!, { s8 - s9 }
vldmia.f32 BO!, { s8 - s9 }
pld [ BO , #B_PRE-8 ]
fmuls s16 , s0, s8
fldmias AO!, { s2 - s3 }
vldmia.f32 AO!, { s2 - s3 }
fmuls s17 , s1, s8
fmuls s18 , s2, s8
fldmias BO!, { s10 - s11 }
vldmia.f32 BO!, { s10 - s11 }
fmuls s19 , s3, s8
fmuls s20 , s0, s9
fldmias AO!, { s4 - s5 }
vldmia.f32 AO!, { s4 - s5 }
fmuls s21 , s1, s9
fmuls s22 , s2, s9
fldmias AO!, { s6 - s7 }
vldmia.f32 AO!, { s6 - s7 }
fmuls s23 , s3, s9
fmuls s24 , s0, s10
fldmias BO!, { s12 - s13 }
vldmia.f32 BO!, { s12 - s13 }
fmuls s25 , s1, s10
fmuls s26 , s2, s10
fldmias BO!, { s14 - s15 }
vldmia.f32 BO!, { s14 - s15 }
fmuls s27 , s3, s10
fmuls s28 , s0, s11
@@ -161,20 +161,20 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ AO , #A_PRE ]
fmacs s16 , s4, s12
fmacs s17 , s5, s12
fldmias AO!, { s0 - s1 }
vldmia.f32 AO!, { s0 - s1 }
fmacs s18 , s6, s12
pld [ BO , #B_PRE ]
fmacs s19 , s7, s12
fmacs s20 , s4, s13
fldmias AO!, { s2 - s3 }
vldmia.f32 AO!, { s2 - s3 }
fmacs s21 , s5, s13
fmacs s22 , s6, s13
fldmias BO!, { s8 - s9 }
vldmia.f32 BO!, { s8 - s9 }
fmacs s23 , s7, s13
fmacs s24 , s4, s14
fldmias BO!, { s10 - s11 }
vldmia.f32 BO!, { s10 - s11 }
fmacs s25 , s5, s14
fmacs s26 , s6, s14
fmacs s27 , s7, s14
@@ -190,17 +190,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL4x4_M1
fmacs s16 , s0, s8
fldmias AO!, { s4 - s5 }
vldmia.f32 AO!, { s4 - s5 }
fmacs s17 , s1, s8
fmacs s18 , s2, s8
fldmias AO!, { s6 - s7 }
vldmia.f32 AO!, { s6 - s7 }
fmacs s19 , s3, s8
fmacs s20 , s0, s9
fldmias BO!, { s12 - s13 }
vldmia.f32 BO!, { s12 - s13 }
fmacs s21 , s1, s9
fmacs s22 , s2, s9
fldmias BO!, { s14 - s15 }
vldmia.f32 BO!, { s14 - s15 }
fmacs s23 , s3, s9
fmacs s24 , s0, s10
@@ -325,7 +325,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fsts s11, [r4 , #12 ]
fmuls s15, s0 , s31
fstmias CO2, { s12 - s15 }
vstmia.f32 CO2, { s12 - s15 }
add CO1, CO1, #16

View File

@@ -103,29 +103,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ X, #X_PRE ]
pld [ Y, #X_PRE ]
fldmiad X, { d0 - d3 }
fldmiad Y, { d4 - d7 }
fstmiad Y!, { d0 - d3 }
fstmiad X!, { d4 - d7}
vldmia.f64 X, { d0 - d3 }
vldmia.f64 Y, { d4 - d7 }
vstmia.f64 Y!, { d0 - d3 }
vstmia.f64 X!, { d4 - d7}
.endm
.macro KERNEL_F1
fldmiad X, { d0 }
fldmiad Y, { d4 }
fstmiad Y!, { d0 }
fstmiad X!, { d4 }
vldmia.f64 X, { d0 }
vldmia.f64 Y, { d4 }
vstmia.f64 Y!, { d0 }
vstmia.f64 X!, { d4 }
.endm
.macro KERNEL_S1
fldmiad X, { d0 }
fldmiad Y, { d4 }
fstmiad Y, { d0 }
fstmiad X, { d4 }
vldmia.f64 X, { d0 }
vldmia.f64 Y, { d4 }
vstmia.f64 Y, { d0 }
vstmia.f64 X, { d4 }
add X, X, INC_X
add Y, Y, INC_Y
@@ -135,29 +135,29 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F4
fldmias X, { s0 - s3 }
fldmias Y, { s4 - s7 }
fstmias Y!, { s0 - s3 }
fstmias X!, { s4 - s7}
vldmia.f32 X, { s0 - s3 }
vldmia.f32 Y, { s4 - s7 }
vstmia.f32 Y!, { s0 - s3 }
vstmia.f32 X!, { s4 - s7}
.endm
.macro KERNEL_F1
fldmias X, { s0 }
fldmias Y, { s4 }
fstmias Y!, { s0 }
fstmias X!, { s4 }
vldmia.f32 X, { s0 }
vldmia.f32 Y, { s4 }
vstmia.f32 Y!, { s0 }
vstmia.f32 X!, { s4 }
.endm
.macro KERNEL_S1
fldmias X, { s0 }
fldmias Y, { s4 }
fstmias Y, { s0 }
fstmias X, { s4 }
vldmia.f32 X, { s0 }
vldmia.f32 Y, { s4 }
vstmia.f32 Y, { s0 }
vstmia.f32 X, { s4 }
add X, X, INC_X
add Y, Y, INC_Y
@@ -174,35 +174,35 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ X, #X_PRE ]
pld [ Y, #X_PRE ]
fldmiad X, { d0 - d3 }
fldmiad Y, { d4 - d7 }
fstmiad Y!, { d0 - d3 }
fstmiad X!, { d4 - d7}
vldmia.f64 X, { d0 - d3 }
vldmia.f64 Y, { d4 - d7 }
vstmia.f64 Y!, { d0 - d3 }
vstmia.f64 X!, { d4 - d7}
pld [ X, #X_PRE ]
pld [ Y, #X_PRE ]
fldmiad X, { d0 - d3 }
fldmiad Y, { d4 - d7 }
fstmiad Y!, { d0 - d3 }
fstmiad X!, { d4 - d7}
vldmia.f64 X, { d0 - d3 }
vldmia.f64 Y, { d4 - d7 }
vstmia.f64 Y!, { d0 - d3 }
vstmia.f64 X!, { d4 - d7}
.endm
.macro KERNEL_F1
fldmiad X, { d0 - d1 }
fldmiad Y, { d4 - d5 }
fstmiad Y!, { d0 - d1 }
fstmiad X!, { d4 - d5 }
vldmia.f64 X, { d0 - d1 }
vldmia.f64 Y, { d4 - d5 }
vstmia.f64 Y!, { d0 - d1 }
vstmia.f64 X!, { d4 - d5 }
.endm
.macro KERNEL_S1
fldmiad X, { d0 - d1 }
fldmiad Y, { d4 - d5 }
fstmiad Y, { d0 - d1 }
fstmiad X, { d4 - d5 }
vldmia.f64 X, { d0 - d1 }
vldmia.f64 Y, { d4 - d5 }
vstmia.f64 Y, { d0 - d1 }
vstmia.f64 X, { d4 - d5 }
add X, X, INC_X
add Y, Y, INC_Y
@@ -215,33 +215,33 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ X, #X_PRE ]
pld [ Y, #X_PRE ]
fldmias X, { s0 - s3 }
fldmias Y, { s4 - s7 }
fstmias Y!, { s0 - s3 }
fstmias X!, { s4 - s7}
vldmia.f32 X, { s0 - s3 }
vldmia.f32 Y, { s4 - s7 }
vstmia.f32 Y!, { s0 - s3 }
vstmia.f32 X!, { s4 - s7}
fldmias X, { s0 - s3 }
fldmias Y, { s4 - s7 }
fstmias Y!, { s0 - s3 }
fstmias X!, { s4 - s7}
vldmia.f32 X, { s0 - s3 }
vldmia.f32 Y, { s4 - s7 }
vstmia.f32 Y!, { s0 - s3 }
vstmia.f32 X!, { s4 - s7}
.endm
.macro KERNEL_F1
fldmias X, { s0 - s1 }
fldmias Y, { s4 - s5 }
fstmias Y!, { s0 - s1 }
fstmias X!, { s4 - s5 }
vldmia.f32 X, { s0 - s1 }
vldmia.f32 Y, { s4 - s5 }
vstmia.f32 Y!, { s0 - s1 }
vstmia.f32 X!, { s4 - s5 }
.endm
.macro KERNEL_S1
fldmias X, { s0 - s1 }
fldmias Y, { s4 - s5 }
fstmias Y, { s0 - s1 }
fstmias X, { s4 - s5 }
vldmia.f32 X, { s0 - s1 }
vldmia.f32 Y, { s4 - s5 }
vstmia.f32 Y, { s0 - s1 }
vstmia.f32 X, { s4 - s5 }
add X, X, INC_X
add Y, Y, INC_Y

View File

@@ -66,15 +66,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ X, #X_PRE ]
pld [ X, #X_PRE+32 ]
fldmiad X!, { d0 - d7 }
fstmiad Y!, { d0 - d7 }
vldmia.f64 X!, { d0 - d7 }
vstmia.f64 Y!, { d0 - d7 }
.endm
.macro COPY_F1
fldmiad X!, { d0 - d1 }
fstmiad Y!, { d0 - d1 }
vldmia.f64 X!, { d0 - d1 }
vstmia.f64 Y!, { d0 - d1 }
.endm
@@ -84,23 +84,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY_S4
nop
fldmiad X, { d0 - d1 }
fstmiad Y, { d0 - d1 }
vldmia.f64 X, { d0 - d1 }
vstmia.f64 Y, { d0 - d1 }
add X, X, INC_X
add Y, Y, INC_Y
fldmiad X, { d2 - d3 }
fstmiad Y, { d2 - d3 }
vldmia.f64 X, { d2 - d3 }
vstmia.f64 Y, { d2 - d3 }
add X, X, INC_X
add Y, Y, INC_Y
fldmiad X, { d0 - d1 }
fstmiad Y, { d0 - d1 }
vldmia.f64 X, { d0 - d1 }
vstmia.f64 Y, { d0 - d1 }
add X, X, INC_X
add Y, Y, INC_Y
fldmiad X, { d2 - d3 }
fstmiad Y, { d2 - d3 }
vldmia.f64 X, { d2 - d3 }
vstmia.f64 Y, { d2 - d3 }
add X, X, INC_X
add Y, Y, INC_Y
@@ -109,8 +109,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY_S1
fldmiad X, { d0 - d1 }
fstmiad Y, { d0 - d1 }
vldmia.f64 X, { d0 - d1 }
vstmia.f64 Y, { d0 - d1 }
add X, X, INC_X
add Y, Y, INC_Y

View File

@@ -76,15 +76,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ X, #X_PRE ]
pld [ Y, #X_PRE ]
fldmiad X!, { d4 - d5 }
fldmiad Y!, { d8 - d9 }
vldmia.f64 X!, { d4 - d5 }
vldmia.f64 Y!, { d8 - d9 }
fmacd d0 , d4, d8
fmacd d1 , d4, d9
fldmiad X!, { d6 - d7 }
vldmia.f64 X!, { d6 - d7 }
fmacd d2 , d5, d9
fmacd d3 , d5, d8
fldmiad Y!, { d10 - d11 }
vldmia.f64 Y!, { d10 - d11 }
fmacd d0 , d6, d10
fmacd d1 , d6, d11
pld [ X, #X_PRE ]
@@ -93,15 +93,15 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
pld [ Y, #X_PRE ]
fldmiad X!, { d4 - d5 }
fldmiad Y!, { d8 - d9 }
vldmia.f64 X!, { d4 - d5 }
vldmia.f64 Y!, { d8 - d9 }
fmacd d0 , d4, d8
fmacd d1 , d4, d9
fldmiad X!, { d6 - d7 }
vldmia.f64 X!, { d6 - d7 }
fmacd d2 , d5, d9
fmacd d3 , d5, d8
fldmiad Y!, { d10 - d11 }
vldmia.f64 Y!, { d10 - d11 }
fmacd d0 , d6, d10
fmacd d1 , d6, d11
fmacd d2 , d7, d11
@@ -111,8 +111,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1
fldmiad X!, { d4 - d5 }
fldmiad Y!, { d8 - d9 }
vldmia.f64 X!, { d4 - d5 }
vldmia.f64 Y!, { d8 - d9 }
fmacd d0 , d4, d8
fmacd d1 , d4, d9
fmacd d2 , d5, d9
@@ -127,8 +127,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
nop
fldmiad X, { d4 - d5 }
fldmiad Y, { d8 - d9 }
vldmia.f64 X, { d4 - d5 }
vldmia.f64 Y, { d8 - d9 }
fmacd d0 , d4, d8
fmacd d1 , d4, d9
fmacd d2 , d5, d9
@@ -136,8 +136,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
add X, X, INC_X
add Y, Y, INC_Y
fldmiad X, { d4 - d5 }
fldmiad Y, { d8 - d9 }
vldmia.f64 X, { d4 - d5 }
vldmia.f64 Y, { d8 - d9 }
fmacd d0 , d4, d8
fmacd d1 , d4, d9
fmacd d2 , d5, d9
@@ -145,8 +145,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
add X, X, INC_X
add Y, Y, INC_Y
fldmiad X, { d4 - d5 }
fldmiad Y, { d8 - d9 }
vldmia.f64 X, { d4 - d5 }
vldmia.f64 Y, { d8 - d9 }
fmacd d0 , d4, d8
fmacd d1 , d4, d9
fmacd d2 , d5, d9
@@ -154,8 +154,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
add X, X, INC_X
add Y, Y, INC_Y
fldmiad X, { d4 - d5 }
fldmiad Y, { d8 - d9 }
vldmia.f64 X, { d4 - d5 }
vldmia.f64 Y, { d8 - d9 }
fmacd d0 , d4, d8
fmacd d1 , d4, d9
fmacd d2 , d5, d9
@@ -168,8 +168,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1
fldmiad X, { d4 - d5 }
fldmiad Y, { d8 - d9 }
vldmia.f64 X, { d4 - d5 }
vldmia.f64 Y, { d8 - d9 }
fmacd d0 , d4, d8
fmacd d1 , d4, d9
fmacd d2 , d5, d9

View File

@@ -360,7 +360,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d0, ALPHA_R
fldd d1, ALPHA_I
fldmiad CO1, { d4 - d7 }
vldmia.f64 CO1, { d4 - d7 }
FMAC_R1 d4 , d0 , d8
FMAC_I1 d5 , d0 , d9
@@ -372,9 +372,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d6 , d1 , d11
FMAC_I2 d7 , d1 , d10
fstmiad CO1, { d4 - d7 }
vstmia.f64 CO1, { d4 - d7 }
fldmiad CO2, { d4 - d7 }
vldmia.f64 CO2, { d4 - d7 }
FMAC_R1 d4 , d0 , d12
FMAC_I1 d5 , d0 , d13
@@ -386,7 +386,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d6 , d1 , d15
FMAC_I2 d7 , d1 , d14
fstmiad CO2, { d4 - d7 }
vstmia.f64 CO2, { d4 - d7 }
add CO1, CO1, #32
@@ -543,23 +543,23 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d0, ALPHA_R
fldd d1, ALPHA_I
fldmiad CO1, { d4 - d5 }
vldmia.f64 CO1, { d4 - d5 }
FMAC_R1 d4 , d0 , d8
FMAC_I1 d5 , d0 , d9
FMAC_R2 d4 , d1 , d9
FMAC_I2 d5 , d1 , d8
fstmiad CO1, { d4 - d5 }
vstmia.f64 CO1, { d4 - d5 }
fldmiad CO2, { d4 - d5 }
vldmia.f64 CO2, { d4 - d5 }
FMAC_R1 d4 , d0 , d12
FMAC_I1 d5 , d0 , d13
FMAC_R2 d4 , d1 , d13
FMAC_I2 d5 , d1 , d12
fstmiad CO2, { d4 - d5 }
vstmia.f64 CO2, { d4 - d5 }
add CO1, CO1, #16
@@ -714,7 +714,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d0, ALPHA_R
fldd d1, ALPHA_I
fldmiad CO1, { d4 - d7 }
vldmia.f64 CO1, { d4 - d7 }
FMAC_R1 d4 , d0 , d8
FMAC_I1 d5 , d0 , d9
@@ -726,7 +726,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d6 , d1 , d11
FMAC_I2 d7 , d1 , d10
fstmiad CO1, { d4 - d7 }
vstmia.f64 CO1, { d4 - d7 }
add CO1, CO1, #32
@@ -843,14 +843,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d0, ALPHA_R
fldd d1, ALPHA_I
fldmiad CO1, { d4 - d5 }
vldmia.f64 CO1, { d4 - d5 }
FMAC_R1 d4 , d0 , d8
FMAC_I1 d5 , d0 , d9
FMAC_R2 d4 , d1 , d9
FMAC_I2 d5 , d1 , d8
fstmiad CO1, { d4 - d5 }
vstmia.f64 CO1, { d4 - d5 }
add CO1, CO1, #16

View File

@@ -374,8 +374,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d0, ALPHA_R
fldd d1, ALPHA_I
fldmiad CO1, { d4 - d7 }
fldmiad CO2, { d8 - d11 }
vldmia.f64 CO1, { d4 - d7 }
vldmia.f64 CO2, { d8 - d11 }
FADD_R d16, d24 , d16
FADD_I d17, d25 , d17
@@ -406,8 +406,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d10, d1 , d23
FMAC_I2 d11, d1 , d22
fstmiad CO1, { d4 - d7 }
fstmiad CO2, { d8 - d11 }
vstmia.f64 CO1, { d4 - d7 }
vstmia.f64 CO2, { d8 - d11 }
add CO1, CO1, #32
@@ -570,8 +570,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d0, ALPHA_R
fldd d1, ALPHA_I
fldmiad CO1, { d4 - d5 }
fldmiad CO2, { d8 - d9 }
vldmia.f64 CO1, { d4 - d5 }
vldmia.f64 CO2, { d8 - d9 }
FADD_R d16, d24 , d16
FADD_I d17, d25 , d17
@@ -588,8 +588,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d8 , d1 , d21
FMAC_I2 d9 , d1 , d20
fstmiad CO1, { d4 - d5 }
fstmiad CO2, { d8 - d9 }
vstmia.f64 CO1, { d4 - d5 }
vstmia.f64 CO2, { d8 - d9 }
add CO1, CO1, #16
@@ -752,7 +752,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d0, ALPHA_R
fldd d1, ALPHA_I
fldmiad CO1, { d4 - d7 }
vldmia.f64 CO1, { d4 - d7 }
FADD_R d16, d24 , d16
FADD_I d17, d25 , d17
@@ -769,7 +769,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d6 , d1 , d19
FMAC_I2 d7 , d1 , d18
fstmiad CO1, { d4 - d7 }
vstmia.f64 CO1, { d4 - d7 }
add CO1, CO1, #32
@@ -887,7 +887,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d0, ALPHA_R
fldd d1, ALPHA_I
fldmiad CO1, { d4 - d5 }
vldmia.f64 CO1, { d4 - d5 }
FADD_R d16, d24 , d16
FADD_I d17, d25 , d17
@@ -897,7 +897,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d4 , d1 , d17
FMAC_I2 d5 , d1 , d16
fstmiad CO1, { d4 - d5 }
vstmia.f64 CO1, { d4 - d5 }
add CO1, CO1, #16

View File

@@ -87,7 +87,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d6 , [ AO2, #16 ]
fldd d7 , [ AO2, #24 ]
fstmiad BO!, { d0 - d7 }
vstmia.f64 BO!, { d0 - d7 }
add AO2, AO2, #32
.endm
@@ -101,7 +101,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d3 , [ AO2, #8 ]
add AO1, AO1, #16
fstmiad BO!, { d0 - d3 }
vstmia.f64 BO!, { d0 - d3 }
add AO2, AO2, #16
.endm
@@ -113,7 +113,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d2 , [ AO1, #16 ]
fldd d3 , [ AO1, #24 ]
fstmiad BO!, { d0 - d3 }
vstmia.f64 BO!, { d0 - d3 }
add AO1, AO1, #32
.endm
@@ -124,7 +124,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d0 , [ AO1, #0 ]
fldd d1 , [ AO1, #8 ]
fstmiad BO!, { d0 - d1 }
vstmia.f64 BO!, { d0 - d1 }
add AO1, AO1, #16
.endm

View File

@@ -74,13 +74,13 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY2x2
pld [ AO1, #A_PRE ]
fldmiad AO1, { d0 - d3 }
vldmia.f64 AO1, { d0 - d3 }
add r3, AO1, LDA
pld [ r3, #A_PRE ]
fldmiad r3, { d4 - d7 }
vldmia.f64 r3, { d4 - d7 }
fstmiad BO1, { d0 - d7 }
vstmia.f64 BO1, { d0 - d7 }
add AO1, AO1, #32
add BO1, BO1, M4
@@ -88,12 +88,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY1x2
fldmiad AO1, { d0 -d1 }
vldmia.f64 AO1, { d0 -d1 }
add r3, AO1, LDA
fldmiad r3, { d2 - d3 }
vldmia.f64 r3, { d2 - d3 }
fstmiad BO2, { d0 - d3 }
vstmia.f64 BO2, { d0 - d3 }
add AO1, AO1, #16
add BO2, BO2, #32
@@ -102,9 +102,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*************************************************************************************************************************/
.macro COPY2x1
fldmiad AO1, { d0 - d3 }
vldmia.f64 AO1, { d0 - d3 }
fstmiad BO1, { d0 - d3 }
vstmia.f64 BO1, { d0 - d3 }
add AO1, AO1, #32
add BO1, BO1, M4
@@ -112,9 +112,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro COPY1x1
fldmiad AO1, { d0 - d1 }
vldmia.f64 AO1, { d0 - d1 }
fstmiad BO2, { d0 - d1 }
vstmia.f64 BO2, { d0 - d1 }
add AO1, AO1, #16
add BO2, BO2, #16

View File

@@ -204,7 +204,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d0, ALPHA_R
fldd d1, ALPHA_I
fldmiad YO, { d4 - d7 }
vldmia.f64 YO, { d4 - d7 }
FMAC_R1 d4 , d0 , d8
FMAC_I1 d5 , d0 , d9
@@ -216,9 +216,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d6 , d1 , d11
FMAC_I2 d7 , d1 , d10
fstmiad YO!, { d4 - d7 }
vstmia.f64 YO!, { d4 - d7 }
fldmiad YO, { d4 - d7 }
vldmia.f64 YO, { d4 - d7 }
FMAC_R1 d4 , d0 , d12
FMAC_I1 d5 , d0 , d13
@@ -230,7 +230,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d6 , d1 , d15
FMAC_I2 d7 , d1 , d14
fstmiad YO!, { d4 - d7 }
vstmia.f64 YO!, { d4 - d7 }
.endm
@@ -269,14 +269,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d0, ALPHA_R
fldd d1, ALPHA_I
fldmiad YO, { d4 - d5 }
vldmia.f64 YO, { d4 - d5 }
FMAC_R1 d4 , d0 , d8
FMAC_I1 d5 , d0 , d9
FMAC_R2 d4 , d1 , d9
FMAC_I2 d5 , d1 , d8
fstmiad YO, { d4 - d5 }
vstmia.f64 YO, { d4 - d5 }
add YO, YO, #16
@@ -352,47 +352,47 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d0, ALPHA_R
fldd d1, ALPHA_I
fldmiad YO, { d4 - d5 }
vldmia.f64 YO, { d4 - d5 }
FMAC_R1 d4 , d0 , d8
FMAC_I1 d5 , d0 , d9
FMAC_R2 d4 , d1 , d9
FMAC_I2 d5 , d1 , d8
fstmiad YO, { d4 - d5 }
vstmia.f64 YO, { d4 - d5 }
add YO, YO, INC_Y
fldmiad YO, { d6 - d7 }
vldmia.f64 YO, { d6 - d7 }
FMAC_R1 d6 , d0 , d10
FMAC_I1 d7 , d0 , d11
FMAC_R2 d6 , d1 , d11
FMAC_I2 d7 , d1 , d10
fstmiad YO, { d6 - d7 }
vstmia.f64 YO, { d6 - d7 }
add YO, YO, INC_Y
fldmiad YO, { d4 - d5 }
vldmia.f64 YO, { d4 - d5 }
FMAC_R1 d4 , d0 , d12
FMAC_I1 d5 , d0 , d13
FMAC_R2 d4 , d1 , d13
FMAC_I2 d5 , d1 , d12
fstmiad YO, { d4 - d5 }
vstmia.f64 YO, { d4 - d5 }
add YO, YO, INC_Y
fldmiad YO, { d6 - d7 }
vldmia.f64 YO, { d6 - d7 }
FMAC_R1 d6 , d0 , d14
FMAC_I1 d7 , d0 , d15
FMAC_R2 d6 , d1 , d15
FMAC_I2 d7 , d1 , d14
fstmiad YO, { d6 - d7 }
vstmia.f64 YO, { d6 - d7 }
add YO, YO, INC_Y
@@ -433,14 +433,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
fldd d0, ALPHA_R
fldd d1, ALPHA_I
fldmiad YO, { d4 - d5 }
vldmia.f64 YO, { d4 - d5 }
FMAC_R1 d4 , d0 , d8
FMAC_I1 d5 , d0 , d9
FMAC_R2 d4 , d1 , d9
FMAC_I2 d5 , d1 , d8
fstmiad YO, { d4 - d5 }
vstmia.f64 YO, { d4 - d5 }
add YO, YO, INC_Y

View File

@@ -151,12 +151,12 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F2X1
fldmiad XO! , { d2 - d3 }
fldmiad AO1!, { d4 - d5 }
vldmia.f64 XO! , { d2 - d3 }
vldmia.f64 AO1!, { d4 - d5 }
fmacd d12 , d4 , d2
fmacd d13 , d4 , d3
fldmiad AO2!, { d8 - d9 }
vldmia.f64 AO2!, { d8 - d9 }
KMAC_R d12 , d5 , d3
KMAC_I d13 , d5 , d2
@@ -169,7 +169,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_F2
fldmiad YO, { d4 - d7 }
vldmia.f64 YO, { d4 - d7 }
FMAC_R1 d4 , d0 , d12
FMAC_I1 d5 , d0 , d13
@@ -181,7 +181,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d6 , d1 , d15
FMAC_I2 d7 , d1 , d14
fstmiad YO!, { d4 - d7 }
vstmia.f64 YO!, { d4 - d7 }
.endm
@@ -205,8 +205,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_F1X1
fldmiad XO! , { d2 - d3 }
fldmiad AO1!, { d4 - d5 }
vldmia.f64 XO! , { d2 - d3 }
vldmia.f64 AO1!, { d4 - d5 }
fmacd d12 , d4 , d2
fmacd d13 , d4 , d3
@@ -217,14 +217,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_F1
fldmiad YO, { d4 - d5 }
vldmia.f64 YO, { d4 - d5 }
FMAC_R1 d4 , d0 , d12
FMAC_I1 d5 , d0 , d13
FMAC_R2 d4 , d1 , d13
FMAC_I2 d5 , d1 , d12
fstmiad YO!, { d4 - d5 }
vstmia.f64 YO!, { d4 - d5 }
.endm
@@ -250,9 +250,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S2X1
fldmiad XO , { d2 - d3 }
fldmiad AO1!, { d4 - d5 }
fldmiad AO2!, { d8 - d9 }
vldmia.f64 XO , { d2 - d3 }
vldmia.f64 AO1!, { d4 - d5 }
vldmia.f64 AO2!, { d8 - d9 }
fmacd d12 , d4 , d2
fmacd d13 , d4 , d3
@@ -270,25 +270,25 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S2
fldmiad YO, { d4 - d5 }
vldmia.f64 YO, { d4 - d5 }
FMAC_R1 d4 , d0 , d12
FMAC_I1 d5 , d0 , d13
FMAC_R2 d4 , d1 , d13
FMAC_I2 d5 , d1 , d12
fstmiad YO, { d4 - d5 }
vstmia.f64 YO, { d4 - d5 }
add YO, YO, INC_Y
fldmiad YO, { d6 - d7 }
vldmia.f64 YO, { d6 - d7 }
FMAC_R1 d6 , d0 , d14
FMAC_I1 d7 , d0 , d15
FMAC_R2 d6 , d1 , d15
FMAC_I2 d7 , d1 , d14
fstmiad YO, { d6 - d7 }
vstmia.f64 YO, { d6 - d7 }
add YO, YO, INC_Y
@@ -314,8 +314,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro KERNEL_S1X1
fldmiad XO , { d2 - d3 }
fldmiad AO1!, { d4 - d5 }
vldmia.f64 XO , { d2 - d3 }
vldmia.f64 AO1!, { d4 - d5 }
fmacd d12 , d4 , d2
fmacd d13 , d4 , d3
@@ -328,14 +328,14 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.macro SAVE_S1
fldmiad YO, { d4 - d5 }
vldmia.f64 YO, { d4 - d5 }
FMAC_R1 d4 , d0 , d12
FMAC_I1 d5 , d0 , d13
FMAC_R2 d4 , d1 , d13
FMAC_I2 d5 , d1 , d12
fstmiad YO, { d4 - d5 }
vstmia.f64 YO, { d4 - d5 }
add YO, YO, INC_Y

View File

@@ -385,7 +385,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d6 , d1 , d11
FMAC_I2 d7 , d1 , d10
fstmiad CO1, { d4 - d7 }
vstmia.f64 CO1, { d4 - d7 }
fldd d4 , FP_ZERO
vmov.f64 d5 , d4
@@ -402,7 +402,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d6 , d1 , d15
FMAC_I2 d7 , d1 , d14
fstmiad CO2, { d4 - d7 }
vstmia.f64 CO2, { d4 - d7 }
add CO1, CO1, #32
@@ -567,7 +567,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d4 , d1 , d9
FMAC_I2 d5 , d1 , d8
fstmiad CO1, { d4 - d5 }
vstmia.f64 CO1, { d4 - d5 }
fldd d4 , FP_ZERO
vmov.f64 d5 , d4
@@ -577,7 +577,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d4 , d1 , d13
FMAC_I2 d5 , d1 , d12
fstmiad CO2, { d4 - d5 }
vstmia.f64 CO2, { d4 - d5 }
add CO1, CO1, #16
@@ -747,7 +747,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d6 , d1 , d11
FMAC_I2 d7 , d1 , d10
fstmiad CO1, { d4 - d7 }
vstmia.f64 CO1, { d4 - d7 }
add CO1, CO1, #32
@@ -872,7 +872,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d4 , d1 , d9
FMAC_I2 d5 , d1 , d8
fstmiad CO1, { d4 - d5 }
vstmia.f64 CO1, { d4 - d5 }
add CO1, CO1, #16

View File

@@ -391,8 +391,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d10, d1 , d23
FMAC_I2 d11, d1 , d22
fstmiad CO1, { d4 - d7 }
fstmiad CO2, { d8 - d11 }
vstmia.f64 CO1, { d4 - d7 }
vstmia.f64 CO2, { d8 - d11 }
add CO1, CO1, #32
@@ -569,8 +569,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d8 , d1 , d21
FMAC_I2 d9 , d1 , d20
fstmiad CO1, { d4 - d5 }
fstmiad CO2, { d8 - d9 }
vstmia.f64 CO1, { d4 - d5 }
vstmia.f64 CO2, { d8 - d9 }
add CO1, CO1, #16
@@ -747,7 +747,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d6 , d1 , d19
FMAC_I2 d7 , d1 , d18
fstmiad CO1, { d4 - d7 }
vstmia.f64 CO1, { d4 - d7 }
add CO1, CO1, #32
@@ -872,7 +872,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FMAC_R2 d4 , d1 , d17
FMAC_I2 d5 , d1 , d16
fstmiad CO1, { d4 - d5 }
vstmia.f64 CO1, { d4 - d5 }
add CO1, CO1, #16

View File

@@ -1,17 +1,17 @@
ifndef SNRM2KERNEL
SNRM2KERNEL = nrm2.c
SNRM2KERNEL = ../arm/nrm2.c
endif
ifndef DNRM2KERNEL
DNRM2KERNEL = nrm2.c
DNRM2KERNEL = ../arm/nrm2.c
endif
ifndef CNRM2KERNEL
CNRM2KERNEL = znrm2.c
CNRM2KERNEL = ../arm/znrm2.c
endif
ifndef ZNRM2KERNEL
ZNRM2KERNEL = znrm2.c
ZNRM2KERNEL = ../arm/znrm2.c
endif
ifndef SCABS_KERNEL

View File

@@ -1,8 +1,3 @@
SAMAXKERNEL = amax.S
DAMAXKERNEL = amax.S
CAMAXKERNEL = zamax.S
ZAMAXKERNEL = zamax.S
SAMINKERNEL = ../arm/amin.c
DAMINKERNEL = ../arm/amin.c
CAMINKERNEL = ../arm/zamin.c
@@ -14,11 +9,6 @@ DMAXKERNEL = ../arm/max.c
SMINKERNEL = ../arm/min.c
DMINKERNEL = ../arm/min.c
ISAMAXKERNEL = iamax.S
IDAMAXKERNEL = iamax.S
ICAMAXKERNEL = izamax.S
IZAMAXKERNEL = izamax.S
ISAMINKERNEL = ../arm/iamin.c
IDAMINKERNEL = ../arm/iamin.c
ICAMINKERNEL = ../arm/izamin.c
@@ -30,86 +20,6 @@ IDMAXKERNEL = ../arm/imax.c
ISMINKERNEL = ../arm/imin.c
IDMINKERNEL = ../arm/imin.c
SASUMKERNEL = asum.S
DASUMKERNEL = asum.S
CASUMKERNEL = casum.S
ZASUMKERNEL = zasum.S
SAXPYKERNEL = axpy.S
DAXPYKERNEL = axpy.S
CAXPYKERNEL = zaxpy.S
ZAXPYKERNEL = zaxpy.S
SCOPYKERNEL = copy.S
DCOPYKERNEL = copy.S
CCOPYKERNEL = copy.S
ZCOPYKERNEL = copy.S
SDOTKERNEL = dot.S
DDOTKERNEL = dot.S
CDOTKERNEL = zdot.S
ZDOTKERNEL = zdot.S
DSDOTKERNEL = dot.S
SNRM2KERNEL = nrm2.S
DNRM2KERNEL = nrm2.S
CNRM2KERNEL = znrm2.S
ZNRM2KERNEL = znrm2.S
SROTKERNEL = rot.S
DROTKERNEL = rot.S
CROTKERNEL = zrot.S
ZROTKERNEL = zrot.S
SSCALKERNEL = scal.S
DSCALKERNEL = scal.S
CSCALKERNEL = zscal.S
ZSCALKERNEL = zscal.S
SSWAPKERNEL = swap.S
DSWAPKERNEL = swap.S
CSWAPKERNEL = swap.S
ZSWAPKERNEL = swap.S
SGEMVNKERNEL = gemv_n.S
DGEMVNKERNEL = gemv_n.S
CGEMVNKERNEL = zgemv_n.S
ZGEMVNKERNEL = zgemv_n.S
SGEMVTKERNEL = gemv_t.S
DGEMVTKERNEL = gemv_t.S
CGEMVTKERNEL = zgemv_t.S
ZGEMVTKERNEL = zgemv_t.S
STRMMKERNEL = ../generic/trmmkernel_4x4.c
DTRMMKERNEL = ../generic/trmmkernel_2x2.c
CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c
ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c
SGEMMKERNEL = sgemm_kernel_4x4.S
SGEMMONCOPY = ../generic/gemm_ncopy_4.c
SGEMMOTCOPY = ../generic/gemm_tcopy_4.c
SGEMMONCOPYOBJ = sgemm_oncopy.o
SGEMMOTCOPYOBJ = sgemm_otcopy.o
DGEMMKERNEL = ../generic/gemmkernel_2x2.c
DGEMMONCOPY = ../generic/gemm_ncopy_2.c
DGEMMOTCOPY = ../generic/gemm_tcopy_2.c
DGEMMONCOPYOBJ = dgemm_oncopy.o
DGEMMOTCOPYOBJ = dgemm_otcopy.o
CGEMMKERNEL = ../generic/zgemmkernel_2x2.c
CGEMMONCOPY = ../generic/zgemm_ncopy_2.c
CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c
CGEMMONCOPYOBJ = cgemm_oncopy.o
CGEMMOTCOPYOBJ = cgemm_otcopy.o
ZGEMMKERNEL = ../generic/zgemmkernel_2x2.c
ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c
ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c
ZGEMMONCOPYOBJ = zgemm_oncopy.o
ZGEMMOTCOPYOBJ = zgemm_otcopy.o
STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
@@ -130,6 +40,167 @@ ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
SAMAXKERNEL = amax.S
DAMAXKERNEL = amax.S
CAMAXKERNEL = zamax.S
ZAMAXKERNEL = zamax.S
SAXPYKERNEL = axpy.S
DAXPYKERNEL = axpy.S
CAXPYKERNEL = zaxpy.S
ZAXPYKERNEL = zaxpy.S
SROTKERNEL = rot.S
DROTKERNEL = rot.S
CROTKERNEL = zrot.S
ZROTKERNEL = zrot.S
SSCALKERNEL = scal.S
DSCALKERNEL = scal.S
CSCALKERNEL = zscal.S
ZSCALKERNEL = zscal.S
SGEMVNKERNEL = gemv_n.S
DGEMVNKERNEL = gemv_n.S
CGEMVNKERNEL = zgemv_n.S
ZGEMVNKERNEL = zgemv_n.S
SGEMVTKERNEL = gemv_t.S
DGEMVTKERNEL = gemv_t.S
CGEMVTKERNEL = zgemv_t.S
ZGEMVTKERNEL = zgemv_t.S
SASUMKERNEL = asum.S
DASUMKERNEL = asum.S
CASUMKERNEL = casum.S
ZASUMKERNEL = zasum.S
SCOPYKERNEL = copy.S
DCOPYKERNEL = copy.S
CCOPYKERNEL = copy.S
ZCOPYKERNEL = copy.S
SSWAPKERNEL = swap.S
DSWAPKERNEL = swap.S
CSWAPKERNEL = swap.S
ZSWAPKERNEL = swap.S
ISAMAXKERNEL = iamax.S
IDAMAXKERNEL = iamax.S
ICAMAXKERNEL = izamax.S
IZAMAXKERNEL = izamax.S
ifneq ($(OS_DARWIN)$(CROSS),11)
SNRM2KERNEL = nrm2.S
DNRM2KERNEL = nrm2.S
CNRM2KERNEL = znrm2.S
ZNRM2KERNEL = znrm2.S
endif
DDOTKERNEL = dot.S
SDOTKERNEL = dot.S
CDOTKERNEL = zdot.S
ZDOTKERNEL = zdot.S
DSDOTKERNEL = dot.S
ifeq ($(OS_DARWIN)$(CROSS),11)
STRMMKERNEL = ../generic/trmmkernel_2x2.c
DTRMMKERNEL = ../generic/trmmkernel_2x2.c
CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c
ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c
SGEMMKERNEL = ../generic/gemmkernel_2x2.c
SGEMMONCOPY = ../generic/gemm_ncopy_2.c
SGEMMOTCOPY = ../generic/gemm_tcopy_2.c
SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX)
SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX)
DGEMMKERNEL = ../generic/gemmkernel_2x2.c
DGEMMONCOPY = ../generic/gemm_ncopy_2.c
DGEMMOTCOPY = ../generic/gemm_tcopy_2.c
DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX)
DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX)
CGEMMKERNEL = ../generic/zgemmkernel_2x2.c
CGEMMONCOPY = ../generic/zgemm_ncopy_2.c
CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c
CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX)
CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX)
ZGEMMKERNEL = ../generic/zgemmkernel_2x2.c
ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c
ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c
ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX)
ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX)
else
SGEMMKERNEL = sgemm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S
STRMMKERNEL = strmm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S
ifneq ($(SGEMM_UNROLL_M), $(SGEMM_UNROLL_N))
SGEMMINCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_M).c
SGEMMITCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_M).c
SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX)
SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX)
endif
SGEMMONCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_N).c
SGEMMOTCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_N).c
SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX)
SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX)
DGEMMKERNEL = dgemm_kernel_$(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N).S
DTRMMKERNEL = dtrmm_kernel_$(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N).S
ifneq ($(DGEMM_UNROLL_M), $(DGEMM_UNROLL_N))
ifeq ($(DGEMM_UNROLL_M), 8)
DGEMMINCOPY = dgemm_ncopy_$(DGEMM_UNROLL_M).S
DGEMMITCOPY = dgemm_tcopy_$(DGEMM_UNROLL_M).S
else
DGEMMINCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_M).c
DGEMMITCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_M).c
endif
DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX)
DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX)
endif
ifeq ($(DGEMM_UNROLL_N), 4)
DGEMMONCOPY = dgemm_ncopy_$(DGEMM_UNROLL_N).S
DGEMMOTCOPY = dgemm_tcopy_$(DGEMM_UNROLL_N).S
else
DGEMMONCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_N).c
DGEMMOTCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_N).c
endif
DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX)
DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX)
CGEMMKERNEL = cgemm_kernel_$(CGEMM_UNROLL_M)x$(CGEMM_UNROLL_N).S
CTRMMKERNEL = ctrmm_kernel_$(CGEMM_UNROLL_M)x$(CGEMM_UNROLL_N).S
ifneq ($(CGEMM_UNROLL_M), $(CGEMM_UNROLL_N))
CGEMMINCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_M).c
CGEMMITCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_M).c
CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX)
CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX)
endif
CGEMMONCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_N).c
CGEMMOTCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_N).c
CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX)
CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX)
ZGEMMKERNEL = zgemm_kernel_$(ZGEMM_UNROLL_M)x$(ZGEMM_UNROLL_N).S
ZTRMMKERNEL = ztrmm_kernel_$(ZGEMM_UNROLL_M)x$(ZGEMM_UNROLL_N).S
ifneq ($(ZGEMM_UNROLL_M), $(ZGEMM_UNROLL_N))
ZGEMMINCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_M).c
ZGEMMITCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_M).c
ZGEMMINCOPYOBJ = zgemm_incopy$(TSUFFIX).$(SUFFIX)
ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX)
endif
ZGEMMONCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_N).c
ZGEMMOTCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_N).c
ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX)
ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX)
endif

View File

@@ -0,0 +1,3 @@
include $(KERNELDIR)/KERNEL.ARMV8

View File

@@ -1,4 +1,49 @@
include $(KERNELDIR)/KERNEL.ARMV8
SAMINKERNEL = ../arm/amin.c
DAMINKERNEL = ../arm/amin.c
CAMINKERNEL = ../arm/zamin.c
ZAMINKERNEL = ../arm/zamin.c
SMAXKERNEL = ../arm/max.c
DMAXKERNEL = ../arm/max.c
SMINKERNEL = ../arm/min.c
DMINKERNEL = ../arm/min.c
ISAMINKERNEL = ../arm/iamin.c
IDAMINKERNEL = ../arm/iamin.c
ICAMINKERNEL = ../arm/izamin.c
IZAMINKERNEL = ../arm/izamin.c
ISMAXKERNEL = ../arm/imax.c
IDMAXKERNEL = ../arm/imax.c
ISMINKERNEL = ../arm/imin.c
IDMINKERNEL = ../arm/imin.c
STRMMKERNEL = ../generic/trmmkernel_4x4.c
DTRMMKERNEL = ../generic/trmmkernel_2x2.c
CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c
ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c
STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
SAMAXKERNEL = amax.S
DAMAXKERNEL = amax.S
@@ -66,13 +111,13 @@ STRMMKERNEL = strmm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S
ifneq ($(SGEMM_UNROLL_M), $(SGEMM_UNROLL_N))
SGEMMINCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_M).c
SGEMMITCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_M).c
SGEMMINCOPYOBJ = sgemm_incopy.o
SGEMMITCOPYOBJ = sgemm_itcopy.o
SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX)
SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX)
endif
SGEMMONCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_N).c
SGEMMOTCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_N).c
SGEMMONCOPYOBJ = sgemm_oncopy.o
SGEMMOTCOPYOBJ = sgemm_otcopy.o
SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX)
SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX)
DGEMMKERNEL = dgemm_kernel_$(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N).S
DTRMMKERNEL = dtrmm_kernel_$(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N).S
@@ -87,8 +132,8 @@ DGEMMINCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_M).c
DGEMMITCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_M).c
endif
DGEMMINCOPYOBJ = dgemm_incopy.o
DGEMMITCOPYOBJ = dgemm_itcopy.o
DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX)
DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX)
endif
ifeq ($(DGEMM_UNROLL_N), 4)
@@ -99,32 +144,32 @@ DGEMMONCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_N).c
DGEMMOTCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_N).c
endif
DGEMMONCOPYOBJ = dgemm_oncopy.o
DGEMMOTCOPYOBJ = dgemm_otcopy.o
DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX)
DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX)
CGEMMKERNEL = cgemm_kernel_$(CGEMM_UNROLL_M)x$(CGEMM_UNROLL_N).S
CTRMMKERNEL = ctrmm_kernel_$(CGEMM_UNROLL_M)x$(CGEMM_UNROLL_N).S
ifneq ($(CGEMM_UNROLL_M), $(CGEMM_UNROLL_N))
CGEMMINCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_M).c
CGEMMITCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_M).c
CGEMMINCOPYOBJ = cgemm_incopy.o
CGEMMITCOPYOBJ = cgemm_itcopy.o
CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX)
CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX)
endif
CGEMMONCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_N).c
CGEMMOTCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_N).c
CGEMMONCOPYOBJ = cgemm_oncopy.o
CGEMMOTCOPYOBJ = cgemm_otcopy.o
CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX)
CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX)
ZGEMMKERNEL = zgemm_kernel_$(ZGEMM_UNROLL_M)x$(ZGEMM_UNROLL_N).S
ZTRMMKERNEL = ztrmm_kernel_$(ZGEMM_UNROLL_M)x$(ZGEMM_UNROLL_N).S
ifneq ($(ZGEMM_UNROLL_M), $(ZGEMM_UNROLL_N))
ZGEMMINCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_M).c
ZGEMMITCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_M).c
ZGEMMINCOPYOBJ = zgemm_incopy.o
ZGEMMITCOPYOBJ = zgemm_itcopy.o
ZGEMMINCOPYOBJ = zgemm_incopy$(TSUFFIX).$(SUFFIX)
ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX)
endif
ZGEMMONCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_N).c
ZGEMMOTCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_N).c
ZGEMMONCOPYOBJ = zgemm_oncopy.o
ZGEMMOTCOPYOBJ = zgemm_otcopy.o
ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX)
ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX)

View File

@@ -0,0 +1,3 @@
include $(KERNELDIR)/KERNEL.CORTEXA57

View File

@@ -0,0 +1,3 @@
include $(KERNELDIR)/KERNEL.CORTEXA57

View File

@@ -0,0 +1,3 @@
include $(KERNELDIR)/KERNEL.CORTEXA57

View File

@@ -1,6 +1,133 @@
include $(KERNELDIR)/KERNEL.ARMV8
SAMAXKERNEL = amax.S
DAMAXKERNEL = amax.S
CAMAXKERNEL = zamax.S
ZAMAXKERNEL = zamax.S
SAMINKERNEL = ../arm/amin.c
DAMINKERNEL = ../arm/amin.c
CAMINKERNEL = ../arm/zamin.c
ZAMINKERNEL = ../arm/zamin.c
SMAXKERNEL = ../arm/max.c
DMAXKERNEL = ../arm/max.c
SMINKERNEL = ../arm/min.c
DMINKERNEL = ../arm/min.c
ISAMAXKERNEL = iamax.S
IDAMAXKERNEL = iamax.S
ICAMAXKERNEL = izamax.S
IZAMAXKERNEL = izamax.S
ISAMINKERNEL = ../arm/iamin.c
IDAMINKERNEL = ../arm/iamin.c
ICAMINKERNEL = ../arm/izamin.c
IZAMINKERNEL = ../arm/izamin.c
ISMAXKERNEL = ../arm/imax.c
IDMAXKERNEL = ../arm/imax.c
ISMINKERNEL = ../arm/imin.c
IDMINKERNEL = ../arm/imin.c
SASUMKERNEL = asum.S
DASUMKERNEL = asum.S
CASUMKERNEL = casum.S
ZASUMKERNEL = zasum.S
SAXPYKERNEL = axpy.S
DAXPYKERNEL = daxpy_thunderx.c
CAXPYKERNEL = zaxpy.S
ZAXPYKERNEL = zaxpy.S
SCOPYKERNEL = copy.S
DCOPYKERNEL = copy.S
CCOPYKERNEL = copy.S
ZCOPYKERNEL = copy.S
SDOTKERNEL = dot_thunderx.c
DDOTKERNEL = ddot_thunderx.c
CDOTKERNEL = zdot.S
ZDOTKERNEL = zdot.S
DSDOTKERNEL = dot.S
SNRM2KERNEL = nrm2.S
DNRM2KERNEL = nrm2.S
CNRM2KERNEL = znrm2.S
ZNRM2KERNEL = znrm2.S
SROTKERNEL = rot.S
DROTKERNEL = rot.S
CROTKERNEL = zrot.S
ZROTKERNEL = zrot.S
SSCALKERNEL = scal.S
DSCALKERNEL = scal.S
CSCALKERNEL = zscal.S
ZSCALKERNEL = zscal.S
SSWAPKERNEL = swap.S
DSWAPKERNEL = swap.S
CSWAPKERNEL = swap.S
ZSWAPKERNEL = swap.S
SGEMVNKERNEL = gemv_n.S
DGEMVNKERNEL = gemv_n.S
CGEMVNKERNEL = zgemv_n.S
ZGEMVNKERNEL = zgemv_n.S
SGEMVTKERNEL = gemv_t.S
DGEMVTKERNEL = gemv_t.S
CGEMVTKERNEL = zgemv_t.S
ZGEMVTKERNEL = zgemv_t.S
STRMMKERNEL = ../generic/trmmkernel_4x4.c
DTRMMKERNEL = ../generic/trmmkernel_2x2.c
CTRMMKERNEL = ../generic/ztrmmkernel_2x2.c
ZTRMMKERNEL = ../generic/ztrmmkernel_2x2.c
SGEMMKERNEL = sgemm_kernel_4x4.S
SGEMMONCOPY = ../generic/gemm_ncopy_4.c
SGEMMOTCOPY = ../generic/gemm_tcopy_4.c
SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX)
SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX)
DGEMMKERNEL = ../generic/gemmkernel_2x2.c
DGEMMONCOPY = ../generic/gemm_ncopy_2.c
DGEMMOTCOPY = ../generic/gemm_tcopy_2.c
DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX)
DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX)
CGEMMKERNEL = ../generic/zgemmkernel_2x2.c
CGEMMONCOPY = ../generic/zgemm_ncopy_2.c
CGEMMOTCOPY = ../generic/zgemm_tcopy_2.c
CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX)
CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX)
ZGEMMKERNEL = ../generic/zgemmkernel_2x2.c
ZGEMMONCOPY = ../generic/zgemm_ncopy_2.c
ZGEMMOTCOPY = ../generic/zgemm_tcopy_2.c
ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX)
ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX)
STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
SDOTKERNEL=dot_thunderx.c
DDOTKERNEL=ddot_thunderx.c
DAXPYKERNEL=daxpy_thunderx.c

View File

@@ -1,4 +1,137 @@
include $(KERNELDIR)/KERNEL.CORTEXA57
SAMINKERNEL = ../arm/amin.c
DAMINKERNEL = ../arm/amin.c
CAMINKERNEL = ../arm/zamin.c
ZAMINKERNEL = ../arm/zamin.c
SMAXKERNEL = ../arm/max.c
DMAXKERNEL = ../arm/max.c
SMINKERNEL = ../arm/min.c
DMINKERNEL = ../arm/min.c
ISAMINKERNEL = ../arm/iamin.c
IDAMINKERNEL = ../arm/iamin.c
ICAMINKERNEL = ../arm/izamin.c
IZAMINKERNEL = ../arm/izamin.c
ISMAXKERNEL = ../arm/imax.c
IDMAXKERNEL = ../arm/imax.c
ISMINKERNEL = ../arm/imin.c
IDMINKERNEL = ../arm/imin.c
STRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
STRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
STRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
STRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
DTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
DTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
DTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
DTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
CTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
CTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
CTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
CTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
ZTRSMKERNEL_LN = ../generic/trsm_kernel_LN.c
ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
SAMAXKERNEL = amax.S
DAMAXKERNEL = amax.S
CAMAXKERNEL = zamax.S
ZAMAXKERNEL = zamax.S
SAXPYKERNEL = axpy.S
DAXPYKERNEL = daxpy_thunderx2t99.S
CAXPYKERNEL = zaxpy.S
ZAXPYKERNEL = zaxpy.S
SROTKERNEL = rot.S
DROTKERNEL = rot.S
CROTKERNEL = zrot.S
ZROTKERNEL = zrot.S
SSCALKERNEL = scal.S
DSCALKERNEL = scal.S
CSCALKERNEL = zscal.S
ZSCALKERNEL = zscal.S
SGEMVNKERNEL = gemv_n.S
DGEMVNKERNEL = gemv_n.S
CGEMVNKERNEL = zgemv_n.S
ZGEMVNKERNEL = zgemv_n.S
SGEMVTKERNEL = gemv_t.S
DGEMVTKERNEL = gemv_t.S
CGEMVTKERNEL = zgemv_t.S
ZGEMVTKERNEL = zgemv_t.S
STRMMKERNEL = strmm_kernel_$(SGEMM_UNROLL_M)x$(SGEMM_UNROLL_N).S
ifneq ($(SGEMM_UNROLL_M), $(SGEMM_UNROLL_N))
SGEMMINCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_M).c
SGEMMITCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_M).c
SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX)
SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX)
endif
SGEMMONCOPY = ../generic/gemm_ncopy_$(SGEMM_UNROLL_N).c
SGEMMOTCOPY = ../generic/gemm_tcopy_$(SGEMM_UNROLL_N).c
SGEMMONCOPYOBJ = sgemm_oncopy$(TSUFFIX).$(SUFFIX)
SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX)
DTRMMKERNEL = dtrmm_kernel_$(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N).S
ifneq ($(DGEMM_UNROLL_M), $(DGEMM_UNROLL_N))
ifeq ($(DGEMM_UNROLL_M), 8)
DGEMMINCOPY = dgemm_ncopy_$(DGEMM_UNROLL_M).S
DGEMMITCOPY = dgemm_tcopy_$(DGEMM_UNROLL_M).S
else
DGEMMINCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_M).c
DGEMMITCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_M).c
endif
DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX)
DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX)
endif
ifeq ($(DGEMM_UNROLL_N), 4)
DGEMMONCOPY = dgemm_ncopy_$(DGEMM_UNROLL_N).S
DGEMMOTCOPY = dgemm_tcopy_$(DGEMM_UNROLL_N).S
else
DGEMMONCOPY = ../generic/gemm_ncopy_$(DGEMM_UNROLL_N).c
DGEMMOTCOPY = ../generic/gemm_tcopy_$(DGEMM_UNROLL_N).c
endif
DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX)
DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX)
CTRMMKERNEL = ctrmm_kernel_$(CGEMM_UNROLL_M)x$(CGEMM_UNROLL_N).S
ifneq ($(CGEMM_UNROLL_M), $(CGEMM_UNROLL_N))
CGEMMINCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_M).c
CGEMMITCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_M).c
CGEMMINCOPYOBJ = cgemm_incopy$(TSUFFIX).$(SUFFIX)
CGEMMITCOPYOBJ = cgemm_itcopy$(TSUFFIX).$(SUFFIX)
endif
CGEMMONCOPY = ../generic/zgemm_ncopy_$(CGEMM_UNROLL_N).c
CGEMMOTCOPY = ../generic/zgemm_tcopy_$(CGEMM_UNROLL_N).c
CGEMMONCOPYOBJ = cgemm_oncopy$(TSUFFIX).$(SUFFIX)
CGEMMOTCOPYOBJ = cgemm_otcopy$(TSUFFIX).$(SUFFIX)
ZTRMMKERNEL = ztrmm_kernel_$(ZGEMM_UNROLL_M)x$(ZGEMM_UNROLL_N).S
ifneq ($(ZGEMM_UNROLL_M), $(ZGEMM_UNROLL_N))
ZGEMMINCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_M).c
ZGEMMITCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_M).c
ZGEMMINCOPYOBJ = zgemm_incopy$(TSUFFIX).$(SUFFIX)
ZGEMMITCOPYOBJ = zgemm_itcopy$(TSUFFIX).$(SUFFIX)
endif
ZGEMMONCOPY = ../generic/zgemm_ncopy_$(ZGEMM_UNROLL_N).c
ZGEMMOTCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_N).c
ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX)
ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX)
SASUMKERNEL = sasum_thunderx2t99.c
DASUMKERNEL = dasum_thunderx2t99.c
@@ -27,12 +160,12 @@ CNRM2KERNEL = scnrm2_thunderx2t99.c
DNRM2KERNEL = dznrm2_thunderx2t99.c
ZNRM2KERNEL = dznrm2_thunderx2t99.c
DAXPYKERNEL = daxpy_thunderx2t99.S
DDOTKERNEL = dot_thunderx2t99.c
SDOTKERNEL = dot_thunderx2t99.c
CDOTKERNEL = zdot_thunderx2t99.c
ZDOTKERNEL = zdot_thunderx2t99.c
DSDOTKERNEL = dot.S
ifeq ($(DGEMM_UNROLL_M)x$(DGEMM_UNROLL_N), 8x4)
DGEMMKERNEL = dgemm_kernel_8x4_thunderx2t99.S

View File

@@ -1,3 +0,0 @@
include $(KERNELDIR)/KERNEL.THUNDERX2T99

View File

@@ -1 +0,0 @@
include $(KERNELDIR)/KERNEL.ARMV8

View File

@@ -943,13 +943,9 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
prfm PLDL1KEEP, [origPB]
prfm PLDL1KEEP, [origPA]
ldr A_PRE_SIZE, =dgemm_prefetch_size_a
ldr A_PRE_SIZE, [A_PRE_SIZE]
ldr B_PRE_SIZE, =dgemm_prefetch_size_b
ldr B_PRE_SIZE, [B_PRE_SIZE]
ldr C_PRE_SIZE, =dgemm_prefetch_size_c
ldr C_PRE_SIZE, [C_PRE_SIZE]
mov A_PRE_SIZE, #3584
mov B_PRE_SIZE, #512
mov C_PRE_SIZE, #128
add A_PRE_SIZE_64, A_PRE_SIZE, #64
add B_PRE_SIZE_64, B_PRE_SIZE, #64

View File

@@ -661,7 +661,7 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON
b[ 9] = ZERO;
b[ 10] = ZERO;
b[ 11] = ZERO;
b[ 11] = ZERO;
b[ 12] = ZERO;
b[ 13] = ZERO;
b[ 14] = ZERO;
b[ 15] = ZERO;

View File

@@ -45,7 +45,7 @@ BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
while(i < n)
{
if( x[ix] > minf )
if( x[ix] < minf )
{
min = i;
minf = x[ix];

View File

@@ -1,12 +1,13 @@
CAXPYKERNEL = ../mips/zaxpy.c
ZAXPYKERNEL = ../mips/zaxpy.c
SROTKERNEL = ../mips/rot.c
DROTKERNEL = ../mips/rot.c
CROTKERNEL = ../mips/zrot.c
ZROTKERNEL = ../mips/zrot.c
SROTKERNEL = ../mips/rot.c
DROTKERNEL = ../mips/rot.c
CROTKERNEL = ../mips/zrot.c
ZROTKERNEL = ../mips/zrot.c
CSWAPKERNEL = ../mips/zswap.c
ZSWAPKERNEL = ../mips/zswap.c
ifndef SNRM2KERNEL
SNRM2KERNEL = snrm2.S
endif

View File

@@ -63,6 +63,7 @@ ZTRSMKERNEL_LT = ../generic/trsm_kernel_LT.c
ZTRSMKERNEL_RN = ../generic/trsm_kernel_RN.c
ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
DSDOTKERNEL = ../mips/dot.c

View File

@@ -270,6 +270,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.align 5
.L20:
beqz INCY, .L27
dsra I, N, 3
move YY, Y
@@ -450,5 +451,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
j $31
NOP
.align 3
.L27:
LD b1, 0 * SIZE(Y)
.L28:
daddiu N, N, -1
LD a1, 0 * SIZE(X)
daddu X, X, INCX
bgtz N, .L28
MADD b1, b1, ALPHA, a1
j .L999
ST b1, 0 * SIZE(Y)
EPILOGUE

View File

@@ -562,6 +562,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//INCX!=1 or INCY != 1
.L20:
beq INCY, $0, .L27
dsra I, N, 3
move YY, Y
@@ -754,5 +755,18 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
j $31
NOP
.align 3
.L27:
LD b1, 0 * SIZE(Y)
.L28:
daddiu N, N, -1
LD a1, 0 * SIZE(X)
daddu X, X, INCX
bgtz N, .L28
MADD b1, b1, ALPHA, a1
j .L999
ST b1, 0 * SIZE(Y)
EPILOGUE

View File

@@ -146,11 +146,11 @@
sd $21, 40($sp)
sd $22, 48($sp)
ST $f24, 56($sp)
ST $f25, 64($sp)
ST $f26, 72($sp)
ST $f27, 80($sp)
ST $f28, 88($sp)
sdc1 $f24, 56($sp)
sdc1 $f25, 64($sp)
sdc1 $f26, 72($sp)
sdc1 $f27, 80($sp)
sdc1 $f28, 88($sp)
#if defined(TRMMKERNEL)
sd $23, 96($sp)
@@ -161,10 +161,10 @@
#endif
#ifndef __64BIT__
ST $f20,120($sp)
ST $f21,128($sp)
ST $f22,136($sp)
ST $f23,144($sp)
sdc1 $f20,120($sp)
sdc1 $f21,128($sp)
sdc1 $f22,136($sp)
sdc1 $f23,144($sp)
#endif
.align 4
@@ -7766,11 +7766,11 @@
ld $21, 40($sp)
ld $22, 48($sp)
LD $f24, 56($sp)
LD $f25, 64($sp)
LD $f26, 72($sp)
LD $f27, 80($sp)
LD $f28, 88($sp)
ldc1 $f24, 56($sp)
ldc1 $f25, 64($sp)
ldc1 $f26, 72($sp)
ldc1 $f27, 80($sp)
ldc1 $f28, 88($sp)
#if defined(TRMMKERNEL)
ld $23, 96($sp)
@@ -7779,10 +7779,10 @@
#endif
#ifndef __64BIT__
LD $f20,120($sp)
LD $f21,128($sp)
LD $f22,136($sp)
LD $f23,144($sp)
ldc1 $f20,120($sp)
ldc1 $f21,128($sp)
ldc1 $f22,136($sp)
ldc1 $f23,144($sp)
#endif
daddiu $sp,$sp,STACKSIZE

View File

@@ -89,14 +89,14 @@ ZTRSMKERNEL_RT = ../generic/trsm_kernel_RT.c
#SMINKERNEL = ../arm/min.c
#DMINKERNEL = ../arm/min.c
#
#ISAMAXKERNEL = ../arm/iamax.c
ISAMAXKERNEL = isamax.c
IDAMAXKERNEL = idamax.c
#ICAMAXKERNEL = ../arm/izamax.c
IZAMAXKERNEL = izamax.c
ICAMAXKERNEL = icamax.c
IZAMAXKERNEL = izamax.c
#
#ISAMINKERNEL = ../arm/iamin.c
IDAMINKERNEL = idamin.c
#ICAMINKERNEL = ../arm/izamin.c
ISAMINKERNEL = isamin.c
IDAMINKERNEL = idamin.c
ICAMINKERNEL = icamin.c
IZAMINKERNEL = izamin.c
#
#ISMAXKERNEL = ../arm/imax.c
@@ -110,9 +110,9 @@ DASUMKERNEL = dasum.c
CASUMKERNEL = casum.c
ZASUMKERNEL = zasum.c
#
#SAXPYKERNEL = ../arm/axpy.c
SAXPYKERNEL = saxpy.c
DAXPYKERNEL = daxpy.c
#CAXPYKERNEL = ../arm/zaxpy.c
CAXPYKERNEL = caxpy.c
ZAXPYKERNEL = zaxpy.c
#
SCOPYKERNEL = scopy.c
@@ -123,7 +123,7 @@ ZCOPYKERNEL = zcopy.c
SDOTKERNEL = sdot.c
DDOTKERNEL = ddot.c
DSDOTKERNEL = sdot.c
#CDOTKERNEL = ../arm/zdot.c
CDOTKERNEL = cdot.c
ZDOTKERNEL = zdot.c
#
SNRM2KERNEL = ../arm/nrm2.c
@@ -133,7 +133,7 @@ ZNRM2KERNEL = ../arm/znrm2.c
#
SROTKERNEL = srot.c
DROTKERNEL = drot.c
CROTKERNEL = zrot.c
CROTKERNEL = crot.c
ZROTKERNEL = zrot.c
#
SSCALKERNEL = sscal.c

145
kernel/power/caxpy.c Normal file
View File

@@ -0,0 +1,145 @@
/*
Copyright (c) 2013-2018, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "common.h"
#ifndef HAVE_ASM_KERNEL
#include <altivec.h>
/*
 * Vectorized core of complex single-precision AXPY: y += alpha * x.
 *
 * n       number of complex elements; caller guarantees n > 0 and n % 16 == 0
 * x, y    contiguous interleaved complex arrays (re, im, re, im, ...)
 * alpha_r, alpha_i   real and imaginary parts of the scalar alpha
 *
 * Each __vector float holds 2 complex values (4 floats); the loop runs over
 * n/2 vectors, 8 vectors (16 complex elements) per iteration.  The complex
 * multiply is done as two real multiply-adds: first x * alpha_r, then the
 * real/imag-swapped x times a sign-adjusted alpha_i vector.  The sign pattern
 * baked into valpha_r / valpha_i below selects plain vs. conjugated multiply,
 * matching the CONJ/XCONJ variants built from this file.
 */
static void caxpy_kernel_16(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT alpha_r, FLOAT alpha_i)
{
#if ( !defined(CONJ) && !defined(XCONJ) ) || ( defined(CONJ) && defined(XCONJ) )
register __vector float valpha_r = {alpha_r, alpha_r,alpha_r, alpha_r};
register __vector float valpha_i = {-alpha_i, alpha_i,-alpha_i, alpha_i};
#else
register __vector float valpha_r = {alpha_r, -alpha_r,alpha_r, -alpha_r};
register __vector float valpha_i = {alpha_i, alpha_i,alpha_i, alpha_i};
#endif
/* permute mask that swaps the real/imaginary halves of each complex pair */
__vector unsigned char swap_mask = { 4,5,6,7,0,1,2,3, 12,13,14,15, 8,9,10,11};
register __vector float *vy = (__vector float *) y;
register __vector float *vx = (__vector float *) x;
BLASLONG i=0;
for (; i < n/2; i += 8) {
/* load 8 vectors (16 complex) of y and x */
register __vector float vy_0 = vy[i];
register __vector float vy_1 = vy[i + 1];
register __vector float vy_2 = vy[i + 2];
register __vector float vy_3 = vy[i + 3];
register __vector float vy_4 = vy[i + 4];
register __vector float vy_5 = vy[i + 5];
register __vector float vy_6 = vy[i + 6];
register __vector float vy_7 = vy[i + 7];
register __vector float vx_0 = vx[i];
register __vector float vx_1 = vx[i + 1];
register __vector float vx_2 = vx[i + 2];
register __vector float vx_3 = vx[i + 3];
register __vector float vx_4 = vx[i + 4];
register __vector float vx_5 = vx[i + 5];
register __vector float vx_6 = vx[i + 6];
register __vector float vx_7 = vx[i + 7];
/* y += x * alpha_r  (real-scalar contribution) */
vy_0 += vx_0*valpha_r;
vy_1 += vx_1*valpha_r;
vy_2 += vx_2*valpha_r;
vy_3 += vx_3*valpha_r;
vy_4 += vx_4*valpha_r;
vy_5 += vx_5*valpha_r;
vy_6 += vx_6*valpha_r;
vy_7 += vx_7*valpha_r;
/* swap re/im in each x pair so the alpha_i product lands crosswise */
vx_0 = vec_perm(vx_0, vx_0, swap_mask);
vx_1 = vec_perm(vx_1, vx_1, swap_mask);
vx_2 = vec_perm(vx_2, vx_2, swap_mask);
vx_3 = vec_perm(vx_3, vx_3, swap_mask);
vx_4 = vec_perm(vx_4, vx_4, swap_mask);
vx_5 = vec_perm(vx_5, vx_5, swap_mask);
vx_6 = vec_perm(vx_6, vx_6, swap_mask);
vx_7 = vec_perm(vx_7, vx_7, swap_mask);
/* y += swapped(x) * (+/-alpha_i)  (imaginary-scalar contribution) */
vy_0 += vx_0*valpha_i;
vy_1 += vx_1*valpha_i;
vy_2 += vx_2*valpha_i;
vy_3 += vx_3*valpha_i;
vy_4 += vx_4*valpha_i;
vy_5 += vx_5*valpha_i;
vy_6 += vx_6*valpha_i;
vy_7 += vx_7*valpha_i;
/* store results back */
vy[i] = vy_0;
vy[i + 1] = vy_1;
vy[i + 2] = vy_2;
vy[i + 3] = vy_3;
vy[i + 4] = vy_4;
vy[i + 5] = vy_5 ;
vy[i + 6] = vy_6 ;
vy[i + 7] = vy_7 ;
}
}
#endif
/*
 * Complex single-precision AXPY entry point: y += (da_r + i*da_i) * x.
 *
 * dummy0/dummy1/dummy/dummy2 exist only to satisfy the common BLAS kernel
 * signature and are unused.  Returns 0 always.
 *
 * For unit strides the bulk (a multiple of 16 elements) is handed to the
 * SIMD kernel and the remainder is finished in scalar code; any other
 * stride combination takes the generic scalar path.
 */
int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da_r, FLOAT da_i, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2) {
    BLASLONG k;

    if (n <= 0) return (0);

    if ((inc_x == 1) && (inc_y == 1)) {
        BLASLONG nv = n & -16;   /* elements handled by the vector kernel */
        BLASLONG p  = 2 * nv;    /* float offset where the scalar tail starts */

        if (nv) caxpy_kernel_16(nv, x, y, da_r, da_i);

        for (k = nv; k < n; k++, p += 2) {
#if !defined(CONJ)
            y[p]     += (da_r * x[p] - da_i * x[p + 1]);
            y[p + 1] += (da_r * x[p + 1] + da_i * x[p]);
#else
            y[p]     += (da_r * x[p] + da_i * x[p + 1]);
            y[p + 1] -= (da_r * x[p + 1] - da_i * x[p]);
#endif
        }
        return (0);
    }

    /* generic strided path (increments measured in complex elements) */
    {
        BLASLONG sx = inc_x * 2;
        BLASLONG sy = inc_y * 2;
        BLASLONG px = 0, py = 0;

        for (k = 0; k < n; k++, px += sx, py += sy) {
#if !defined(CONJ)
            y[py]     += (da_r * x[px] - da_i * x[px + 1]);
            y[py + 1] += (da_r * x[px + 1] + da_i * x[px]);
#else
            y[py]     += (da_r * x[px] + da_i * x[px + 1]);
            y[py + 1] -= (da_r * x[px + 1] - da_i * x[px]);
#endif
        }
        return (0);
    }
}

164
kernel/power/cdot.c Normal file
View File

@@ -0,0 +1,164 @@
/*Copyright (c) 2013-2018, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "common.h"
#ifndef HAVE_KERNEL_8
#include <altivec.h>
/*
 * Vectorized core of the complex single-precision dot product.
 *
 * n       number of complex elements; caller guarantees n > 0 and n % 8 == 0
 * x, y    contiguous interleaved complex arrays
 * dot     output accumulator, 4 floats:
 *           dot[0] = sum x_r*y_r,  dot[1] = sum x_i*y_i,
 *           dot[2] = sum x_r*y_i,  dot[3] = sum x_i*y_r
 *         (matches the scalar tail loop in CNAME below; the caller combines
 *          these according to CONJ)
 *
 * Each __vector float holds 2 complex values; 4 vectors (8 complex) are
 * processed per iteration.  vd_* accumulate x*y lane-wise and vdd_* accumulate
 * x times the re/im-swapped y; the final permute/add folds the four partial
 * vectors down to the 4 scalar sums.
 */
static void cdot_kernel_8(BLASLONG n, FLOAT *x, FLOAT *y, float *dot)
{
__vector unsigned char swap_mask = { 4,5,6,7,0,1,2,3, 12,13,14,15, 8,9,10,11};
register __vector float *vy = (__vector float *) y;
register __vector float *vx = (__vector float *) x;
BLASLONG i = 0;
register __vector float vd_0 = { 0 };
register __vector float vd_1 = { 0 };
register __vector float vd_2 = { 0 };
register __vector float vd_3 = { 0 };
register __vector float vdd_0 = { 0 };
register __vector float vdd_1 = { 0 };
register __vector float vdd_2 = { 0 };
register __vector float vdd_3 = { 0 };
for (; i < n/2; i += 4) {
register __vector float vyy_0 ;
register __vector float vyy_1 ;
register __vector float vyy_2 ;
register __vector float vyy_3 ;
register __vector float vy_0 = vy[i];
register __vector float vy_1 = vy[i + 1];
register __vector float vy_2 = vy[i + 2];
register __vector float vy_3 = vy[i + 3];
register __vector float vx_0= vx[i];
register __vector float vx_1 = vx[i + 1];
register __vector float vx_2 = vx[i + 2];
register __vector float vx_3 = vx[i + 3];
/* re/im-swapped copies of y for the cross products */
vyy_0 = vec_perm(vy_0, vy_0, swap_mask);
vyy_1 = vec_perm(vy_1, vy_1, swap_mask);
vyy_2 = vec_perm(vy_2, vy_2, swap_mask);
vyy_3 = vec_perm(vy_3, vy_3, swap_mask);
/* straight products: lanes hold x_r*y_r and x_i*y_i */
vd_0 += vx_0 * vy_0;
vd_1 += vx_1 * vy_1;
vd_2 += vx_2 * vy_2;
vd_3 += vx_3 * vy_3;
/* cross products: lanes hold x_r*y_i and x_i*y_r */
vdd_0 += vx_0 * vyy_0;
vdd_1 += vx_1 * vyy_1;
vdd_2 += vx_2 * vyy_2;
vdd_3 += vx_3 * vyy_3;
}
//aggregate
vd_0 = vd_0 + vd_1 +vd_2 +vd_3;
vdd_0= vdd_0 + vdd_1 +vdd_2 +vdd_3;
//reverse and aggregate
vd_1=vec_xxpermdi(vd_0,vd_0,2) ;
vdd_1=vec_xxpermdi(vdd_0,vdd_0,2);
vd_2=vd_0+vd_1;
vdd_2=vdd_0+vdd_1;
dot[0]=vd_2[0];
dot[1]=vd_2[1];
dot[2]=vdd_2[0];
dot[3]=vdd_2[1];
}
#endif
/*
 * Complex single-precision dot product: returns x^T y (or x^H y when CONJ
 * is defined).  Accumulates four partial sums —
 *   d[0] = sum x_r*y_r, d[1] = sum x_i*y_i,
 *   d[2] = sum x_r*y_i, d[3] = sum x_i*y_r
 * — and combines them at the end according to CONJ.  Returns 0+0i for n <= 0.
 */
OPENBLAS_COMPLEX_FLOAT CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y) {
    OPENBLAS_COMPLEX_FLOAT result;
    FLOAT d[4] __attribute__ ((aligned(16))) = {0.0, 0.0, 0.0, 0.0};
    BLASLONG k;

    if (n <= 0) {
        CREAL(result) = 0.0;
        CIMAG(result) = 0.0;
        return (result);
    }

    if ((inc_x == 1) && (inc_y == 1)) {
        /* contiguous data: vector kernel covers a multiple of 8 elements */
        BLASLONG nv = n & -8;
        BLASLONG p = 0;

        if (nv) {
            cdot_kernel_8(nv, x, y, d);
            p = nv << 1;
        }
        for (k = nv; k < n; k++, p += 2) {
            d[0] += x[p] * y[p];
            d[1] += x[p + 1] * y[p + 1];
            d[2] += x[p] * y[p + 1];
            d[3] += x[p + 1] * y[p];
        }
    } else {
        /* generic strided path */
        BLASLONG sx = inc_x << 1;
        BLASLONG sy = inc_y << 1;
        BLASLONG px = 0, py = 0;

        for (k = 0; k < n; k++, px += sx, py += sy) {
            d[0] += x[px] * y[py];
            d[1] += x[px + 1] * y[py + 1];
            d[2] += x[px] * y[py + 1];
            d[3] += x[px + 1] * y[py];
        }
    }

#if !defined(CONJ)
    CREAL(result) = d[0] - d[1];
    CIMAG(result) = d[2] + d[3];
#else
    CREAL(result) = d[0] + d[1];
    CIMAG(result) = d[2] - d[3];
#endif
    return (result);
}

231
kernel/power/crot.c Normal file
View File

@@ -0,0 +1,231 @@
/***************************************************************************
Copyright (c) 2013-2018, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "common.h"
#if defined(POWER8)
/*
 * VSX-assembly core of the single-precision complex Givens rotation:
 *   x[k] =  c*x[k] + s*y[k]
 *   y[k] =  c*y[k] - s*x[k]
 * applied element-wise to 2*n floats (n complex values).
 * Caller guarantees n > 0 and n % 8 == 0; x and y are unit-stride.
 *
 * The asm splats c into vs36 and s into vs37, then processes 8 complex
 * values (four 16-byte vectors of x and of y) per loop trip, software-
 * pipelined: the next iteration's loads are issued between the current
 * multiplies, and the final partial iteration after the branch at "2:"
 * drains the pipeline.  t0..t7 are scratch VSX registers allocated by the
 * compiler via the "=wa" output constraints.
 */
static void crot_kernel_8 (long n, float *x, float *y, float c, float s)
{
__vector float t0;
__vector float t1;
__vector float t2;
__vector float t3;
__vector float t4;
__vector float t5;
__vector float t6;
__vector float t7;
__asm__
(
"xscvdpspn 36, %x[cos] \n\t" // load c to all words
"xxspltw 36, 36, 0 \n\t"
"xscvdpspn 37, %x[sin] \n\t" // load s to all words
"xxspltw 37, 37, 0 \n\t"
"lxvd2x 32, 0, %[x_ptr] \n\t" // load x
"lxvd2x 33, %[i16], %[x_ptr] \n\t"
"lxvd2x 34, %[i32], %[x_ptr] \n\t"
"lxvd2x 35, %[i48], %[x_ptr] \n\t"
"lxvd2x 48, 0, %[y_ptr] \n\t" // load y
"lxvd2x 49, %[i16], %[y_ptr] \n\t"
"lxvd2x 50, %[i32], %[y_ptr] \n\t"
"lxvd2x 51, %[i48], %[y_ptr] \n\t"
"addi %[x_ptr], %[x_ptr], 64 \n\t"
"addi %[y_ptr], %[y_ptr], 64 \n\t"
"addic. %[temp_n], %[temp_n], -8 \n\t"
"ble 2f \n\t"
".p2align 5 \n\t"
"1: \n\t"
"xvmulsp 40, 32, 36 \n\t" // c * x
"xvmulsp 41, 33, 36 \n\t"
"xvmulsp 42, 34, 36 \n\t"
"xvmulsp 43, 35, 36 \n\t"
"xvmulsp %x[x0], 48, 36 \n\t" // c * y
"xvmulsp %x[x2], 49, 36 \n\t"
"xvmulsp %x[x1], 50, 36 \n\t"
"xvmulsp %x[x3], 51, 36 \n\t"
"xvmulsp 44, 32, 37 \n\t" // s * x
"xvmulsp 45, 33, 37 \n\t"
"lxvd2x 32, 0, %[x_ptr] \n\t" // load x
"lxvd2x 33, %[i16], %[x_ptr] \n\t"
"xvmulsp 46, 34, 37 \n\t"
"xvmulsp 47, 35, 37 \n\t"
"lxvd2x 34, %[i32], %[x_ptr] \n\t"
"lxvd2x 35, %[i48], %[x_ptr] \n\t"
"xvmulsp %x[x4], 48, 37 \n\t" // s * y
"xvmulsp %x[x5], 49, 37 \n\t"
"lxvd2x 48, 0, %[y_ptr] \n\t" // load y
"lxvd2x 49, %[i16], %[y_ptr] \n\t"
"xvmulsp %x[x6], 50, 37 \n\t"
"xvmulsp %x[x7], 51, 37 \n\t"
"lxvd2x 50, %[i32], %[y_ptr] \n\t"
"lxvd2x 51, %[i48], %[y_ptr] \n\t"
"xvaddsp 40, 40, %x[x4] \n\t" // c * x + s * y
"xvaddsp 41, 41, %x[x5] \n\t" // c * x + s * y
"addi %[x_ptr], %[x_ptr], -64 \n\t"
"addi %[y_ptr], %[y_ptr], -64 \n\t"
"xvaddsp 42, 42, %x[x6] \n\t" // c * x + s * y
"xvaddsp 43, 43, %x[x7] \n\t" // c * x + s * y
"xvsubsp %x[x0], %x[x0], 44 \n\t" // c * y - s * x
"xvsubsp %x[x2], %x[x2], 45 \n\t" // c * y - s * x
"xvsubsp %x[x1], %x[x1], 46 \n\t" // c * y - s * x
"xvsubsp %x[x3], %x[x3], 47 \n\t" // c * y - s * x
"stxvd2x 40, 0, %[x_ptr] \n\t" // store x
"stxvd2x 41, %[i16], %[x_ptr] \n\t"
"stxvd2x 42, %[i32], %[x_ptr] \n\t"
"stxvd2x 43, %[i48], %[x_ptr] \n\t"
"stxvd2x %x[x0], 0, %[y_ptr] \n\t" // store y
"stxvd2x %x[x2], %[i16], %[y_ptr] \n\t"
"stxvd2x %x[x1], %[i32], %[y_ptr] \n\t"
"stxvd2x %x[x3], %[i48], %[y_ptr] \n\t"
"addi %[x_ptr], %[x_ptr], 128 \n\t"
"addi %[y_ptr], %[y_ptr], 128 \n\t"
"addic. %[temp_n], %[temp_n], -8 \n\t"
"bgt 1b \n\t"
"2: \n\t"
"xvmulsp 40, 32, 36 \n\t" // c * x
"xvmulsp 41, 33, 36 \n\t"
"xvmulsp 42, 34, 36 \n\t"
"xvmulsp 43, 35, 36 \n\t"
"xvmulsp %x[x0], 48, 36 \n\t" // c * y
"xvmulsp %x[x2], 49, 36 \n\t"
"xvmulsp %x[x1], 50, 36 \n\t"
"xvmulsp %x[x3], 51, 36 \n\t"
"xvmulsp 44, 32, 37 \n\t" // s * x
"xvmulsp 45, 33, 37 \n\t"
"xvmulsp 46, 34, 37 \n\t"
"xvmulsp 47, 35, 37 \n\t"
"xvmulsp %x[x4], 48, 37 \n\t" // s * y
"xvmulsp %x[x5], 49, 37 \n\t"
"xvmulsp %x[x6], 50, 37 \n\t"
"xvmulsp %x[x7], 51, 37 \n\t"
"addi %[x_ptr], %[x_ptr], -64 \n\t"
"addi %[y_ptr], %[y_ptr], -64 \n\t"
"xvaddsp 40, 40, %x[x4] \n\t" // c * x + s * y
"xvaddsp 41, 41, %x[x5] \n\t" // c * x + s * y
"xvaddsp 42, 42, %x[x6] \n\t" // c * x + s * y
"xvaddsp 43, 43, %x[x7] \n\t" // c * x + s * y
"xvsubsp %x[x0], %x[x0], 44 \n\t" // c * y - s * x
"xvsubsp %x[x2], %x[x2], 45 \n\t" // c * y - s * x
"xvsubsp %x[x1], %x[x1], 46 \n\t" // c * y - s * x
"xvsubsp %x[x3], %x[x3], 47 \n\t" // c * y - s * x
"stxvd2x 40, 0, %[x_ptr] \n\t" // store x
"stxvd2x 41, %[i16], %[x_ptr] \n\t"
"stxvd2x 42, %[i32], %[x_ptr] \n\t"
"stxvd2x 43, %[i48], %[x_ptr] \n\t"
"stxvd2x %x[x0], 0, %[y_ptr] \n\t" // store y
"stxvd2x %x[x2], %[i16], %[y_ptr] \n\t"
"stxvd2x %x[x1], %[i32], %[y_ptr] \n\t"
"stxvd2x %x[x3], %[i48], %[y_ptr] "
/* the mem_x/mem_y output operands tell the compiler the whole arrays
   are read and written, so no memory clobber is needed */
:
[mem_x] "+m" (*(float (*)[2*n])x),
[mem_y] "+m" (*(float (*)[2*n])y),
[temp_n] "+r" (n),
[x_ptr] "+&b" (x),
[y_ptr] "+&b" (y),
[x0] "=wa" (t0),
[x1] "=wa" (t2),
[x2] "=wa" (t1),
[x3] "=wa" (t3),
[x4] "=wa" (t4),
[x5] "=wa" (t5),
[x6] "=wa" (t6),
[x7] "=wa" (t7)
:
[cos] "f" (c),
[sin] "f" (s),
[i16] "b" (16),
[i32] "b" (32),
[i48] "b" (48)
:
"cr0",
"vs32","vs33","vs34","vs35","vs36","vs37",
"vs40","vs41","vs42","vs43","vs44","vs45","vs46","vs47",
"vs48","vs49","vs50","vs51"
);
}
#endif
/*
 * Complex single-precision Givens rotation:
 *   x[k] = c*x[k] + s*y[k]
 *   y[k] = c*y[k] - s*x[k]
 * for n complex elements.  Returns 0 always; no-op for n <= 0.
 *
 * Fix: crot_kernel_8 is only compiled under `#if defined(POWER8)` (see
 * above), but the original code called it unconditionally, which fails to
 * link on non-POWER8 builds.  The call is now guarded the same way, with
 * the scalar loop serving as the fallback.
 */
int CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT c, FLOAT s)
{
	BLASLONG i=0;
	BLASLONG ix=0,iy=0;
	FLOAT temp[2];
	BLASLONG inc_x2;
	BLASLONG inc_y2;

	if ( n <= 0 ) return(0);

	if ( (inc_x == 1) && (inc_y == 1) )
	{
#if defined(POWER8)
		/* vector kernel handles the largest multiple of 8 elements */
		BLASLONG n1 = n & -8;
		if ( n1 > 0 )
		{
			crot_kernel_8(n1, x, y, c, s);
			i=n1;
			ix=2*n1;
		}
#endif
		/* scalar tail (or full scalar path when POWER8 is not defined) */
		while(i < n)
		{
			temp[0] = c*x[ix] + s*y[ix] ;
			temp[1] = c*x[ix+1] + s*y[ix+1] ;
			y[ix] = c*y[ix] - s*x[ix] ;
			y[ix+1] = c*y[ix+1] - s*x[ix+1] ;
			x[ix] = temp[0] ;
			x[ix+1] = temp[1] ;
			ix += 2 ;
			i++ ;
		}
	}
	else
	{
		/* generic strided path (increments in complex elements) */
		inc_x2 = 2 * inc_x ;
		inc_y2 = 2 * inc_y ;
		while(i < n)
		{
			temp[0] = c*x[ix] + s*y[iy] ;
			temp[1] = c*x[ix+1] + s*y[iy+1] ;
			y[iy] = c*y[iy] - s*x[ix] ;
			y[iy+1] = c*y[iy+1] - s*x[ix+1] ;
			x[ix] = temp[0] ;
			x[ix+1] = temp[1] ;
			ix += inc_x2 ;
			iy += inc_y2 ;
			i++ ;
		}
	}
	return(0);
}

261
kernel/power/icamax.c Normal file
View File

@@ -0,0 +1,261 @@
/***************************************************************************
Copyright (c) 2019, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "common.h"
#include <math.h>
#include <altivec.h>
#if defined(DOUBLE)
#define ABS fabs
#else
#define ABS fabsf
#endif
#define CABS1(x,i) ABS(x[i])+ABS(x[i+1])
/**
* Find maximum index
* Warning: requirements n>0 and n % 32 == 0
* @param n
* @param x pointer to the vector
* @param maxf (out) maximum absolute value .( only for output )
* @return index
*/
/**
 * Find maximum index (by |re|+|im|) over the first n complex elements.
 * Warning: requirements n>0 and n % 32 == 0
 * @param n number of complex elements (multiple of 32)
 * @param x pointer to the vector (unit stride, interleaved complex)
 * @param maxf (out) maximum absolute value .( only for output )
 * @return 0-based index of the maximum (ties resolved to the lowest index)
 *
 * Fix: the loop counter `i` was declared without an initializer and then
 * used in `for(; i<n; ...)`, which reads an indeterminate value (undefined
 * behavior).  It is now initialized to 0.
 */
static BLASLONG ciamax_kernel_32(BLASLONG n, FLOAT *x, FLOAT *maxf) {
BLASLONG index;
BLASLONG i = 0;
register __vector unsigned int static_index0 = {0,1,2,3};
register __vector unsigned int temp0 = {4,4,4, 4}; //temporary vector register
register __vector unsigned int temp1= temp0<<1; //{8,8,8,8}
register __vector unsigned int static_index1=static_index0 +temp0;//{4,5,6,7};
register __vector unsigned int static_index2=static_index0 +temp1;//{8,9,10,11};
register __vector unsigned int static_index3=static_index1 +temp1; //{12,13,14,15};
temp0=vec_xor(temp0,temp0);
temp1=temp1 <<1 ; //{16,16,16,16}
register __vector unsigned int temp_add=temp1 <<1; //{32,32,32,32}
register __vector unsigned int quadruple_indices=temp0;//{0,0,0,0}
register __vector float quadruple_values={0,0,0,0};
register __vector float * v_ptrx=(__vector float *)x;
// masks gathering the real (even) and imaginary (odd) floats of 8 complex
register __vector unsigned char real_pack_mask = { 0,1,2,3,8,9,10,11,16,17,18,19, 24,25,26,27};
register __vector unsigned char image_pack_mask= {4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31};
for(; i<n; i+=32){
//absolute temporary complex vectors
register __vector float v0=vec_abs(v_ptrx[0]);
register __vector float v1=vec_abs(v_ptrx[1]);
register __vector float v2=vec_abs(v_ptrx[2]);
register __vector float v3=vec_abs(v_ptrx[3]);
register __vector float v4=vec_abs(v_ptrx[4]);
register __vector float v5=vec_abs(v_ptrx[5]);
register __vector float v6=vec_abs(v_ptrx[6]);
register __vector float v7=vec_abs(v_ptrx[7]);
//pack complex real and imaginary parts together to sum real+image
register __vector float t1=vec_perm(v0,v1,real_pack_mask);
register __vector float ti=vec_perm(v0,v1,image_pack_mask);
v0=t1+ti; //sum quadruple real with quadruple image
register __vector float t2=vec_perm(v2,v3,real_pack_mask);
register __vector float ti2=vec_perm(v2,v3,image_pack_mask);
v1=t2+ti2;
t1=vec_perm(v4,v5,real_pack_mask);
ti=vec_perm(v4,v5,image_pack_mask);
v2=t1+ti; //sum
t2=vec_perm(v6,v7,real_pack_mask);
ti2=vec_perm(v6,v7,image_pack_mask);
v3=t2+ti2;
// now we have 16 summed elements . lets compare them
v_ptrx+=8;
register __vector bool int r1=vec_cmpgt(v1,v0);
register __vector bool int r2=vec_cmpgt(v3,v2);
register __vector unsigned int ind2= vec_sel(static_index0,static_index1,r1);
v0=vec_sel(v0,v1,r1);
register __vector unsigned int ind3= vec_sel(static_index2,static_index3,r2);
v1=vec_sel(v2,v3,r2);
//final cmp and select index and value for first 16 values
r1=vec_cmpgt(v1,v0);
register __vector unsigned int indf0 = vec_sel(ind2,ind3,r1);
register __vector float vf0= vec_sel(v0,v1,r1);
//absolute temporary complex vectors
v0=vec_abs(v_ptrx[0]);
v1=vec_abs(v_ptrx[1]);
v2=vec_abs(v_ptrx[2]);
v3=vec_abs(v_ptrx[3]);
v4=vec_abs(v_ptrx[4]);
v5=vec_abs(v_ptrx[5]);
v6=vec_abs(v_ptrx[6]);
v7=vec_abs(v_ptrx[7]);
//pack complex real and imaginary parts together to sum real+image
t1=vec_perm(v0,v1,real_pack_mask);
ti=vec_perm(v0,v1,image_pack_mask);
v0=t1+ti; //sum quadruple real with quadruple image
t2=vec_perm(v2,v3,real_pack_mask);
ti2=vec_perm(v2,v3,image_pack_mask);
v1=t2+ti2;
t1=vec_perm(v4,v5,real_pack_mask);
ti=vec_perm(v4,v5,image_pack_mask);
v2=t1+ti; //sum
t2=vec_perm(v6,v7,real_pack_mask);
ti2=vec_perm(v6,v7,image_pack_mask);
v3=t2+ti2;
// now we have 16 summed elements {from 16 to 31} . lets compare them
v_ptrx+=8;
r1=vec_cmpgt(v1,v0);
r2=vec_cmpgt(v3,v2);
ind2= vec_sel(static_index0,static_index1,r1);
v0=vec_sel(v0,v1,r1);
ind3= vec_sel(static_index2,static_index3,r2);
v1=vec_sel(v2,v3,r2);
//final cmp and select index and value for the second 16 values
r1=vec_cmpgt(v1,v0);
register __vector unsigned int indv0 = vec_sel(ind2,ind3,r1);
register __vector float vv0= vec_sel(v0,v1,r1);
indv0+=temp1; //make index from 16->31
//find final quadruple from 32 elements
r2=vec_cmpgt(vv0,vf0);
ind2 = vec_sel( indf0,indv0,r2);
vv0= vec_sel(vf0,vv0,r2);
//get absolute index
ind2+=temp0;
//compare with old quadruple and update
r1=vec_cmpgt(vv0,quadruple_values);
quadruple_indices = vec_sel( quadruple_indices,ind2,r1);
quadruple_values= vec_sel(quadruple_values,vv0,r1);
temp0+=temp_add;
}
//now we have to chose from 4 values and 4 different indices
// we will compare pairwise if pairs are exactly the same we will choose minimum between index
// otherwise we will assign index of the maximum value
float a1,a2,a3,a4;
unsigned int i1,i2,i3,i4;
a1=vec_extract(quadruple_values,0);
a2=vec_extract(quadruple_values,1);
a3=vec_extract(quadruple_values,2);
a4=vec_extract(quadruple_values,3);
i1=vec_extract(quadruple_indices,0);
i2=vec_extract(quadruple_indices,1);
i3=vec_extract(quadruple_indices,2);
i4=vec_extract(quadruple_indices,3);
if(a1==a2){
index=i1>i2?i2:i1;
}else if(a2>a1){
index=i2;
a1=a2;
}else{
index= i1;
}
if(a4==a3){
i1=i3>i4?i4:i3;
}else if(a4>a3){
i1=i4;
a3=a4;
}else{
i1= i3;
}
if(a1==a3){
index=i1>index?index:i1;
*maxf=a1;
}else if(a3>a1){
index=i1;
*maxf=a3;
}else{
*maxf=a1;
}
return index;
}
/*
 * ICAMAX: return the 1-based index of the complex element with the largest
 * |re| + |im|.  Returns 0 for n <= 0 or inc_x <= 0; ties keep the earliest
 * index.
 */
BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
{
    BLASLONG best = 0;       /* 0-based index of the current maximum */
    FLOAT best_val = 0;
    BLASLONG k, pos;

    if (n <= 0 || inc_x <= 0) return (0);

    if (inc_x == 1) {
        /* contiguous: vector kernel covers the largest multiple of 32 */
        BLASLONG nv = n & -32;

        k = 0;
        pos = 0;
        if (nv > 0) {
            best = ciamax_kernel_32(nv, x, &best_val);
            k = nv;
            pos = nv << 1;
        }
        for (; k < n; k++, pos += 2) {
            if (CABS1(x, pos) > best_val) {
                best = k;
                best_val = CABS1(x, pos);
            }
        }
        return (best + 1);
    }

    /* strided scalar scan; element 0 seeds the running maximum */
    {
        BLASLONG step = 2 * inc_x;

        best_val = CABS1(x, 0);
        pos = step;
        for (k = 1; k < n; k++, pos += step) {
            if (CABS1(x, pos) > best_val) {
                best = k;
                best_val = CABS1(x, pos);
            }
        }
        return (best + 1);
    }
}

266
kernel/power/icamin.c Normal file
View File

@@ -0,0 +1,266 @@
/***************************************************************************
Copyright (c) 2019, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "common.h"
#include <math.h>
#include <altivec.h>
#if defined(DOUBLE)
#define ABS fabs
#else
#define ABS fabsf
#endif
#define CABS1(x,i) ABS(x[i])+ABS(x[i+1])
/**
* Find minimum index
* Warning: requirements n>0 and n % 32 == 0
* @param n
* @param x pointer to the vector
* @param minf (out) minimum absolute value .( only for output )
* @return index
*/
/**
 * Find minimum index (by |re|+|im|) over the first n complex elements.
 * Warning: requirements n>0 and n % 32 == 0
 * @param n number of complex elements (multiple of 32)
 * @param x pointer to the vector (unit stride, interleaved complex)
 * @param minf (out) minimum absolute value .( only for output )
 * @return 0-based index of the minimum (ties resolved to the lowest index)
 *
 * Fix: the loop counter `i` was declared without an initializer and then
 * used in `for(; i<n; ...)`, which reads an indeterminate value (undefined
 * behavior).  It is now initialized to 0.
 */
static BLASLONG ciamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) {
BLASLONG index;
BLASLONG i = 0;
register __vector unsigned int static_index0 = {0,1,2,3};
register __vector unsigned int temp0 = {4,4,4, 4}; //temporary vector register
register __vector unsigned int temp1= temp0<<1; //{8,8,8,8}
register __vector unsigned int static_index1=static_index0 +temp0;//{4,5,6,7};
register __vector unsigned int static_index2=static_index0 +temp1;//{8,9,10,11};
register __vector unsigned int static_index3=static_index1 +temp1; //{12,13,14,15};
temp0=vec_xor(temp0,temp0);
temp1=temp1 <<1 ; //{16,16,16,16}
register __vector unsigned int temp_add=temp1 <<1; //{32,32,32,32}
register __vector unsigned int quadruple_indices=temp0;//{0,0,0,0}
// seed the running minima with the first element's |re|+|im|
float first_min=CABS1(x,0);
register __vector float quadruple_values={first_min,first_min,first_min,first_min};
register __vector float * v_ptrx=(__vector float *)x;
// masks gathering the real (even) and imaginary (odd) floats of 8 complex
register __vector unsigned char real_pack_mask = { 0,1,2,3,8,9,10,11,16,17,18,19, 24,25,26,27};
register __vector unsigned char image_pack_mask= {4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31};
for(; i<n; i+=32){
//absolute temporary complex vectors
register __vector float v0=vec_abs(v_ptrx[0]);
register __vector float v1=vec_abs(v_ptrx[1]);
register __vector float v2=vec_abs(v_ptrx[2]);
register __vector float v3=vec_abs(v_ptrx[3]);
register __vector float v4=vec_abs(v_ptrx[4]);
register __vector float v5=vec_abs(v_ptrx[5]);
register __vector float v6=vec_abs(v_ptrx[6]);
register __vector float v7=vec_abs(v_ptrx[7]);
//pack complex real and imaginary parts together to sum real+image
register __vector float t1=vec_perm(v0,v1,real_pack_mask);
register __vector float ti=vec_perm(v0,v1,image_pack_mask);
v0=t1+ti; //sum quadruple real with quadruple image
register __vector float t2=vec_perm(v2,v3,real_pack_mask);
register __vector float ti2=vec_perm(v2,v3,image_pack_mask);
v1=t2+ti2;
t1=vec_perm(v4,v5,real_pack_mask);
ti=vec_perm(v4,v5,image_pack_mask);
v2=t1+ti; //sum
t2=vec_perm(v6,v7,real_pack_mask);
ti2=vec_perm(v6,v7,image_pack_mask);
v3=t2+ti2;
// now we have 16 summed elements . lets compare them
v_ptrx+=8;
register __vector bool int r1=vec_cmpgt(v0,v1);
register __vector bool int r2=vec_cmpgt(v2,v3);
register __vector unsigned int ind2= vec_sel(static_index0,static_index1,r1);
v0=vec_sel(v0,v1,r1);
register __vector unsigned int ind3= vec_sel(static_index2,static_index3,r2);
v1=vec_sel(v2,v3,r2);
//final cmp and select index and value for first 16 values
r1=vec_cmpgt(v0,v1);
register __vector unsigned int indf0 = vec_sel(ind2,ind3,r1);
register __vector float vf0= vec_sel(v0,v1,r1);
//absolute temporary complex vectors
v0=vec_abs(v_ptrx[0]);
v1=vec_abs(v_ptrx[1]);
v2=vec_abs(v_ptrx[2]);
v3=vec_abs(v_ptrx[3]);
v4=vec_abs(v_ptrx[4]);
v5=vec_abs(v_ptrx[5]);
v6=vec_abs(v_ptrx[6]);
v7=vec_abs(v_ptrx[7]);
//pack complex real and imaginary parts together to sum real+image
t1=vec_perm(v0,v1,real_pack_mask);
ti=vec_perm(v0,v1,image_pack_mask);
v0=t1+ti; //sum quadruple real with quadruple image
t2=vec_perm(v2,v3,real_pack_mask);
ti2=vec_perm(v2,v3,image_pack_mask);
v1=t2+ti2;
t1=vec_perm(v4,v5,real_pack_mask);
ti=vec_perm(v4,v5,image_pack_mask);
v2=t1+ti; //sum
t2=vec_perm(v6,v7,real_pack_mask);
ti2=vec_perm(v6,v7,image_pack_mask);
v3=t2+ti2;
// now we have 16 summed elements {from 16 to 31} . lets compare them
v_ptrx+=8;
r1=vec_cmpgt(v0,v1);
r2=vec_cmpgt(v2,v3);
ind2= vec_sel(static_index0,static_index1,r1);
v0=vec_sel(v0,v1,r1);
ind3= vec_sel(static_index2,static_index3,r2);
v1=vec_sel(v2,v3,r2);
//final cmp and select index and value for the second 16 values
r1=vec_cmpgt(v0,v1);
register __vector unsigned int indv0 = vec_sel(ind2,ind3,r1);
register __vector float vv0= vec_sel(v0,v1,r1);
indv0+=temp1; //make index from 16->31
//find final quadruple from 32 elements
r2=vec_cmpgt(vf0,vv0);
ind2 = vec_sel( indf0,indv0,r2);
vv0= vec_sel(vf0,vv0,r2);
//get absolute index
ind2+=temp0;
//compare with old quadruple and update
r1=vec_cmpgt(quadruple_values,vv0);
quadruple_indices = vec_sel( quadruple_indices,ind2,r1);
quadruple_values= vec_sel(quadruple_values,vv0,r1);
temp0+=temp_add;
}
//now we have to chose from 4 values and 4 different indices
// we will compare pairwise if pairs are exactly the same we will choose minimum between index
// otherwise we will assign index of the minimum value
float a1,a2,a3,a4;
unsigned int i1,i2,i3,i4;
a1=vec_extract(quadruple_values,0);
a2=vec_extract(quadruple_values,1);
a3=vec_extract(quadruple_values,2);
a4=vec_extract(quadruple_values,3);
i1=vec_extract(quadruple_indices,0);
i2=vec_extract(quadruple_indices,1);
i3=vec_extract(quadruple_indices,2);
i4=vec_extract(quadruple_indices,3);
if(a1==a2){
index=i1>i2?i2:i1;
}else if(a2<a1){
index=i2;
a1=a2;
}else{
index= i1;
}
if(a4==a3){
i1=i3>i4?i4:i3;
}else if(a4<a3){
i1=i4;
a3=a4;
}else{
i1= i3;
}
if(a1==a3){
index=i1>index?index:i1;
*minf=a1;
}else if(a3<a1){
index=i1;
*minf=a3;
}else{
*minf=a1;
}
return index;
}
/*
 * ICAMIN: return the 1-based index of the complex element with the smallest
 * |re| + |im|.  Returns 0 for n <= 0 or inc_x <= 0; ties keep the earliest
 * index.
 */
BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x)
{
    BLASLONG best = 0;       /* 0-based index of the current minimum */
    FLOAT best_val;
    BLASLONG k, pos;

    if (n <= 0 || inc_x <= 0) return (0);

    if (inc_x == 1) {
        /* contiguous: vector kernel covers the largest multiple of 32 */
        BLASLONG nv = n & -32;

        best_val = CABS1(x, 0);  /* seed; overwritten by the kernel if used */
        k = 0;
        pos = 0;
        if (nv > 0) {
            best = ciamin_kernel_32(nv, x, &best_val);
            k = nv;
            pos = nv << 1;
        }
        for (; k < n; k++, pos += 2) {
            if (CABS1(x, pos) < best_val) {
                best = k;
                best_val = CABS1(x, pos);
            }
        }
        return (best + 1);
    }

    /* strided scalar scan; element 0 seeds the running minimum */
    {
        BLASLONG step = 2 * inc_x;

        best_val = CABS1(x, 0);
        pos = step;
        for (k = 1; k < n; k++, pos += step) {
            if (CABS1(x, pos) < best_val) {
                best = k;
                best_val = CABS1(x, pos);
            }
        }
        return (best + 1);
    }
}

View File

@@ -89,10 +89,10 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) {
".p2align 5 \n\t"
"1: \n\t"
"xvcmpgedp 2,44,45 \n\t "
"xvcmpgedp 3,46,47 \n\t "
"xvcmpgedp 4,48,49 \n\t "
"xvcmpgedp 5,50,51 \n\t"
"xvcmpgtdp 2,44,45 \n\t "
"xvcmpgtdp 3,46,47 \n\t "
"xvcmpgtdp 4,48,49 \n\t "
"xvcmpgtdp 5,50,51 \n\t"
"xxsel 32,40,41,2 \n\t"
"xxsel 0,44,45,2 \n\t"
@@ -103,8 +103,8 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) {
"xxsel 35,42,43,5 \n\t"
"xxsel 47,50,51,5 \n\t"
"xvcmpgedp 2,0, 1 \n\t"
"xvcmpgedp 3, 45,47 \n\t"
"xvcmpgtdp 2,0, 1 \n\t"
"xvcmpgtdp 3, 45,47 \n\t"
"addi %[ptr_tmp] ,%[ptr_tmp] , 128 \n\t"
@@ -125,7 +125,7 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) {
"lxvd2x 47, %[i48],%[ptr_tmp] \n\t"
//choose smaller from first and second part
"xvcmpgedp 4, 0,5 \n\t"
"xvcmpgtdp 4, 0,5 \n\t"
"xxsel 3, 0,5,4 \n\t"
"xxsel 33,32,34,4 \n\t"
@@ -139,7 +139,7 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) {
"lxvd2x 51,%[i112],%[ptr_tmp] \n\t"
//compare with previous to get vec_min_index(v6 | vs38 ) and vec_min_value (vs39)
"xvcmpgedp 2,39, 3 \n\t"
"xvcmpgtdp 2,39, 3 \n\t"
"xxsel 39,39,3,2 \n\t"
"xxsel 38,38,33,2 \n\t"
@@ -162,10 +162,10 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) {
//<-----------jump here from first load
"2: \n\t"
"xvcmpgedp 2,44,45 \n\t "
"xvcmpgedp 3,46,47 \n\t "
"xvcmpgedp 4,48,49 \n\t "
"xvcmpgedp 5,50,51 \n\t"
"xvcmpgtdp 2,44,45 \n\t "
"xvcmpgtdp 3,46,47 \n\t "
"xvcmpgtdp 4,48,49 \n\t "
"xvcmpgtdp 5,50,51 \n\t"
"xxsel 32,40,41,2 \n\t"
"xxsel 0,44,45,2 \n\t"
@@ -176,8 +176,8 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) {
"xxsel 35,42,43,5 \n\t"
"xxsel 47,50,51,5 \n\t"
"xvcmpgedp 2,0, 1 \n\t"
"xvcmpgedp 3, 45,47 \n\t"
"xvcmpgtdp 2,0, 1 \n\t"
"xvcmpgtdp 3, 45,47 \n\t"
"xxsel 32,32,33,2 \n\t"
"xxsel 0 ,0,1,2 \n\t"
"xxsel 34,34,35,3 \n\t"
@@ -194,7 +194,7 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) {
"lxvd2x 47, %[i48],%[ptr_tmp] \n\t"
//choose smaller from first and second part
"xvcmpgedp 4, 0,5 \n\t"
"xvcmpgtdp 4, 0,5 \n\t"
"xxsel 3, 0,5,4 \n\t"
"xxsel 33,32,34,4 \n\t"
@@ -210,7 +210,7 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) {
//compare with previous to get vec_min_index(v6 | vs38 ) and vec_min_value (vs39)
"xvcmpgedp 2,39, 3 \n\t"
"xvcmpgtdp 2,39, 3 \n\t"
"xxsel 39,39,3,2 \n\t"
"xxsel 38,38,33,2 \n\t"
@@ -238,10 +238,10 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) {
//==============================================================================
"xvcmpgedp 2,44,45 \n\t "
"xvcmpgedp 3,46,47 \n\t "
"xvcmpgedp 4,48,49 \n\t "
"xvcmpgedp 5,50,51 \n\t"
"xvcmpgtdp 2,44,45 \n\t "
"xvcmpgtdp 3,46,47 \n\t "
"xvcmpgtdp 4,48,49 \n\t "
"xvcmpgtdp 5,50,51 \n\t"
"xxsel 32,40,41,2 \n\t"
"xxsel 0,44,45,2 \n\t"
@@ -252,8 +252,8 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) {
"xxsel 35,42,43,5 \n\t"
"xxsel 47,50,51,5 \n\t"
"xvcmpgedp 2,0, 1 \n\t"
"xvcmpgedp 3, 45,47 \n\t"
"xvcmpgtdp 2,0, 1 \n\t"
"xvcmpgtdp 3, 45,47 \n\t"
"xxsel 32,32,33,2 \n\t"
@@ -264,14 +264,14 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) {
// for {second 8 elements } we have to add 8 to each so that it became {from 8 to 16}
"vaddudm 2,2,4 \n\t" // vs34=vs34 + vs36{8,8}
//choose smaller from first and second part
"xvcmpgedp 4, 0,5 \n\t"
"xvcmpgtdp 4, 0,5 \n\t"
"xxsel 3, 0,5,4 \n\t"
"xxsel 33,32,34,4 \n\t"
"vaddudm 1,1,5 \n\t" // get real index for first smaller
//compare with previous to get vec_min_index(v6 | vs38 ) and vec_min_value (vs39)
"xvcmpgedp 2,39, 3 \n\t"
"xvcmpgtdp 2,39, 3 \n\t"
"xxsel 39,39,3,2 \n\t"
"xxsel 38,38,33,2 \n\t"
@@ -284,7 +284,7 @@ static BLASLONG diamin_kernel_32(BLASLONG n, FLOAT *x, FLOAT *minf) {
//cr6 0 bit set if all true, cr6=4*6+bit_ind=24,0011at CR(BI)==1, at=10 hint that it occurs rarely
//0b001110=14
"bc 14,24, 3f \n\t"
"xvcmpgedp 4,39, 40 \n\t"
"xvcmpgtdp 4,39, 40 \n\t"
"xxsel 0,39,40,4 \n\t"
"xxsel 1,38,32,4 \n\t"
"stxsdx 0,0,%[ptr_minf] \n\t"

288
kernel/power/isamax.c Normal file
View File

@@ -0,0 +1,288 @@
/***************************************************************************
Copyright (c) 2013-2019, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "common.h"
#include <math.h>
#include <altivec.h>
#if defined(DOUBLE)
#define ABS fabs
#else
#define ABS fabsf
#endif
/**
* Find maximum index
* Warning: requirements n>0 and n % 64 == 0
* @param n
* @param x pointer to the vector
* @param maxf (out) maximum absolute value .( only for output )
* @return index
*/
static BLASLONG siamax_kernel_64(BLASLONG n, FLOAT *x, FLOAT *maxf) {
// Vectorized argmax of |x[i]|: each SIMD lane tracks the largest absolute
// value it has seen (quadruple_values) together with that value's absolute
// index (quadruple_indices); the four lanes are reduced to one winner at
// the end.  Caller guarantees n > 0 and n % 64 == 0.
BLASLONG index;
BLASLONG i=0;
// Per-lane position constants for one 16-float (four-vector) group.
register __vector unsigned int static_index0 = {0,1,2,3};
register __vector unsigned int temp0 = {4,4,4, 4}; //temporary vector register
register __vector unsigned int temp1= temp0<<1; //{8,8,8,8}
register __vector unsigned int static_index1=static_index0 +temp0;//{4,5,6,7};
register __vector unsigned int static_index2=static_index0 +temp1;//{8,9,10,11};
register __vector unsigned int static_index3=static_index1 +temp1; //{12,13,14,15};
temp0=vec_xor(temp0,temp0); // temp0 = {0,0,0,0}: running base offset of the current 32-element half-chunk
temp1=temp1 <<1 ; //{16,16,16,16}
register __vector unsigned int quadruple_indices=temp0;//{0,0,0,0}
register __vector float quadruple_values={0,0,0,0}; // 0 is a safe identity for max of absolute values
register __vector float * v_ptrx=(__vector float *)x;
for(; i<n; i+=64){
// --- first 32 elements of this 64-element chunk ---
//absolute temporary vectors
register __vector float v0=vec_abs(v_ptrx[0]);
register __vector float v1=vec_abs(v_ptrx[1]);
register __vector float v2=vec_abs(v_ptrx[2]);
register __vector float v3=vec_abs(v_ptrx[3]);
register __vector float v4=vec_abs(v_ptrx[4]);
register __vector float v5=vec_abs(v_ptrx[5]);
register __vector float v6=vec_abs(v_ptrx[6]);
register __vector float v7=vec_abs(v_ptrx[7]);
//cmp quadruple pairs
register __vector bool int r1=vec_cmpgt(v1,v0);
register __vector bool int r2=vec_cmpgt(v3,v2);
register __vector bool int r3=vec_cmpgt(v5,v4);
register __vector bool int r4=vec_cmpgt(v7,v6);
//select: keep the larger value of each pair and its lane index
register __vector unsigned int ind0_first= vec_sel(static_index0,static_index1,r1);
register __vector float vf0= vec_sel(v0,v1,r1);
register __vector unsigned int ind1= vec_sel(static_index2,static_index3,r2);
register __vector float vf1= vec_sel(v2,v3,r2);
register __vector unsigned int ind2= vec_sel(static_index0,static_index1,r3);
v0=vec_sel(v4,v5,r3);
register __vector unsigned int ind3= vec_sel(static_index2,static_index3,r4);
v1=vec_sel(v6,v7,r4);
// cmp selected
r1=vec_cmpgt(vf1,vf0);
r2=vec_cmpgt(v1,v0);
v_ptrx+=8;
//select from above
ind0_first= vec_sel(ind0_first,ind1,r1);
vf0= vec_sel(vf0,vf1,r1) ;
ind2= vec_sel(ind2,ind3,r2);
vf1= vec_sel(v0,v1,r2);
//second indices actually should be within [16,31] so ind2+16
ind2 +=temp1;
//final cmp and select index and value for the first 32 values
r1=vec_cmpgt(vf1,vf0);
ind0_first = vec_sel(ind0_first,ind2,r1);
vf0= vec_sel(vf0,vf1,r1);
ind0_first+=temp0; //get absolute index
temp0+=temp1;
temp0+=temp1; //temp0+32
// --- second 32 elements of this chunk, same tournament ---
// absolute temporary vectors
v0=vec_abs(v_ptrx[0]);
v1=vec_abs(v_ptrx[1]);
v2=vec_abs(v_ptrx[2]);
v3=vec_abs(v_ptrx[3]);
v4=vec_abs(v_ptrx[4]);
v5=vec_abs(v_ptrx[5]);
v6=vec_abs(v_ptrx[6]);
v7=vec_abs(v_ptrx[7]);
//cmp quadruple pairs
r1=vec_cmpgt(v1,v0);
r2=vec_cmpgt(v3,v2);
r3=vec_cmpgt(v5,v4);
r4=vec_cmpgt(v7,v6);
//select
register __vector unsigned int ind0_second= vec_sel(static_index0,static_index1,r1);
register __vector float vv0= vec_sel(v0,v1,r1);
ind1= vec_sel(static_index2,static_index3,r2);
register __vector float vv1= vec_sel(v2,v3,r2);
ind2= vec_sel(static_index0,static_index1,r3);
v0=vec_sel(v4,v5,r3);
ind3= vec_sel(static_index2,static_index3,r4);
v1=vec_sel(v6,v7,r4);
// cmp selected
r1=vec_cmpgt(vv1,vv0);
r2=vec_cmpgt(v1,v0);
v_ptrx+=8;
//select from above
ind0_second= vec_sel(ind0_second,ind1,r1);
vv0= vec_sel(vv0,vv1,r1) ;
ind2= vec_sel(ind2,ind3,r2);
vv1= vec_sel(v0,v1,r2) ;
//second indices actually should be within [16,31] so ind2+16
ind2 +=temp1;
//final cmp and select index and value for the second 32 values
r1=vec_cmpgt(vv1,vv0);
ind0_second = vec_sel(ind0_second,ind2,r1);
vv0= vec_sel(vv0,vv1,r1);
ind0_second+=temp0; //get absolute index
//find final quadruple from 64 elements
r2=vec_cmpgt(vv0,vf0);
ind2 = vec_sel( ind0_first,ind0_second,r2);
vv0= vec_sel(vf0,vv0,r2);
//compare with old quadruple and update the running per-lane best
r3=vec_cmpgt(vv0,quadruple_values);
quadruple_indices = vec_sel( quadruple_indices,ind2,r3);
quadruple_values= vec_sel(quadruple_values,vv0,r3);
temp0+=temp1;
temp0+=temp1; //temp0+32
}
// Scalar reduction of the four lane winners.  When two values tie
// exactly, the smaller index wins (BLAS convention: first occurrence);
// otherwise the index of the larger value is taken.
float a1,a2,a3,a4;
unsigned int i1,i2,i3,i4;
a1=vec_extract(quadruple_values,0);
a2=vec_extract(quadruple_values,1);
a3=vec_extract(quadruple_values,2);
a4=vec_extract(quadruple_values,3);
i1=vec_extract(quadruple_indices,0);
i2=vec_extract(quadruple_indices,1);
i3=vec_extract(quadruple_indices,2);
i4=vec_extract(quadruple_indices,3);
if(a1==a2){
index=i1>i2?i2:i1;
}else if(a2>a1){
index=i2;
a1=a2;
}else{
index= i1;
}
if(a4==a3){
i1=i3>i4?i4:i3;
}else if(a4>a3){
i1=i4;
a3=a4;
}else{
i1= i3;
}
if(a1==a3){
index=i1>index?index:i1;
*maxf=a1;
}else if(a3>a1){
index=i1;
*maxf=a3;
}else{
*maxf=a1;
}
return index;
}
/* Return the 1-based index of the element with the largest absolute
 * value.  Returns 0 for non-positive n or inc_x. */
BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) {
    BLASLONG max = 0;
    FLOAT maxf = 0.0;

    if (n <= 0 || inc_x <= 0) return (max);

    if (inc_x == 1) {
        /* Contiguous: vector kernel covers the largest multiple of 64
         * elements, scalar sweep handles the remainder. */
        BLASLONG pos = 0;
        BLASLONG n1 = n & -64;
        if (n1 > 0) {
            max = siamax_kernel_64(n1, x, &maxf);
            pos = n1;
        }
        for (; pos < n; pos++) {
            if (ABS(x[pos]) > maxf) {
                max = pos;
                maxf = ABS(x[pos]);
            }
        }
        return (max + 1);
    } else {
        /* Strided: unrolled four elements per pass, then a scalar tail. */
        BLASLONG off = 0;
        BLASLONG cnt = 0;
        BLASLONG n1 = n & -4;
        while (cnt < n1) {
            BLASLONG k;
            for (k = 0; k < 4; k++) {
                if (ABS(x[off + k * inc_x]) > maxf) {
                    max = cnt + k;
                    maxf = ABS(x[off + k * inc_x]);
                }
            }
            off += 4 * inc_x;
            cnt += 4;
        }
        for (; cnt < n; cnt++) {
            if (ABS(x[off]) > maxf) {
                max = cnt;
                maxf = ABS(x[off]);
            }
            off += inc_x;
        }
        return (max + 1);
    }
}

288
kernel/power/isamin.c Normal file
View File

@@ -0,0 +1,288 @@
/***************************************************************************
Copyright (c) 2013-2019, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "common.h"
#include <math.h>
#include <altivec.h>
#if defined(DOUBLE)
#define ABS fabs
#else
#define ABS fabsf
#endif
/**
* Find minimum index
* Warning: requirements n>0 and n % 64 == 0
* @param n
* @param x pointer to the vector
* @param minf (out) minimum absolute value .( only for output )
* @return index
*/
static BLASLONG siamin_kernel_64(BLASLONG n, FLOAT *x, FLOAT *minf) {
// Vectorized argmin of |x[i]|: each SIMD lane tracks the smallest absolute
// value it has seen (quadruple_values) together with that value's absolute
// index (quadruple_indices); the four lanes are reduced to one winner at
// the end.  Caller guarantees n > 0 and n % 64 == 0.
BLASLONG index;
BLASLONG i=0;
// Per-lane position constants for one 16-float (four-vector) group.
register __vector unsigned int static_index0 = {0,1,2,3};
register __vector unsigned int temp0 = {4,4,4, 4}; //temporary vector register
register __vector unsigned int temp1= temp0<<1; //{8,8,8,8}
register __vector unsigned int static_index1=static_index0 +temp0;//{4,5,6,7};
register __vector unsigned int static_index2=static_index0 +temp1;//{8,9,10,11};
register __vector unsigned int static_index3=static_index1 +temp1; //{12,13,14,15};
temp0=vec_xor(temp0,temp0); // temp0 = {0,0,0,0}: running base offset of the current 32-element half-chunk
temp1=temp1 <<1 ; //{16,16,16,16}
// Unlike the max kernel, the running minimum must be seeded from real
// data (zero would be an absorbing element), so lane bests start as the
// first four |values| with indices {0,1,2,3}.
register __vector unsigned int quadruple_indices=static_index0;//{0,1,2,3};
register __vector float * v_ptrx=(__vector float *)x;
register __vector float quadruple_values=vec_abs(v_ptrx[0]);
for(; i<n; i+=64){
// --- first 32 elements of this 64-element chunk ---
//absolute temporary vectors
register __vector float v0=vec_abs(v_ptrx[0]);
register __vector float v1=vec_abs(v_ptrx[1]);
register __vector float v2=vec_abs(v_ptrx[2]);
register __vector float v3=vec_abs(v_ptrx[3]);
register __vector float v4=vec_abs(v_ptrx[4]);
register __vector float v5=vec_abs(v_ptrx[5]);
register __vector float v6=vec_abs(v_ptrx[6]);
register __vector float v7=vec_abs(v_ptrx[7]);
//cmp quadruple pairs (comparison direction reversed vs. the max kernel)
register __vector bool int r1=vec_cmpgt(v0,v1);
register __vector bool int r2=vec_cmpgt(v2,v3);
register __vector bool int r3=vec_cmpgt(v4,v5);
register __vector bool int r4=vec_cmpgt(v6,v7);
//select: keep the smaller value of each pair and its lane index
register __vector unsigned int ind0_first= vec_sel(static_index0,static_index1,r1);
register __vector float vf0= vec_sel(v0,v1,r1);
register __vector unsigned int ind1= vec_sel(static_index2,static_index3,r2);
register __vector float vf1= vec_sel(v2,v3,r2);
register __vector unsigned int ind2= vec_sel(static_index0,static_index1,r3);
v0=vec_sel(v4,v5,r3);
register __vector unsigned int ind3= vec_sel(static_index2,static_index3,r4);
v1=vec_sel(v6,v7,r4);
// cmp selected
r1=vec_cmpgt(vf0,vf1);
r2=vec_cmpgt(v0,v1);
v_ptrx+=8;
//select from above
ind0_first= vec_sel(ind0_first,ind1,r1);
vf0= vec_sel(vf0,vf1,r1) ;
ind2= vec_sel(ind2,ind3,r2);
vf1= vec_sel(v0,v1,r2);
//second indices actually should be within [16,31] so ind2+16
ind2 +=temp1;
//final cmp and select index and value for the first 32 values
r1=vec_cmpgt(vf0,vf1);
ind0_first = vec_sel(ind0_first,ind2,r1);
vf0= vec_sel(vf0,vf1,r1);
ind0_first+=temp0; //get absolute index
temp0+=temp1;
temp0+=temp1; //temp0+32
// --- second 32 elements of this chunk, same tournament ---
// absolute temporary vectors
v0=vec_abs(v_ptrx[0]);
v1=vec_abs(v_ptrx[1]);
v2=vec_abs(v_ptrx[2]);
v3=vec_abs(v_ptrx[3]);
v4=vec_abs(v_ptrx[4]);
v5=vec_abs(v_ptrx[5]);
v6=vec_abs(v_ptrx[6]);
v7=vec_abs(v_ptrx[7]);
//cmp quadruple pairs
r1=vec_cmpgt(v0,v1);
r2=vec_cmpgt(v2,v3);
r3=vec_cmpgt(v4,v5);
r4=vec_cmpgt(v6,v7);
//select
register __vector unsigned int ind0_second= vec_sel(static_index0,static_index1,r1);
register __vector float vv0= vec_sel(v0,v1,r1);
ind1= vec_sel(static_index2,static_index3,r2);
register __vector float vv1= vec_sel(v2,v3,r2);
ind2= vec_sel(static_index0,static_index1,r3);
v0=vec_sel(v4,v5,r3);
ind3= vec_sel(static_index2,static_index3,r4);
v1=vec_sel(v6,v7,r4);
// cmp selected
r1=vec_cmpgt(vv0,vv1);
r2=vec_cmpgt(v0,v1);
v_ptrx+=8;
//select from above
ind0_second= vec_sel(ind0_second,ind1,r1);
vv0= vec_sel(vv0,vv1,r1) ;
ind2= vec_sel(ind2,ind3,r2);
vv1= vec_sel(v0,v1,r2) ;
//second indices actually should be within [16,31] so ind2+16
ind2 +=temp1;
//final cmp and select index and value for the second 32 values
r1=vec_cmpgt(vv0,vv1);
ind0_second = vec_sel(ind0_second,ind2,r1);
vv0= vec_sel(vv0,vv1,r1);
ind0_second+=temp0; //get absolute index
//find final quadruple from 64 elements
r2=vec_cmpgt(vf0,vv0);
ind2 = vec_sel( ind0_first,ind0_second,r2);
vv0= vec_sel(vf0,vv0,r2);
//compare with old quadruple and update the running per-lane best
r3=vec_cmpgt( quadruple_values,vv0);
quadruple_indices = vec_sel( quadruple_indices,ind2,r3);
quadruple_values= vec_sel(quadruple_values,vv0,r3);
temp0+=temp1;
temp0+=temp1; //temp0+32
}
// Scalar reduction of the four lane winners.  When two values tie
// exactly, the smaller index wins (BLAS convention: first occurrence);
// otherwise the index of the smaller value is taken.
float a1,a2,a3,a4;
unsigned int i1,i2,i3,i4;
a1=vec_extract(quadruple_values,0);
a2=vec_extract(quadruple_values,1);
a3=vec_extract(quadruple_values,2);
a4=vec_extract(quadruple_values,3);
i1=vec_extract(quadruple_indices,0);
i2=vec_extract(quadruple_indices,1);
i3=vec_extract(quadruple_indices,2);
i4=vec_extract(quadruple_indices,3);
if(a1==a2){
index=i1>i2?i2:i1;
}else if(a2<a1){
index=i2;
a1=a2;
}else{
index= i1;
}
if(a4==a3){
i1=i3>i4?i4:i3;
}else if(a4<a3){
i1=i4;
a3=a4;
}else{
i1= i3;
}
if(a1==a3){
index=i1>index?index:i1;
*minf=a1;
}else if(a3<a1){
index=i1;
*minf=a3;
}else{
*minf=a1;
}
return index;
}
/* Return the 1-based index of the element with the smallest absolute
 * value.  Returns 0 for non-positive n or inc_x. */
BLASLONG CNAME(BLASLONG n, FLOAT *x, BLASLONG inc_x) {
    BLASLONG min = 0;
    FLOAT minf = 0.0;

    if (n <= 0 || inc_x <= 0) return (min);

    /* Element 0 seeds the running minimum; its index stays 0. */
    minf = ABS(x[0]);

    if (inc_x == 1) {
        /* Contiguous: vector kernel covers the largest multiple of 64
         * elements, scalar sweep handles the remainder. */
        BLASLONG pos = 0;
        BLASLONG n1 = n & -64;
        if (n1 > 0) {
            min = siamin_kernel_64(n1, x, &minf);
            pos = n1;
        }
        for (; pos < n; pos++) {
            if (ABS(x[pos]) < minf) {
                min = pos;
                minf = ABS(x[pos]);
            }
        }
        return (min + 1);
    } else {
        /* Strided: unrolled four elements per pass, then a scalar tail. */
        BLASLONG off = 0;
        BLASLONG cnt = 0;
        BLASLONG n1 = n & -4;
        while (cnt < n1) {
            BLASLONG k;
            for (k = 0; k < 4; k++) {
                if (ABS(x[off + k * inc_x]) < minf) {
                    min = cnt + k;
                    minf = ABS(x[off + k * inc_x]);
                }
            }
            off += 4 * inc_x;
            cnt += 4;
        }
        for (; cnt < n; cnt++) {
            if (ABS(x[off]) < minf) {
                min = cnt;
                minf = ABS(x[off]);
            }
            off += inc_x;
        }
        return (min + 1);
    }
}

View File

@@ -101,8 +101,8 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) {
"xvcmpgedp 50,46,47 \n\t "
"xvcmpgedp 51,48,49 \n\t "
"xvcmpgtdp 50,46,47 \n\t "
"xvcmpgtdp 51,48,49 \n\t "
"addi %[ptr_tmp] ,%[ptr_tmp] , 128 \n\t"
@@ -114,7 +114,7 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) {
"lxvd2x 44, 0,%[ptr_tmp] \n\t"
"lxvd2x 45, %[i16],%[ptr_tmp] \n\t"
"xvcmpgedp 2,0,1 \n\t "
"xvcmpgtdp 2,0,1 \n\t "
"lxvd2x 46, %[i32],%[ptr_tmp] \n\t"
"lxvd2x 47, %[i48],%[ptr_tmp] \n\t"
@@ -126,7 +126,7 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) {
//cmp with previous
"xvcmpgedp 4,39,3 \n\t "
"xvcmpgtdp 4,39,3 \n\t "
"vaddudm 5,5,4 \n\t"
"lxvd2x 48, %[i64],%[ptr_tmp] \n\t"
@@ -166,8 +166,8 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) {
"xvadddp 48, 4,5 \n\t"
"xvadddp 49, 44,45 \n\t"
"xvcmpgedp 50,46,47 \n\t "
"xvcmpgedp 51,48,49 \n\t "
"xvcmpgtdp 50,46,47 \n\t "
"xvcmpgtdp 51,48,49 \n\t "
"addi %[ptr_tmp] ,%[ptr_tmp] , 128 \n\t"
@@ -179,7 +179,7 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) {
"lxvd2x 44, 0,%[ptr_tmp] \n\t"
"lxvd2x 45, %[i16],%[ptr_tmp] \n\t"
"xvcmpgedp 2,0,1 \n\t "
"xvcmpgtdp 2,0,1 \n\t "
"lxvd2x 46, %[i32],%[ptr_tmp] \n\t"
"lxvd2x 47, %[i48],%[ptr_tmp] \n\t"
@@ -191,7 +191,7 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) {
//cmp with previous
"xvcmpgedp 4,39,3 \n\t "
"xvcmpgtdp 4,39,3 \n\t "
"vaddudm 5,5,4 \n\t"
"lxvd2x 48, %[i64],%[ptr_tmp] \n\t"
@@ -235,15 +235,15 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) {
"xvcmpgedp 50,46,47 \n\t "
"xvcmpgedp 51,48,49 \n\t "
"xvcmpgtdp 50,46,47 \n\t "
"xvcmpgtdp 51,48,49 \n\t "
"xxsel 32,40,41,50 \n\t"
"xxsel 0,46,47,50 \n\t"
"xxsel 33,42,43,51 \n\t"
"xxsel 1,48,49,51 \n\t"
"xvcmpgedp 2,0,1 \n\t "
"xvcmpgtdp 2,0,1 \n\t "
"xxsel 32,32,33,2 \n\t"
"xxsel 3,0,1,2 \n\t"
@@ -252,7 +252,7 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) {
"addi %[ptr_tmp] ,%[ptr_tmp] , 128 \n\t"
//cmp with previous
"xvcmpgedp 4,39,3 \n\t "
"xvcmpgtdp 4,39,3 \n\t "
"vaddudm 5,5,4 \n\t"
"xxsel 38,38,32,4 \n\t"
"xxsel 39,39,3,4 \n\t"
@@ -267,7 +267,7 @@ static BLASLONG ziamin_kernel_16_TUNED(BLASLONG n, FLOAT *x, FLOAT *minf) {
//cr6 0 bit set if all true, cr6=4*6+bit_ind=24,0011at CR(BI)==1, at=10 hint that it occurs rarely
//0b001110=14
"bc 14,24, 3f \n\t"
"xvcmpgedp 4,39, 40 \n\t"
"xvcmpgtdp 4,39, 40 \n\t"
"xxsel 0,39,40,4 \n\t"
"xxsel 1,38,32,4 \n\t"
"stxsdx 0,0,%[ptr_minf] \n\t"

129
kernel/power/saxpy.c Normal file
View File

@@ -0,0 +1,129 @@
/***************************************************************************
Copyright (c) 2013-2018, The OpenBLAS Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the OpenBLAS project nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include "common.h"
#ifndef HAVE_KERNEL_8
#include <altivec.h>
/* y += alpha * x for contiguous single-precision vectors.
 * Caller guarantees n is a positive multiple of 64; each outer iteration
 * consumes 16 four-float vectors (64 scalars). */
static void saxpy_kernel_64(BLASLONG n, FLOAT *x, FLOAT *y, FLOAT alpha)
{
    __vector float scale = {alpha, alpha, alpha, alpha};
    __vector float *vx = (__vector float *)x;
    __vector float *vy = (__vector float *)y;
    BLASLONG nv = n / 4; /* total number of 4-float vectors */
    BLASLONG i, k;

    for (i = 0; i < nv; i += 16) {
        /* 16-way unroll written as an inner loop the compiler unrolls. */
        for (k = 0; k < 16; k++) {
            vy[i + k] += scale * vx[i + k];
        }
    }
}
#endif
/* SAXPY: y := da * x + y.  dummy* parameters exist only to match the
 * generic BLAS kernel signature.  Always returns 0. */
int CNAME(BLASLONG n, BLASLONG dummy0, BLASLONG dummy1, FLOAT da, FLOAT *x, BLASLONG inc_x, FLOAT *y, BLASLONG inc_y, FLOAT *dummy, BLASLONG dummy2)
{
    BLASLONG idx;

    if (n <= 0) return (0);

    if ((inc_x == 1) && (inc_y == 1)) {
        /* Contiguous: vector kernel on the largest multiple of 64,
         * scalar loop over the remainder. */
        BLASLONG n1 = n & -64;
        if (n1)
            saxpy_kernel_64(n1, x, y, da);
        for (idx = n1; idx < n; idx++)
            y[idx] += da * x[idx];
        return (0);
    }

    /* Strided: four elements per pass.  All four products are computed
     * before any store, preserving the original load/store ordering. */
    BLASLONG ix = 0, iy = 0;
    BLASLONG n1 = n & -4;
    for (idx = 0; idx < n1; idx += 4) {
        FLOAT p0 = da * x[ix];
        FLOAT p1 = da * x[ix + inc_x];
        FLOAT p2 = da * x[ix + 2 * inc_x];
        FLOAT p3 = da * x[ix + 3 * inc_x];
        y[iy]             += p0;
        y[iy + inc_y]     += p1;
        y[iy + 2 * inc_y] += p2;
        y[iy + 3 * inc_y] += p3;
        ix += inc_x * 4;
        iy += inc_y * 4;
    }
    for (; idx < n; idx++) {
        y[iy] += da * x[ix];
        ix += inc_x;
        iy += inc_y;
    }
    return (0);
}

View File

@@ -294,6 +294,8 @@ gotoblas_t TABLE_NAME = {
chemm_outcopyTS, chemm_oltcopyTS,
0, 0, 0,
#if defined(USE_GEMM3M)
#ifdef CGEMM3M_DEFAULT_UNROLL_M
CGEMM3M_DEFAULT_UNROLL_M, CGEMM3M_DEFAULT_UNROLL_N, MAX(CGEMM3M_DEFAULT_UNROLL_M, CGEMM3M_DEFAULT_UNROLL_N),
#else
@@ -324,6 +326,33 @@ gotoblas_t TABLE_NAME = {
chemm3m_oucopybTS, chemm3m_olcopybTS,
chemm3m_oucopyrTS, chemm3m_olcopyrTS,
chemm3m_oucopyiTS, chemm3m_olcopyiTS,
#else
0, 0, 0,
NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
#endif
#ifndef NO_LAPACK
cneg_tcopyTS, claswp_ncopyTS,
@@ -400,6 +429,7 @@ gotoblas_t TABLE_NAME = {
zhemm_outcopyTS, zhemm_oltcopyTS,
0, 0, 0,
#if defined(USE_GEMM3M)
#ifdef ZGEMM3M_DEFAULT_UNROLL_M
ZGEMM3M_DEFAULT_UNROLL_M, ZGEMM3M_DEFAULT_UNROLL_N, MAX(ZGEMM3M_DEFAULT_UNROLL_M, ZGEMM3M_DEFAULT_UNROLL_N),
#else
@@ -430,6 +460,33 @@ gotoblas_t TABLE_NAME = {
zhemm3m_oucopybTS, zhemm3m_olcopybTS,
zhemm3m_oucopyrTS, zhemm3m_olcopyrTS,
zhemm3m_oucopyiTS, zhemm3m_olcopyiTS,
#else
0, 0, 0,
NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
#endif
#ifndef NO_LAPACK
zneg_tcopyTS, zlaswp_ncopyTS,
@@ -503,6 +560,7 @@ gotoblas_t TABLE_NAME = {
xhemm_outcopyTS, xhemm_oltcopyTS,
0, 0, 0,
#if defined(USE_GEMM3M)
QGEMM_DEFAULT_UNROLL_M, QGEMM_DEFAULT_UNROLL_N, MAX(QGEMM_DEFAULT_UNROLL_M, QGEMM_DEFAULT_UNROLL_N),
xgemm3m_kernelTS,
@@ -528,6 +586,33 @@ gotoblas_t TABLE_NAME = {
xhemm3m_oucopybTS, xhemm3m_olcopybTS,
xhemm3m_oucopyrTS, xhemm3m_olcopyrTS,
xhemm3m_oucopyiTS, xhemm3m_olcopyiTS,
#else
0, 0, 0,
NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
NULL, NULL,
#endif
#ifndef NO_LAPACK
xneg_tcopyTS, xlaswp_ncopyTS,
@@ -561,6 +646,78 @@ gotoblas_t TABLE_NAME = {
};
#if defined(ARCH_ARM64)
static void init_parameter(void) {
TABLE_NAME.sgemm_p = SGEMM_DEFAULT_P;
TABLE_NAME.dgemm_p = DGEMM_DEFAULT_P;
TABLE_NAME.cgemm_p = CGEMM_DEFAULT_P;
TABLE_NAME.zgemm_p = ZGEMM_DEFAULT_P;
TABLE_NAME.sgemm_q = SGEMM_DEFAULT_Q;
TABLE_NAME.dgemm_q = DGEMM_DEFAULT_Q;
TABLE_NAME.cgemm_q = CGEMM_DEFAULT_Q;
TABLE_NAME.zgemm_q = ZGEMM_DEFAULT_Q;
TABLE_NAME.sgemm_r = SGEMM_DEFAULT_R;
TABLE_NAME.dgemm_r = DGEMM_DEFAULT_R;
TABLE_NAME.cgemm_r = CGEMM_DEFAULT_R;
TABLE_NAME.zgemm_r = ZGEMM_DEFAULT_R;
#ifdef EXPRECISION
TABLE_NAME.qgemm_p = QGEMM_DEFAULT_P;
TABLE_NAME.xgemm_p = XGEMM_DEFAULT_P;
TABLE_NAME.qgemm_q = QGEMM_DEFAULT_Q;
TABLE_NAME.xgemm_q = XGEMM_DEFAULT_Q;
TABLE_NAME.qgemm_r = QGEMM_DEFAULT_R;
TABLE_NAME.xgemm_r = XGEMM_DEFAULT_R;
#endif
#if defined(USE_GEMM3M)
#ifdef CGEMM3M_DEFAULT_P
TABLE_NAME.cgemm3m_p = CGEMM3M_DEFAULT_P;
#else
TABLE_NAME.cgemm3m_p = TABLE_NAME.sgemm_p;
#endif
#ifdef ZGEMM3M_DEFAULT_P
TABLE_NAME.zgemm3m_p = ZGEMM3M_DEFAULT_P;
#else
TABLE_NAME.zgemm3m_p = TABLE_NAME.dgemm_p;
#endif
#ifdef CGEMM3M_DEFAULT_Q
TABLE_NAME.cgemm3m_q = CGEMM3M_DEFAULT_Q;
#else
TABLE_NAME.cgemm3m_q = TABLE_NAME.sgemm_q;
#endif
#ifdef ZGEMM3M_DEFAULT_Q
TABLE_NAME.zgemm3m_q = ZGEMM3M_DEFAULT_Q;
#else
TABLE_NAME.zgemm3m_q = TABLE_NAME.dgemm_q;
#endif
#ifdef CGEMM3M_DEFAULT_R
TABLE_NAME.cgemm3m_r = CGEMM3M_DEFAULT_R;
#else
TABLE_NAME.cgemm3m_r = TABLE_NAME.sgemm_r;
#endif
#ifdef ZGEMM3M_DEFAULT_R
TABLE_NAME.zgemm3m_r = ZGEMM3M_DEFAULT_R;
#else
TABLE_NAME.zgemm3m_r = TABLE_NAME.dgemm_r;
#endif
#ifdef EXPRECISION
TABLE_NAME.xgemm3m_p = TABLE_NAME.qgemm_p;
TABLE_NAME.xgemm3m_q = TABLE_NAME.qgemm_q;
TABLE_NAME.xgemm3m_r = TABLE_NAME.qgemm_r;
#endif
#endif
}
#else // defined(ARCH_ARM64)
#ifdef ARCH_X86
static int get_l2_size_old(void){
int i, eax, ebx, ecx, edx, cpuid_level;
@@ -1146,3 +1303,4 @@ static void init_parameter(void) {
}
#endif //defined(ARCH_ARM64)

View File

@@ -33,9 +33,10 @@ ZAXPYKERNEL = zaxpy.c
STRMMKERNEL = sgemm_kernel_16x4_haswell.S
SGEMMKERNEL = sgemm_kernel_16x4_haswell.S
SGEMM_BETA = sgemm_beta_skylakex.c
SGEMMINCOPY = ../generic/gemm_ncopy_16.c
SGEMMITCOPY = ../generic/gemm_tcopy_16.c
SGEMMONCOPY = ../generic/gemm_ncopy_4.c
SGEMMONCOPY = sgemm_ncopy_4_skylakex.c
SGEMMOTCOPY = ../generic/gemm_tcopy_4.c
SGEMMINCOPYOBJ = sgemm_incopy$(TSUFFIX).$(SUFFIX)
SGEMMITCOPYOBJ = sgemm_itcopy$(TSUFFIX).$(SUFFIX)
@@ -44,9 +45,10 @@ SGEMMOTCOPYOBJ = sgemm_otcopy$(TSUFFIX).$(SUFFIX)
DTRMMKERNEL = dtrmm_kernel_4x8_haswell.c
DGEMMKERNEL = dgemm_kernel_4x8_haswell.S
DGEMM_BETA = dgemm_beta_skylakex.c
DGEMMINCOPY = ../generic/gemm_ncopy_4.c
DGEMMITCOPY = ../generic/gemm_tcopy_4.c
DGEMMONCOPY = ../generic/gemm_ncopy_8.c
DGEMMONCOPY = dgemm_ncopy_8_skylakex.c
DGEMMOTCOPY = ../generic/gemm_tcopy_8.c
DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX)
DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX)

View File

@@ -1,19 +1,18 @@
include $(KERNELDIR)/KERNEL.HASWELL
SGEMMKERNEL = sgemm_kernel_16x4_skylakex.S
SGEMMKERNEL = sgemm_kernel_16x4_skylakex.c
SGEMMINCOPY = ../generic/gemm_ncopy_16.c
SGEMMITCOPY = sgemm_tcopy_16_skylakex.c
SGEMMONCOPY = sgemm_ncopy_4_skylakex.c
SGEMMOTCOPY = ../generic/gemm_tcopy_4.c
#DTRMMKERNEL = ../generic/trmmkernel_16x2.c
#DGEMMKERNEL = dgemm_kernel_16x2_skylakex.S
#DGEMMINCOPY = ../generic/gemm_ncopy_16.c
#DGEMMITCOPY = ../generic/gemm_tcopy_16.c
#DGEMMONCOPY = ../generic/gemm_ncopy_2.c
#DGEMMOTCOPY = ../generic/gemm_tcopy_2.c
#DGEMMINCOPYOBJ = dgemm_incopy$(TSUFFIX).$(SUFFIX)
#DGEMMITCOPYOBJ = dgemm_itcopy$(TSUFFIX).$(SUFFIX)
#DGEMMONCOPYOBJ = dgemm_oncopy$(TSUFFIX).$(SUFFIX)
#DGEMMOTCOPYOBJ = dgemm_otcopy$(TSUFFIX).$(SUFFIX)
DGEMMKERNEL = dgemm_kernel_4x8_skylakex.c
DGEMMINCOPY = dgemm_ncopy_8_skylakex.c
DGEMMITCOPY = dgemm_tcopy_8_skylakex.c
DGEMMONCOPY = dgemm_ncopy_8_skylakex.c
DGEMMOTCOPY = dgemm_tcopy_8_skylakex.c
SGEMM_BETA = ../generic/gemm_beta.c
DGEMM_BETA = ../generic/gemm_beta.c
SGEMM_BETA = sgemm_beta_skylakex.c
DGEMM_BETA = dgemm_beta_skylakex.c

View File

@@ -114,9 +114,9 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"+r" (i), // 0
"+r" (n) // 1
:
"r" (x), // 2
"r" (y), // 3
"r" (alpha), // 4
@@ -180,10 +180,10 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
"jnz 1b \n\t"
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
:
"+r" (i), // 0
"+r" (n) // 1
:
"r" (x), // 2
"r" (y), // 3
"r" (alpha), // 4

View File

@@ -112,9 +112,9 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"+r" (i), // 0
"+r" (n) // 1
:
"r" (x), // 2
"r" (y), // 3
"r" (alpha), // 4

View File

@@ -95,10 +95,10 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
"jnz 1b \n\t"
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
:
"+r" (i), // 0
"+r" (n) // 1
:
"r" (x), // 2
"r" (y), // 3
"r" (alpha), // 4

View File

@@ -113,10 +113,10 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
"jnz 1b \n\t"
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
:
"+r" (i), // 0
"+r" (n) // 1
:
"r" (x), // 2
"r" (y), // 3
"r" (alpha), // 4
@@ -181,9 +181,9 @@ static void caxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"+r" (i), // 0
"+r" (n) // 1
:
"r" (x), // 2
"r" (y), // 3
"r" (alpha), // 4

View File

@@ -97,9 +97,9 @@ static void cdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"+r" (i), // 0
"+r" (n) // 1
:
"r" (x), // 2
"r" (y), // 3
"r" (dot) // 4
@@ -175,10 +175,10 @@ static void cdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
"vmovups %%xmm4, 16(%4) \n\t"
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
:
"+r" (i), // 0
"+r" (n) // 1
:
"r" (x), // 2
"r" (y), // 3
"r" (dot) // 4

View File

@@ -98,9 +98,9 @@ static void cdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"+r" (i), // 0
"+r" (n) // 1
:
"r" (x), // 2
"r" (y), // 3
"r" (dot) // 4

View File

@@ -105,10 +105,10 @@ static void cdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
"vmovups %%xmm4, 16(%4) \n\t"
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
:
"+r" (i), // 0
"+r" (n) // 1
:
"r" (x), // 2
"r" (y), // 3
"r" (dot) // 4

View File

@@ -97,9 +97,9 @@ static void cdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"+r" (i), // 0
"+r" (n) // 1
:
"r" (x), // 2
"r" (y), // 3
"r" (dot) // 4
@@ -175,10 +175,10 @@ static void cdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
"vmovups %%xmm4, 16(%4) \n\t"
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
:
"+r" (i), // 0
"+r" (n) // 1
:
"r" (x), // 2
"r" (y), // 3
"r" (dot) // 4

View File

@@ -116,11 +116,11 @@ static void cscal_kernel_16( BLASLONG n, FLOAT *alpha, FLOAT *x)
"vzeroupper \n\t"
:
:
"r" (n), // 0
"r" (x), // 1
"+r" (n), // 0
"+r" (x) // 1
:
"r" (alpha) // 2
: "cc", //"%0", "%1",
: "cc",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
@@ -208,11 +208,11 @@ static void cscal_kernel_16_zero_r( BLASLONG n, FLOAT *alpha, FLOAT *x)
"vzeroupper \n\t"
:
:
"r" (n), // 0
"r" (x), // 1
"+r" (n), // 0
"+r" (x) // 1
:
"r" (alpha) // 2
: "cc", //"%0", "%1",
: "cc",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
@@ -285,11 +285,11 @@ static void cscal_kernel_16_zero_i( BLASLONG n, FLOAT *alpha, FLOAT *x)
"vzeroupper \n\t"
:
:
"r" (n), // 0
"r" (x), // 1
"+r" (n), // 0
"+r" (x) // 1
:
"r" (alpha) // 2
: "cc", //"%0", "%1",
: "cc",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
@@ -330,11 +330,11 @@ static void cscal_kernel_16_zero( BLASLONG n, FLOAT *alpha, FLOAT *x)
"vzeroupper \n\t"
:
:
"r" (n), // 0
"r" (x), // 1
"+r" (n), // 0
"+r" (x) // 1
:
"r" (alpha) // 2
: "cc", //"%0", "%1",
: "cc",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",

View File

@@ -116,11 +116,11 @@ static void cscal_kernel_16( BLASLONG n, FLOAT *alpha, FLOAT *x)
"vzeroupper \n\t"
:
:
"r" (n), // 0
"r" (x), // 1
"+r" (n), // 0
"+r" (x) // 1
:
"r" (alpha) // 2
: "cc", //"0", "1",
: "cc",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
@@ -208,9 +208,9 @@ static void cscal_kernel_16_zero_r( BLASLONG n, FLOAT *alpha, FLOAT *x)
"vzeroupper \n\t"
:
:
"r" (n), // 0
"r" (x), // 1
"+r" (n), // 0
"+r" (x) // 1
:
"r" (alpha) // 2
: "cc", // "0", "1",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
@@ -285,9 +285,9 @@ static void cscal_kernel_16_zero_i( BLASLONG n, FLOAT *alpha, FLOAT *x)
"vzeroupper \n\t"
:
:
"r" (n), // 0
"r" (x), // 1
"+r" (n), // 0
"+r" (x) // 1
:
"r" (alpha) // 2
: "cc", //"%0", "%1",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
@@ -329,12 +329,12 @@ static void cscal_kernel_16_zero( BLASLONG n, FLOAT *alpha, FLOAT *x)
"vzeroupper \n\t"
:
:
"r" (n), // 0
"r" (x), // 1
:
"+r" (n), // 0
"+r" (x) // 1
:
"r" (alpha) // 2
: "cc", //"0", "1",
: "cc",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",

View File

@@ -117,11 +117,11 @@ static void cscal_kernel_16( BLASLONG n, FLOAT *alpha, FLOAT *x)
"vzeroupper \n\t"
:
:
"r" (n), // 0
"r" (x), // 1
"+r" (n), // 0
"+r" (x) // 1
:
"r" (alpha) // 2
: "cc", //"0", "1",
: "cc",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
@@ -208,12 +208,12 @@ static void cscal_kernel_16_zero_r( BLASLONG n, FLOAT *alpha, FLOAT *x)
"vzeroupper \n\t"
:
"+r" (n), // 0
"+r" (x) // 1
:
:
"r" (n), // 0
"r" (x), // 1
"r" (alpha) // 2
: "cc", //"0", "1",
: "cc",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
@@ -286,11 +286,11 @@ static void cscal_kernel_16_zero_i( BLASLONG n, FLOAT *alpha, FLOAT *x)
"vzeroupper \n\t"
:
:
"r" (n), // 0
"r" (x), // 1
"+r" (n), // 0
"+r" (x) // 1
:
"r" (alpha) // 2
: "cc", //"%0", "%1",
: "cc",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",
@@ -331,11 +331,11 @@ static void cscal_kernel_16_zero( BLASLONG n, FLOAT *alpha, FLOAT *x)
"vzeroupper \n\t"
:
:
"r" (n), // 0
"r" (x), // 1
"+r" (n), // 0
"+r" (x) // 1
:
"r" (alpha) // 2
: "cc", //"0", "1",
: "cc",
"%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7",
"%xmm8", "%xmm9", "%xmm10", "%xmm11",

View File

@@ -37,8 +37,10 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "daxpy_microk_steamroller-2.c"
#elif defined(PILEDRIVER)
#include "daxpy_microk_piledriver-2.c"
#elif defined(HASWELL) || defined(ZEN) || defined (SKYLAKEX)
#elif defined(HASWELL) || defined(ZEN)
#include "daxpy_microk_haswell-2.c"
#elif defined (SKYLAKEX)
#include "daxpy_microk_skylakex-2.c"
#elif defined(SANDYBRIDGE)
#include "daxpy_microk_sandy-2.c"
#endif

View File

@@ -64,9 +64,9 @@ static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
"jnz 1b \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"+r" (i), // 0
"+r" (n) // 1
:
"r" (x), // 2
"r" (y), // 3
"r" (alpha) // 4

View File

@@ -59,10 +59,10 @@ static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
"jnz 1b \n\t"
"vzeroupper \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
:
"+r" (i), // 0
"+r" (n) // 1
:
"r" (x), // 2
"r" (y), // 3
"r" (alpha) // 4

View File

@@ -73,9 +73,9 @@ static void daxpy_kernel_8( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *alpha)
"jnz 1b \n\t"
:
:
"r" (i), // 0
"r" (n), // 1
"+r" (i), // 0
"+r" (n) // 1
:
"r" (x), // 2
"r" (y), // 3
"r" (alpha) // 4

Some files were not shown because too many files have changed in this diff Show More