diff --git a/interface/lapack/fortran/dlaqr5.f b/interface/lapack/fortran/dlaqr5.f
new file mode 100644
index 000000000..a8fad0a79
--- /dev/null
+++ b/interface/lapack/fortran/dlaqr5.f
@@ -0,0 +1,1083 @@
+! Copyright (c) 2013-2016, The OpenBLAS Project
+! All rights reserved.
+! Redistribution and use in source and binary forms, with or without
+! modification, are permitted provided that the following conditions are
+! met:
+! 1. Redistributions of source code must retain the above copyright
+! notice, this list of conditions and the following disclaimer.
+! 2. Redistributions in binary form must reproduce the above copyright
+! notice, this list of conditions and the following disclaimer in
+! the documentation and/or other materials provided with the
+! distribution.
+! 3. Neither the name of the OpenBLAS project nor the names of
+! its contributors may be used to endorse or promote products
+! derived from this software without specific prior written permission.
+! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+! ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
+! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+! DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+! SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+! CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+! OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+! USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*> \brief \b DLAQR5 performs a single small-bulge multi-shift QR sweep.
+*
+* =========== DOCUMENTATION ===========
+*
+* Online html documentation available at
+* http://www.netlib.org/lapack/explore-html/
+*
+*> \htmlonly
+*> Download DLAQR5 + dependencies
+*>
+*> [TGZ]
+*>
+*> [ZIP]
+*>
+*> [TXT]
+*> \endhtmlonly
+*
+* Definition:
+* ===========
+*
+* SUBROUTINE DLAQR5( WANTT, WANTZ, KACC22, N, KTOP, KBOT, NSHFTS,
+* SR, SI, H, LDH, ILOZ, IHIZ, Z, LDZ, V, LDV, U,
+* LDU, NV, WV, LDWV, NH, WH, LDWH )
+*
+* .. Scalar Arguments ..
+* INTEGER IHIZ, ILOZ, KACC22, KBOT, KTOP, LDH, LDU, LDV,
+* $ LDWH, LDWV, LDZ, N, NH, NSHFTS, NV
+* LOGICAL WANTT, WANTZ
+* ..
+* .. Array Arguments ..
+* DOUBLE PRECISION H( LDH, * ), SI( * ), SR( * ), U( LDU, * ),
+* $ V( LDV, * ), WH( LDWH, * ), WV( LDWV, * ),
+* $ Z( LDZ, * )
+* ..
+*
+*
+*> \par Purpose:
+* =============
+*>
+*> \verbatim
+*>
+*> DLAQR5, called by DLAQR0, performs a
+*> single small-bulge multi-shift QR sweep.
+*> \endverbatim
+*
+* Arguments:
+* ==========
+*
+*> \param[in] WANTT
+*> \verbatim
+*> WANTT is logical scalar
+*> WANTT = .true. if the quasi-triangular Schur factor
+*> is being computed. WANTT is set to .false. otherwise.
+*> \endverbatim
+*>
+*> \param[in] WANTZ
+*> \verbatim
+*> WANTZ is logical scalar
+*> WANTZ = .true. if the orthogonal Schur factor is being
+*> computed. WANTZ is set to .false. otherwise.
+*> \endverbatim
+*>
+*> \param[in] KACC22
+*> \verbatim
+*> KACC22 is integer with value 0, 1, or 2.
+*> Specifies the computation mode of far-from-diagonal
+*> orthogonal updates.
+*> = 0: DLAQR5 does not accumulate reflections and does not
+*> use matrix-matrix multiply to update far-from-diagonal
+*> matrix entries.
+*> = 1: DLAQR5 accumulates reflections and uses matrix-matrix
+*> multiply to update the far-from-diagonal matrix entries.
+*> = 2: DLAQR5 accumulates reflections, uses matrix-matrix
+*> multiply to update the far-from-diagonal matrix entries,
+*> and takes advantage of 2-by-2 block structure during
+*> matrix multiplies.
+*> \endverbatim
+*>
+*> \param[in] N
+*> \verbatim
+*> N is integer scalar
+*> N is the order of the Hessenberg matrix H upon which this
+*> subroutine operates.
+*> \endverbatim
+*>
+*> \param[in] KTOP
+*> \verbatim
+*> KTOP is integer scalar
+*> \endverbatim
+*>
+*> \param[in] KBOT
+*> \verbatim
+*> KBOT is integer scalar
+*> These are the first and last rows and columns of an
+*> isolated diagonal block upon which the QR sweep is to be
+*> applied. It is assumed without a check that
+*> either KTOP = 1 or H(KTOP,KTOP-1) = 0
+*> and
+*> either KBOT = N or H(KBOT+1,KBOT) = 0.
+*> \endverbatim
+*>
+*> \param[in] NSHFTS
+*> \verbatim
+*> NSHFTS is integer scalar
+*> NSHFTS gives the number of simultaneous shifts. NSHFTS
+*> must be positive and even.
+*> \endverbatim
+*>
+*> \param[in,out] SR
+*> \verbatim
+*> SR is DOUBLE PRECISION array of size (NSHFTS)
+*> \endverbatim
+*>
+*> \param[in,out] SI
+*> \verbatim
+*> SI is DOUBLE PRECISION array of size (NSHFTS)
+*> SR contains the real parts and SI contains the imaginary
+*> parts of the NSHFTS shifts of origin that define the
+*> multi-shift QR sweep. On output SR and SI may be
+*> reordered.
+*> \endverbatim
+*>
+*> \param[in,out] H
+*> \verbatim
+*> H is DOUBLE PRECISION array of size (LDH,N)
+*> On input H contains a Hessenberg matrix. On output a
+*> multi-shift QR sweep with shifts SR(J)+i*SI(J) is applied
+*> to the isolated diagonal block in rows and columns KTOP
+*> through KBOT.
+*> \endverbatim
+*>
+*> \param[in] LDH
+*> \verbatim
+*> LDH is integer scalar
+*> LDH is the leading dimension of H just as declared in the
+*> calling procedure. LDH.GE.MAX(1,N).
+*> \endverbatim
+*>
+*> \param[in] ILOZ
+*> \verbatim
+*> ILOZ is INTEGER
+*> \endverbatim
+*>
+*> \param[in] IHIZ
+*> \verbatim
+*> IHIZ is INTEGER
+*> Specify the rows of Z to which transformations must be
+*> applied if WANTZ is .TRUE.. 1 .LE. ILOZ .LE. IHIZ .LE. N
+*> \endverbatim
+*>
+*> \param[in,out] Z
+*> \verbatim
+*>          Z is DOUBLE PRECISION array of size (LDZ,IHIZ)
+*> If WANTZ = .TRUE., then the QR Sweep orthogonal
+*> similarity transformation is accumulated into
+*>          Z(ILOZ:IHIZ,ILOZ:IHIZ) from the right.
+*> If WANTZ = .FALSE., then Z is unreferenced.
+*> \endverbatim
+*>
+*> \param[in] LDZ
+*> \verbatim
+*> LDZ is integer scalar
+*>           LDZ is the leading dimension of Z just as declared in
+*> the calling procedure. LDZ.GE.N.
+*> \endverbatim
+*>
+*> \param[out] V
+*> \verbatim
+*> V is DOUBLE PRECISION array of size (LDV,NSHFTS/2)
+*> \endverbatim
+*>
+*> \param[in] LDV
+*> \verbatim
+*> LDV is integer scalar
+*> LDV is the leading dimension of V as declared in the
+*> calling procedure. LDV.GE.3.
+*> \endverbatim
+*>
+*> \param[out] U
+*> \verbatim
+*> U is DOUBLE PRECISION array of size
+*> (LDU,3*NSHFTS-3)
+*> \endverbatim
+*>
+*> \param[in] LDU
+*> \verbatim
+*> LDU is integer scalar
+*> LDU is the leading dimension of U just as declared in the
+*>             calling subroutine.  LDU.GE.3*NSHFTS-3.
+*> \endverbatim
+*>
+*> \param[in] NH
+*> \verbatim
+*> NH is integer scalar
+*> NH is the number of columns in array WH available for
+*> workspace. NH.GE.1.
+*> \endverbatim
+*>
+*> \param[out] WH
+*> \verbatim
+*> WH is DOUBLE PRECISION array of size (LDWH,NH)
+*> \endverbatim
+*>
+*> \param[in] LDWH
+*> \verbatim
+*> LDWH is integer scalar
+*> Leading dimension of WH just as declared in the
+*> calling procedure. LDWH.GE.3*NSHFTS-3.
+*> \endverbatim
+*>
+*> \param[in] NV
+*> \verbatim
+*> NV is integer scalar
+*>             NV is the number of rows in WV available for workspace.
+*> NV.GE.1.
+*> \endverbatim
+*>
+*> \param[out] WV
+*> \verbatim
+*> WV is DOUBLE PRECISION array of size
+*> (LDWV,3*NSHFTS-3)
+*> \endverbatim
+*>
+*> \param[in] LDWV
+*> \verbatim
+*> LDWV is integer scalar
+*> LDWV is the leading dimension of WV as declared in the
+*>             calling subroutine.  LDWV.GE.NV.
+*> \endverbatim
+*
+* Authors:
+* ========
+*
+*> \author Univ. of Tennessee
+*> \author Univ. of California Berkeley
+*> \author Univ. of Colorado Denver
+*> \author NAG Ltd.
+*
+*> \date September 2012
+*
+*> \ingroup doubleOTHERauxiliary
+*
+*> \par Contributors:
+* ==================
+*>
+*> Karen Braman and Ralph Byers, Department of Mathematics,
+*> University of Kansas, USA
+*
+*> \par References:
+* ================
+*>
+*> K. Braman, R. Byers and R. Mathias, The Multi-Shift QR
+*> Algorithm Part I: Maintaining Well Focused Shifts, and Level 3
+*>       Performance, SIAM Journal on Matrix Analysis and Applications,
+*>       volume 23, pages 929--947, 2002.
+*>
+* =====================================================================
+ SUBROUTINE DLAQR5( WANTT, WANTZ, KACC22, N, KTOP, KBOT, NSHFTS,
+ $ SR, SI, H, LDH, ILOZ, IHIZ, Z, LDZ, V, LDV, U,
+ $ LDU, NV, WV, LDWV, NH, WH, LDWH )
+*
+* -- LAPACK auxiliary routine (version 3.4.2) --
+* -- LAPACK is a software package provided by Univ. of Tennessee, --
+* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
+* September 2012
+*
+* .. Scalar Arguments ..
+ INTEGER IHIZ, ILOZ, KACC22, KBOT, KTOP, LDH, LDU, LDV,
+ $ LDWH, LDWV, LDZ, N, NH, NSHFTS, NV
+ LOGICAL WANTT, WANTZ
+* ..
+* .. Array Arguments ..
+ DOUBLE PRECISION H( LDH, * ), SI( * ), SR( * ), U( LDU, * ),
+ $ V( LDV, * ), WH( LDWH, * ), WV( LDWV, * ),
+ $ Z( LDZ, * )
+* ..
+*
+* ================================================================
+* .. Parameters ..
+ DOUBLE PRECISION ZERO, ONE
+ PARAMETER ( ZERO = 0.0d0, ONE = 1.0d0 )
+* ..
+* .. Local Scalars ..
+ DOUBLE PRECISION ALPHA, BETA, H11, H12, H21, H22, REFSUM,
+ $ SAFMAX, SAFMIN, SCL, SMLNUM, SWAP, TST1, TST2,
+ $ ULP
+ INTEGER I, I2, I4, INCOL, J, J2, J4, JBOT, JCOL, JLEN,
+ $ JROW, JTOP, K, K1, KDU, KMS, KNZ, KRCOL, KZS,
+ $ M, M22, MBOT, MEND, MSTART, MTOP, NBMPS, NDCOL,
+ $ NS, NU
+ LOGICAL ACCUM, BLK22, BMP22
+* ..
+* .. External Functions ..
+ DOUBLE PRECISION DLAMCH
+ EXTERNAL DLAMCH
+* ..
+* .. Intrinsic Functions ..
+*
+ INTRINSIC ABS, DBLE, MAX, MIN, MOD
+* ..
+* .. Local Arrays ..
+ DOUBLE PRECISION VT( 3 )
+*     .. Temporary scalars for unrolled reflector updates ..
+ DOUBLE PRECISION tempv1, tempv2, tempv3,
+ $ tempv4, tempv5, tempv6,
+ $ temph1, temph2, temph3,
+ $ temph4, temph5, temph6,
+ $ tempz1, tempz2, tempz3,
+ $ tempz4, tempz5, tempz6,
+ $ tempu1, tempu2, tempu3,
+ $ tempu4, tempu5, tempu6,
+ $ REFSU1
+ INTEGER JBEGIN, M1
+* ..
+* .. External Subroutines ..
+ EXTERNAL DGEMM, DLABAD, DLACPY, DLAQR1, DLARFG, DLASET,
+ $ DTRMM
+* ..
+* .. Executable Statements ..
+*
+* ==== If there are no shifts, then there is nothing to do. ====
+*
+ IF( NSHFTS.LT.2 )
+ $ RETURN
+*
+* ==== If the active block is empty or 1-by-1, then there
+* . is nothing to do. ====
+*
+ IF( KTOP.GE.KBOT )
+ $ RETURN
+*
+* ==== Shuffle shifts into pairs of real shifts and pairs
+* . of complex conjugate shifts assuming complex
+* . conjugate shifts are already adjacent to one
+* . another. ====
+*
+ DO 10 I = 1, NSHFTS - 2, 2
+ IF( SI( I ).NE.-SI( I+1 ) ) THEN
+*
+ SWAP = SR( I )
+ SR( I ) = SR( I+1 )
+ SR( I+1 ) = SR( I+2 )
+ SR( I+2 ) = SWAP
+*
+ SWAP = SI( I )
+ SI( I ) = SI( I+1 )
+ SI( I+1 ) = SI( I+2 )
+ SI( I+2 ) = SWAP
+ END IF
+ 10 CONTINUE
+*
+* ==== NSHFTS is supposed to be even, but if it is odd,
+* . then simply reduce it by one. The shuffle above
+* . ensures that the dropped shift is real and that
+* . the remaining shifts are paired. ====
+*
+ NS = NSHFTS - MOD( NSHFTS, 2 )
+*
+* ==== Machine constants for deflation ====
+*
+ SAFMIN = DLAMCH( 'SAFE MINIMUM' )
+ SAFMAX = ONE / SAFMIN
+ CALL DLABAD( SAFMIN, SAFMAX )
+ ULP = DLAMCH( 'PRECISION' )
+ SMLNUM = SAFMIN*( DBLE( N ) / ULP )
+*
+* ==== Use accumulated reflections to update far-from-diagonal
+* . entries ? ====
+*
+ ACCUM = ( KACC22.EQ.1 ) .OR. ( KACC22.EQ.2 )
+*
+* ==== If so, exploit the 2-by-2 block structure? ====
+*
+ BLK22 = ( NS.GT.2 ) .AND. ( KACC22.EQ.2 )
+*
+* ==== clear trash ====
+*
+ IF( KTOP+2.LE.KBOT )
+ $ H( KTOP+2, KTOP ) = ZERO
+*
+* ==== NBMPS = number of 2-shift bulges in the chain ====
+*
+ NBMPS = NS / 2
+*
+* ==== KDU = width of slab ====
+*
+ KDU = 6*NBMPS - 3
+*
+* ==== Create and chase chains of NBMPS bulges ====
+*
+ DO 220 INCOL = 3*( 1-NBMPS ) + KTOP - 1, KBOT - 2, 3*NBMPS - 2
+ NDCOL = INCOL + KDU
+ IF( ACCUM )
+ $ CALL DLASET( 'ALL', KDU, KDU, ZERO, ONE, U, LDU )
+*
+* ==== Near-the-diagonal bulge chase. The following loop
+* . performs the near-the-diagonal part of a small bulge
+* . multi-shift QR sweep. Each 6*NBMPS-2 column diagonal
+* . chunk extends from column INCOL to column NDCOL
+* . (including both column INCOL and column NDCOL). The
+* . following loop chases a 3*NBMPS column long chain of
+* . NBMPS bulges 3*NBMPS-2 columns to the right. (INCOL
+*     .    may be less than KTOP and NDCOL may be greater than
+* . KBOT indicating phantom columns from which to chase
+* . bulges before they are actually introduced or to which
+* . to chase bulges beyond column KBOT.) ====
+*
+ DO 150 KRCOL = INCOL, MIN( INCOL+3*NBMPS-3, KBOT-2 )
+*
+* ==== Bulges number MTOP to MBOT are active double implicit
+*     .    shift bulges. There may or may not also be a small
+* . 2-by-2 bulge, if there is room. The inactive bulges
+* . (if any) must wait until the active bulges have moved
+* . down the diagonal to make room. The phantom matrix
+* . paradigm described above helps keep track. ====
+*
+ MTOP = MAX( 1, ( ( KTOP-1 )-KRCOL+2 ) / 3+1 )
+ MBOT = MIN( NBMPS, ( KBOT-KRCOL ) / 3 )
+ M22 = MBOT + 1
+ BMP22 = ( MBOT.LT.NBMPS ) .AND. ( KRCOL+3*( M22-1 ) ).EQ.
+ $ ( KBOT-2 )
+*
+* ==== Generate reflections to chase the chain right
+* . one column. (The minimum value of K is KTOP-1.) ====
+*
+ DO 20 M = MTOP, MBOT
+ K = KRCOL + 3*( M-1 )
+ IF( K.EQ.KTOP-1 ) THEN
+ CALL DLAQR1( 3, H( KTOP, KTOP ), LDH, SR( 2*M-1 ),
+ $ SI( 2*M-1 ), SR( 2*M ), SI( 2*M ),
+ $ V( 1, M ) )
+ ALPHA = V( 1, M )
+ CALL DLARFG( 3, ALPHA, V( 2, M ), 1, V( 1, M ) )
+ ELSE
+ BETA = H( K+1, K )
+ V( 2, M ) = H( K+2, K )
+ V( 3, M ) = H( K+3, K )
+ CALL DLARFG( 3, BETA, V( 2, M ), 1, V( 1, M ) )
+*
+* ==== A Bulge may collapse because of vigilant
+* . deflation or destructive underflow. In the
+* . underflow case, try the two-small-subdiagonals
+* . trick to try to reinflate the bulge. ====
+*
+ IF( H( K+3, K ).NE.ZERO .OR. H( K+3, K+1 ).NE.
+ $ ZERO .OR. H( K+3, K+2 ).EQ.ZERO ) THEN
+*
+* ==== Typical case: not collapsed (yet). ====
+*
+ H( K+1, K ) = BETA
+ H( K+2, K ) = ZERO
+ H( K+3, K ) = ZERO
+ ELSE
+*
+* ==== Atypical case: collapsed. Attempt to
+* . reintroduce ignoring H(K+1,K) and H(K+2,K).
+* . If the fill resulting from the new
+* . reflector is too large, then abandon it.
+* . Otherwise, use the new one. ====
+*
+ CALL DLAQR1( 3, H( K+1, K+1 ), LDH, SR( 2*M-1 ),
+ $ SI( 2*M-1 ), SR( 2*M ), SI( 2*M ),
+ $ VT )
+ ALPHA = VT( 1 )
+ CALL DLARFG( 3, ALPHA, VT( 2 ), 1, VT( 1 ) )
+ REFSUM = VT( 1 )*( H( K+1, K )+VT( 2 )*
+ $ H( K+2, K ) )
+*
+ IF( ABS( H( K+2, K )-REFSUM*VT( 2 ) )+
+ $ ABS( REFSUM*VT( 3 ) ).GT.ULP*
+ $ ( ABS( H( K, K ) )+ABS( H( K+1,
+ $ K+1 ) )+ABS( H( K+2, K+2 ) ) ) ) THEN
+*
+* ==== Starting a new bulge here would
+* . create non-negligible fill. Use
+* . the old one with trepidation. ====
+*
+ H( K+1, K ) = BETA
+ H( K+2, K ) = ZERO
+ H( K+3, K ) = ZERO
+ ELSE
+*
+*                    ==== Starting a new bulge here would
+* . create only negligible fill.
+* . Replace the old reflector with
+* . the new one. ====
+*
+ H( K+1, K ) = H( K+1, K ) - REFSUM
+ H( K+2, K ) = ZERO
+ H( K+3, K ) = ZERO
+ V( 1, M ) = VT( 1 )
+ V( 2, M ) = VT( 2 )
+ V( 3, M ) = VT( 3 )
+ END IF
+ END IF
+ END IF
+ 20 CONTINUE
+*
+* ==== Generate a 2-by-2 reflection, if needed. ====
+*
+ K = KRCOL + 3*( M22-1 )
+ IF( BMP22 ) THEN
+ IF( K.EQ.KTOP-1 ) THEN
+ CALL DLAQR1( 2, H( K+1, K+1 ), LDH, SR( 2*M22-1 ),
+ $ SI( 2*M22-1 ), SR( 2*M22 ), SI( 2*M22 ),
+ $ V( 1, M22 ) )
+ BETA = V( 1, M22 )
+ CALL DLARFG( 2, BETA, V( 2, M22 ), 1, V( 1, M22 ) )
+ ELSE
+ BETA = H( K+1, K )
+ V( 2, M22 ) = H( K+2, K )
+ CALL DLARFG( 2, BETA, V( 2, M22 ), 1, V( 1, M22 ) )
+ H( K+1, K ) = BETA
+ H( K+2, K ) = ZERO
+ END IF
+ END IF
+*
+* ==== Multiply H by reflections from the left ====
+*
+ IF( ACCUM ) THEN
+ JBOT = MIN( NDCOL, KBOT )
+ ELSE IF( WANTT ) THEN
+ JBOT = N
+ ELSE
+ JBOT = KBOT
+ END IF
+ DO 40 J = MAX( KTOP, KRCOL ), JBOT
+ MEND = MIN( MBOT, ( J-KRCOL+2 ) / 3 )
+
+ DO 30 M = MTOP, MEND
+
+                  M1 = M - 1
+
+ tempv1 = V( 1, M )
+ K = KRCOL + 2*M1
+ tempv2 = V( 2, M )
+ K = K + M1
+ tempv3 = V( 3, M )
+ temph1 = H( K+1, J )
+ temph2 = H( K+2, J )
+ temph3 = H( K+3, J )
+
+ REFSUM = tempv1*( temph1+tempv2*
+ $ temph2+tempv3*temph3 )
+
+
+ H( K+1, J ) = temph1 - REFSUM
+ H( K+2, J ) = temph2 - REFSUM*tempv2
+ H( K+3, J ) = temph3 - REFSUM*tempv3
+
+ 30 CONTINUE
+
+ 40 CONTINUE
+ IF( BMP22 ) THEN
+ K = KRCOL + 3*( M22-1 )
+ DO 50 J = MAX( K+1, KTOP ), JBOT
+ REFSUM = V( 1, M22 )*( H( K+1, J )+V( 2, M22 )*
+ $ H( K+2, J ) )
+ H( K+1, J ) = H( K+1, J ) - REFSUM
+ H( K+2, J ) = H( K+2, J ) - REFSUM*V( 2, M22 )
+ 50 CONTINUE
+ END IF
+*
+* ==== Multiply H by reflections from the right.
+* . Delay filling in the last row until the
+* . vigilant deflation check is complete. ====
+*
+ IF( ACCUM ) THEN
+ JTOP = MAX( KTOP, INCOL )
+ ELSE IF( WANTT ) THEN
+ JTOP = 1
+ ELSE
+ JTOP = KTOP
+ END IF
+ DO 90 M = MTOP, MBOT
+ IF( V( 1, M ).NE.ZERO ) THEN
+ tempv1 = V( 1, M )
+ tempv2 = V( 2, M )
+ tempv3 = V( 3, M )
+ K = KRCOL + 3*( M-1 )
+ JBEGIN = JTOP
+
+ IF ( MOD( MIN( KBOT, K+3 )-JTOP+1, 2).GT.0 ) THEN
+ J = JBEGIN
+
+ temph1 = H( J, K+1 )
+ temph2 = H( J, K+2 )
+ temph3 = H( J, K+3 )
+ REFSUM = tempv1* ( temph1+tempv2*temph2+
+ $ tempv3*temph3 )
+ H( J, K+1 ) = temph1 - REFSUM
+ H( J, K+2 ) = temph2 - REFSUM*tempv2
+ H( J, K+3 ) = temph3 - REFSUM*tempv3
+
+ JBEGIN = JBEGIN + 1
+
+ END IF
+
+
+ DO 60 J = JBEGIN, MIN( KBOT, K+3 ), 2
+
+ temph1 = H( J, K+1 )
+ temph4 = H( J+1, K+1 )
+ temph2 = H( J, K+2 )
+ temph5 = H( J+1, K+2 )
+ temph3 = H( J, K+3 )
+ temph6 = H( J+1, K+3 )
+
+ REFSUM = tempv1* ( temph1+tempv2*temph2+
+ $ tempv3*temph3 )
+
+ REFSU1 = tempv1* ( temph4+tempv2*temph5+
+ $ tempv3*temph6 )
+
+ H( J, K+1 ) = temph1 - REFSUM
+ H( J+1, K+1 ) = temph4 - REFSU1
+ H( J, K+2 ) = temph2 - REFSUM*tempv2
+ H( J+1, K+2 ) = temph5 - REFSU1*tempv2
+ H( J, K+3 ) = temph3 - REFSUM*tempv3
+ H( J+1, K+3 ) = temph6 - REFSU1*tempv3
+
+ 60 CONTINUE
+*
+ IF( ACCUM ) THEN
+*
+* ==== Accumulate U. (If necessary, update Z later
+*                 .     with an efficient matrix-matrix
+* . multiply.) ====
+*
+ KMS = K - INCOL
+ JBEGIN=MAX( 1, KTOP-INCOL )
+
+ IF ( MOD(KDU-JBEGIN+1,2).GT.0 ) THEN
+ J = JBEGIN
+ tempu1 = U( J, KMS+1 )
+ tempu2 = U( J, KMS+2 )
+ tempu3 = U( J, KMS+3 )
+ REFSUM = tempv1* ( tempu1+tempv2*tempu2+
+ $ tempv3*tempu3 )
+ U( J, KMS+1 ) = tempu1 - REFSUM
+ U( J, KMS+2 ) = tempu2 - REFSUM*tempv2
+ U( J, KMS+3 ) = tempu3 - REFSUM*tempv3
+ JBEGIN = JBEGIN + 1
+
+ END IF
+
+
+ DO 70 J = JBEGIN, KDU , 2
+
+ tempu1 = U( J, KMS+1 )
+ tempu4 = U( J+1, KMS+1 )
+ tempu2 = U( J, KMS+2 )
+ tempu5 = U( J+1, KMS+2 )
+ tempu3 = U( J, KMS+3 )
+ tempu6 = U( J+1, KMS+3 )
+ REFSUM = tempv1* ( tempu1+tempv2*tempu2+
+ $ tempv3*tempu3 )
+
+ REFSU1 = tempv1* ( tempu4+tempv2*tempu5+
+ $ tempv3*tempu6 )
+
+ U( J, KMS+1 ) = tempu1 - REFSUM
+ U( J+1, KMS+1 ) = tempu4 - REFSU1
+ U( J, KMS+2 ) = tempu2 - REFSUM*tempv2
+ U( J+1, KMS+2 ) = tempu5 - REFSU1*tempv2
+ U( J, KMS+3 ) = tempu3 - REFSUM*tempv3
+ U( J+1, KMS+3 ) = tempu6 - REFSU1*tempv3
+
+ 70 CONTINUE
+
+
+ ELSE IF( WANTZ ) THEN
+*
+* ==== U is not accumulated, so update Z
+* . now by multiplying by reflections
+* . from the right. ====
+*
+ JBEGIN = ILOZ
+
+ IF ( MOD(IHIZ-ILOZ+1,2).GT.0 ) THEN
+ J = JBEGIN
+
+ tempz1 = Z( J, K+1 )
+ tempz2 = Z( J, K+2 )
+ tempz3 = Z( J, K+3 )
+ REFSUM = tempv1* ( tempz1+tempv2*tempz2+
+ $ tempv3*tempz3 )
+ Z( J, K+1 ) = tempz1 - REFSUM
+ Z( J, K+2 ) = tempz2 - REFSUM*tempv2
+ Z( J, K+3 ) = tempz3 - REFSUM*tempv3
+
+ JBEGIN = JBEGIN + 1
+
+ END IF
+
+ DO 80 J = JBEGIN, IHIZ, 2
+
+ tempz1 = Z( J, K+1 )
+ tempz4 = Z( J+1, K+1 )
+ tempz2 = Z( J, K+2 )
+ tempz5 = Z( J+1, K+2 )
+ tempz3 = Z( J, K+3 )
+ tempz6 = Z( J+1, K+3 )
+
+ REFSUM = tempv1* ( tempz1+tempv2*tempz2+
+ $ tempv3*tempz3 )
+
+ REFSU1 = tempv1* ( tempz4+tempv2*tempz5+
+ $ tempv3*tempz6 )
+
+ Z( J, K+1 ) = tempz1 - REFSUM
+ Z( J, K+2 ) = tempz2 - REFSUM*tempv2
+ Z( J, K+3 ) = tempz3 - REFSUM*tempv3
+
+
+ Z( J+1, K+1 ) = tempz4 - REFSU1
+ Z( J+1, K+2 ) = tempz5 - REFSU1*tempv2
+ Z( J+1, K+3 ) = tempz6 - REFSU1*tempv3
+
+
+ 80 CONTINUE
+
+ END IF
+ END IF
+ 90 CONTINUE
+*
+* ==== Special case: 2-by-2 reflection (if needed) ====
+*
+ K = KRCOL + 3*( M22-1 )
+ IF( BMP22 ) THEN
+ IF ( V( 1, M22 ).NE.ZERO ) THEN
+ DO 100 J = JTOP, MIN( KBOT, K+3 )
+ REFSUM = V( 1, M22 )*( H( J, K+1 )+V( 2, M22 )*
+ $ H( J, K+2 ) )
+ H( J, K+1 ) = H( J, K+1 ) - REFSUM
+ H( J, K+2 ) = H( J, K+2 ) - REFSUM*V( 2, M22 )
+ 100 CONTINUE
+*
+ IF( ACCUM ) THEN
+ KMS = K - INCOL
+ DO 110 J = MAX( 1, KTOP-INCOL ), KDU
+ REFSUM = V( 1, M22 )*( U( J, KMS+1 )+
+ $ V( 2, M22 )*U( J, KMS+2 ) )
+ U( J, KMS+1 ) = U( J, KMS+1 ) - REFSUM
+ U( J, KMS+2 ) = U( J, KMS+2 ) -
+ $ REFSUM*V( 2, M22 )
+ 110 CONTINUE
+ ELSE IF( WANTZ ) THEN
+ DO 120 J = ILOZ, IHIZ
+ REFSUM = V( 1, M22 )*( Z( J, K+1 )+V( 2, M22 )*
+ $ Z( J, K+2 ) )
+ Z( J, K+1 ) = Z( J, K+1 ) - REFSUM
+ Z( J, K+2 ) = Z( J, K+2 ) - REFSUM*V( 2, M22 )
+ 120 CONTINUE
+ END IF
+ END IF
+ END IF
+*
+* ==== Vigilant deflation check ====
+*
+ MSTART = MTOP
+ IF( KRCOL+3*( MSTART-1 ).LT.KTOP )
+ $ MSTART = MSTART + 1
+ MEND = MBOT
+ IF( BMP22 )
+ $ MEND = MEND + 1
+ IF( KRCOL.EQ.KBOT-2 )
+ $ MEND = MEND + 1
+ DO 130 M = MSTART, MEND
+ K = MIN( KBOT-1, KRCOL+3*( M-1 ) )
+*
+* ==== The following convergence test requires that
+*        .    the traditional small-compared-to-nearby-diagonals
+* . criterion and the Ahues & Tisseur (LAWN 122, 1997)
+* . criteria both be satisfied. The latter improves
+* . accuracy in some examples. Falling back on an
+* . alternate convergence criterion when TST1 or TST2
+* . is zero (as done here) is traditional but probably
+* . unnecessary. ====
+*
+ IF( H( K+1, K ).NE.ZERO ) THEN
+ TST1 = ABS( H( K, K ) ) + ABS( H( K+1, K+1 ) )
+ IF( TST1.EQ.ZERO ) THEN
+ IF( K.GE.KTOP+1 )
+ $ TST1 = TST1 + ABS( H( K, K-1 ) )
+ IF( K.GE.KTOP+2 )
+ $ TST1 = TST1 + ABS( H( K, K-2 ) )
+ IF( K.GE.KTOP+3 )
+ $ TST1 = TST1 + ABS( H( K, K-3 ) )
+ IF( K.LE.KBOT-2 )
+ $ TST1 = TST1 + ABS( H( K+2, K+1 ) )
+ IF( K.LE.KBOT-3 )
+ $ TST1 = TST1 + ABS( H( K+3, K+1 ) )
+ IF( K.LE.KBOT-4 )
+ $ TST1 = TST1 + ABS( H( K+4, K+1 ) )
+ END IF
+ IF( ABS( H( K+1, K ) ).LE.MAX( SMLNUM, ULP*TST1 ) )
+ $ THEN
+ H12 = MAX( ABS( H( K+1, K ) ), ABS( H( K, K+1 ) ) )
+ H21 = MIN( ABS( H( K+1, K ) ), ABS( H( K, K+1 ) ) )
+ H11 = MAX( ABS( H( K+1, K+1 ) ),
+ $ ABS( H( K, K )-H( K+1, K+1 ) ) )
+ H22 = MIN( ABS( H( K+1, K+1 ) ),
+ $ ABS( H( K, K )-H( K+1, K+1 ) ) )
+ SCL = H11 + H12
+ TST2 = H22*( H11 / SCL )
+*
+ IF( TST2.EQ.ZERO .OR. H21*( H12 / SCL ).LE.
+ $ MAX( SMLNUM, ULP*TST2 ) )H( K+1, K ) = ZERO
+ END IF
+ END IF
+ 130 CONTINUE
+*
+* ==== Fill in the last row of each bulge. ====
+*
+ MEND = MIN( NBMPS, ( KBOT-KRCOL-1 ) / 3 )
+ DO 140 M = MTOP, MEND
+ K = KRCOL + 3*( M-1 )
+ REFSUM = V( 1, M )*V( 3, M )*H( K+4, K+3 )
+ H( K+4, K+1 ) = -REFSUM
+ H( K+4, K+2 ) = -REFSUM*V( 2, M )
+ H( K+4, K+3 ) = H( K+4, K+3 ) - REFSUM*V( 3, M )
+ 140 CONTINUE
+*
+* ==== End of near-the-diagonal bulge chase. ====
+*
+ 150 CONTINUE
+*
+* ==== Use U (if accumulated) to update far-from-diagonal
+* . entries in H. If required, use U to update Z as
+* . well. ====
+*
+ IF( ACCUM ) THEN
+ IF( WANTT ) THEN
+ JTOP = 1
+ JBOT = N
+ ELSE
+ JTOP = KTOP
+ JBOT = KBOT
+ END IF
+ IF( ( .NOT.BLK22 ) .OR. ( INCOL.LT.KTOP ) .OR.
+ $ ( NDCOL.GT.KBOT ) .OR. ( NS.LE.2 ) ) THEN
+*
+* ==== Updates not exploiting the 2-by-2 block
+* . structure of U. K1 and NU keep track of
+* . the location and size of U in the special
+* . cases of introducing bulges and chasing
+* . bulges off the bottom. In these special
+* . cases and in case the number of shifts
+* . is NS = 2, there is no 2-by-2 block
+* . structure to exploit. ====
+*
+ K1 = MAX( 1, KTOP-INCOL )
+ NU = ( KDU-MAX( 0, NDCOL-KBOT ) ) - K1 + 1
+*
+* ==== Horizontal Multiply ====
+*
+ DO 160 JCOL = MIN( NDCOL, KBOT ) + 1, JBOT, NH
+ JLEN = MIN( NH, JBOT-JCOL+1 )
+ CALL DGEMM( 'C', 'N', NU, JLEN, NU, ONE, U( K1, K1 ),
+ $ LDU, H( INCOL+K1, JCOL ), LDH, ZERO, WH,
+ $ LDWH )
+ CALL DLACPY( 'ALL', NU, JLEN, WH, LDWH,
+ $ H( INCOL+K1, JCOL ), LDH )
+ 160 CONTINUE
+*
+* ==== Vertical multiply ====
+*
+ DO 170 JROW = JTOP, MAX( KTOP, INCOL ) - 1, NV
+ JLEN = MIN( NV, MAX( KTOP, INCOL )-JROW )
+ CALL DGEMM( 'N', 'N', JLEN, NU, NU, ONE,
+ $ H( JROW, INCOL+K1 ), LDH, U( K1, K1 ),
+ $ LDU, ZERO, WV, LDWV )
+ CALL DLACPY( 'ALL', JLEN, NU, WV, LDWV,
+ $ H( JROW, INCOL+K1 ), LDH )
+ 170 CONTINUE
+*
+* ==== Z multiply (also vertical) ====
+*
+ IF( WANTZ ) THEN
+ DO 180 JROW = ILOZ, IHIZ, NV
+ JLEN = MIN( NV, IHIZ-JROW+1 )
+ CALL DGEMM( 'N', 'N', JLEN, NU, NU, ONE,
+ $ Z( JROW, INCOL+K1 ), LDZ, U( K1, K1 ),
+ $ LDU, ZERO, WV, LDWV )
+ CALL DLACPY( 'ALL', JLEN, NU, WV, LDWV,
+ $ Z( JROW, INCOL+K1 ), LDZ )
+ 180 CONTINUE
+ END IF
+ ELSE
+*
+* ==== Updates exploiting U's 2-by-2 block structure.
+* . (I2, I4, J2, J4 are the last rows and columns
+* . of the blocks.) ====
+*
+ I2 = ( KDU+1 ) / 2
+ I4 = KDU
+ J2 = I4 - I2
+ J4 = KDU
+*
+* ==== KZS and KNZ deal with the band of zeros
+* . along the diagonal of one of the triangular
+* . blocks. ====
+*
+ KZS = ( J4-J2 ) - ( NS+1 )
+ KNZ = NS + 1
+*
+* ==== Horizontal multiply ====
+*
+ DO 190 JCOL = MIN( NDCOL, KBOT ) + 1, JBOT, NH
+ JLEN = MIN( NH, JBOT-JCOL+1 )
+*
+* ==== Copy bottom of H to top+KZS of scratch ====
+* (The first KZS rows get multiplied by zero.) ====
+*
+ CALL DLACPY( 'ALL', KNZ, JLEN, H( INCOL+1+J2, JCOL ),
+ $ LDH, WH( KZS+1, 1 ), LDWH )
+*
+* ==== Multiply by U21**T ====
+*
+ CALL DLASET( 'ALL', KZS, JLEN, ZERO, ZERO, WH, LDWH )
+ CALL DTRMM( 'L', 'U', 'C', 'N', KNZ, JLEN, ONE,
+ $ U( J2+1, 1+KZS ), LDU, WH( KZS+1, 1 ),
+ $ LDWH )
+*
+* ==== Multiply top of H by U11**T ====
+*
+ CALL DGEMM( 'C', 'N', I2, JLEN, J2, ONE, U, LDU,
+ $ H( INCOL+1, JCOL ), LDH, ONE, WH, LDWH )
+*
+* ==== Copy top of H to bottom of WH ====
+*
+ CALL DLACPY( 'ALL', J2, JLEN, H( INCOL+1, JCOL ), LDH,
+ $ WH( I2+1, 1 ), LDWH )
+*
+* ==== Multiply by U21**T ====
+*
+ CALL DTRMM( 'L', 'L', 'C', 'N', J2, JLEN, ONE,
+ $ U( 1, I2+1 ), LDU, WH( I2+1, 1 ), LDWH )
+*
+* ==== Multiply by U22 ====
+*
+ CALL DGEMM( 'C', 'N', I4-I2, JLEN, J4-J2, ONE,
+ $ U( J2+1, I2+1 ), LDU,
+ $ H( INCOL+1+J2, JCOL ), LDH, ONE,
+ $ WH( I2+1, 1 ), LDWH )
+*
+* ==== Copy it back ====
+*
+ CALL DLACPY( 'ALL', KDU, JLEN, WH, LDWH,
+ $ H( INCOL+1, JCOL ), LDH )
+ 190 CONTINUE
+*
+* ==== Vertical multiply ====
+*
+ DO 200 JROW = JTOP, MAX( INCOL, KTOP ) - 1, NV
+ JLEN = MIN( NV, MAX( INCOL, KTOP )-JROW )
+*
+* ==== Copy right of H to scratch (the first KZS
+* . columns get multiplied by zero) ====
+*
+ CALL DLACPY( 'ALL', JLEN, KNZ, H( JROW, INCOL+1+J2 ),
+ $ LDH, WV( 1, 1+KZS ), LDWV )
+*
+* ==== Multiply by U21 ====
+*
+ CALL DLASET( 'ALL', JLEN, KZS, ZERO, ZERO, WV, LDWV )
+ CALL DTRMM( 'R', 'U', 'N', 'N', JLEN, KNZ, ONE,
+ $ U( J2+1, 1+KZS ), LDU, WV( 1, 1+KZS ),
+ $ LDWV )
+*
+* ==== Multiply by U11 ====
+*
+ CALL DGEMM( 'N', 'N', JLEN, I2, J2, ONE,
+ $ H( JROW, INCOL+1 ), LDH, U, LDU, ONE, WV,
+ $ LDWV )
+*
+* ==== Copy left of H to right of scratch ====
+*
+ CALL DLACPY( 'ALL', JLEN, J2, H( JROW, INCOL+1 ), LDH,
+ $ WV( 1, 1+I2 ), LDWV )
+*
+* ==== Multiply by U21 ====
+*
+ CALL DTRMM( 'R', 'L', 'N', 'N', JLEN, I4-I2, ONE,
+ $ U( 1, I2+1 ), LDU, WV( 1, 1+I2 ), LDWV )
+*
+* ==== Multiply by U22 ====
+*
+ CALL DGEMM( 'N', 'N', JLEN, I4-I2, J4-J2, ONE,
+ $ H( JROW, INCOL+1+J2 ), LDH,
+ $ U( J2+1, I2+1 ), LDU, ONE, WV( 1, 1+I2 ),
+ $ LDWV )
+*
+* ==== Copy it back ====
+*
+ CALL DLACPY( 'ALL', JLEN, KDU, WV, LDWV,
+ $ H( JROW, INCOL+1 ), LDH )
+ 200 CONTINUE
+*
+* ==== Multiply Z (also vertical) ====
+*
+ IF( WANTZ ) THEN
+ DO 210 JROW = ILOZ, IHIZ, NV
+ JLEN = MIN( NV, IHIZ-JROW+1 )
+*
+* ==== Copy right of Z to left of scratch (first
+* . KZS columns get multiplied by zero) ====
+*
+ CALL DLACPY( 'ALL', JLEN, KNZ,
+ $ Z( JROW, INCOL+1+J2 ), LDZ,
+ $ WV( 1, 1+KZS ), LDWV )
+*
+* ==== Multiply by U12 ====
+*
+ CALL DLASET( 'ALL', JLEN, KZS, ZERO, ZERO, WV,
+ $ LDWV )
+ CALL DTRMM( 'R', 'U', 'N', 'N', JLEN, KNZ, ONE,
+ $ U( J2+1, 1+KZS ), LDU, WV( 1, 1+KZS ),
+ $ LDWV )
+*
+* ==== Multiply by U11 ====
+*
+ CALL DGEMM( 'N', 'N', JLEN, I2, J2, ONE,
+ $ Z( JROW, INCOL+1 ), LDZ, U, LDU, ONE,
+ $ WV, LDWV )
+*
+* ==== Copy left of Z to right of scratch ====
+*
+ CALL DLACPY( 'ALL', JLEN, J2, Z( JROW, INCOL+1 ),
+ $ LDZ, WV( 1, 1+I2 ), LDWV )
+*
+* ==== Multiply by U21 ====
+*
+ CALL DTRMM( 'R', 'L', 'N', 'N', JLEN, I4-I2, ONE,
+ $ U( 1, I2+1 ), LDU, WV( 1, 1+I2 ),
+ $ LDWV )
+*
+* ==== Multiply by U22 ====
+*
+ CALL DGEMM( 'N', 'N', JLEN, I4-I2, J4-J2, ONE,
+ $ Z( JROW, INCOL+1+J2 ), LDZ,
+ $ U( J2+1, I2+1 ), LDU, ONE,
+ $ WV( 1, 1+I2 ), LDWV )
+*
+* ==== Copy the result back to Z ====
+*
+ CALL DLACPY( 'ALL', JLEN, KDU, WV, LDWV,
+ $ Z( JROW, INCOL+1 ), LDZ )
+ 210 CONTINUE
+ END IF
+ END IF
+ END IF
+ 220 CONTINUE
+*
+* ==== End of DLAQR5 ====
+*
+ END
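Note on dlaqr5.f above: relative to the reference LAPACK 3.4.2 routine it is
derived from, the visible changes are confined to the reflector-application
loops (labels 30/40, 60, 70 and 80), which cache the components of each
Householder vector V(1:3,M) in local scalars and, in loops 60/70/80, update
two rows of H, U or Z per iteration, peeling one row when the trip count is
odd. The fixed-form sketch below shows the same transformation on a generic
three-column panel; APPLY3 and its argument layout are illustrative
assumptions, not part of LAPACK or OpenBLAS.

*     A minimal stand-alone sketch (an illustrative assumption, not an
*     OpenBLAS or LAPACK routine) of the scalar-cached, two-row-unrolled
*     reflector update used in loops 60/70/80: apply I - tau*v*v**T
*     from the right to M rows of a 3-column panel A, with
*     v = (1, V(2), V(3)) and tau = V(1).
      SUBROUTINE APPLY3( M, A, LDA, V )
      INTEGER            M, LDA
      DOUBLE PRECISION   A( LDA, * ), V( 3 )
      INTEGER            J, JSTART
      DOUBLE PRECISION   V1, V2, V3, S1, S2
*     Cache the reflector components in scalars (registers).
      V1 = V( 1 )
      V2 = V( 2 )
      V3 = V( 3 )
      JSTART = 1
*     Peel one row when the row count is odd, as the loops above do.
      IF( MOD( M, 2 ).NE.0 ) THEN
         S1 = V1*( A( 1, 1 )+V2*A( 1, 2 )+V3*A( 1, 3 ) )
         A( 1, 1 ) = A( 1, 1 ) - S1
         A( 1, 2 ) = A( 1, 2 ) - S1*V2
         A( 1, 3 ) = A( 1, 3 ) - S1*V3
         JSTART = 2
      END IF
*     Two independent row updates per pass expose more instruction-
*     level parallelism to the compiler and the FMA units.
      DO 10 J = JSTART, M, 2
         S1 = V1*( A( J, 1 )+V2*A( J, 2 )+V3*A( J, 3 ) )
         S2 = V1*( A( J+1, 1 )+V2*A( J+1, 2 )+V3*A( J+1, 3 ) )
         A( J, 1 ) = A( J, 1 ) - S1
         A( J+1, 1 ) = A( J+1, 1 ) - S2
         A( J, 2 ) = A( J, 2 ) - S1*V2
         A( J+1, 2 ) = A( J+1, 2 ) - S2*V2
         A( J, 3 ) = A( J, 3 ) - S1*V3
         A( J+1, 3 ) = A( J+1, 3 ) - S2*V3
   10 CONTINUE
      RETURN
      END

Holding the reflector components and the two row sums in scalars keeps them
in registers across the fused multiply-adds, which is the intent of the
hand-unrolled loops in the file above.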
diff --git a/kernel/power/dtrsm_logic_LT_16x4_power8.S b/kernel/power/dtrsm_logic_LT_16x4_power8.S
index 540a64062..04f5fdd90 100644
--- a/kernel/power/dtrsm_logic_LT_16x4_power8.S
+++ b/kernel/power/dtrsm_logic_LT_16x4_power8.S
@@ -54,29 +54,26 @@ DSTRM_LT_L4x16_LOOP_START:
addic. L, KK, 0
ble- DSTRM_LT_L4x16_SAVE
+ mtctr L
DSTRM_LT_L4x16_LOOP:
dcbt AO, PRE
dcbt BO, PRE
KERNEL_16x4
- addic. L, L, -1
- ble- DSTRM_LT_L4x16_SAVE
+ bdz- DSTRM_LT_L4x16_SAVE
dcbt AO, PRE
KERNEL_16x4
- addic. L, L, -1
- ble- DSTRM_LT_L4x16_SAVE
+ bdz- DSTRM_LT_L4x16_SAVE
dcbt AO, PRE
KERNEL_16x4
- addic. L, L, -1
- ble- DSTRM_LT_L4x16_SAVE
+ bdz- DSTRM_LT_L4x16_SAVE
dcbt AO, PRE
KERNEL_16x4
- addic. L, L, -1
- bgt+ DSTRM_LT_L4x16_LOOP
+ bdnz+ DSTRM_LT_L4x16_LOOP
DSTRM_LT_L4x16_SAVE:
diff --git a/kernel/power/dtrsm_macros_LT_16x4_power8.S b/kernel/power/dtrsm_macros_LT_16x4_power8.S
index 14e8402c9..dc47daa3a 100644
--- a/kernel/power/dtrsm_macros_LT_16x4_power8.S
+++ b/kernel/power/dtrsm_macros_LT_16x4_power8.S
@@ -44,10 +44,17 @@
lxvd2x vs0, o0, AO
+
+ lxvdsx vs16, o0, BO
+ lxvdsx vs17, o8, BO
+ lxvdsx vs18, o16, BO
+ lxvdsx vs19, o24, BO
+
lxvd2x vs1, o16, AO
lxvd2x vs2, o32, AO
lxvd2x vs3, o48, AO
+ addi BO, BO, 32
addi AO, AO, 64
lxvd2x vs4, o0, AO
@@ -57,13 +64,6 @@
addi AO, AO, 64
- lxvdsx vs16, o0, BO
- lxvdsx vs17, o8, BO
- lxvdsx vs18, o16, BO
- lxvdsx vs19, o24, BO
-
- addi BO, BO, 32
-
xvmaddadp vs32, vs0, vs16
xvmaddadp vs33, vs0, vs17
xvmaddadp vs34, vs0, vs18
@@ -287,52 +287,16 @@
.macro SOLVE_LT_16x4
+//############### LOAD B #######################
+
+ mr T1, BO
+ mr T4, BO
+
xxpermdi vs0, vs32, vs33, 0
xxpermdi vs1, vs34, vs35, 0
xxpermdi vs2, vs32, vs33, 3
xxpermdi vs3, vs34, vs35, 3
- xxpermdi vs4, vs36, vs37, 0
- xxpermdi vs5, vs38, vs39, 0
- xxpermdi vs6, vs36, vs37, 3
- xxpermdi vs7, vs38, vs39, 3
-
- xxpermdi vs8, vs40, vs41, 0
- xxpermdi vs9, vs42, vs43, 0
- xxpermdi vs10, vs40, vs41, 3
- xxpermdi vs11, vs42, vs43, 3
-
- xxpermdi vs12, vs44, vs45, 0
- xxpermdi vs13, vs46, vs47, 0
- xxpermdi vs14, vs44, vs45, 3
- xxpermdi vs15, vs46, vs47, 3
-
- xxpermdi vs16, vs48, vs49, 0
- xxpermdi vs17, vs50, vs51, 0
- xxpermdi vs18, vs48, vs49, 3
- xxpermdi vs19, vs50, vs51, 3
-
- xxpermdi vs20, vs52, vs53, 0
- xxpermdi vs21, vs54, vs55, 0
- xxpermdi vs22, vs52, vs53, 3
- xxpermdi vs23, vs54, vs55, 3
-
- xxpermdi vs24, vs56, vs57, 0
- xxpermdi vs25, vs58, vs59, 0
- xxpermdi vs26, vs56, vs57, 3
- xxpermdi vs27, vs58, vs59, 3
-
- xxpermdi vs28, vs60, vs61, 0
- xxpermdi vs29, vs62, vs63, 0
- xxpermdi vs30, vs60, vs61, 3
- xxpermdi vs31, vs62, vs63, 3
-
-
-//############### LOAD B #######################
-
-
- mr T1, BO
-
lxvd2x vs32, o0, T1
lxvd2x vs33, o16, T1
lxvd2x vs34, o32, T1
@@ -340,6 +304,11 @@
addi T1, T1, 64
+ xxpermdi vs4, vs36, vs37, 0
+ xxpermdi vs5, vs38, vs39, 0
+ xxpermdi vs6, vs36, vs37, 3
+ xxpermdi vs7, vs38, vs39, 3
+
lxvd2x vs36, o0, T1
lxvd2x vs37, o16, T1
lxvd2x vs38, o32, T1
@@ -347,6 +316,11 @@
addi T1, T1, 64
+ xxpermdi vs8, vs40, vs41, 0
+ xxpermdi vs9, vs42, vs43, 0
+ xxpermdi vs10, vs40, vs41, 3
+ xxpermdi vs11, vs42, vs43, 3
+
lxvd2x vs40, o0, T1
lxvd2x vs41, o16, T1
lxvd2x vs42, o32, T1
@@ -354,6 +328,11 @@
addi T1, T1, 64
+ xxpermdi vs12, vs44, vs45, 0
+ xxpermdi vs13, vs46, vs47, 0
+ xxpermdi vs14, vs44, vs45, 3
+ xxpermdi vs15, vs46, vs47, 3
+
lxvd2x vs44, o0, T1
lxvd2x vs45, o16, T1
lxvd2x vs46, o32, T1
@@ -361,6 +340,11 @@
addi T1, T1, 64
+ xxpermdi vs16, vs48, vs49, 0
+ xxpermdi vs17, vs50, vs51, 0
+ xxpermdi vs18, vs48, vs49, 3
+ xxpermdi vs19, vs50, vs51, 3
+
lxvd2x vs48, o0, T1
lxvd2x vs49, o16, T1
lxvd2x vs50, o32, T1
@@ -368,6 +352,11 @@
addi T1, T1, 64
+ xxpermdi vs20, vs52, vs53, 0
+ xxpermdi vs21, vs54, vs55, 0
+ xxpermdi vs22, vs52, vs53, 3
+ xxpermdi vs23, vs54, vs55, 3
+
lxvd2x vs52, o0, T1
lxvd2x vs53, o16, T1
lxvd2x vs54, o32, T1
@@ -375,6 +364,11 @@
addi T1, T1, 64
+ xxpermdi vs24, vs56, vs57, 0
+ xxpermdi vs25, vs58, vs59, 0
+ xxpermdi vs26, vs56, vs57, 3
+ xxpermdi vs27, vs58, vs59, 3
+
lxvd2x vs56, o0, T1
lxvd2x vs57, o16, T1
lxvd2x vs58, o32, T1
@@ -382,48 +376,27 @@
addi T1, T1, 64
+ xxpermdi vs28, vs60, vs61, 0
+ xxpermdi vs29, vs62, vs63, 0
+ xxpermdi vs30, vs60, vs61, 3
+ xxpermdi vs31, vs62, vs63, 3
+
+
+
lxvd2x vs60, o0, T1
lxvd2x vs61, o16, T1
lxvd2x vs62, o32, T1
lxvd2x vs63, o48, T1
+//############### OFFSET 0 #######################
+
+ dcbt AO, PRE
+ mr T1, AO
+
xvsubdp vs32, vs32, vs0
xvsubdp vs33, vs33, vs1
xvsubdp vs34, vs34, vs2
xvsubdp vs35, vs35, vs3
- xvsubdp vs36, vs36, vs4
- xvsubdp vs37, vs37, vs5
- xvsubdp vs38, vs38, vs6
- xvsubdp vs39, vs39, vs7
- xvsubdp vs40, vs40, vs8
- xvsubdp vs41, vs41, vs9
- xvsubdp vs42, vs42, vs10
- xvsubdp vs43, vs43, vs11
- xvsubdp vs44, vs44, vs12
- xvsubdp vs45, vs45, vs13
- xvsubdp vs46, vs46, vs14
- xvsubdp vs47, vs47, vs15
- xvsubdp vs48, vs48, vs16
- xvsubdp vs49, vs49, vs17
- xvsubdp vs50, vs50, vs18
- xvsubdp vs51, vs51, vs19
- xvsubdp vs52, vs52, vs20
- xvsubdp vs53, vs53, vs21
- xvsubdp vs54, vs54, vs22
- xvsubdp vs55, vs55, vs23
- xvsubdp vs56, vs56, vs24
- xvsubdp vs57, vs57, vs25
- xvsubdp vs58, vs58, vs26
- xvsubdp vs59, vs59, vs27
- xvsubdp vs60, vs60, vs28
- xvsubdp vs61, vs61, vs29
- xvsubdp vs62, vs62, vs30
- xvsubdp vs63, vs63, vs31
-
- mr T1, AO
-
-
-//############### OFFSET 0 #######################
lxvdsx vs0, o0, T1
lxvdsx vs1, o8, T1
@@ -432,6 +405,11 @@
addi T1, T1, 32
+ xvsubdp vs36, vs36, vs4
+ xvsubdp vs37, vs37, vs5
+ xvsubdp vs38, vs38, vs6
+ xvsubdp vs39, vs39, vs7
+
lxvdsx vs4, o0, T1
lxvdsx vs5, o8, T1
lxvdsx vs6, o16, T1
@@ -439,6 +417,11 @@
addi T1, T1, 32
+ xvsubdp vs40, vs40, vs8
+ xvsubdp vs41, vs41, vs9
+ xvsubdp vs42, vs42, vs10
+ xvsubdp vs43, vs43, vs11
+
lxvdsx vs8, o0, T1
lxvdsx vs9, o8, T1
lxvdsx vs10, o16, T1
@@ -446,6 +429,11 @@
addi T1, T1, 32
+ xvsubdp vs44, vs44, vs12
+ xvsubdp vs45, vs45, vs13
+ xvsubdp vs46, vs46, vs14
+ xvsubdp vs47, vs47, vs15
+
lxvdsx vs12, o0, T1
lxvdsx vs13, o8, T1
lxvdsx vs14, o16, T1
@@ -453,15 +441,48 @@
addi T1, T1, 32
+ xvsubdp vs48, vs48, vs16
+ xvsubdp vs49, vs49, vs17
+ xvsubdp vs50, vs50, vs18
+ xvsubdp vs51, vs51, vs19
+
+ xvsubdp vs52, vs52, vs20
+ xvsubdp vs53, vs53, vs21
+ xvsubdp vs54, vs54, vs22
+ xvsubdp vs55, vs55, vs23
+
+ xvsubdp vs56, vs56, vs24
+ xvsubdp vs57, vs57, vs25
+ xvsubdp vs58, vs58, vs26
+ xvsubdp vs59, vs59, vs27
+
+ xvsubdp vs60, vs60, vs28
+ xvsubdp vs61, vs61, vs29
+ xvsubdp vs62, vs62, vs30
+ xvsubdp vs63, vs63, vs31
+
+//############### OFFSET 1 #######################
+
+ addi T1, T1, 1*SIZE
+
xvmuldp vs32, vs32, vs0
xvmuldp vs33, vs33, vs0
xvnmsubadp vs34, vs32, vs1
xvnmsubadp vs35, vs33, vs1
xvnmsubadp vs36, vs32, vs2
+ dcbt T1, PRE
xvnmsubadp vs37, vs33, vs2
xvnmsubadp vs38, vs32, vs3
xvnmsubadp vs39, vs33, vs3
+
+ lxvdsx vs0, o0, T1
+ lxvdsx vs1, o8, T1
+ lxvdsx vs2, o16, T1
+ lxvdsx vs3, o24, T1
+
+ addi T1, T1, 32
+
xvnmsubadp vs40, vs32, vs4
xvnmsubadp vs41, vs33, vs4
xvnmsubadp vs42, vs32, vs5
@@ -470,6 +491,14 @@
xvnmsubadp vs45, vs33, vs6
xvnmsubadp vs46, vs32, vs7
xvnmsubadp vs47, vs33, vs7
+
+ lxvdsx vs4, o0, T1
+ lxvdsx vs5, o8, T1
+ lxvdsx vs6, o16, T1
+ lxvdsx vs7, o24, T1
+
+ addi T1, T1, 32
+
xvnmsubadp vs48, vs32, vs8
xvnmsubadp vs49, vs33, vs8
xvnmsubadp vs50, vs32, vs9
@@ -478,6 +507,14 @@
xvnmsubadp vs53, vs33, vs10
xvnmsubadp vs54, vs32, vs11
xvnmsubadp vs55, vs33, vs11
+
+ lxvdsx vs8, o0, T1
+ lxvdsx vs9, o8, T1
+ lxvdsx vs10, o16, T1
+ lxvdsx vs11, o24, T1
+
+ addi T1, T1, 32
+
xvnmsubadp vs56, vs32, vs12
xvnmsubadp vs57, vs33, vs12
xvnmsubadp vs58, vs32, vs13
@@ -487,9 +524,27 @@
xvnmsubadp vs62, vs32, vs15
xvnmsubadp vs63, vs33, vs15
-//############### OFFSET 1 #######################
- addi T1, T1, 1*SIZE
+ lxvdsx vs12, o0, T1
+ lxvdsx vs13, o8, T1
+ lxvdsx vs14, o16, T1
+
+ addi T1, T1, 24
+
+//############### OFFSET 2 #######################
+
+ xvmuldp vs34, vs34, vs0
+ xvmuldp vs35, vs35, vs0
+
+ addi T1, T1, 2*SIZE
+
+ xvnmsubadp vs36, vs34, vs1
+ xvnmsubadp vs37, vs35, vs1
+ xvnmsubadp vs38, vs34, vs2
+ dcbt T1, PRE
+ xvnmsubadp vs39, vs35, vs2
+ xvnmsubadp vs40, vs34, vs3
+ xvnmsubadp vs41, vs35, vs3
lxvdsx vs0, o0, T1
lxvdsx vs1, o8, T1
@@ -498,35 +553,6 @@
addi T1, T1, 32
- lxvdsx vs4, o0, T1
- lxvdsx vs5, o8, T1
- lxvdsx vs6, o16, T1
- lxvdsx vs7, o24, T1
-
- addi T1, T1, 32
-
- lxvdsx vs8, o0, T1
- lxvdsx vs9, o8, T1
- lxvdsx vs10, o16, T1
- lxvdsx vs11, o24, T1
-
- addi T1, T1, 32
-
- lxvdsx vs12, o0, T1
- lxvdsx vs13, o8, T1
- lxvdsx vs14, o16, T1
-
- addi T1, T1, 24
-
- xvmuldp vs34, vs34, vs0
- xvmuldp vs35, vs35, vs0
-
- xvnmsubadp vs36, vs34, vs1
- xvnmsubadp vs37, vs35, vs1
- xvnmsubadp vs38, vs34, vs2
- xvnmsubadp vs39, vs35, vs2
- xvnmsubadp vs40, vs34, vs3
- xvnmsubadp vs41, vs35, vs3
xvnmsubadp vs42, vs34, vs4
xvnmsubadp vs43, vs35, vs4
xvnmsubadp vs44, vs34, vs5
@@ -535,6 +561,14 @@
xvnmsubadp vs47, vs35, vs6
xvnmsubadp vs48, vs34, vs7
xvnmsubadp vs49, vs35, vs7
+
+ lxvdsx vs4, o0, T1
+ lxvdsx vs5, o8, T1
+ lxvdsx vs6, o16, T1
+ lxvdsx vs7, o24, T1
+
+ addi T1, T1, 32
+
xvnmsubadp vs50, vs34, vs8
xvnmsubadp vs51, vs35, vs8
xvnmsubadp vs52, vs34, vs9
@@ -543,30 +577,6 @@
xvnmsubadp vs55, vs35, vs10
xvnmsubadp vs56, vs34, vs11
xvnmsubadp vs57, vs35, vs11
- xvnmsubadp vs58, vs34, vs12
- xvnmsubadp vs59, vs35, vs12
- xvnmsubadp vs60, vs34, vs13
- xvnmsubadp vs61, vs35, vs13
- xvnmsubadp vs62, vs34, vs14
- xvnmsubadp vs63, vs35, vs14
-
-//############### OFFSET 2 #######################
-
- addi T1, T1, 2*SIZE
-
- lxvdsx vs0, o0, T1
- lxvdsx vs1, o8, T1
- lxvdsx vs2, o16, T1
- lxvdsx vs3, o24, T1
-
- addi T1, T1, 32
-
- lxvdsx vs4, o0, T1
- lxvdsx vs5, o8, T1
- lxvdsx vs6, o16, T1
- lxvdsx vs7, o24, T1
-
- addi T1, T1, 32
lxvdsx vs8, o0, T1
lxvdsx vs9, o8, T1
@@ -575,20 +585,40 @@
addi T1, T1, 32
+
+ xvnmsubadp vs58, vs34, vs12
+ xvnmsubadp vs59, vs35, vs12
+ xvnmsubadp vs60, vs34, vs13
+ xvnmsubadp vs61, vs35, vs13
+ xvnmsubadp vs62, vs34, vs14
+ xvnmsubadp vs63, vs35, vs14
+
lxvdsx vs12, o0, T1
lxvdsx vs13, o8, T1
addi T1, T1, 16
+//############### OFFSET 3 #######################
xvmuldp vs36, vs36, vs0
xvmuldp vs37, vs37, vs0
+ addi T1, T1, 3*SIZE
+
xvnmsubadp vs38, vs36, vs1
xvnmsubadp vs39, vs37, vs1
xvnmsubadp vs40, vs36, vs2
+ dcbt T1, PRE
xvnmsubadp vs41, vs37, vs2
xvnmsubadp vs42, vs36, vs3
xvnmsubadp vs43, vs37, vs3
+
+ lxvdsx vs0, o0, T1
+ lxvdsx vs1, o8, T1
+ lxvdsx vs2, o16, T1
+ lxvdsx vs3, o24, T1
+
+ addi T1, T1, 32
+
xvnmsubadp vs44, vs36, vs4
xvnmsubadp vs45, vs37, vs4
xvnmsubadp vs46, vs36, vs5
@@ -597,6 +627,14 @@
xvnmsubadp vs49, vs37, vs6
xvnmsubadp vs50, vs36, vs7
xvnmsubadp vs51, vs37, vs7
+
+ lxvdsx vs4, o0, T1
+ lxvdsx vs5, o8, T1
+ lxvdsx vs6, o16, T1
+ lxvdsx vs7, o24, T1
+
+ addi T1, T1, 32
+
xvnmsubadp vs52, vs36, vs8
xvnmsubadp vs53, vs37, vs8
xvnmsubadp vs54, vs36, vs9
@@ -605,28 +643,6 @@
xvnmsubadp vs57, vs37, vs10
xvnmsubadp vs58, vs36, vs11
xvnmsubadp vs59, vs37, vs11
- xvnmsubadp vs60, vs36, vs12
- xvnmsubadp vs61, vs37, vs12
- xvnmsubadp vs62, vs36, vs13
- xvnmsubadp vs63, vs37, vs13
-
-//############### OFFSET 3 #######################
-
- addi T1, T1, 3*SIZE
-
- lxvdsx vs0, o0, T1
- lxvdsx vs1, o8, T1
- lxvdsx vs2, o16, T1
- lxvdsx vs3, o24, T1
-
- addi T1, T1, 32
-
- lxvdsx vs4, o0, T1
- lxvdsx vs5, o8, T1
- lxvdsx vs6, o16, T1
- lxvdsx vs7, o24, T1
-
- addi T1, T1, 32
lxvdsx vs8, o0, T1
lxvdsx vs9, o8, T1
@@ -635,19 +651,43 @@
addi T1, T1, 32
+ xvnmsubadp vs60, vs36, vs12
+ xvnmsubadp vs61, vs37, vs12
+ xvnmsubadp vs62, vs36, vs13
+ xvnmsubadp vs63, vs37, vs13
+
lxvdsx vs12, o0, T1
+ stxvd2x vs32, o0, T4
+ stxvd2x vs33, o16, T4
+ stxvd2x vs34, o32, T4
+ stxvd2x vs35, o48, T4
+
+ addi T4, T4, 64
+
addi T1, T1, 8
+//############### OFFSET 4 #######################
xvmuldp vs38, vs38, vs0
xvmuldp vs39, vs39, vs0
+ addi T1, T1, 4*SIZE
+
xvnmsubadp vs40, vs38, vs1
xvnmsubadp vs41, vs39, vs1
xvnmsubadp vs42, vs38, vs2
+ dcbt T1, PRE
xvnmsubadp vs43, vs39, vs2
xvnmsubadp vs44, vs38, vs3
xvnmsubadp vs45, vs39, vs3
+
+ lxvdsx vs0, o0, T1
+ lxvdsx vs1, o8, T1
+ lxvdsx vs2, o16, T1
+ lxvdsx vs3, o24, T1
+
+ addi T1, T1, 32
+
xvnmsubadp vs46, vs38, vs4
xvnmsubadp vs47, vs39, vs4
xvnmsubadp vs48, vs38, vs5
@@ -656,27 +696,6 @@
xvnmsubadp vs51, vs39, vs6
xvnmsubadp vs52, vs38, vs7
xvnmsubadp vs53, vs39, vs7
- xvnmsubadp vs54, vs38, vs8
- xvnmsubadp vs55, vs39, vs8
- xvnmsubadp vs56, vs38, vs9
- xvnmsubadp vs57, vs39, vs9
- xvnmsubadp vs58, vs38, vs10
- xvnmsubadp vs59, vs39, vs10
- xvnmsubadp vs60, vs38, vs11
- xvnmsubadp vs61, vs39, vs11
- xvnmsubadp vs62, vs38, vs12
- xvnmsubadp vs63, vs39, vs12
-
-//############### OFFSET 4 #######################
-
- addi T1, T1, 4*SIZE
-
- lxvdsx vs0, o0, T1
- lxvdsx vs1, o8, T1
- lxvdsx vs2, o16, T1
- lxvdsx vs3, o24, T1
-
- addi T1, T1, 32
lxvdsx vs4, o0, T1
lxvdsx vs5, o8, T1
@@ -685,6 +704,16 @@
addi T1, T1, 32
+
+ xvnmsubadp vs54, vs38, vs8
+ xvnmsubadp vs55, vs39, vs8
+ xvnmsubadp vs56, vs38, vs9
+ xvnmsubadp vs57, vs39, vs9
+ xvnmsubadp vs58, vs38, vs10
+ xvnmsubadp vs59, vs39, vs10
+ xvnmsubadp vs60, vs38, vs11
+ xvnmsubadp vs61, vs39, vs11
+
lxvdsx vs8, o0, T1
lxvdsx vs9, o8, T1
lxvdsx vs10, o16, T1
@@ -692,15 +721,31 @@
addi T1, T1, 32
+ xvnmsubadp vs62, vs38, vs12
+ xvnmsubadp vs63, vs39, vs12
+
+
+//############### OFFSET 5 #######################
xvmuldp vs40, vs40, vs0
xvmuldp vs41, vs41, vs0
+ addi T1, T1, 5*SIZE
+
xvnmsubadp vs42, vs40, vs1
xvnmsubadp vs43, vs41, vs1
xvnmsubadp vs44, vs40, vs2
+ dcbt T1, PRE
xvnmsubadp vs45, vs41, vs2
xvnmsubadp vs46, vs40, vs3
xvnmsubadp vs47, vs41, vs3
+
+ lxvdsx vs0, o0, T1
+ lxvdsx vs1, o8, T1
+ lxvdsx vs2, o16, T1
+ lxvdsx vs3, o24, T1
+
+ addi T1, T1, 32
+
xvnmsubadp vs48, vs40, vs4
xvnmsubadp vs49, vs41, vs4
xvnmsubadp vs50, vs40, vs5
@@ -709,6 +754,14 @@
xvnmsubadp vs53, vs41, vs6
xvnmsubadp vs54, vs40, vs7
xvnmsubadp vs55, vs41, vs7
+
+ lxvdsx vs4, o0, T1
+ lxvdsx vs5, o8, T1
+ lxvdsx vs6, o16, T1
+ lxvdsx vs7, o24, T1
+
+ addi T1, T1, 32
+
xvnmsubadp vs56, vs40, vs8
xvnmsubadp vs57, vs41, vs8
xvnmsubadp vs58, vs40, vs9
@@ -718,9 +771,26 @@
xvnmsubadp vs62, vs40, vs11
xvnmsubadp vs63, vs41, vs11
-//############### OFFSET 5 #######################
- addi T1, T1, 5*SIZE
+ lxvdsx vs8, o0, T1
+ lxvdsx vs9, o8, T1
+ lxvdsx vs10, o16, T1
+
+ addi T1, T1, 24
+
+//############### OFFSET 6 #######################
+ xvmuldp vs42, vs42, vs0
+ xvmuldp vs43, vs43, vs0
+
+ addi T1, T1, 6*SIZE
+
+ xvnmsubadp vs44, vs42, vs1
+ xvnmsubadp vs45, vs43, vs1
+ xvnmsubadp vs46, vs42, vs2
+ dcbt T1, PRE
+ xvnmsubadp vs47, vs43, vs2
+ xvnmsubadp vs48, vs42, vs3
+ xvnmsubadp vs49, vs43, vs3
lxvdsx vs0, o0, T1
lxvdsx vs1, o8, T1
@@ -729,28 +799,6 @@
addi T1, T1, 32
- lxvdsx vs4, o0, T1
- lxvdsx vs5, o8, T1
- lxvdsx vs6, o16, T1
- lxvdsx vs7, o24, T1
-
- addi T1, T1, 32
-
- lxvdsx vs8, o0, T1
- lxvdsx vs9, o8, T1
- lxvdsx vs10, o16, T1
-
- addi T1, T1, 24
-
- xvmuldp vs42, vs42, vs0
- xvmuldp vs43, vs43, vs0
-
- xvnmsubadp vs44, vs42, vs1
- xvnmsubadp vs45, vs43, vs1
- xvnmsubadp vs46, vs42, vs2
- xvnmsubadp vs47, vs43, vs2
- xvnmsubadp vs48, vs42, vs3
- xvnmsubadp vs49, vs43, vs3
xvnmsubadp vs50, vs42, vs4
xvnmsubadp vs51, vs43, vs4
xvnmsubadp vs52, vs42, vs5
@@ -759,23 +807,6 @@
xvnmsubadp vs55, vs43, vs6
xvnmsubadp vs56, vs42, vs7
xvnmsubadp vs57, vs43, vs7
- xvnmsubadp vs58, vs42, vs8
- xvnmsubadp vs59, vs43, vs8
- xvnmsubadp vs60, vs42, vs9
- xvnmsubadp vs61, vs43, vs9
- xvnmsubadp vs62, vs42, vs10
- xvnmsubadp vs63, vs43, vs10
-
-//############### OFFSET 6 #######################
-
- addi T1, T1, 6*SIZE
-
- lxvdsx vs0, o0, T1
- lxvdsx vs1, o8, T1
- lxvdsx vs2, o16, T1
- lxvdsx vs3, o24, T1
-
- addi T1, T1, 32
lxvdsx vs4, o0, T1
lxvdsx vs5, o8, T1
@@ -784,20 +815,46 @@
addi T1, T1, 32
+ xvnmsubadp vs58, vs42, vs8
+ xvnmsubadp vs59, vs43, vs8
+ xvnmsubadp vs60, vs42, vs9
+ xvnmsubadp vs61, vs43, vs9
+ xvnmsubadp vs62, vs42, vs10
+ xvnmsubadp vs63, vs43, vs10
+
lxvdsx vs8, o0, T1
lxvdsx vs9, o8, T1
addi T1, T1, 16
+ stxvd2x vs36, o0, T4
+ stxvd2x vs37, o16, T4
+ stxvd2x vs38, o32, T4
+ stxvd2x vs39, o48, T4
+
+ addi T4, T4, 64
+
+//############### OFFSET 7 #######################
xvmuldp vs44, vs44, vs0
xvmuldp vs45, vs45, vs0
+ addi T1, T1, 7*SIZE
+
xvnmsubadp vs46, vs44, vs1
xvnmsubadp vs47, vs45, vs1
xvnmsubadp vs48, vs44, vs2
+ dcbt T1, PRE
xvnmsubadp vs49, vs45, vs2
xvnmsubadp vs50, vs44, vs3
xvnmsubadp vs51, vs45, vs3
+
+ lxvdsx vs0, o0, T1
+ lxvdsx vs1, o8, T1
+ lxvdsx vs2, o16, T1
+ lxvdsx vs3, o24, T1
+
+ addi T1, T1, 32
+
xvnmsubadp vs52, vs44, vs4
xvnmsubadp vs53, vs45, vs4
xvnmsubadp vs54, vs44, vs5
@@ -806,21 +863,6 @@
xvnmsubadp vs57, vs45, vs6
xvnmsubadp vs58, vs44, vs7
xvnmsubadp vs59, vs45, vs7
- xvnmsubadp vs60, vs44, vs8
- xvnmsubadp vs61, vs45, vs8
- xvnmsubadp vs62, vs44, vs9
- xvnmsubadp vs63, vs45, vs9
-
-//############### OFFSET 7 #######################
-
- addi T1, T1, 7*SIZE
-
- lxvdsx vs0, o0, T1
- lxvdsx vs1, o8, T1
- lxvdsx vs2, o16, T1
- lxvdsx vs3, o24, T1
-
- addi T1, T1, 32
lxvdsx vs4, o0, T1
lxvdsx vs5, o8, T1
@@ -829,19 +871,36 @@
addi T1, T1, 32
+ xvnmsubadp vs60, vs44, vs8
+ xvnmsubadp vs61, vs45, vs8
+ xvnmsubadp vs62, vs44, vs9
+ xvnmsubadp vs63, vs45, vs9
+
lxvdsx vs8, o0, T1
addi T1, T1, 8
+//############### OFFSET 8 #######################
xvmuldp vs46, vs46, vs0
xvmuldp vs47, vs47, vs0
+ addi T1, T1, 8*SIZE
+
xvnmsubadp vs48, vs46, vs1
xvnmsubadp vs49, vs47, vs1
xvnmsubadp vs50, vs46, vs2
+ dcbt T1, PRE
xvnmsubadp vs51, vs47, vs2
xvnmsubadp vs52, vs46, vs3
xvnmsubadp vs53, vs47, vs3
+
+ lxvdsx vs0, o0, T1
+ lxvdsx vs1, o8, T1
+ lxvdsx vs2, o16, T1
+ lxvdsx vs3, o24, T1
+
+ addi T1, T1, 32
+
xvnmsubadp vs54, vs46, vs4
xvnmsubadp vs55, vs47, vs4
xvnmsubadp vs56, vs46, vs5
@@ -850,19 +909,6 @@
xvnmsubadp vs59, vs47, vs6
xvnmsubadp vs60, vs46, vs7
xvnmsubadp vs61, vs47, vs7
- xvnmsubadp vs62, vs46, vs8
- xvnmsubadp vs63, vs47, vs8
-
-//############### OFFSET 8 #######################
-
- addi T1, T1, 8*SIZE
-
- lxvdsx vs0, o0, T1
- lxvdsx vs1, o8, T1
- lxvdsx vs2, o16, T1
- lxvdsx vs3, o24, T1
-
- addi T1, T1, 32
lxvdsx vs4, o0, T1
lxvdsx vs5, o8, T1
@@ -871,15 +917,38 @@
addi T1, T1, 32
+ stxvd2x vs40, o0, T4
+ stxvd2x vs41, o16, T4
+ stxvd2x vs42, o32, T4
+ stxvd2x vs43, o48, T4
+
+ addi T4, T4, 64
+
+ xvnmsubadp vs62, vs46, vs8
+ xvnmsubadp vs63, vs47, vs8
+
+
+//############### OFFSET 9 #######################
xvmuldp vs48, vs48, vs0
xvmuldp vs49, vs49, vs0
+ addi T1, T1, 9*SIZE
+
xvnmsubadp vs50, vs48, vs1
xvnmsubadp vs51, vs49, vs1
xvnmsubadp vs52, vs48, vs2
+ dcbt T1, PRE
xvnmsubadp vs53, vs49, vs2
xvnmsubadp vs54, vs48, vs3
xvnmsubadp vs55, vs49, vs3
+
+ lxvdsx vs0, o0, T1
+ lxvdsx vs1, o8, T1
+ lxvdsx vs2, o16, T1
+ lxvdsx vs3, o24, T1
+
+ addi T1, T1, 32
+
xvnmsubadp vs56, vs48, vs4
xvnmsubadp vs57, vs49, vs4
xvnmsubadp vs58, vs48, vs5
@@ -889,9 +958,25 @@
xvnmsubadp vs62, vs48, vs7
xvnmsubadp vs63, vs49, vs7
-//############### OFFSET 9 #######################
+ lxvdsx vs4, o0, T1
+ lxvdsx vs5, o8, T1
+ lxvdsx vs6, o16, T1
- addi T1, T1, 9*SIZE
+ addi T1, T1, 24
+
+//############### OFFSET 10 #######################
+ xvmuldp vs50, vs50, vs0
+ xvmuldp vs51, vs51, vs0
+
+ addi T1, T1, 10*SIZE
+
+ xvnmsubadp vs52, vs50, vs1
+ xvnmsubadp vs53, vs51, vs1
+ xvnmsubadp vs54, vs50, vs2
+ dcbt T1, PRE
+ xvnmsubadp vs55, vs51, vs2
+ xvnmsubadp vs56, vs50, vs3
+ xvnmsubadp vs57, vs51, vs3
lxvdsx vs0, o0, T1
lxvdsx vs1, o8, T1
@@ -900,21 +985,6 @@
addi T1, T1, 32
- lxvdsx vs4, o0, T1
- lxvdsx vs5, o8, T1
- lxvdsx vs6, o16, T1
-
- addi T1, T1, 24
-
- xvmuldp vs50, vs50, vs0
- xvmuldp vs51, vs51, vs0
-
- xvnmsubadp vs52, vs50, vs1
- xvnmsubadp vs53, vs51, vs1
- xvnmsubadp vs54, vs50, vs2
- xvnmsubadp vs55, vs51, vs2
- xvnmsubadp vs56, vs50, vs3
- xvnmsubadp vs57, vs51, vs3
xvnmsubadp vs58, vs50, vs4
xvnmsubadp vs59, vs51, vs4
xvnmsubadp vs60, vs50, vs5
@@ -922,66 +992,61 @@
xvnmsubadp vs62, vs50, vs6
xvnmsubadp vs63, vs51, vs6
-//############### OFFSET 10 #######################
-
- addi T1, T1, 10*SIZE
-
- lxvdsx vs0, o0, T1
- lxvdsx vs1, o8, T1
- lxvdsx vs2, o16, T1
- lxvdsx vs3, o24, T1
-
- addi T1, T1, 32
-
lxvdsx vs4, o0, T1
lxvdsx vs5, o8, T1
addi T1, T1, 16
+ stxvd2x vs44, o0, T4
+ stxvd2x vs45, o16, T4
+ stxvd2x vs46, o32, T4
+ stxvd2x vs47, o48, T4
+
+ addi T4, T4, 64
+
+//############### OFFSET 11 #######################
xvmuldp vs52, vs52, vs0
xvmuldp vs53, vs53, vs0
+ addi T1, T1, 11*SIZE
+
xvnmsubadp vs54, vs52, vs1
xvnmsubadp vs55, vs53, vs1
xvnmsubadp vs56, vs52, vs2
+ dcbt T1, PRE
xvnmsubadp vs57, vs53, vs2
xvnmsubadp vs58, vs52, vs3
xvnmsubadp vs59, vs53, vs3
+
+ lxvdsx vs0, o0, T1
+ lxvdsx vs1, o8, T1
+ lxvdsx vs2, o16, T1
+ lxvdsx vs3, o24, T1
+
+ addi T1, T1, 32
+
xvnmsubadp vs60, vs52, vs4
xvnmsubadp vs61, vs53, vs4
xvnmsubadp vs62, vs52, vs5
xvnmsubadp vs63, vs53, vs5
-//############### OFFSET 11 #######################
-
- addi T1, T1, 11*SIZE
-
- lxvdsx vs0, o0, T1
- lxvdsx vs1, o8, T1
- lxvdsx vs2, o16, T1
- lxvdsx vs3, o24, T1
-
- addi T1, T1, 32
-
lxvdsx vs4, o0, T1
addi T1, T1, 8
+//############### OFFSET 12 #######################
xvmuldp vs54, vs54, vs0
xvmuldp vs55, vs55, vs0
+ addi T1, T1, 12*SIZE
+
xvnmsubadp vs56, vs54, vs1
xvnmsubadp vs57, vs55, vs1
xvnmsubadp vs58, vs54, vs2
+ dcbt T1, PRE
xvnmsubadp vs59, vs55, vs2
xvnmsubadp vs60, vs54, vs3
xvnmsubadp vs61, vs55, vs3
- xvnmsubadp vs62, vs54, vs4
- xvnmsubadp vs63, vs55, vs4
-
-//############### OFFSET 12 #######################
-
- addi T1, T1, 12*SIZE
lxvdsx vs0, o0, T1
lxvdsx vs1, o8, T1
@@ -990,9 +1055,23 @@
addi T1, T1, 32
+ stxvd2x vs48, o0, T4
+ stxvd2x vs49, o16, T4
+ stxvd2x vs50, o32, T4
+ stxvd2x vs51, o48, T4
+
+ addi T4, T4, 64
+
+ xvnmsubadp vs62, vs54, vs4
+ xvnmsubadp vs63, vs55, vs4
+
+
+//############### OFFSET 13 #######################
xvmuldp vs56, vs56, vs0
xvmuldp vs57, vs57, vs0
+ addi T1, T1, 13*SIZE
+
xvnmsubadp vs58, vs56, vs1
xvnmsubadp vs59, vs57, vs1
xvnmsubadp vs60, vs56, vs2
@@ -1000,43 +1079,44 @@
xvnmsubadp vs62, vs56, vs3
xvnmsubadp vs63, vs57, vs3
-//############### OFFSET 13 #######################
-
- addi T1, T1, 13*SIZE
-
lxvdsx vs0, o0, T1
lxvdsx vs1, o8, T1
lxvdsx vs2, o16, T1
addi T1, T1, 24
+//############### OFFSET 14 #######################
xvmuldp vs58, vs58, vs0
xvmuldp vs59, vs59, vs0
+ addi T1, T1, 14*SIZE
+
xvnmsubadp vs60, vs58, vs1
xvnmsubadp vs61, vs59, vs1
xvnmsubadp vs62, vs58, vs2
xvnmsubadp vs63, vs59, vs2
-//############### OFFSET 14 #######################
-
- addi T1, T1, 14*SIZE
lxvdsx vs0, o0, T1
lxvdsx vs1, o8, T1
addi T1, T1, 16
+ stxvd2x vs52, o0, T4
+ stxvd2x vs53, o16, T4
+ stxvd2x vs54, o32, T4
+ stxvd2x vs55, o48, T4
+
+ addi T4, T4, 64
+//############### OFFSET 15 #######################
xvmuldp vs60, vs60, vs0
xvmuldp vs61, vs61, vs0
+ addi T1, T1, 15*SIZE
+
xvnmsubadp vs62, vs60, vs1
xvnmsubadp vs63, vs61, vs1
-//############### OFFSET 15 #######################
-
- addi T1, T1, 15*SIZE
-
lxvdsx vs0, o0, T1
addi T1, T1, 8
@@ -1048,62 +1128,18 @@
//############### SAVE B #######################
- mr T1, BO
+ stxvd2x vs56, o0, T4
+ stxvd2x vs57, o16, T4
+ stxvd2x vs58, o32, T4
+ stxvd2x vs59, o48, T4
- stxvd2x vs32, o0, T1
- stxvd2x vs33, o16, T1
- stxvd2x vs34, o32, T1
- stxvd2x vs35, o48, T1
+ addi T4, T4, 64
- addi T1, T1, 64
-
- stxvd2x vs36, o0, T1
- stxvd2x vs37, o16, T1
- stxvd2x vs38, o32, T1
- stxvd2x vs39, o48, T1
-
- addi T1, T1, 64
-
- stxvd2x vs40, o0, T1
- stxvd2x vs41, o16, T1
- stxvd2x vs42, o32, T1
- stxvd2x vs43, o48, T1
-
- addi T1, T1, 64
-
- stxvd2x vs44, o0, T1
- stxvd2x vs45, o16, T1
- stxvd2x vs46, o32, T1
- stxvd2x vs47, o48, T1
-
- addi T1, T1, 64
-
- stxvd2x vs48, o0, T1
- stxvd2x vs49, o16, T1
- stxvd2x vs50, o32, T1
- stxvd2x vs51, o48, T1
-
- addi T1, T1, 64
-
- stxvd2x vs52, o0, T1
- stxvd2x vs53, o16, T1
- stxvd2x vs54, o32, T1
- stxvd2x vs55, o48, T1
-
- addi T1, T1, 64
-
- stxvd2x vs56, o0, T1
- stxvd2x vs57, o16, T1
- stxvd2x vs58, o32, T1
- stxvd2x vs59, o48, T1
-
- addi T1, T1, 64
-
- stxvd2x vs60, o0, T1
- stxvd2x vs61, o16, T1
- stxvd2x vs62, o32, T1
- stxvd2x vs63, o48, T1
+ stxvd2x vs60, o0, T4
+ stxvd2x vs61, o16, T4
+ stxvd2x vs62, o32, T4
+ stxvd2x vs63, o48, T4
//############### SAVE C #######################
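For orientation on the SOLVE_LT_16x4 rewrite: the register block vs32..vs63
holds a 16-row by 4-column panel (two doubles per VSX register), and the
macro performs a forward elimination over those 16 rows, one offset of the
packed A panel at a time. The patch keeps that recurrence; it only
interleaves the lxvdsx loads for the next offset, the dcbt prefetches, and
the stxvd2x stores of rows that are already final with the
xvmuldp/xvnmsubadp work of the current offset, instead of doing all loads,
then all arithmetic, then all stores. The scalar sketch below is a hedged
reference for what one 16x4 block solve computes; FORW16 and its dense
layout are illustrative assumptions, and the kernel itself appears to
multiply by diagonal reciprocals prepared during packing rather than
dividing.

*     Hedged reference sketch of a 16x4 forward solve; FORW16 and its
*     storage layout are illustrative only, not part of OpenBLAS.
      SUBROUTINE FORW16( A, LDA, B, LDB )
      INTEGER            LDA, LDB
      DOUBLE PRECISION   A( LDA, * ), B( LDB, * )
      INTEGER            I, J, K
      DO 30 I = 1, 16
*        Scale row I of the right-hand sides by the diagonal (the
*        kernel multiplies by a prepared reciprocal instead).
         DO 10 J = 1, 4
            B( I, J ) = B( I, J ) / A( I, I )
   10    CONTINUE
*        Eliminate row I from all later rows.
         DO 25 K = I + 1, 16
            DO 20 J = 1, 4
               B( K, J ) = B( K, J ) - A( K, I )*B( I, J )
   20       CONTINUE
   25    CONTINUE
   30 CONTINUE
      RETURN
      END

Once a row pair is final and is no longer read by later offsets, its
stxvd2x can be issued early, which is what the interleaved stores to T4 in
the rescheduled macro do.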