diff --git a/kernel/power/sbgemv_common.c b/kernel/power/sbgemv_common.c
index c9438b7e6..ad040b371 100644
--- a/kernel/power/sbgemv_common.c
+++ b/kernel/power/sbgemv_common.c
@@ -111,13 +111,6 @@ FORCEINLINE vec_f32 vec_loadNHi_mult(vec_bf16 *in, vec_f32 v_inp0, BLASLONG n, v
   return (v_inp0 * v_in00);
 }
 
-FORCEINLINE vec_f32 vec_loadNHi_mult2(vec_f32 v_x0, vec_bf16 *in, BLASLONG n, vec_bf16 zero)
-{
-  vec_f32 v_in00 = vec_loadNHi(in, n, zero);
-
-  return (v_x0 * v_in00);
-}
-
 FORCEINLINE vec_f32 vec_loadNHi_vec(vec_bf16 *in, BLASLONG i, BLASLONG n, vec_bf16 zero)
 {
   return vec_loadNHi(&in[i], n, zero);
diff --git a/kernel/power/sbgemv_n_vsx.c b/kernel/power/sbgemv_n_vsx.c
index e8f6dca9f..390a87359 100644
--- a/kernel/power/sbgemv_n_vsx.c
+++ b/kernel/power/sbgemv_n_vsx.c
@@ -80,7 +80,7 @@ static void BF16GEMV_N_VSX_1(BLASLONG n, IFLOAT **ap, IFLOAT *xo, FLOAT *y, FLOA
   } else if (n) {
     vy0[0] = vec_loadN_f32(&v_y[(i * 2) + 0], n);
 
-    vy0[0] += vec_loadNHi_mult2(v_x0, &va0[i], n, zero);
+    vy0[0] += vec_loadNHi_mult(&va0[i], v_x0, n, zero);
 
     vec_storeN_f32(vy0[0], &v_y[(i * 2) + 0], n);
   }
@@ -131,8 +131,8 @@ static void BF16GEMV_N_VSX_2(BLASLONG n, IFLOAT **ap, IFLOAT *xo, FLOAT *y, FLOA
   } else if (n) {
     vy0[0] = vec_loadN_f32(&v_y[(i * 2) + 0], n);
 
-    vy0[0] += vec_loadNHi_mult2(v_x0, &va0[i], n, zero);
-    vy0[0] += vec_loadNHi_mult2(v_x1, &va1[i], n, zero);
+    vy0[0] += vec_loadNHi_mult(&va0[i], v_x0, n, zero);
+    vy0[0] += vec_loadNHi_mult(&va1[i], v_x1, n, zero);
 
     vec_storeN_f32(vy0[0], &v_y[(i * 2) + 0], n);
   }
@@ -193,10 +193,10 @@ static void BF16GEMV_N_VSX_4(BLASLONG n, IFLOAT **ap, IFLOAT *xo, FLOAT *y, FLOA
   } else if (n) {
     vy0[0] = vec_loadN_f32(&v_y[(i * 2) + 0], n);
 
-    vy0[0] += vec_loadNHi_mult2(v_x0, &va0[i], n, zero);
-    vy0[0] += vec_loadNHi_mult2(v_x1, &va1[i], n, zero);
-    vy0[0] += vec_loadNHi_mult2(v_x2, &va2[i], n, zero);
-    vy0[0] += vec_loadNHi_mult2(v_x3, &va3[i], n, zero);
+    vy0[0] += vec_loadNHi_mult(&va0[i], v_x0, n, zero);
+    vy0[0] += vec_loadNHi_mult(&va1[i], v_x1, n, zero);
+    vy0[0] += vec_loadNHi_mult(&va2[i], v_x2, n, zero);
+    vy0[0] += vec_loadNHi_mult(&va3[i], v_x3, n, zero);
 
     vec_storeN_f32(vy0[0], &v_y[(i * 2) + 0], n);
   }
@@ -281,14 +281,14 @@ static void BF16GEMV_N_VSX_8(BLASLONG n, IFLOAT **ap, IFLOAT *xo, FLOAT *y, BLAS
   } else if (n) {
     vy0[0] = vec_loadN_f32(&v_y[(i * 2) + 0], n);
 
-    vy0[0] += vec_loadNHi_mult2(v_x0, &va0[i], n, zero);
-    vy0[0] += vec_loadNHi_mult2(v_x1, &va1[i], n, zero);
-    vy0[0] += vec_loadNHi_mult2(v_x2, &va2[i], n, zero);
-    vy0[0] += vec_loadNHi_mult2(v_x3, &va3[i], n, zero);
-    vy0[0] += vec_loadNHi_mult2(v_x4, &vb0[i], n, zero);
-    vy0[0] += vec_loadNHi_mult2(v_x5, &vb1[i], n, zero);
-    vy0[0] += vec_loadNHi_mult2(v_x6, &vb2[i], n, zero);
-    vy0[0] += vec_loadNHi_mult2(v_x7, &vb3[i], n, zero);
+    vy0[0] += vec_loadNHi_mult(&va0[i], v_x0, n, zero);
+    vy0[0] += vec_loadNHi_mult(&va1[i], v_x1, n, zero);
+    vy0[0] += vec_loadNHi_mult(&va2[i], v_x2, n, zero);
+    vy0[0] += vec_loadNHi_mult(&va3[i], v_x3, n, zero);
+    vy0[0] += vec_loadNHi_mult(&vb0[i], v_x4, n, zero);
+    vy0[0] += vec_loadNHi_mult(&vb1[i], v_x5, n, zero);
+    vy0[0] += vec_loadNHi_mult(&vb2[i], v_x6, n, zero);
+    vy0[0] += vec_loadNHi_mult(&vb3[i], v_x7, n, zero);
 
     vec_storeN_f32(vy0[0], &v_y[(i * 2) + 0], n);
   }
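
Note: the removed vec_loadNHi_mult2(v_x, in, n, zero) and the surviving vec_loadNHi_mult(in, v_x, n, zero) both multiply v_x by vec_loadNHi(in, n, zero), so each call site above only needs its argument order swapped. Below is a minimal scalar sketch of that equivalence; loadNHi, loadNHi_mult, and loadNHi_mult2 are hypothetical stand-ins for the real VSX vector helpers, not code from this tree.

    #include <assert.h>

    /* Scalar stand-in for the bf16 load-and-widen helper (illustrative only). */
    static float loadNHi(const float *in) { return in[0]; }

    /* Mirrors the kept helper: array pointer first, multiplier second. */
    static float loadNHi_mult(const float *in, float v_x) { return v_x * loadNHi(in); }

    /* Mirrors the removed helper: multiplier first, array pointer second. */
    static float loadNHi_mult2(float v_x, const float *in) { return v_x * loadNHi(in); }

    int main(void)
    {
        float a[1] = { 3.0f };
        /* The (elementwise) product commutes, so swapping the argument
         * order at the call sites preserves the result. */
        assert(loadNHi_mult(a, 2.0f) == loadNHi_mult2(2.0f, a));
        return 0;
    }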