Merge pull request #1320 from timmoon10/develop

2D thread distribution for multi-threaded GEMMs
Authored by Martin Kroeker on 2017-10-08 23:31:33 +02:00, committed by GitHub
commit db72ad8f6a
1 changed file with 196 additions and 202 deletions


@@ -219,15 +219,17 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,
   FLOAT *buffer[DIVIDE_RATE];
   BLASLONG k, lda, ldb, ldc;
-  BLASLONG m_from, m_to, n_from, n_to, N_from, N_to;
+  BLASLONG m_from, m_to, n_from, n_to;
   FLOAT *alpha, *beta;
   FLOAT *a, *b, *c;
   job_t *job = (job_t *)args -> common;
-  BLASLONG xxx, bufferside;
-  BLASLONG ls, min_l, jjs, min_jj;
-  BLASLONG is, min_i, div_n;
+  BLASLONG nthreads_m;
+  BLASLONG mypos_m, mypos_n;
+  BLASLONG is, js, ls, bufferside, jjs;
+  BLASLONG min_i, min_l, div_n, min_jj;
   BLASLONG i, current;
   BLASLONG l1stride;
@@ -259,74 +261,69 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,
   alpha = (FLOAT *)args -> alpha;
   beta  = (FLOAT *)args -> beta;

+  /* Initialize 2D CPU distribution */
+  nthreads_m = args -> nthreads;
+  if (range_m) {
+    nthreads_m = range_m[-1];
+  }
+  mypos_n = blas_quickdivide(mypos, nthreads_m);  /* mypos_n = mypos / nthreads_m */
+  mypos_m = mypos - mypos_n * nthreads_m;         /* mypos_m = mypos % nthreads_m */
+
+  /* Initialize m and n */
   m_from = 0;
   m_to   = M;
   if (range_m) {
-    m_from = range_m[0];
-    m_to   = range_m[1];
+    m_from = range_m[mypos_m + 0];
+    m_to   = range_m[mypos_m + 1];
   }
   n_from = 0;
   n_to   = N;
-  N_from = 0;
-  N_to   = N;
   if (range_n) {
     n_from = range_n[mypos + 0];
     n_to   = range_n[mypos + 1];
-    N_from = range_n[0];
-    N_to   = range_n[args -> nthreads];
   }

+  /* Multiply C by beta if needed */
   if (beta) {
 #ifndef COMPLEX
     if (beta[0] != ONE)
 #else
     if ((beta[0] != ONE) || (beta[1] != ZERO))
 #endif
-      BETA_OPERATION(m_from, m_to, N_from, N_to, beta, c, ldc);
+      BETA_OPERATION(m_from, m_to, range_n[mypos_n * nthreads_m], range_n[(mypos_n + 1) * nthreads_m], beta, c, ldc);
   }

+  /* Return early if no more computation is needed */
   if ((k == 0) || (alpha == NULL)) return 0;
   if ((alpha[0] == ZERO)
 #ifdef COMPLEX
       && (alpha[1] == ZERO)
 #endif
       ) return 0;

-#if 0
-  fprintf(stderr, "Thread[%ld] m_from : %ld m_to : %ld n_from : %ld n_to : %ld N_from : %ld N_to : %ld\n",
-          mypos, m_from, m_to, n_from, n_to, N_from, N_to);
-  fprintf(stderr, "GEMM: P = %4ld Q = %4ld R = %4ld\n", (BLASLONG)GEMM_P, (BLASLONG)GEMM_Q, (BLASLONG)GEMM_R);
-#endif
-
+  /* Initialize workspace for local region of B */
   div_n = (n_to - n_from + DIVIDE_RATE - 1) / DIVIDE_RATE;
   buffer[0] = sb;
   for (i = 1; i < DIVIDE_RATE; i++) {
     buffer[i] = buffer[i - 1] + GEMM_Q * ((div_n + GEMM_UNROLL_N - 1)/GEMM_UNROLL_N) * GEMM_UNROLL_N * COMPSIZE;
   }

+  /* Iterate through steps of k */
   for(ls = 0; ls < k; ls += min_l){

+    /* Determine step size in k */
     min_l = k - ls;
     if (min_l >= GEMM_Q * 2) {
       min_l = GEMM_Q;
     } else {
       if (min_l > GEMM_Q) min_l = (min_l + 1) / 2;
     }

+    /* Determine step size in m
+     * Note: We are currently on the first step in m
+     */
     l1stride = 1;
     min_i = m_to - m_from;
     if (min_i >= GEMM_P * 2) {
       min_i = GEMM_P;
     } else {
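
Note: the hunk above is the heart of the change. Each worker's linear index mypos is decomposed into a position (mypos_m, mypos_n) on an nthreads_m x nthreads_n grid; range_m is then indexed by mypos_m and range_n by mypos, so every thread owns one m-partition and one n-partition of C, and the beta scaling covers only the columns owned by its column group. A minimal standalone sketch of the index math, with made-up thread counts and plain integer division standing in for blas_quickdivide:

#include <stdio.h>

int main(void) {
  long nthreads_m = 2, nthreads_n = 3;            /* illustrative 2x3 thread grid */
  for (long mypos = 0; mypos < nthreads_m * nthreads_n; mypos++) {
    long mypos_n = mypos / nthreads_m;            /* column group in n */
    long mypos_m = mypos - mypos_n * nthreads_m;  /* row group in m */
    printf("thread %ld -> (mypos_m, mypos_n) = (%ld, %ld)\n",
           mypos, mypos_m, mypos_n);
  }
  return 0;
}
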
@@ -337,54 +334,49 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,
       }
     }

+    /* Copy local region of A into workspace */
     START_RPCC();
     ICOPY_OPERATION(min_l, min_i, a, lda, ls, m_from, sa);
     STOP_RPCC(copy_A);

+    /* Copy local region of B into workspace and apply kernel */
     div_n = (n_to - n_from + DIVIDE_RATE - 1) / DIVIDE_RATE;
-    for (xxx = n_from, bufferside = 0; xxx < n_to; xxx += div_n, bufferside ++) {
+    for (js = n_from, bufferside = 0; js < n_to; js += div_n, bufferside ++) {

+      /* Make sure if no one is using workspace */
       START_RPCC();
-      /* Make sure if no one is using buffer */
       for (i = 0; i < args -> nthreads; i++)
         while (job[mypos].working[i][CACHE_LINE_SIZE * bufferside]) {YIELDING;};
       STOP_RPCC(waiting1);

 #if defined(FUSED_GEMM) && !defined(TIMING)
-      FUSED_KERNEL_OPERATION(min_i, MIN(n_to, xxx + div_n) - xxx, min_l, alpha,
-                             sa, buffer[bufferside], b, ldb, c, ldc, m_from, xxx, ls);
+      /* Fused operation to copy region of B into workspace and apply kernel */
+      FUSED_KERNEL_OPERATION(min_i, MIN(n_to, js + div_n) - js, min_l, alpha,
+                             sa, buffer[bufferside], b, ldb, c, ldc, m_from, js, ls);
 #else
-      for(jjs = xxx; jjs < MIN(n_to, xxx + div_n); jjs += min_jj){
-        min_jj = MIN(n_to, xxx + div_n) - jjs;
+      /* Split local region of B into parts */
+      for(jjs = js; jjs < MIN(n_to, js + div_n); jjs += min_jj){
+        min_jj = MIN(n_to, js + div_n) - jjs;
         if (min_jj >= 3*GEMM_UNROLL_N) min_jj = 3*GEMM_UNROLL_N;
         else
           if (min_jj >= 2*GEMM_UNROLL_N) min_jj = 2*GEMM_UNROLL_N;
           else
             if (min_jj > GEMM_UNROLL_N) min_jj = GEMM_UNROLL_N;

+        /* Copy part of local region of B into workspace */
         START_RPCC();
         OCOPY_OPERATION(min_l, min_jj, b, ldb, ls, jjs,
-                        buffer[bufferside] + min_l * (jjs - xxx) * COMPSIZE * l1stride);
+                        buffer[bufferside] + min_l * (jjs - js) * COMPSIZE * l1stride);
         STOP_RPCC(copy_B);

+        /* Apply kernel with local region of A and part of local region of B */
         START_RPCC();
         KERNEL_OPERATION(min_i, min_jj, min_l, alpha,
-                         sa, buffer[bufferside] + min_l * (jjs - xxx) * COMPSIZE * l1stride,
+                         sa, buffer[bufferside] + min_l * (jjs - js) * COMPSIZE * l1stride,
                          c, ldc, m_from, jjs);
         STOP_RPCC(kernel);

 #ifdef TIMING
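
Note: inside the renamed js loop, each DIVIDE_RATE slice of the local B region is copied and consumed in strips of at most 3*GEMM_UNROLL_N columns. A small sketch of that strip-width rule, with GEMM_UNROLL_N = 4 as a made-up value:

#define GEMM_UNROLL_N 4   /* illustrative value only */

/* Width of the next strip of B, given the columns still left in this slice */
static long next_jj_step(long remaining) {
  long min_jj = remaining;
  if (min_jj >= 3 * GEMM_UNROLL_N)      min_jj = 3 * GEMM_UNROLL_N;
  else if (min_jj >= 2 * GEMM_UNROLL_N) min_jj = 2 * GEMM_UNROLL_N;
  else if (min_jj > GEMM_UNROLL_N)      min_jj = GEMM_UNROLL_N;
  return min_jj;   /* a remainder of GEMM_UNROLL_N or less is taken as-is */
}
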
@@ -394,51 +386,54 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,
       }
 #endif

-      for (i = 0; i < args -> nthreads; i++) job[mypos].working[i][CACHE_LINE_SIZE * bufferside] = (BLASLONG)buffer[bufferside];
+      /* Set flag so other threads can access local region of B */
+      for (i = mypos_n * nthreads_m; i < (mypos_n + 1) * nthreads_m; i++)
+        job[mypos].working[i][CACHE_LINE_SIZE * bufferside] = (BLASLONG)buffer[bufferside];
       WMB;
     }

+    /* Get regions of B from other threads and apply kernel */
     current = mypos;
     do {

+      /* This thread accesses regions of B from threads in the range
+       * [ mypos_n * nthreads_m, (mypos_n+1) * nthreads_m ) */
       current ++;
-      if (current >= args -> nthreads) current = 0;
+      if (current >= (mypos_n + 1) * nthreads_m) current = mypos_n * nthreads_m;

+      /* Split other region of B into parts */
       div_n = (range_n[current + 1] - range_n[current] + DIVIDE_RATE - 1) / DIVIDE_RATE;
-      for (xxx = range_n[current], bufferside = 0; xxx < range_n[current + 1]; xxx += div_n, bufferside ++) {
+      for (js = range_n[current], bufferside = 0; js < range_n[current + 1]; js += div_n, bufferside ++) {
         if (current != mypos) {

+          /* Wait until other region of B is initialized */
           START_RPCC();
-          /* thread has to wait */
           while(job[current].working[mypos][CACHE_LINE_SIZE * bufferside] == 0) {YIELDING;};
           STOP_RPCC(waiting2);

+          /* Apply kernel with local region of A and part of other region of B */
           START_RPCC();
-          KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - xxx, div_n), min_l, alpha,
+          KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - js, div_n), min_l, alpha,
                            sa, (FLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside],
-                           c, ldc, m_from, xxx);
+                           c, ldc, m_from, js);
           STOP_RPCC(kernel);

 #ifdef TIMING
-          ops += 2 * min_i * MIN(range_n[current + 1] - xxx, div_n) * min_l;
+          ops += 2 * min_i * MIN(range_n[current + 1] - js, div_n) * min_l;
 #endif
         }

+        /* Clear synchronization flag if this thread is done with other region of B */
         if (m_to - m_from == min_i) {
           job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0;
         }
       }
     } while (current != mypos);

+    /* Iterate through steps of m
+     * Note: First step has already been finished */
     for(is = m_from + min_i; is < m_to; is += min_i){
       min_i = m_to - is;
       if (min_i >= GEMM_P * 2) {
         min_i = GEMM_P;
       } else
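
Note: a thread now publishes its packed B panels only to the nthreads_m threads of its own column group, and the consumer loop walks the ring [ mypos_n * nthreads_m, (mypos_n+1) * nthreads_m ) instead of all of 0..nthreads. A standalone sketch of that ring walk, with illustrative values:

#include <stdio.h>

int main(void) {
  long nthreads_m = 2, mypos = 3;   /* thread 3 of a grid with 2 threads per column group */
  long mypos_n = mypos / nthreads_m;
  long current = mypos;
  do {
    current++;
    if (current >= (mypos_n + 1) * nthreads_m) current = mypos_n * nthreads_m;
    if (current != mypos)
      printf("thread %ld consumes B panels packed by thread %ld\n", mypos, current);
  } while (current != mypos);
  return 0;
}
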
@@ -446,40 +441,41 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,
         min_i = (((min_i + 1) / 2 + GEMM_UNROLL_M - 1)/GEMM_UNROLL_M) * GEMM_UNROLL_M;
       }

+      /* Copy local region of A into workspace */
       START_RPCC();
       ICOPY_OPERATION(min_l, min_i, a, lda, ls, is, sa);
       STOP_RPCC(copy_A);

+      /* Get regions of B and apply kernel */
       current = mypos;
       do {

+        /* Split region of B into parts and apply kernel */
         div_n = (range_n[current + 1] - range_n[current] + DIVIDE_RATE - 1) / DIVIDE_RATE;
-        for (xxx = range_n[current], bufferside = 0; xxx < range_n[current + 1]; xxx += div_n, bufferside ++) {
+        for (js = range_n[current], bufferside = 0; js < range_n[current + 1]; js += div_n, bufferside ++) {

+          /* Apply kernel with local region of A and part of region of B */
           START_RPCC();
-          KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - xxx, div_n), min_l, alpha,
+          KERNEL_OPERATION(min_i, MIN(range_n[current + 1] - js, div_n), min_l, alpha,
                            sa, (FLOAT *)job[current].working[mypos][CACHE_LINE_SIZE * bufferside],
-                           c, ldc, is, xxx);
+                           c, ldc, is, js);
           STOP_RPCC(kernel);

 #ifdef TIMING
-          ops += 2 * min_i * MIN(range_n[current + 1] - xxx, div_n) * min_l;
+          ops += 2 * min_i * MIN(range_n[current + 1] - js, div_n) * min_l;
 #endif

+          /* Clear synchronization flag if this thread is done with region of B */
           if (is + min_i >= m_to) {
-            /* Thread doesn't need this buffer any more */
             job[current].working[mypos][CACHE_LINE_SIZE * bufferside] &= 0;
             WMB;
           }
         }

+        /* This thread accesses regions of B from threads in the range
+         * [ mypos_n * nthreads_m, (mypos_n+1) * nthreads_m ) */
         current ++;
-        if (current >= args -> nthreads) current = 0;
+        if (current >= (mypos_n + 1) * nthreads_m) current = mypos_n * nthreads_m;

       } while (current != mypos);
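
Note: the loops above block m into chunks of at most GEMM_P rows; an oversized tail is split roughly in half and rounded up to the unroll factor so neither piece degenerates. A sketch of that blocking rule as it appears in the visible context, with GEMM_P = 8 and GEMM_UNROLL_M = 4 as made-up values:

#define GEMM_P 8          /* illustrative values only */
#define GEMM_UNROLL_M 4

static long next_m_step(long remaining) {
  long min_i = remaining;
  if (min_i >= GEMM_P * 2) {
    min_i = GEMM_P;
  } else if (min_i > GEMM_P) {
    /* split the tail in half, rounded up to a multiple of the unroll factor */
    min_i = (((min_i + 1) / 2 + GEMM_UNROLL_M - 1) / GEMM_UNROLL_M) * GEMM_UNROLL_M;
  }
  return min_i;
}
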
@@ -487,14 +483,13 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,
   }

+  /* Wait until all other threads are done with local region of B */
   START_RPCC();
   for (i = 0; i < args -> nthreads; i++) {
-    for (xxx = 0; xxx < DIVIDE_RATE; xxx++) {
-      while (job[mypos].working[i][CACHE_LINE_SIZE * xxx] ) {YIELDING;};
+    for (js = 0; js < DIVIDE_RATE; js++) {
+      while (job[mypos].working[i][CACHE_LINE_SIZE * js] ) {YIELDING;};
     }
   }
   STOP_RPCC(waiting3);

 #ifdef TIMING
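
Note: before returning, each thread spin-waits until every flag it set for its own B panels has been cleared again by the consumers. A simplified sketch of that release barrier; the flag array shape mirrors job[mypos].working, with types and sizes simplified:

#include <stdint.h>

#define MAX_CPU_NUMBER 4   /* illustrative values only */
#define DIVIDE_RATE 2

static void wait_until_released(volatile uintptr_t flags[MAX_CPU_NUMBER][DIVIDE_RATE],
                                long nthreads) {
  for (long i = 0; i < nthreads; i++)
    for (long js = 0; js < DIVIDE_RATE; js++)
      while (flags[i][js] != 0) { /* YIELDING in the real code */ }
}
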
@@ -507,17 +502,6 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,
           (double)waiting2 /(double)total * 100.,
           (double)waiting3 /(double)total * 100.,
           (double)ops/(double)kernel / 4. * 100.);
-
-#if 0
-  fprintf(stderr, "GEMM [%2ld] Copy_A : %6.2ld Copy_B : %6.2ld Wait : %6.2ld\n",
-          mypos, copy_A, copy_B, waiting);
-  fprintf(stderr, "Waiting[%2ld] %6.2f %6.2f %6.2f\n",
-          mypos,
-          (double)waiting1/(double)waiting * 100.,
-          (double)waiting2/(double)waiting * 100.,
-          (double)waiting3/(double)waiting * 100.);
-#endif
   fprintf(stderr, "\n");
 #endif
@@ -525,7 +509,8 @@ static int inner_thread(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n,
 }

 static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG
-                       *range_n, FLOAT *sa, FLOAT *sb, BLASLONG mypos){
+                       *range_n, FLOAT *sa, FLOAT *sb,
+                       BLASLONG nthreads_m, BLASLONG nthreads_n) {

   blas_arg_t newarg;
@@ -537,10 +522,10 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG
   blas_queue_t queue[MAX_CPU_NUMBER];

-  BLASLONG range_M[MAX_CPU_NUMBER + 1];
-  BLASLONG range_N[MAX_CPU_NUMBER + 1];
-  BLASLONG num_cpu_m, num_cpu_n;
+  BLASLONG range_M_buffer[MAX_CPU_NUMBER + 2];
+  BLASLONG range_N_buffer[MAX_CPU_NUMBER + 2];
+  BLASLONG *range_M, *range_N;
+  BLASLONG num_parts;

   BLASLONG nthreads = args -> nthreads;
@@ -548,6 +533,7 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG
   BLASLONG m, n, n_from, n_to;
   int mode;

+  /* Get execution mode */
 #ifndef COMPLEX
 #ifdef XDOUBLE
   mode = BLAS_XDOUBLE | BLAS_REAL | BLAS_NODE;
@@ -566,6 +552,16 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG
 #endif
 #endif

+#ifdef USE_ALLOC_HEAP
+  /* Dynamically allocate workspace */
+  job = (job_t*)malloc(MAX_CPU_NUMBER * sizeof(job_t));
+  if(job==NULL){
+    fprintf(stderr, "OpenBLAS: malloc failed in %s\n", __func__);
+    exit(1);
+  }
+#endif
+
+  /* Initialize struct for arguments */
   newarg.m = args -> m;
   newarg.n = args -> n;
   newarg.k = args -> k;
@@ -578,23 +574,19 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG
   newarg.alpha = args -> alpha;
   newarg.beta = args -> beta;
   newarg.nthreads = args -> nthreads;
-
-#ifdef USE_ALLOC_HEAP
-  job = (job_t*)malloc(MAX_CPU_NUMBER * sizeof(job_t));
-  if(job==NULL){
-    fprintf(stderr, "OpenBLAS: malloc failed in %s\n", __func__);
-    exit(1);
-  }
-#endif
-
   newarg.common = (void *)job;

 #ifdef PARAMTEST
   newarg.gemm_p = args -> gemm_p;
   newarg.gemm_q = args -> gemm_q;
   newarg.gemm_r = args -> gemm_r;
 #endif

+  /* Initialize partitions in m and n
+   * Note: The number of CPU partitions is stored in the -1 entry */
+  range_M = &range_M_buffer[1];
+  range_N = &range_N_buffer[1];
+  range_M[-1] = nthreads_m;
+  range_N[-1] = nthreads_n;
+
   if (!range_m) {
     range_M[0] = 0;
     m = args -> m;
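
Note: range_M and range_N now point one element into their backing buffers, so index -1 can carry the partition count that inner_thread reads back as range_m[-1]. A tiny sketch of that layout, with an illustrative size and boundaries:

#include <stdio.h>

#define MAX_CPU_NUMBER 4   /* illustrative value only */

int main(void) {
  long range_M_buffer[MAX_CPU_NUMBER + 2];
  long *range_M = &range_M_buffer[1];

  range_M[-1] = 2;     /* number of m partitions (nthreads_m) */
  range_M[0]  = 0;     /* partition boundaries follow */
  range_M[1]  = 100;
  range_M[2]  = 200;

  printf("%ld partitions; partition 0 covers rows [%ld, %ld)\n",
         range_M[-1], range_M[0], range_M[1]);
  return 0;
}
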
@@ -603,34 +595,35 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG
     m = range_m[1] - range_m[0];
   }

-  num_cpu_m = 0;
+  /* Partition m into nthreads_m regions */
+  num_parts = 0;
   while (m > 0){
-    width = blas_quickdivide(m + nthreads - num_cpu_m - 1, nthreads - num_cpu_m);
+    width = blas_quickdivide(m + nthreads_m - num_parts - 1, nthreads_m - num_parts);
     m -= width;
     if (m < 0) width = width + m;
-    range_M[num_cpu_m + 1] = range_M[num_cpu_m] + width;
-    num_cpu_m ++;
+    range_M[num_parts + 1] = range_M[num_parts] + width;
+    num_parts ++;
+  }
+  for (i = num_parts; i < MAX_CPU_NUMBER; i++) {
+    range_M[i + 1] = range_M[num_parts];
   }

-  for (i = 0; i < num_cpu_m; i++) {
+  /* Initialize parameters for parallel execution */
+  for (i = 0; i < nthreads; i++) {
     queue[i].mode = mode;
     queue[i].routine = inner_thread;
     queue[i].args = &newarg;
-    queue[i].range_m = &range_M[i];
-    queue[i].range_n = &range_N[0];
+    queue[i].range_m = range_M;
+    queue[i].range_n = range_N;
     queue[i].sa = NULL;
     queue[i].sb = NULL;
     queue[i].next = &queue[i + 1];
   }

   queue[0].sa = sa;
   queue[0].sb = sb;
+  queue[nthreads - 1].next = NULL;

+  /* Iterate through steps of n */
   if (!range_n) {
     n_from = 0;
     n_to = args -> n;
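
Note: the partition loop above divides the m rows over nthreads_m pieces, rounding each piece up so the last one absorbs any shortfall, then pads the unused boundary slots. A standalone sketch with plain division in place of blas_quickdivide and made-up sizes:

#include <stdio.h>

int main(void) {
  long m = 1000, nthreads_m = 3;
  long range_M[8] = {0};
  long num_parts = 0;

  while (m > 0) {
    long width = (m + nthreads_m - num_parts - 1) / (nthreads_m - num_parts);
    m -= width;
    if (m < 0) width = width + m;
    range_M[num_parts + 1] = range_M[num_parts] + width;
    num_parts++;
  }
  for (long i = 0; i <= num_parts; i++) printf("%ld ", range_M[i]);
  printf("\n");   /* prints: 0 334 667 1000 */
  return 0;
}
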
@@ -638,38 +631,38 @@ static int gemm_driver(blas_arg_t *args, BLASLONG *range_m, BLASLONG
     n_from = range_n[0];
     n_to = range_n[1];
   }

   for(js = n_from; js < n_to; js += GEMM_R * nthreads){
     n = n_to - js;
     if (n > GEMM_R * nthreads) n = GEMM_R * nthreads;

+    /* Partition (a step of) n into nthreads regions */
     range_N[0] = js;
-    num_cpu_n = 0;
+    num_parts = 0;
     while (n > 0){
-      width = blas_quickdivide(n + nthreads - num_cpu_n - 1, nthreads - num_cpu_n);
+      width = blas_quickdivide(n + nthreads - num_parts - 1, nthreads - num_parts);
+      if (width < SWITCH_RATIO) {
+        width = SWITCH_RATIO;
+      }
       n -= width;
       if (n < 0) width = width + n;
-      range_N[num_cpu_n + 1] = range_N[num_cpu_n] + width;
-      num_cpu_n ++;
+      range_N[num_parts + 1] = range_N[num_parts] + width;
+      num_parts ++;
+    }
+    for (j = num_parts; j < MAX_CPU_NUMBER; j++) {
+      range_N[j + 1] = range_N[num_parts];
     }

-    for (j = 0; j < num_cpu_m; j++) {
-      for (i = 0; i < num_cpu_m; i++) {
+    /* Clear synchronization flags */
+    for (i = 0; i < MAX_CPU_NUMBER; i++) {
+      for (j = 0; j < MAX_CPU_NUMBER; j++) {
         for (k = 0; k < DIVIDE_RATE; k++) {
-          job[j].working[i][CACHE_LINE_SIZE * k] = 0;
+          job[i].working[j][CACHE_LINE_SIZE * k] = 0;
         }
       }
     }

-    queue[num_cpu_m - 1].next = NULL;
-    exec_blas(num_cpu_m, queue);
+    /* Execute parallel computation */
+    exec_blas(nthreads, queue);
   }

 #ifdef USE_ALLOC_HEAP
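
Note: the n partitioning now enforces a minimum width of SWITCH_RATIO columns per piece, so for a narrow n some trailing partitions come out empty and their boundaries are pinned to the last real one (the padding loop fills the unused slots). A sketch with SWITCH_RATIO = 16 as a made-up value and plain division in place of blas_quickdivide:

#include <stdio.h>

#define SWITCH_RATIO 16     /* illustrative values only */
#define MAX_CPU_NUMBER 4

int main(void) {
  long n = 40, nthreads = 4;
  long range_N[MAX_CPU_NUMBER + 1] = {0};
  long num_parts = 0;

  while (n > 0) {
    long width = (n + nthreads - num_parts - 1) / (nthreads - num_parts);
    if (width < SWITCH_RATIO) width = SWITCH_RATIO;
    n -= width;
    if (n < 0) width = width + n;
    range_N[num_parts + 1] = range_N[num_parts] + width;
    num_parts++;
  }
  for (long j = num_parts; j < MAX_CPU_NUMBER; j++)
    range_N[j + 1] = range_N[num_parts];

  for (long j = 0; j <= MAX_CPU_NUMBER; j++) printf("%ld ", range_N[j]);
  printf("\n");   /* prints: 0 16 32 40 40 */
  return 0;
}
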
@@ -683,42 +676,43 @@ int CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLO
   BLASLONG m = args -> m;
   BLASLONG n = args -> n;
-  BLASLONG nthreads = args -> nthreads;
-
-  if (nthreads == 1) {
-    GEMM_LOCAL(args, range_m, range_n, sa, sb, 0);
-    return 0;
-  }
+  BLASLONG nthreads_m, nthreads_n;

+  /* Get dimensions from index ranges if available */
   if (range_m) {
-    BLASLONG m_from = *(((BLASLONG *)range_m) + 0);
-    BLASLONG m_to   = *(((BLASLONG *)range_m) + 1);
-    m = m_to - m_from;
+    m = range_m[1] - range_m[0];
   }
   if (range_n) {
-    BLASLONG n_from = *(((BLASLONG *)range_n) + 0);
-    BLASLONG n_to   = *(((BLASLONG *)range_n) + 1);
-    n = n_to - n_from;
+    n = range_n[1] - range_n[0];
   }

-  if ((m < 2 * SWITCH_RATIO) || (n < 2 * SWITCH_RATIO)) {
-    GEMM_LOCAL(args, range_m, range_n, sa, sb, 0);
-    return 0;
-  }
-
-  if (m < nthreads * SWITCH_RATIO) {
-    nthreads = blas_quickdivide(m, SWITCH_RATIO);
-  }
-  if (n < nthreads * SWITCH_RATIO) {
-    nthreads = blas_quickdivide(n, SWITCH_RATIO);
-  }
-
-  args -> nthreads = nthreads;
-
-  gemm_driver(args, range_m, range_n, sa, sb, 0);
+  /* Partitions in m should have at least SWITCH_RATIO rows */
+  if (m < 2 * SWITCH_RATIO) {
+    nthreads_m = 1;
+  } else {
+    nthreads_m = args -> nthreads;
+    while (m < nthreads_m * SWITCH_RATIO) {
+      nthreads_m = nthreads_m / 2;
+    }
+  }
+
+  /* Partitions in n should have at most SWITCH_RATIO * nthreads_m columns */
+  if (n < SWITCH_RATIO * nthreads_m) {
+    nthreads_n = 1;
+  } else {
+    nthreads_n = (n + SWITCH_RATIO * nthreads_m - 1) / (SWITCH_RATIO * nthreads_m);
+    if (nthreads_m * nthreads_n > args -> nthreads) {
+      nthreads_n = blas_quickdivide(args -> nthreads, nthreads_m);
+    }
+  }
+
+  /* Execute serial or parallel computation */
+  if (nthreads_m * nthreads_n <= 1) {
+    GEMM_LOCAL(args, range_m, range_n, sa, sb, 0);
+  } else {
+    args -> nthreads = nthreads_m * nthreads_n;
+    gemm_driver(args, range_m, range_n, sa, sb, nthreads_m, nthreads_n);
+  }

   return 0;
 }
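
Note: the rewritten CNAME entry point replaces the old single nthreads shrink with an explicit choice of an nthreads_m x nthreads_n thread grid, driven by SWITCH_RATIO and the available thread budget. A standalone sketch of that heuristic, with SWITCH_RATIO = 16 as a made-up value and plain division standing in for blas_quickdivide:

#include <stdio.h>

#define SWITCH_RATIO 16   /* illustrative value only */

static void choose_grid(long m, long n, long nthreads,
                        long *nthreads_m, long *nthreads_n) {
  /* halve nthreads_m until every m partition has at least SWITCH_RATIO rows */
  if (m < 2 * SWITCH_RATIO) {
    *nthreads_m = 1;
  } else {
    *nthreads_m = nthreads;
    while (m < *nthreads_m * SWITCH_RATIO) *nthreads_m /= 2;
  }
  /* give each n partition roughly SWITCH_RATIO * nthreads_m columns,
   * capped by the total thread budget */
  if (n < SWITCH_RATIO * *nthreads_m) {
    *nthreads_n = 1;
  } else {
    *nthreads_n = (n + SWITCH_RATIO * *nthreads_m - 1) / (SWITCH_RATIO * *nthreads_m);
    if (*nthreads_m * *nthreads_n > nthreads)
      *nthreads_n = nthreads / *nthreads_m;
  }
}

int main(void) {
  long tm, tn;
  choose_grid(64, 1024, 8, &tm, &tn);
  printf("64x1024 on 8 threads -> %ld x %ld grid\n", tm, tn);   /* 4 x 2 */
  return 0;
}
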