fix #4063 threading perf issues on Windows

Mark Seminatore 2023-06-23 19:42:27 -07:00
parent 641454804f
commit d301649430
1 changed file with 40 additions and 59 deletions


@@ -52,9 +52,7 @@
/* Thread server common information */
typedef struct{
CRITICAL_SECTION lock;
HANDLE filled;
HANDLE killed;
HANDLE taskSemaphore;
blas_queue_t *queue; /* Parameter Pointer */
int shutdown; /* server shutdown flag */
@@ -68,6 +66,7 @@ int blas_server_avail = 0;
static BLASULONG server_lock = 0;
static blas_pool_t pool;
static BLASULONG pool_lock = 0;
static HANDLE blas_threads [MAX_CPU_NUMBER];
static DWORD blas_threads_id[MAX_CPU_NUMBER];
@@ -198,7 +197,6 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){
/* This is a main routine of threads. Each thread waits until job is */
/* queued. */
static DWORD WINAPI blas_thread_server(void *arg){
/* Thread identifier */
@@ -207,9 +205,7 @@ static DWORD WINAPI blas_thread_server(void *arg){
#endif
void *buffer, *sa, *sb;
blas_queue_t *queue;
DWORD action;
HANDLE handles[] = {pool.filled, pool.killed};
volatile blas_queue_t *queue;
/* Each server needs each buffer */
buffer = blas_memory_alloc(2);
@@ -226,28 +222,32 @@ static DWORD WINAPI blas_thread_server(void *arg){
fprintf(STDERR, "Server[%2ld] Waiting for Queue.\n", cpu);
#endif
do {
action = WaitForMultipleObjects(2, handles, FALSE, INFINITE);
} while ((action != WAIT_OBJECT_0) && (action != WAIT_OBJECT_0 + 1));
// all worker threads wait on the semaphore
WaitForSingleObject(pool.taskSemaphore, INFINITE);
if (action == WAIT_OBJECT_0 + 1) break;
// kill the thread if we are shutting down the server
if (pool.shutdown)
break;
#ifdef SMP_DEBUG
fprintf(STDERR, "Server[%2ld] Got it.\n", cpu);
#endif
EnterCriticalSection(&pool.lock);
// grab a queued task and update the list
volatile blas_queue_t* queue_next;
LONG64 prev_value;
do {
queue = (volatile blas_queue_t*)pool.queue;
if (!queue)
break;
queue = pool.queue;
if (queue) pool.queue = queue->next;
LeaveCriticalSection(&pool.lock);
queue_next = (volatile blas_queue_t*)queue->next;
prev_value = InterlockedCompareExchange64((PLONG64)&pool.queue, (LONG64)queue_next, (LONG64)queue);
} while (prev_value != queue);
if (queue) {
int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = queue -> routine;
if (pool.queue) SetEvent(pool.filled);
sa = queue -> sa;
sb = queue -> sb;
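
The interlocked pop above is the standard compare-and-swap retry loop: read the head, fetch its successor, and only install the successor if the head has not changed in the meantime. A minimal standalone sketch of the same pattern, using a hypothetical node_t rather than blas_queue_t and assuming 64-bit pointers as the diff does:

#include <windows.h>

typedef struct node { struct node *next; } node_t;

/* Lock-free pop of the list head; retries whenever another thread races us. */
static node_t *pop_head(node_t * volatile *head)
{
    node_t *old_head, *next;
    LONG64 prev;

    do {
        old_head = *head;
        if (old_head == NULL) return NULL;            /* nothing queued */
        next = old_head->next;
        prev = InterlockedCompareExchange64((LONG64 volatile *)head,
                                            (LONG64)next, (LONG64)old_head);
    } while (prev != (LONG64)old_head);               /* head moved, try again */

    return old_head;
}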
@@ -332,13 +332,8 @@ static DWORD WINAPI blas_thread_server(void *arg){
fprintf(STDERR, "Server[%2ld] Finished!\n", cpu);
#endif
EnterCriticalSection(&queue->lock);
queue -> status = BLAS_STATUS_FINISHED;
LeaveCriticalSection(&queue->lock);
SetEvent(queue->finish);
// mark our sub-task as complete
InterlockedDecrement(&queue->status);
}
/* Shutdown procedure */
@@ -353,7 +348,7 @@ static DWORD WINAPI blas_thread_server(void *arg){
}
/* Initializing routine */
int blas_thread_init(void){
BLASLONG i;
if (blas_server_avail || (blas_cpu_number <= 1)) return 0;
@@ -367,9 +362,7 @@ int blas_thread_init(void){
if (!blas_server_avail){
InitializeCriticalSection(&pool.lock);
pool.filled = CreateEvent(NULL, FALSE, FALSE, NULL);
pool.killed = CreateEvent(NULL, TRUE, FALSE, NULL);
pool.taskSemaphore = CreateSemaphore(NULL, 0, blas_cpu_number - 1, NULL);
pool.shutdown = 0;
pool.queue = NULL;
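
CreateSemaphore takes (security attributes, initial count, maximum count, name): starting the count at zero keeps every worker blocked until work is queued, and the maximum of blas_cpu_number - 1 matches the number of worker threads, the calling thread handling one sub-task itself (hence the pos - 1 in ReleaseSemaphore further down). A small sketch of that setup with hypothetical names:

#include <windows.h>

static HANDLE task_sem;

/* One permit per queued task; workers sleep while the count is zero. */
static void pool_init(LONG num_workers)
{
    task_sem = CreateSemaphore(NULL, 0 /* initial */, num_workers /* max */, NULL);
}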
@@ -391,11 +384,10 @@ int blas_thread_init(void){
/*
User can call one of two routines.
exec_blas_async ... immediately returns after jobs are queued.
exec_blas ... returns after jobs are finished.
*/
int exec_blas_async(BLASLONG pos, blas_queue_t *queue){
#if defined(SMP_SERVER)
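
For orientation, the two entry points are used roughly like this (a hedged sketch; the declarations live in OpenBLAS's internal headers and building the queue list is omitted):

#include "common.h"   /* assumed OpenBLAS internal header declaring exec_blas* */

/* Run `num` prepared sub-tasks either asynchronously or synchronously. */
static void run_tasks(BLASLONG num, blas_queue_t *queue)
{
    exec_blas_async(0, queue);           /* returns right after queueing        */
    exec_blas_async_wait(num, queue);    /* block until every sub-task is done  */

    /* ...or equivalently, queue, help with one sub-task, and wait:
       exec_blas(num, queue);  */
}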
@@ -409,8 +401,7 @@ int exec_blas_async(BLASLONG pos, blas_queue_t *queue){
current = queue;
while (current) {
InitializeCriticalSection(&current -> lock);
current -> finish = CreateEvent(NULL, FALSE, FALSE, NULL);
current->status = 1;
current -> position = pos;
#ifdef CONSISTENT_FPCSR
@@ -422,19 +413,10 @@ int exec_blas_async(BLASLONG pos, blas_queue_t *queue){
pos ++;
}
EnterCriticalSection(&pool.lock);
pool.queue = queue;
if (pool.queue) {
current = pool.queue;
while (current -> next) current = current -> next;
current -> next = queue;
} else {
pool.queue = queue;
}
LeaveCriticalSection(&pool.lock);
SetEvent(pool.filled);
// start up worker threads
ReleaseSemaphore(pool.taskSemaphore, pos - 1, NULL);
return 0;
}
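
exec_blas_async now appends to the shared list under pool.lock and then wakes pos - 1 workers with a single ReleaseSemaphore call, one permit per queued sub-task. A reduced sketch of that producer/consumer pairing with hypothetical names (the real code dispatches blas_queue_t items and runs their routines):

#include <windows.h>

static HANDLE task_sem;    /* created with CreateSemaphore(NULL, 0, max_workers, NULL) */

/* Producer: make n tasks visible on the shared list, then wake n workers. */
static void submit_tasks(LONG n)
{
    /* ... link the n tasks into the shared queue first ... */
    ReleaseSemaphore(task_sem, n, NULL);
}

/* Worker: each satisfied wait consumes exactly one permit, i.e. one task. */
static DWORD WINAPI worker_main(void *arg)
{
    for (;;) {
        WaitForSingleObject(task_sem, INFINITE);
        /* ... pop one task (see the pop_head sketch above) and run it ... */
    }
    return 0;
}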
@@ -450,10 +432,9 @@ int exec_blas_async_wait(BLASLONG num, blas_queue_t *queue){
fprintf(STDERR, "Waiting Queue ..\n");
#endif
WaitForSingleObject(queue->finish, INFINITE);
CloseHandle(queue->finish);
DeleteCriticalSection(&queue -> lock);
// spin-wait on each sub-task to finish
while (*((volatile int*)&queue->status))
YIELDING;
queue = queue -> next;
num --;
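
Completion is tracked without events as well: exec_blas_async sets status = 1 on each sub-task, the worker InterlockedDecrements it when the routine returns, and exec_blas_async_wait spins with YIELDING until every status reads zero, replacing the per-queue finish event and critical section. A minimal sketch of that handshake with a hypothetical task type, using YieldProcessor as a stand-in for OpenBLAS's YIELDING macro:

#include <windows.h>

typedef struct { volatile LONG status; } task_t;    /* set to 1 when submitted */

/* Worker side: flag the sub-task as finished. */
static void mark_done(task_t *t)
{
    InterlockedDecrement(&t->status);                /* 1 -> 0 */
}

/* Submitter side: spin until the worker has cleared the flag. */
static void wait_done(task_t *t)
{
    while (*(volatile LONG *)&t->status)
        YieldProcessor();
}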
@@ -501,18 +482,21 @@
/* Shutdown procedure, but user don't have to call this routine. The */
/* kernel automatically kill threads. */
int BLASFUNC(blas_thread_shutdown)(void){
int i;
#ifdef SMP_DEBUG
fprintf(STDERR, "blas_thread_shutdown..\n");
#endif
if (!blas_server_avail) return 0;
LOCK_COMMAND(&server_lock);
if (blas_server_avail){
SetEvent(pool.killed);
pool.shutdown = 1;
for(i = 0; i < blas_num_threads - 1; i++){
// Could also just use WaitForMultipleObjects
@@ -528,8 +512,7 @@ int BLASFUNC(blas_thread_shutdown)(void){
CloseHandle(blas_threads[i]);
}
CloseHandle(pool.filled);
CloseHandle(pool.killed);
CloseHandle(pool.taskSemaphore);
blas_server_avail = 0;
}
@@ -559,16 +542,14 @@ void goto_set_num_threads(int num_threads)
//increased_threads = 1;
if (!blas_server_avail){
InitializeCriticalSection(&pool.lock);
pool.filled = CreateEvent(NULL, FALSE, FALSE, NULL);
pool.killed = CreateEvent(NULL, TRUE, FALSE, NULL);
pool.taskSemaphore = CreateSemaphore(NULL, 0, blas_cpu_number - 1, NULL);
pool.shutdown = 0;
pool.queue = NULL;
blas_server_avail = 1;
}
for(i = blas_num_threads - 1; i < num_threads - 1; i++){
for(i = blas_num_threads; i < num_threads - 1; i++){
blas_threads[i] = CreateThread(NULL, 0,
blas_thread_server, (void *)i,