Merge pull request #4835 from martin-frbg/revertwin4359

Temporarily revert to the coarse-grained locking in the Windows thread server
Martin Kroeker 2024-08-07 14:09:32 +02:00 committed by GitHub
commit 753c7ebe17
2 changed files with 591 additions and 666 deletions
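
For orientation, the scheme this merge restores (laid out in the second file's diff below) funnels all scheduling through one global pool: a single CRITICAL_SECTION around a linked job list, an auto-reset event that wakes a worker when work arrives, and a manual-reset event that tells every worker to shut down. The following is a rough, self-contained sketch of that pattern, not the OpenBLAS code itself; the job type, pool_head, pool_push and worker_main are illustrative stand-ins for blas_queue_t, pool.queue, exec_blas_async and blas_thread_server.

#include <windows.h>

/* Simplified stand-in for blas_queue_t. */
typedef struct job {
  struct job *next;
  void (*run)(void *);
  void *arg;
} job;

static CRITICAL_SECTION pool_lock;   /* guards pool_head, like pool.lock */
static HANDLE pool_filled;           /* auto-reset: one waiter wakes per SetEvent */
static HANDLE pool_killed;           /* manual-reset: stays signalled, wakes everyone */
static job *pool_head;

static void pool_init(void) {
  InitializeCriticalSection(&pool_lock);
  pool_filled = CreateEvent(NULL, FALSE, FALSE, NULL);  /* auto-reset   */
  pool_killed = CreateEvent(NULL, TRUE,  FALSE, NULL);  /* manual-reset */
}

static void pool_push(job *j) {
  j->next = NULL;
  EnterCriticalSection(&pool_lock);
  if (!pool_head) pool_head = j;
  else { job *t = pool_head; while (t->next) t = t->next; t->next = j; }
  LeaveCriticalSection(&pool_lock);
  SetEvent(pool_filled);                                /* wake one worker */
}

static DWORD WINAPI worker_main(void *unused) {
  HANDLE handles[2];
  handles[0] = pool_filled;
  handles[1] = pool_killed;
  (void)unused;
  for (;;) {
    DWORD action = WaitForMultipleObjects(2, handles, FALSE, INFINITE);
    if (action == WAIT_OBJECT_0 + 1) break;             /* killed: shut down */
    EnterCriticalSection(&pool_lock);
    job *j = pool_head;
    if (j) pool_head = j->next;
    LeaveCriticalSection(&pool_lock);
    if (!j) continue;
    if (pool_head) SetEvent(pool_filled);               /* more work: pass the baton */
    j->run(j->arg);                                     /* run outside the lock */
  }
  return 0;
}

The "if (pool_head) SetEvent(pool_filled)" hand-off mirrors the "if (pool.queue) SetEvent(pool.filled);" line in blas_thread_server below: one signal per queued job is enough even though an auto-reset event only wakes a single waiter.
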


@@ -111,8 +111,8 @@ typedef struct blas_queue {
struct blas_queue *next;
#if defined( __WIN32__) || defined(__CYGWIN32__) || defined(_WIN32) || defined(__CYGWIN__)
// CRITICAL_SECTION lock;
// HANDLE finish;
CRITICAL_SECTION lock;
HANDLE finish;
volatile int finished;
#else
pthread_mutex_t lock;
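
The hunk above is the struct-level footprint of the change: the per-job CRITICAL_SECTION and finish event come back (they were commented out in the fine-grained variant), while the finished flag used by that variant stays. For the thread that submitted a job, the practical difference is blocking on an event versus polling a flag. A rough sketch of the two wait styles, with illustrative types and Sleep(0) standing in for the YIELDING macro (note that volatile alone is not a memory barrier on every compiler and architecture):

#include <windows.h>

/* (a) fine-grained variant: the worker sets a flag, the submitter spins on it */
typedef struct { volatile int finished; } job_flag;
static void job_flag_complete(job_flag *q) { q->finished = 1; }
static void job_flag_wait(job_flag *q)     { while (!q->finished) Sleep(0); }

/* (b) coarse-grained variant restored here: the worker signals a per-job event,
 *     the submitter blocks on it; the event would be created per job with
 *     CreateEvent(NULL, FALSE, FALSE, NULL), as exec_blas_async does below */
typedef struct { HANDLE finish; } job_event;
static void job_event_complete(job_event *q) { SetEvent(q->finish); }
static void job_event_wait(job_event *q) {
  WaitForSingleObject(q->finish, INFINITE);
  CloseHandle(q->finish);
}
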


@@ -1,4 +1,3 @@
/*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin. */
/* All rights reserved. */
@@ -49,41 +48,31 @@
#endif
#endif
#ifdef SMP_DEBUG
# define MT_TRACE(...) fprintf(stderr, __VA_ARGS__)
#else
# define MT_TRACE(...)
#endif
/* This is a thread implementation for Win32 lazy implementation */
/* Thread server common information */
typedef struct{
CRITICAL_SECTION lock;
HANDLE filled;
HANDLE killed;
blas_queue_t *queue; /* Parameter Pointer */
int shutdown; /* server shutdown flag */
} blas_pool_t;

static blas_queue_t *work_queue = NULL;
static HANDLE kickoff_event = NULL;
static CRITICAL_SECTION queue_lock;
/* We need this global for checking if initialization is finished. */
int blas_server_avail = 0;
int blas_omp_threads_local = 1;
static void * blas_thread_buffer[MAX_CPU_NUMBER];
/* Local Variables */
static BLASULONG server_lock = 0;
static blas_pool_t pool;
static HANDLE blas_threads [MAX_CPU_NUMBER];
static DWORD blas_threads_id[MAX_CPU_NUMBER];
static volatile int thread_target; // target num of live threads, volatile for cross-thread reads
//Prototypes
static void exec_threads(int , blas_queue_t *, int);
static void adjust_thread_buffers();
//
// Legacy code path
//
static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){
if (!(mode & BLAS_COMPLEX)){
@@ -207,395 +196,70 @@ static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb) {
}
}
//
// This is a main routine of threads. Each thread waits until job is queued.
//
/* This is a main routine of threads. Each thread waits until job is */
/* queued. */
static DWORD WINAPI blas_thread_server(void *arg){
/* Thread identifier */
#ifdef SMP_DEBUG
BLASLONG cpu = (BLASLONG)arg;
#endif
void *buffer, *sa, *sb;
blas_queue_t *queue;
DWORD action;
HANDLE handles[] = {pool.filled, pool.killed};
MT_TRACE("Server[%2ld] Thread is started!\n", cpu); /* Each server needs each buffer */
buffer = blas_memory_alloc(2);
#ifdef SMP_DEBUG
fprintf(STDERR, "Server[%2ld] Thread is started!\n", cpu);
#endif
while (1){
/* Waiting for Queue */
MT_TRACE("Server[%2ld] Waiting for Queue.\n", cpu); #ifdef SMP_DEBUG
fprintf(STDERR, "Server[%2ld] Waiting for Queue.\n", cpu);
#endif
// event raised when work is added to the queue
WaitForSingleObject(kickoff_event, INFINITE);

do {
action = WaitForMultipleObjects(2, handles, FALSE, INFINITE);
} while ((action != WAIT_OBJECT_0) && (action != WAIT_OBJECT_0 + 1));

if (action == WAIT_OBJECT_0 + 1) break;

if (cpu > thread_target - 2) {
//MT_TRACE("thread [%d] exiting.\n", cpu);
break; // excess thread, so worker thread exits
}
MT_TRACE("Server[%2ld] Got it.\n", cpu); #ifdef SMP_DEBUG
fprintf(STDERR, "Server[%2ld] Got it.\n", cpu);
#endif
EnterCriticalSection(&queue_lock); EnterCriticalSection(&pool.lock);
queue = work_queue; queue = pool.queue;
if (queue) if (queue) pool.queue = queue->next;
work_queue = work_queue->next;
LeaveCriticalSection(&queue_lock); LeaveCriticalSection(&pool.lock);
if (queue) { if (queue) {
int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = queue -> routine;
exec_threads(cpu, queue, 0); if (pool.queue) SetEvent(pool.filled);
} else {
continue; //if queue == NULL
}
MT_TRACE("Server[%2ld] Finished!\n", cpu);
queue->finished = 1;
}
/* Shutdown procedure */
MT_TRACE("Server[%2ld] Shutdown!\n", cpu);
return 0;
}
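
The loop that ends here is the fine-grained server being removed: every worker blocks on the one manual-reset kickoff_event, and a worker whose index exceeds the current target count simply exits, which is how goto_set_num_threads (further down) shrinks the pool. A compressed sketch of just that control flow, with stand-in declarations for the globals it touches and the queue-handling body elided:

#include <windows.h>

static HANDLE kickoff_event;         /* manual-reset: signalled while work may exist */
static volatile int thread_target;   /* desired number of live worker threads */

static DWORD WINAPI worker_removed(void *arg) {
  LONG_PTR my_index = (LONG_PTR)arg;
  for (;;) {
    WaitForSingleObject(kickoff_event, INFINITE);   /* wakes every waiting worker */
    if (my_index > thread_target - 2)               /* excess thread: leave the pool */
      break;
    /* ...pop one blas_queue_t from work_queue under queue_lock, run it,
       then set queue->finished = 1 (as in the loop above)... */
  }
  return 0;
}
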
//
// Initializing routine
//
int blas_thread_init(void) {
BLASLONG i;
if (blas_server_avail || (blas_cpu_number <= 1)) return 0;
LOCK_COMMAND(&server_lock);
adjust_thread_buffers();
MT_TRACE("Initializing Thread(Num. threads = %d)\n", blas_cpu_number);
if (!blas_server_avail) {
// create the kickoff Event
kickoff_event = CreateEvent(NULL, TRUE, FALSE, NULL);
thread_target = blas_cpu_number;
InitializeCriticalSection(&queue_lock);
for(i = 0; i < blas_cpu_number - 1; i++) {
//MT_TRACE("thread_init: creating thread [%d]\n", i);
blas_threads[i] = CreateThread(NULL, 0,
blas_thread_server, (void *)i,
0, &blas_threads_id[i]);
}
blas_server_avail = 1;
}
UNLOCK_COMMAND(&server_lock);
return 0;
}
//
// User can call one of two routines.
// exec_blas_async ... immediately returns after jobs are queued.
// exec_blas ... returns after jobs are finished.
//
int exec_blas_async(BLASLONG pos, blas_queue_t *queue) {
#if defined(SMP_SERVER)
// Handle lazy re-init of the thread-pool after a POSIX fork
// on Cygwin or as delayed init when a static library is used
if (unlikely(blas_server_avail == 0)) blas_thread_init();
#endif
blas_queue_t *current;
current = queue;
while (current) {
current -> position = pos;
#ifdef CONSISTENT_FPCSR
__asm__ __volatile__ ("fnstcw %0" : "=m" (current -> x87_mode));
__asm__ __volatile__ ("stmxcsr %0" : "=m" (current -> sse_mode));
#endif
current->finished = 0;
current = current -> next;
pos ++;
}
EnterCriticalSection(&queue_lock);
if (!work_queue)
{
work_queue = queue;
}
else
{
blas_queue_t *queue_item = work_queue;
// find the end of the work queue
while (queue_item->next)
queue_item = queue_item->next;
// add new work to the end
queue_item->next = queue;
}
LeaveCriticalSection(&queue_lock);
SetEvent(kickoff_event);
return 0;
}
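
One detail in exec_blas_async above, left unchanged by this revert: under CONSISTENT_FPCSR the submitting thread snapshots its x87 control word and MXCSR into every queue item, and each worker loads them before running the kernel, so rounding mode and exception masks are identical on every thread. The same handshake isolated into a small x86, GCC-style-asm-only sketch; fp_state is an illustrative struct, not an OpenBLAS type:

typedef struct {
  unsigned short x87_mode;   /* x87 FPU control word */
  unsigned int   sse_mode;   /* MXCSR */
} fp_state;

static void fp_state_save(fp_state *s) {       /* submitting thread */
  __asm__ __volatile__ ("fnstcw %0"  : "=m" (s->x87_mode));
  __asm__ __volatile__ ("stmxcsr %0" : "=m" (s->sse_mode));
}

static void fp_state_load(const fp_state *s) { /* worker thread */
  __asm__ __volatile__ ("ldmxcsr %0" : : "m" (s->sse_mode));
  __asm__ __volatile__ ("fldcw %0"   : : "m" (s->x87_mode));
}
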
//
// Join. Wait for all queued tasks to complete
//
int exec_blas_async_wait(BLASLONG num, blas_queue_t *queue) {
MT_TRACE("Synchronization Waiting.\n");
while (num) {
MT_TRACE("Waiting Queue ..\n");
while (!queue->finished)
YIELDING;
queue = queue->next;
num--;
}
MT_TRACE("Completely Done.\n\n");
// if work was added to the queue after this batch finished, we must not put the
// worker threads back to sleep by resetting the event
EnterCriticalSection(&queue_lock);
if (work_queue == NULL)
ResetEvent(kickoff_event);
LeaveCriticalSection(&queue_lock);
return 0;
}
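
The tail of exec_blas_async_wait above is the subtle part of the removed design: kickoff_event is manual-reset, so it has to be cleared once a batch finishes, but only while holding the queue lock and only if the queue is genuinely empty; otherwise work queued by another caller in the meantime would be left with sleeping workers. The same check in isolation, with stand-in declarations and an illustrative name:

#include <windows.h>

static CRITICAL_SECTION queue_lock;   /* guards work_queue */
static HANDLE kickoff_event;          /* manual-reset, signalled while work may exist */
static void *work_queue;              /* head of the pending-job list (opaque here) */

static void maybe_sleep_workers(void) {
  EnterCriticalSection(&queue_lock);
  if (work_queue == NULL)
    ResetEvent(kickoff_event);        /* nothing pending: let workers block again */
  LeaveCriticalSection(&queue_lock);
}
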
//
// Execute Threads
//
int exec_blas(BLASLONG num, blas_queue_t *queue) {
#if defined(SMP_SERVER) && defined(OS_CYGWIN_NT)
// Handle lazy re-init of the thread-pool after a POSIX fork
if (unlikely(blas_server_avail == 0)) blas_thread_init();
#endif
#ifndef ALL_THREADED
int (*routine)(blas_arg_t *, void *, void *, double *, double *, BLASLONG);
#endif
if ((num <= 0) || (queue == NULL)) return 0;
//Redirect to caller's callback routine
if (openblas_threads_callback_) {
int buf_index = 0, i = 0;
#ifndef USE_SIMPLE_THREADED_LEVEL3
for (i = 0; i < num; i ++)
queue[i].position = i;
#endif
openblas_threads_callback_(1, (openblas_dojob_callback) exec_threads, num, sizeof(blas_queue_t), (void*) queue, buf_index);
return 0;
}
if ((num > 1) && queue -> next)
exec_blas_async(1, queue -> next);
routine = queue -> routine;
if (queue -> mode & BLAS_LEGACY) {
legacy_exec(routine, queue -> mode, queue -> args, queue -> sb);
} else {
if (queue -> mode & BLAS_PTHREAD) {
void (*pthreadcompat)(void *) = queue -> routine;
(pthreadcompat)(queue -> args);
} else
(routine)(queue -> args, queue -> range_m, queue -> range_n,
queue -> sa, queue -> sb, 0);
}
if ((num > 1) && queue -> next)
exec_blas_async_wait(num - 1, queue -> next);
return 0;
}
//
// Shutdown procedure, but the user doesn't have to call this routine; the
// kernel automatically kills the threads.
//
int BLASFUNC(blas_thread_shutdown)(void) {
int i;
if (!blas_server_avail) return 0;
LOCK_COMMAND(&server_lock);
//Free buffers allocated for threads
for(i=0; i<MAX_CPU_NUMBER; i++){
if(blas_thread_buffer[i]!=NULL){
blas_memory_free(blas_thread_buffer[i]);
blas_thread_buffer[i]=NULL;
}
}
if (blas_server_avail) {
for (i = 0; i < blas_num_threads - 1; i++) {
// Could also just use WaitForMultipleObjects
DWORD wait_thread_value = WaitForSingleObject(blas_threads[i], 50);
#ifndef OS_WINDOWSSTORE
// TerminateThread is only available with WINAPI_DESKTOP and WINAPI_SYSTEM not WINAPI_APP in UWP
if (WAIT_OBJECT_0 != wait_thread_value) {
TerminateThread(blas_threads[i],0);
}
#endif
CloseHandle(blas_threads[i]);
}
blas_server_avail = 0;
}
UNLOCK_COMMAND(&server_lock);
return 0;
}
//
// Legacy function to set the number of threads
//
void goto_set_num_threads(int num_threads)
{
long i;
#if defined(SMP_SERVER) && defined(OS_CYGWIN_NT)
// Handle lazy re-init of the thread-pool after a POSIX fork
if (unlikely(blas_server_avail == 0)) blas_thread_init();
#endif
if (num_threads < 1) num_threads = blas_cpu_number;
if (num_threads > MAX_CPU_NUMBER) num_threads = MAX_CPU_NUMBER;
if (blas_server_avail && num_threads < blas_num_threads) {
LOCK_COMMAND(&server_lock);
thread_target = num_threads;
SetEvent(kickoff_event);
for (i = num_threads - 1; i < blas_num_threads - 1; i++) {
//MT_TRACE("set_num_threads: waiting on thread [%d] to quit.\n", i);
WaitForSingleObject(blas_threads[i], INFINITE);
//MT_TRACE("set_num_threads: thread [%d] has quit.\n", i);
CloseHandle(blas_threads[i]);
}
blas_num_threads = num_threads;
ResetEvent(kickoff_event);
UNLOCK_COMMAND(&server_lock);
}
if (num_threads > blas_num_threads) {
LOCK_COMMAND(&server_lock);
thread_target = num_threads;
//increased_threads = 1;
if (!blas_server_avail) {
// create the kickoff Event
kickoff_event = CreateEvent(NULL, TRUE, FALSE, NULL);
InitializeCriticalSection(&queue_lock);
blas_server_avail = 1;
}
for (i = (blas_num_threads > 0) ? blas_num_threads - 1 : 0; i < num_threads - 1; i++) {
//MT_TRACE("set_num_threads: creating thread [%d]\n", i);
blas_threads[i] = CreateThread(NULL, 0,
blas_thread_server, (void *)i,
0, &blas_threads_id[i]);
}
blas_num_threads = num_threads;
UNLOCK_COMMAND(&server_lock);
}
blas_cpu_number = num_threads;
}
//
// Openblas function to set thread count
//
void openblas_set_num_threads(int num)
{
goto_set_num_threads(num);
}
static void adjust_thread_buffers() {
int i=0;
//adjust buffer for each thread
for(i=0; i < blas_cpu_number; i++){
if(blas_thread_buffer[i] == NULL){
blas_thread_buffer[i] = blas_memory_alloc(2);
}
}
for(; i < MAX_CPU_NUMBER; i++){
if(blas_thread_buffer[i] != NULL){
blas_memory_free(blas_thread_buffer[i]);
blas_thread_buffer[i] = NULL;
}
}
}
// Individual thread work executor: sets up the synchronization environment and calls the inner threaded routine
static void exec_threads(int cpu, blas_queue_t *queue, int buf_index) {
void *buffer, *sa, *sb;
buffer = blas_thread_buffer[cpu];
sa = queue -> sa;
sb = queue -> sb;
int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = queue -> routine;
#ifdef CONSISTENT_FPCSR
__asm__ __volatile__ ("ldmxcsr %0" : : "m" (queue -> sse_mode));
__asm__ __volatile__ ("fldcw %0" : : "m" (queue -> x87_mode));
#endif
MT_TRACE("Server[%2ld] Started. Mode = 0x%03x M = %3ld N=%3ld K=%3ld\n", #ifdef SMP_DEBUG
fprintf(STDERR, "Server[%2ld] Started. Mode = 0x%03x M = %3ld N=%3ld K=%3ld\n",
cpu, queue->mode, queue-> args ->m, queue->args->n, queue->args->k); cpu, queue->mode, queue-> args ->m, queue->args->n, queue->args->k);
#endif
// fprintf(stderr, "queue start[%ld]!!!\n", cpu); // fprintf(stderr, "queue start[%ld]!!!\n", cpu);
@@ -603,8 +267,7 @@ static void exec_threads(int cpu, blas_queue_t *queue, int buf_index) {
main_status[cpu] = MAIN_RUNNING1;
#endif

if (sa == NULL)
sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A);
if (sa == NULL) sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A);

if (sb == NULL) {
if (!(queue -> mode & BLAS_COMPLEX)){
@@ -656,9 +319,271 @@ if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){
#endif

if (!(queue -> mode & BLAS_LEGACY)) {
(routine)(queue -> args, queue -> range_m, queue -> range_n, sa, sb, queue -> position);
} else {
legacy_exec(routine, queue -> mode, queue -> args, sb);
}
}else{
continue; //if queue == NULL
}
#ifdef SMP_DEBUG
fprintf(STDERR, "Server[%2ld] Finished!\n", cpu);
#endif
EnterCriticalSection(&queue->lock);
queue -> status = BLAS_STATUS_FINISHED;
LeaveCriticalSection(&queue->lock);
SetEvent(queue->finish);
}
/* Shutdown procedure */
#ifdef SMP_DEBUG
fprintf(STDERR, "Server[%2ld] Shutdown!\n", cpu);
#endif
blas_memory_free(buffer);
return 0;
}
/* Initializing routine */
int blas_thread_init(void){
BLASLONG i;
if (blas_server_avail || (blas_cpu_number <= 1)) return 0;
LOCK_COMMAND(&server_lock);
#ifdef SMP_DEBUG
fprintf(STDERR, "Initializing Thread(Num. threads = %d)\n",
blas_cpu_number);
#endif
if (!blas_server_avail){
InitializeCriticalSection(&pool.lock);
pool.filled = CreateEvent(NULL, FALSE, FALSE, NULL);
pool.killed = CreateEvent(NULL, TRUE, FALSE, NULL);
pool.shutdown = 0;
pool.queue = NULL;
for(i = 0; i < blas_cpu_number - 1; i++){
blas_threads[i] = CreateThread(NULL, 0,
blas_thread_server, (void *)i,
0, &blas_threads_id[i]);
}
blas_server_avail = 1;
}
UNLOCK_COMMAND(&server_lock);
return 0;
}
/*
User can call one of two routines.
exec_blas_async ... immediately returns after jobs are queued.
exec_blas ... returns after jobs are finished.
*/
int exec_blas_async(BLASLONG pos, blas_queue_t *queue){
#if defined(SMP_SERVER)
// Handle lazy re-init of the thread-pool after a POSIX fork
// on Cygwin or as delayed init when a static library is used
if (unlikely(blas_server_avail == 0)) blas_thread_init();
#endif
blas_queue_t *current;
current = queue;
while (current) {
InitializeCriticalSection(&current -> lock);
current -> finish = CreateEvent(NULL, FALSE, FALSE, NULL);
current -> position = pos;
#ifdef CONSISTENT_FPCSR
__asm__ __volatile__ ("fnstcw %0" : "=m" (current -> x87_mode));
__asm__ __volatile__ ("stmxcsr %0" : "=m" (current -> sse_mode));
#endif
current = current -> next;
pos ++;
}
EnterCriticalSection(&pool.lock);
if (pool.queue) {
current = pool.queue;
while (current -> next) current = current -> next;
current -> next = queue;
} else {
pool.queue = queue;
}
LeaveCriticalSection(&pool.lock);
SetEvent(pool.filled);
return 0;
}
int exec_blas_async_wait(BLASLONG num, blas_queue_t *queue){
#ifdef SMP_DEBUG
fprintf(STDERR, "Synchronization Waiting.\n");
#endif
while (num){
#ifdef SMP_DEBUG
fprintf(STDERR, "Waiting Queue ..\n");
#endif
WaitForSingleObject(queue->finish, INFINITE);
CloseHandle(queue->finish);
DeleteCriticalSection(&queue -> lock);
queue = queue -> next;
num --;
}
#ifdef SMP_DEBUG
fprintf(STDERR, "Completely Done.\n\n");
#endif
return 0;
}
/* Execute Threads */
int exec_blas(BLASLONG num, blas_queue_t *queue){
#if defined(SMP_SERVER) && defined(OS_CYGWIN_NT)
// Handle lazy re-init of the thread-pool after a POSIX fork
if (unlikely(blas_server_avail == 0)) blas_thread_init();
#endif
#ifndef ALL_THREADED
int (*routine)(blas_arg_t *, void *, void *, double *, double *, BLASLONG);
#endif
if ((num <= 0) || (queue == NULL)) return 0;
if ((num > 1) && queue -> next) exec_blas_async(1, queue -> next);
routine = queue -> routine;
if (queue -> mode & BLAS_LEGACY) {
legacy_exec(routine, queue -> mode, queue -> args, queue -> sb);
} else
if (queue -> mode & BLAS_PTHREAD) {
void (*pthreadcompat)(void *) = queue -> routine;
(pthreadcompat)(queue -> args);
} else
(routine)(queue -> args, queue -> range_m, queue -> range_n,
queue -> sa, queue -> sb, 0);
if ((num > 1) && queue -> next) exec_blas_async_wait(num - 1, queue -> next);
return 0;
}
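
exec_blas above always keeps one share for the calling thread: items after the first go to the pool via exec_blas_async, the head runs inline, and exec_blas_async_wait joins the rest. The shape of that pattern with a simplified job type; pool_submit and pool_wait are hypothetical stand-ins for exec_blas_async and exec_blas_async_wait, declared but not implemented here:

typedef struct sjob { struct sjob *next; void (*run)(void *); void *arg; } sjob;

/* Hypothetical stand-ins for exec_blas_async / exec_blas_async_wait. */
int pool_submit(int pos, sjob *chain);
int pool_wait(long num, sjob *chain);

static int run_shares(long num, sjob *chain) {
  if (num <= 0 || chain == NULL) return 0;
  if (num > 1 && chain->next)
    pool_submit(1, chain->next);       /* hand the tail to the worker pool */
  chain->run(chain->arg);              /* the caller executes the head itself */
  if (num > 1 && chain->next)
    pool_wait(num - 1, chain->next);   /* join the tail */
  return 0;
}
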
/* Shutdown procedure, but the user doesn't have to call this routine; the */
/* kernel automatically kills the threads. */
int BLASFUNC(blas_thread_shutdown)(void){
int i;
if (!blas_server_avail) return 0;
LOCK_COMMAND(&server_lock);
if (blas_server_avail){
SetEvent(pool.killed);
for(i = 0; i < blas_num_threads - 1; i++){
// Could also just use WaitForMultipleObjects
DWORD wait_thread_value = WaitForSingleObject(blas_threads[i], 50);
#ifndef OS_WINDOWSSTORE
// TerminateThread is only available with WINAPI_DESKTOP and WINAPI_SYSTEM not WINAPI_APP in UWP
if (WAIT_OBJECT_0 != wait_thread_value) {
TerminateThread(blas_threads[i],0);
}
#endif
CloseHandle(blas_threads[i]);
}
CloseHandle(pool.filled);
CloseHandle(pool.killed);
blas_server_avail = 0;
}
UNLOCK_COMMAND(&server_lock);
return 0;
}
void goto_set_num_threads(int num_threads)
{
long i;
#if defined(SMP_SERVER) && defined(OS_CYGWIN_NT)
// Handle lazy re-init of the thread-pool after a POSIX fork
if (unlikely(blas_server_avail == 0)) blas_thread_init();
#endif
if (num_threads < 1) num_threads = blas_cpu_number;
if (num_threads > MAX_CPU_NUMBER) num_threads = MAX_CPU_NUMBER;
if (num_threads > blas_num_threads) {
LOCK_COMMAND(&server_lock);
//increased_threads = 1;
if (!blas_server_avail){
InitializeCriticalSection(&pool.lock);
pool.filled = CreateEvent(NULL, FALSE, FALSE, NULL);
pool.killed = CreateEvent(NULL, TRUE, FALSE, NULL);
pool.shutdown = 0;
pool.queue = NULL;
blas_server_avail = 1;
}
for(i = (blas_num_threads > 0) ? blas_num_threads - 1 : 0; i < num_threads - 1; i++){
blas_threads[i] = CreateThread(NULL, 0,
blas_thread_server, (void *)i,
0, &blas_threads_id[i]);
}
blas_num_threads = num_threads;
UNLOCK_COMMAND(&server_lock);
}
blas_cpu_number = num_threads;
}
void openblas_set_num_threads(int num)
{
goto_set_num_threads(num);
}
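
From the caller's side none of this machinery is visible: a thread count is chosen and the library routes each parallel call through exec_blas. A minimal user-level example, assuming a standard OpenBLAS installation with its cblas.h header; the matrix size is arbitrary:

#include <cblas.h>
#include <stdlib.h>

int main(void) {
  const int n = 512;
  double *a = calloc((size_t)n * n, sizeof(double));
  double *b = calloc((size_t)n * n, sizeof(double));
  double *c = calloc((size_t)n * n, sizeof(double));
  if (!a || !b || !c) return 1;

  openblas_set_num_threads(4);     /* routed through goto_set_num_threads above */

  cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
              n, n, n, 1.0, a, n, b, n, 0.0, c, n);

  free(a); free(b); free(c);
  return 0;
}
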