author | Tamar Christina <tamar@zhox.com> | 2019-06-01 10:56:31 +0100
---|---|---
committer | Ben Gamari <ben@smart-cactus.org> | 2020-07-15 16:41:01 -0400
commit | 459e1c5f7c71e37ed8bb239c57bdec441d278fff (patch) |
tree | e049c6429afef505fcd65af9eb0a785be78aa200 |
parent | 00a23bfda4840546075ec2b2e18f61380b360dfc (diff) |
download | haskell-459e1c5f7c71e37ed8bb239c57bdec441d278fff.tar.gz |
winio: Use SlimReaderLocks and ConditionVariables provided by the OS instead of emulated ones
-rw-r--r-- | includes/rts/OSThreads.h | 77
-rw-r--r-- | rts/HeapStackCheck.cmm   | 4
-rw-r--r-- | rts/PrimOps.cmm          | 4
-rw-r--r-- | rts/StgMiscClosures.cmm  | 4
-rw-r--r-- | rts/win32/IOManager.c    | 60
-rw-r--r-- | rts/win32/OSMem.c        | 19
-rw-r--r-- | rts/win32/OSThreads.c    | 176
-rw-r--r-- | rts/win32/WorkQueue.c    | 14
-rw-r--r-- | rts/win32/WorkQueue.h    | 4
9 files changed, 152 insertions, 210 deletions
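The change replaces the RTS's hand-rolled Windows synchronisation (CRITICAL_SECTION mutexes plus Event-based condition emulation) with the OS-native SRWLOCK and CONDITION_VARIABLE primitives. For readers unfamiliar with that API, here is a minimal standalone sketch of the pattern the RTS switches to. It only illustrates the Win32 calls and is not code from this patch; the `ready` flag and the `worker` thread are invented for the example (Windows Vista or later is assumed).

```c
#include <windows.h>
#include <stdio.h>

/* SRW locks and condition variables need no kernel object: zero-init is enough. */
static SRWLOCK            lock  = SRWLOCK_INIT;
static CONDITION_VARIABLE cond  = CONDITION_VARIABLE_INIT;
static int                ready = 0;

static DWORD WINAPI worker(LPVOID arg)
{
    (void)arg;
    AcquireSRWLockExclusive(&lock);
    while (!ready) {
        /* Atomically releases the lock while waiting and re-acquires it on
         * wake-up; the loop guards against spurious wake-ups. */
        SleepConditionVariableSRW(&cond, &lock, INFINITE, 0);
    }
    ReleaseSRWLockExclusive(&lock);
    printf("worker: signalled\n");
    return 0;
}

int main(void)
{
    HANDLE h = CreateThread(NULL, 0, worker, NULL, 0, NULL);
    if (h == NULL) return 1;

    AcquireSRWLockExclusive(&lock);
    ready = 1;
    ReleaseSRWLockExclusive(&lock);
    WakeConditionVariable(&cond);     /* WakeAllConditionVariable for broadcast */

    WaitForSingleObject(h, INFINITE);
    CloseHandle(h);
    return 0;
}
```

Compared with the Event-based emulation this patch removes, the wait here cannot miss a signal: SleepConditionVariableSRW gives up the lock and blocks as one atomic step.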
diff --git a/includes/rts/OSThreads.h b/includes/rts/OSThreads.h
index ada2a9a787..a68f1ea140 100644
--- a/includes/rts/OSThreads.h
+++ b/includes/rts/OSThreads.h
@@ -77,18 +77,22 @@ EXTERN_INLINE int TRY_ACQUIRE_LOCK(pthread_mutex_t *mutex)
 #if defined(CMINUSMINUS)
-/* We jump through a hoop here to get a CCall EnterCriticalSection
-   and LeaveCriticalSection, as that's what C-- wants. */
+/* We jump through a hoop here to get a CCall AcquireSRWLockExclusive
+   and ReleaseSRWLockExclusive, as that's what C-- wants. */
-#define OS_ACQUIRE_LOCK(mutex) foreign "stdcall" EnterCriticalSection(mutex)
-#define OS_RELEASE_LOCK(mutex) foreign "stdcall" LeaveCriticalSection(mutex)
+#define OS_ACQUIRE_LOCK(mutex) foreign "stdcall" AcquireSRWLockExclusive(mutex)
+#define OS_RELEASE_LOCK(mutex) foreign "stdcall" ReleaseSRWLockExclusive(mutex)
 #define OS_ASSERT_LOCK_HELD(mutex) /* nothing */
-#else
+#else // CMINUSMINUS
 #include <windows.h>
+#include <synchapi.h>
-typedef HANDLE Condition;
+/* Use native condition variables coupled with SRW locks; these are more
+   efficient and incur a smaller overhead than emulating them with events.
+   See Note [SRW locks]. */
+typedef CONDITION_VARIABLE Condition;
 typedef DWORD OSThreadId;
 // don't be tempted to use HANDLE as the OSThreadId: there can be
 // many HANDLES to a given thread, so comparison would not work.
@@ -98,58 +102,47 @@ typedef DWORD ThreadLocalKey;
 #define INIT_COND_VAR 0
-// We have a choice for implementing Mutexes on Windows.  Standard
-// Mutexes are kernel objects that require kernel calls to
-// acquire/release, whereas CriticalSections are spin-locks that block
-// in the kernel after spinning for a configurable number of times.
-// CriticalSections are *much* faster, so we use those.  The Mutex
-// implementation is left here for posterity.
-#define USE_CRITICAL_SECTIONS 1
-
-#if USE_CRITICAL_SECTIONS
-
-typedef CRITICAL_SECTION Mutex;
+/* Note [SRW locks]
+   We have a choice for implementing Mutexes on Windows.  Standard
+   Mutexes are kernel objects that require kernel calls to
+   acquire/release, whereas CriticalSections are spin-locks that block
+   in the kernel after spinning for a configurable number of times.
+   CriticalSections are *much* faster than Mutexes, though not as fast as
+   slim reader/writer (SRW) locks.  CriticalSections also require a 48 byte
+   structure to provide lock re-entrancy.  We don't need that: the primitives
+   used on other platforms don't provide it, and locks are used defensively
+   in the RTS in a way that doesn't rely on re-entrancy.  This means that
+   SRW's 8 byte size is much more appropriate.  With an 8 byte payload there
+   is a higher chance of it being in your cache line.  SRW locks are also a
+   lot faster than CriticalSections when multiple threads are involved: a CS
+   requires setup and teardown via kernel calls, while an SRWL is
+   zero-initialized via SRWLOCK_INIT assignment. */
+
+typedef SRWLOCK Mutex;
 #if defined(LOCK_DEBUG)
 #define OS_ACQUIRE_LOCK(mutex) \
   debugBelch("ACQUIRE_LOCK(0x%p) %s %d\n", mutex,__FILE__,__LINE__); \
-  EnterCriticalSection(mutex)
+  AcquireSRWLockExclusive(mutex)
 #define OS_RELEASE_LOCK(mutex) \
   debugBelch("RELEASE_LOCK(0x%p) %s %d\n", mutex,__FILE__,__LINE__); \
-  LeaveCriticalSection(mutex)
+  ReleaseSRWLockExclusive(mutex)
 #define OS_ASSERT_LOCK_HELD(mutex) /* nothing */
 #else
-#define OS_ACQUIRE_LOCK(mutex)  EnterCriticalSection(mutex)
-#define TRY_ACQUIRE_LOCK(mutex) (TryEnterCriticalSection(mutex) == 0)
-#define OS_RELEASE_LOCK(mutex)  LeaveCriticalSection(mutex)
+#define OS_ACQUIRE_LOCK(mutex)  AcquireSRWLockExclusive(mutex)
+#define TRY_ACQUIRE_LOCK(mutex) (TryAcquireSRWLockExclusive(mutex) == 0)
+#define OS_RELEASE_LOCK(mutex)  ReleaseSRWLockExclusive(mutex)
+#define OS_INIT_LOCK(mutex)     InitializeSRWLock(mutex)
+#define OS_CLOSE_LOCK(mutex)
 // I don't know how to do this.  TryEnterCriticalSection() doesn't do
 // the right thing.
 #define OS_ASSERT_LOCK_HELD(mutex) /* nothing */
-#endif
-
-#else
-
-typedef HANDLE Mutex;
-
-// casting to (Mutex *) here required due to use in .cmm files where
-// the argument has (void *) type.
-#define OS_ACQUIRE_LOCK(mutex) \
-    if (WaitForSingleObject(*((Mutex *)mutex),INFINITE) == WAIT_FAILED) { \
-        barf("WaitForSingleObject: %d", GetLastError()); \
-    }
-
-#define OS_RELEASE_LOCK(mutex) \
-    if (ReleaseMutex(*((Mutex *)mutex)) == 0) { \
-        barf("ReleaseMutex: %d", GetLastError()); \
-    }
-
-#define OS_ASSERT_LOCK_HELD(mutex) /* nothing */
-#endif
+#endif // LOCK_DEBUG
 #endif // CMINUSMINUS
diff --git a/rts/HeapStackCheck.cmm b/rts/HeapStackCheck.cmm
index 1c1de089dc..b8df323c8b 100644
--- a/rts/HeapStackCheck.cmm
+++ b/rts/HeapStackCheck.cmm
@@ -17,8 +17,8 @@
 #if defined(__PIC__)
 import pthread_mutex_unlock;
 #endif
-import EnterCriticalSection;
-import LeaveCriticalSection;
+import AcquireSRWLockExclusive;
+import ReleaseSRWLockExclusive;
 /* Stack/Heap Check Failure
  * ------------------------
diff --git a/rts/PrimOps.cmm b/rts/PrimOps.cmm
index 1fd746edf6..e13e89b98c 100644
--- a/rts/PrimOps.cmm
+++ b/rts/PrimOps.cmm
@@ -31,8 +31,8 @@ import pthread_mutex_unlock;
 #endif
 import CLOSURE base_ControlziExceptionziBase_nestedAtomically_closure;
 import CLOSURE base_GHCziIOziException_heapOverflow_closure;
-import EnterCriticalSection;
-import LeaveCriticalSection;
+import AcquireSRWLockExclusive;
+import ReleaseSRWLockExclusive;
 import CLOSURE ghczmprim_GHCziTypes_False_closure;
 #if defined(PROFILING)
 import CLOSURE CCS_MAIN;
diff --git a/rts/StgMiscClosures.cmm b/rts/StgMiscClosures.cmm
index 4293dfb787..7a8f20dded 100644
--- a/rts/StgMiscClosures.cmm
+++ b/rts/StgMiscClosures.cmm
@@ -15,8 +15,8 @@ import pthread_mutex_lock;
 import ghczmprim_GHCziTypes_Czh_info;
 import ghczmprim_GHCziTypes_Izh_info;
-import EnterCriticalSection;
-import LeaveCriticalSection;
+import AcquireSRWLockExclusive;
+import ReleaseSRWLockExclusive;
 /* ----------------------------------------------------------------------------
    Stack underflow
diff --git a/rts/win32/IOManager.c b/rts/win32/IOManager.c
index f155180ef3..e5da32b982 100644
--- a/rts/win32/IOManager.c
+++ b/rts/win32/IOManager.c
@@ -22,7 +22,7 @@
  * Internal state maintained by the IO manager.
 */
 typedef struct IOManagerState {
-    CritSection      manLock;
+    Mutex            manLock;
     WorkQueue*       workQueue;
     int              queueSize;
     int              numWorkers;
@@ -30,7 +30,7 @@ typedef struct IOManagerState {
     HANDLE           hExitEvent;
     unsigned int     requestID;
     /* fields for keeping track of active WorkItems */
-    CritSection      active_work_lock;
+    Mutex            active_work_lock;
     WorkItem*        active_work_items;
     UINT             sleepResolution;
 } IOManagerState;
@@ -65,7 +65,7 @@ IOWorkerProc(PVOID param)
        // The error code is communicated back on completion of request; reset.
        errCode = 0;
-       EnterCriticalSection(&iom->manLock);
+       OS_ACQUIRE_LOCK(&iom->manLock);
        /* Signal that the worker is idle.
        *
        * 'workersIdle' is used when determining whether or not to
@@ -73,7 +73,7 @@ IOWorkerProc(PVOID param)
        * (see addIORequest().)
        */
        iom->workersIdle++;
-       LeaveCriticalSection(&iom->manLock);
+       OS_RELEASE_LOCK(&iom->manLock);
        /*
        * A possible future refinement is to make long-term idle threads
@@ -85,19 +85,19 @@ IOWorkerProc(PVOID param)
        if (rc == WAIT_OBJECT_0) {
            // we received the exit event
-           EnterCriticalSection(&iom->manLock);
+           OS_ACQUIRE_LOCK(&iom->manLock);
            ioMan->numWorkers--;
-           LeaveCriticalSection(&iom->manLock);
+           OS_RELEASE_LOCK(&iom->manLock);
            return 0;
        }
-       EnterCriticalSection(&iom->manLock);
+       OS_ACQUIRE_LOCK(&iom->manLock);
        /* Signal that the thread is 'non-idle' and about to consume
        * a work item.
        */
        iom->workersIdle--;
        iom->queueSize--;
-       LeaveCriticalSection(&iom->manLock);
+       OS_RELEASE_LOCK(&iom->manLock);
        if ( rc == (WAIT_OBJECT_0 + 1) ) {
            /* work item available, fetch it. */
@@ -266,17 +266,17 @@ IOWorkerProc(PVOID param)
            } else {
                fprintf(stderr, "unable to fetch work; fatal.\n");
                fflush(stderr);
-               EnterCriticalSection(&iom->manLock);
+               OS_ACQUIRE_LOCK(&iom->manLock);
                ioMan->numWorkers--;
-               LeaveCriticalSection(&iom->manLock);
+               OS_RELEASE_LOCK(&iom->manLock);
                return 1;
            }
        } else {
            fprintf(stderr, "waiting failed (%lu); fatal.\n", rc);
            fflush(stderr);
-           EnterCriticalSection(&iom->manLock);
+           OS_ACQUIRE_LOCK(&iom->manLock);
            ioMan->numWorkers--;
-           LeaveCriticalSection(&iom->manLock);
+           OS_RELEASE_LOCK(&iom->manLock);
            return 1;
        }
    }
@@ -334,13 +334,13 @@ StartIOManager(void)
    }
    ioMan->hExitEvent = hExit;
-   InitializeCriticalSection(&ioMan->manLock);
+   OS_INIT_LOCK(&ioMan->manLock);
    ioMan->workQueue   = wq;
    ioMan->numWorkers  = 0;
    ioMan->workersIdle = 0;
    ioMan->queueSize   = 0;
    ioMan->requestID   = 1;
-   InitializeCriticalSection(&ioMan->active_work_lock);
+   OS_INIT_LOCK(&ioMan->active_work_lock);
    ioMan->active_work_items = NULL;
    ioMan->sleepResolution = sleepResolution;
@@ -360,7 +360,7 @@ int
 depositWorkItem( unsigned int reqID,
                  WorkItem* wItem )
 {
-   EnterCriticalSection(&ioMan->manLock);
+   OS_ACQUIRE_LOCK(&ioMan->manLock);
 #if 0
    fprintf(stderr, "depositWorkItem: %d/%d\n",
@@ -397,9 +397,9 @@ depositWorkItem( unsigned int reqID,
    if ( (ioMan->workersIdle < ioMan->queueSize) ) {
        /* see if giving up our quantum ferrets out some idle threads. */
-       LeaveCriticalSection(&ioMan->manLock);
+       OS_RELEASE_LOCK(&ioMan->manLock);
        Sleep(0);
-       EnterCriticalSection(&ioMan->manLock);
+       OS_ACQUIRE_LOCK(&ioMan->manLock);
        if ( (ioMan->workersIdle < ioMan->queueSize) ) {
            /* No, go ahead and create another. */
            ioMan->numWorkers++;
@@ -408,7 +408,7 @@ depositWorkItem( unsigned int reqID,
            }
        }
    }
-   LeaveCriticalSection(&ioMan->manLock);
+   OS_RELEASE_LOCK(&ioMan->manLock);
    if (SubmitWork(ioMan->workQueue,wItem)) {
        /* Note: the work item has potentially been consumed by a worker thread
@@ -522,17 +522,17 @@ void ShutdownIOManager ( bool wait_threads )
    if (wait_threads) {
        /* Wait for all worker threads to die. */
        for (;;) {
-           EnterCriticalSection(&ioMan->manLock);
+           OS_ACQUIRE_LOCK(&ioMan->manLock);
            num = ioMan->numWorkers;
-           LeaveCriticalSection(&ioMan->manLock);
+           OS_RELEASE_LOCK(&ioMan->manLock);
            if (num == 0)
                break;
            Sleep(10);
        }
        FreeWorkQueue(ioMan->workQueue);
        CloseHandle(ioMan->hExitEvent);
-       DeleteCriticalSection(&ioMan->active_work_lock);
-       DeleteCriticalSection(&ioMan->manLock);
+       OS_CLOSE_LOCK(&ioMan->active_work_lock);
+       OS_CLOSE_LOCK(&ioMan->manLock);
        mmresult = timeEndPeriod(ioMan->sleepResolution);
        if (mmresult != MMSYSERR_NOERROR) {
@@ -550,10 +550,10 @@ void
 RegisterWorkItem(IOManagerState* ioMan,
                  WorkItem* wi)
 {
-   EnterCriticalSection(&ioMan->active_work_lock);
+   OS_ACQUIRE_LOCK(&ioMan->active_work_lock);
    wi->link = ioMan->active_work_items;
    ioMan->active_work_items = wi;
-   LeaveCriticalSection(&ioMan->active_work_lock);
+   OS_RELEASE_LOCK(&ioMan->active_work_lock);
 }
 static
@@ -563,7 +563,7 @@ DeregisterWorkItem(IOManagerState* ioMan,
 {
    WorkItem *ptr, *prev;
-   EnterCriticalSection(&ioMan->active_work_lock);
+   OS_ACQUIRE_LOCK(&ioMan->active_work_lock);
    for(prev=NULL,ptr=ioMan->active_work_items;ptr;prev=ptr,ptr=ptr->link) {
        if (wi->requestID == ptr->requestID) {
            if (prev==NULL) {
@@ -571,13 +571,13 @@ DeregisterWorkItem(IOManagerState* ioMan,
            } else {
                prev->link = ptr->link;
            }
-           LeaveCriticalSection(&ioMan->active_work_lock);
+           OS_RELEASE_LOCK(&ioMan->active_work_lock);
            return;
        }
    }
    fprintf(stderr, "DeregisterWorkItem: unable to locate work item %d\n",
            wi->requestID);
-   LeaveCriticalSection(&ioMan->active_work_lock);
+   OS_RELEASE_LOCK(&ioMan->active_work_lock);
 }
@@ -596,11 +596,11 @@ void
 abandonWorkRequest ( int reqID )
 {
    WorkItem *ptr;
-   EnterCriticalSection(&ioMan->active_work_lock);
+   OS_ACQUIRE_LOCK(&ioMan->active_work_lock);
    for(ptr=ioMan->active_work_items;ptr;ptr=ptr->link) {
        if (ptr->requestID == (unsigned int)reqID ) {
            ptr->abandonOp = 1;
-           LeaveCriticalSection(&ioMan->active_work_lock);
+           OS_RELEASE_LOCK(&ioMan->active_work_lock);
            return;
        }
    }
@@ -608,7 +608,7 @@ abandonWorkRequest ( int reqID )
     * finished sometime since awaitRequests() last drained the completed
     * request table; i.e., not an error.
     */
-   LeaveCriticalSection(&ioMan->active_work_lock);
+   OS_RELEASE_LOCK(&ioMan->active_work_lock);
 }
 #endif
diff --git a/rts/win32/OSMem.c b/rts/win32/OSMem.c
index dd0f60ff0a..fd26d06c4e 100644
--- a/rts/win32/OSMem.c
+++ b/rts/win32/OSMem.c
@@ -37,28 +37,11 @@ static alloc_rec* allocs = NULL;
 /* free_blocks are kept in ascending order, and adjacent blocks are merged */
 static block_rec* free_blocks = NULL;
-/* Mingw-w64 does not currently have this in their header. So we have to import it.*/
-typedef LPVOID(WINAPI *VirtualAllocExNumaProc)(HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
-
-/* Cache NUMA API call. */
-VirtualAllocExNumaProc _VirtualAllocExNuma;
-
 void
 osMemInit(void)
 {
    allocs = NULL;
    free_blocks = NULL;
-
-   /* Resolve and cache VirtualAllocExNuma. */
-   if (osNumaAvailable() && RtsFlags.GcFlags.numa)
-   {
-       _VirtualAllocExNuma = (VirtualAllocExNumaProc)(void*)GetProcAddress(GetModuleHandleW(L"kernel32"), "VirtualAllocExNuma");
-       if (!_VirtualAllocExNuma)
-       {
-           sysErrorBelch(
-               "osBindMBlocksToNode: VirtualAllocExNuma does not exist. How did you get this far?");
-       }
-   }
 }
 static
@@ -569,7 +552,7 @@ void osBindMBlocksToNode(
       On windows also -xb is broken, it does nothing so that can't be used
       to tweak it (see #12577). So for now, just let the OS decide.
    */
-   temp = _VirtualAllocExNuma(
+   temp = VirtualAllocExNuma(
                GetCurrentProcess(),
                NULL, // addr? See base memory
                size,
diff --git a/rts/win32/OSThreads.c b/rts/win32/OSThreads.c
index fe35f35e82..706237363a 100644
--- a/rts/win32/OSThreads.c
+++ b/rts/win32/OSThreads.c
@@ -27,69 +27,6 @@ static uint32_t* cpuGroupCumulativeCache = NULL;
 /* Processor group dist cache.  */
 static uint8_t* cpuGroupDistCache = NULL;
-/* Win32 threads and synchronisation objects */
-
-/* A Condition is represented by a Win32 Event object;
- * a Mutex by a Mutex kernel object.
- *
- * ToDo: go through the defn and usage of these to
- * make sure the semantics match up with that of
- * the (assumed) pthreads behaviour. This is really
- * just a first pass at getting something compilable.
- */
-
-void
-initCondition( Condition* pCond )
-{
-  HANDLE h =  CreateEvent(NULL,
-                          FALSE,  /* auto reset */
-                          FALSE,  /* initially not signalled */
-                          NULL);  /* unnamed => process-local. */
-
-  if ( h == NULL ) {
-      sysErrorBelch("initCondition: unable to create");
-      stg_exit(EXIT_FAILURE);
-  }
-  *pCond = h;
-  return;
-}
-
-void
-closeCondition( Condition* pCond )
-{
-  if ( CloseHandle(*pCond) == 0 ) {
-      sysErrorBelch("closeCondition: failed to close");
-  }
-  return;
-}
-
-bool
-broadcastCondition ( Condition* pCond )
-{
-  PulseEvent(*pCond);
-  return true;
-}
-
-bool
-signalCondition ( Condition* pCond )
-{
-  if (SetEvent(*pCond) == 0) {
-      sysErrorBelch("SetEvent");
-      stg_exit(EXIT_FAILURE);
-  }
-  return true;
-}
-
-bool
-waitCondition ( Condition* pCond, Mutex* pMut )
-{
-  RELEASE_LOCK(pMut);
-  WaitForSingleObject(*pCond, INFINITE);
-  /* Hmm..use WaitForMultipleObjects() ? */
-  ACQUIRE_LOCK(pMut);
-  return true;
-}
-
 void
 yieldThread()
 {
@@ -150,35 +87,6 @@ osThreadIsAlive(OSThreadId id)
    return (exit_code == STILL_ACTIVE);
 }
-#if defined(USE_CRITICAL_SECTIONS)
-void
-initMutex (Mutex* pMut)
-{
-  InitializeCriticalSectionAndSpinCount(pMut,4000);
-}
-void
-closeMutex (Mutex* pMut)
-{
-  DeleteCriticalSection(pMut);
-}
-#else
-void
-initMutex (Mutex* pMut)
-{
-  HANDLE h = CreateMutex ( NULL,  /* default sec. attributes */
-                           TRUE,   /* not owned => initially signalled */
-                           NULL
-                         );
-  *pMut = h;
-  return;
-}
-void
-closeMutex (Mutex* pMut)
-{
-  CloseHandle(*pMut);
-}
-#endif
-
 void
 newThreadLocalKey (ThreadLocalKey *key)
 {
@@ -252,6 +160,13 @@ forkOS_createThread ( HsStablePtr entry )
                        (unsigned*)&pId) == 0);
 }
+#if defined(x86_64_HOST_ARCH)
+
+#if !defined(ALL_PROCESSOR_GROUPS)
+#define ALL_PROCESSOR_GROUPS 0xffff
+#endif
+#endif
+
 void
 freeThreadingResources (void)
 {
    if (cpuGroupCache)
@@ -426,12 +341,15 @@ getNumberOfProcessors (void)
    if (nproc)
    {
-       IF_DEBUG(scheduler, debugBelch("[*] Total number of active processors detected: %u\n", nproc));
+       IF_DEBUG(scheduler, debugBelch("[*] Total number of active "
+                                      "processors detected: %u\n", nproc));
        return nproc;
    }
-   IF_DEBUG(scheduler, debugBelch("Could not determine Max number of logical processors.\n"
-                                  "Falling back to old code which limits to 64 logical processors.\n"));
+   IF_DEBUG(scheduler, debugBelch("Could not determine Max number of "
+                                  "logical processors.\n"
+                                  "Falling back to old code which limits "
+                                  "to 64 logical processors.\n"));
 }
 #endif
@@ -484,7 +402,6 @@ setThreadAffinity (uint32_t n, uint32_t m) // cap N of M
    for (i = 0; i < n_groups; i++)
    {
 #if defined(x86_64_HOST_ARCH)
-       // If we support the new API, use it.
        if (mask[i] > 0)
        {
            GROUP_AFFINITY hGroup;
@@ -515,24 +432,15 @@ setThreadAffinity (uint32_t n, uint32_t m) // cap N of M
    free(mask);
 }
-typedef BOOL (WINAPI *PCSIO)(HANDLE);
-
 void
 interruptOSThread (OSThreadId id)
 {
    HANDLE hdl;
-   PCSIO pCSIO;
    if (!(hdl = OpenThread(THREAD_TERMINATE,FALSE,id))) {
        sysErrorBelch("interruptOSThread: OpenThread");
        stg_exit(EXIT_FAILURE);
    }
-   pCSIO = (PCSIO)(void*)GetProcAddress(GetModuleHandle(TEXT("Kernel32.dll")), "CancelSynchronousIo");
-   if ( NULL != pCSIO ) {
-       pCSIO(hdl);
-   } else {
-       // Nothing to do, unfortunately
-   }
+   CancelSynchronousIo(hdl);
    CloseHandle(hdl);
 }
@@ -600,3 +508,59 @@ KernelThreadId kernelThreadId (void)
    DWORD tid = GetCurrentThreadId();
    return tid;
 }
+
+/* Win32 threads and synchronisation objects */
+
+/* A Condition is represented by a Win32 CONDITION_VARIABLE;
+ * a Mutex by an SRW lock (see Note [SRW locks] in includes/rts/OSThreads.h).
+ *
+ * ToDo: go through the defn and usage of these to
+ * make sure the semantics match up with that of
+ * the (assumed) pthreads behaviour. This is really
+ * just a first pass at getting something compilable.
+ */
+
+void
+initCondition( Condition* pCond )
+{
+  InitializeConditionVariable(pCond);
+  return;
+}
+
+void
+closeCondition( Condition* pCond STG_UNUSED)
+{
+  return;
+}
+
+bool
+broadcastCondition ( Condition* pCond )
+{
+  WakeAllConditionVariable(pCond);
+  return true;
+}
+
+bool
+signalCondition ( Condition* pCond )
+{
+  WakeConditionVariable(pCond);
+  return true;
+}
+
+bool
+waitCondition ( Condition* pCond, Mutex* pMut )
+{
+  SleepConditionVariableSRW(pCond, pMut, INFINITE, 0);
+  return true;
+}
+
+void
+initMutex (Mutex* pMut)
+{
+  InitializeSRWLock(pMut);
+}
+void
+closeMutex (Mutex* pMut)
+{
+  (void)pMut;
+}
diff --git a/rts/win32/WorkQueue.c b/rts/win32/WorkQueue.c
index e560bd24cd..dba20c668b 100644
--- a/rts/win32/WorkQueue.c
+++ b/rts/win32/WorkQueue.c
@@ -3,11 +3,13 @@
 *
 * (c) sof, 2002-2003.
 */
+#include "Rts.h"
 #include "WorkQueue.h"
 #include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <windows.h>
 static void queue_error_rc( char* loc, DWORD err);
 static void queue_error( char* loc, char* reason);
@@ -48,7 +50,7 @@ NewWorkQueue()
    memset(wq, 0, sizeof *wq);
-   InitializeCriticalSection(&wq->queueLock);
+   OS_INIT_LOCK(&wq->queueLock);
    wq->workAvailable = newSemaphore(0, WORKQUEUE_SIZE);
    wq->roomAvailable = newSemaphore(WORKQUEUE_SIZE, WORKQUEUE_SIZE);
@@ -83,7 +85,7 @@ FreeWorkQueue ( WorkQueue* pq )
    if ( pq->roomAvailable ) {
        CloseHandle(pq->roomAvailable);
    }
-   DeleteCriticalSection(&pq->queueLock);
+   OS_CLOSE_LOCK(&pq->queueLock);
    free(pq);
    return;
 }
@@ -147,13 +149,13 @@ FetchWork ( WorkQueue* pq, void** ppw )
        return false;
    }
-   EnterCriticalSection(&pq->queueLock);
+   OS_ACQUIRE_LOCK(&pq->queueLock);
    *ppw = pq->items[pq->head];
    /* For sanity's sake, zero out the pointer. */
    pq->items[pq->head] = NULL;
    pq->head = (pq->head + 1) % WORKQUEUE_SIZE;
    rc = ReleaseSemaphore(pq->roomAvailable,1, NULL);
-   LeaveCriticalSection(&pq->queueLock);
+   OS_RELEASE_LOCK(&pq->queueLock);
    if ( 0 == rc ) {
        queue_error_rc("FetchWork.ReleaseSemaphore()", GetLastError());
        return false;
@@ -191,11 +193,11 @@ SubmitWork ( WorkQueue* pq, void* pw )
        return false;
    }
-   EnterCriticalSection(&pq->queueLock);
+   OS_ACQUIRE_LOCK(&pq->queueLock);
    pq->items[pq->tail] = pw;
    pq->tail = (pq->tail + 1) % WORKQUEUE_SIZE;
    rc = ReleaseSemaphore(pq->workAvailable,1, NULL);
-   LeaveCriticalSection(&pq->queueLock);
+   OS_RELEASE_LOCK(&pq->queueLock);
    if ( 0 == rc ) {
        queue_error_rc("SubmitWork.ReleaseSemaphore()", GetLastError());
        return false;
diff --git a/rts/win32/WorkQueue.h b/rts/win32/WorkQueue.h
index 4dbfcd40d3..569a7b4445 100644
--- a/rts/win32/WorkQueue.h
+++ b/rts/win32/WorkQueue.h
@@ -14,12 +14,12 @@
 #define WORKQUEUE_SIZE 16
 typedef HANDLE           Semaphore;
-typedef CRITICAL_SECTION CritSection;
+typedef SRWLOCK          Mutex;
 typedef struct WorkQueue {
    /* the master lock, need to be grabbed prior to using any of the
       other elements of the struct. */
-   CritSection   queueLock;
+   Mutex         queueLock;
    /* consumers/workers block waiting for 'workAvailable' */
    Semaphore     workAvailable;
    Semaphore     roomAvailable;
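One subtlety in the new OSThreads.h macros worth calling out: TRY_ACQUIRE_LOCK keeps the pthread_mutex_trylock convention of returning 0 on success, so the BOOLEAN result of TryAcquireSRWLockExclusive (non-zero when the lock was taken) is inverted with `== 0`. The sketch below shows how a caller typically consumes a macro with that convention; the function, counter, and lock name are hypothetical and not part of the GHC RTS.

```c
#include <windows.h>

/* Same shape as the macros added in includes/rts/OSThreads.h:
 * 0 means "lock acquired", non-zero means "someone else holds it". */
#define TRY_ACQUIRE_LOCK(mutex) (TryAcquireSRWLockExclusive(mutex) == 0)
#define RELEASE_LOCK(mutex)     ReleaseSRWLockExclusive(mutex)

static SRWLOCK       stats_lock     = SRWLOCK_INIT;
static unsigned long fast_path_hits = 0;

/* Bump a statistics counter only if the lock is uncontended;
 * otherwise skip the update rather than block. */
void bump_if_uncontended(void)
{
    if (TRY_ACQUIRE_LOCK(&stats_lock) == 0) {   /* 0 == success, pthread-style */
        fast_path_hits++;
        RELEASE_LOCK(&stats_lock);
    }
}
```

This mirrors how the RTS treats TRY_ACQUIRE_LOCK on POSIX, where pthread_mutex_trylock likewise returns 0 on success.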