author     Ben Gamari <ben@smart-cactus.org>           2023-01-27 08:54:19 -0500
committer  Marge Bot <ben+marge-bot@smart-cactus.org>  2023-03-08 15:02:31 -0500
commit     6c6674cafefbb72f1b9c5b8a005fc62f905c50ea (patch)
tree       f6dfd8a099613acbfcd982eba3f523cfea44f1c2 /rts
parent     96a5aaede899f95fb06dcdb9d0439bbea0f93e14 (diff)
download   haskell-6c6674cafefbb72f1b9c5b8a005fc62f905c50ea.tar.gz
rts: Encapsulate block allocator spinlock
This makes it a bit easier to add instrumentation on this spinlock while debugging.
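
The point of the new ACQUIRE_ALLOC_BLOCK_SPIN_LOCK / RELEASE_ALLOC_BLOCK_SPIN_LOCK macros is that the lock is now taken and released through a single definition, so debugging instrumentation only has to be added in one place rather than at every call site. A minimal sketch of what that could look like, assuming a hypothetical DEBUG_ALLOC_BLOCK_SPIN_LOCK build flag and an invented gc_alloc_block_spin_count counter (neither is part of this patch):

    /* Sketch only -- not part of this commit.  With the lock hidden behind
     * the two macros, contention counting can be switched on in one place.
     * DEBUG_ALLOC_BLOCK_SPIN_LOCK and gc_alloc_block_spin_count are invented
     * names used purely for illustration. */
    #if defined(THREADED_RTS) && defined(DEBUG_ALLOC_BLOCK_SPIN_LOCK)
    extern StgWord64 gc_alloc_block_spin_count;   // hypothetical acquisition counter
    #define ACQUIRE_ALLOC_BLOCK_SPIN_LOCK()              \
        do {                                             \
            gc_alloc_block_spin_count++;                 \
            ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);     \
        } while (0)
    #define RELEASE_ALLOC_BLOCK_SPIN_LOCK() RELEASE_SPIN_LOCK(&gc_alloc_block_sync)
    #else
    #define ACQUIRE_ALLOC_BLOCK_SPIN_LOCK() ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync)
    #define RELEASE_ALLOC_BLOCK_SPIN_LOCK() RELEASE_SPIN_LOCK(&gc_alloc_block_sync)
    #endif

The rest of the patch then rewrites every call site to go through these macros, as shown in the diff below.
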
Diffstat (limited to 'rts')
-rw-r--r--  rts/include/rts/storage/MBlock.h   5
-rw-r--r--  rts/sm/GCUtils.c                  20
-rw-r--r--  rts/sm/GCUtils.h                   3
-rw-r--r--  rts/sm/HeapAlloc.h                 4
-rw-r--r--  rts/sm/NonMoving.c                 4
-rw-r--r--  rts/sm/NonMovingMark.c             4
-rw-r--r--  rts/sm/Storage.h                   9
7 files changed, 28 insertions(+), 21 deletions(-)
diff --git a/rts/include/rts/storage/MBlock.h b/rts/include/rts/storage/MBlock.h
index 3acefda9a0..38789e7863 100644
--- a/rts/include/rts/storage/MBlock.h
+++ b/rts/include/rts/storage/MBlock.h
@@ -25,8 +25,3 @@ extern void freeAllMBlocks(void);
extern void *getFirstMBlock(void **state);
extern void *getNextMBlock(void **state, void *mblock);
-
-#if defined(THREADED_RTS)
-// needed for HEAP_ALLOCED below
-extern SpinLock gc_alloc_block_sync;
-#endif
diff --git a/rts/sm/GCUtils.c b/rts/sm/GCUtils.c
index 86b95e1fd8..4d1a4bed6b 100644
--- a/rts/sm/GCUtils.c
+++ b/rts/sm/GCUtils.c
@@ -36,18 +36,18 @@ bdescr* allocGroup_sync(uint32_t n)
{
bdescr *bd;
uint32_t node = capNoToNumaNode(gct->thread_index);
- ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
+ ACQUIRE_ALLOC_BLOCK_SPIN_LOCK();
bd = allocGroupOnNode(node,n);
- RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
+ RELEASE_ALLOC_BLOCK_SPIN_LOCK();
return bd;
}
bdescr* allocGroupOnNode_sync(uint32_t node, uint32_t n)
{
bdescr *bd;
- ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
+ ACQUIRE_ALLOC_BLOCK_SPIN_LOCK();
bd = allocGroupOnNode(node,n);
- RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
+ RELEASE_ALLOC_BLOCK_SPIN_LOCK();
return bd;
}
@@ -57,7 +57,7 @@ allocBlocks_sync(uint32_t n, bdescr **hd)
bdescr *bd;
uint32_t i;
uint32_t node = capNoToNumaNode(gct->thread_index);
- ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
+ ACQUIRE_ALLOC_BLOCK_SPIN_LOCK();
bd = allocLargeChunkOnNode(node,1,n);
// NB. allocLargeChunk, rather than allocGroup(n), to allocate in a
// fragmentation-friendly way.
@@ -70,7 +70,7 @@ allocBlocks_sync(uint32_t n, bdescr **hd)
bd[n-1].link = NULL;
// We have to hold the lock until we've finished fiddling with the metadata,
// otherwise the block allocator can get confused.
- RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
+ RELEASE_ALLOC_BLOCK_SPIN_LOCK();
*hd = bd;
return n;
}
@@ -78,17 +78,17 @@ allocBlocks_sync(uint32_t n, bdescr **hd)
void
freeChain_sync(bdescr *bd)
{
- ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
+ ACQUIRE_ALLOC_BLOCK_SPIN_LOCK();
freeChain(bd);
- RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
+ RELEASE_ALLOC_BLOCK_SPIN_LOCK();
}
void
freeGroup_sync(bdescr *bd)
{
- ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
+ ACQUIRE_ALLOC_BLOCK_SPIN_LOCK();
freeGroup(bd);
- RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
+ RELEASE_ALLOC_BLOCK_SPIN_LOCK();
}
/* -----------------------------------------------------------------------------
diff --git a/rts/sm/GCUtils.h b/rts/sm/GCUtils.h
index dec81e1755..72d3bad149 100644
--- a/rts/sm/GCUtils.h
+++ b/rts/sm/GCUtils.h
@@ -17,6 +17,9 @@
#include "BeginPrivate.h"
+#define ACQUIRE_ALLOC_BLOCK_SPIN_LOCK() ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync)
+#define RELEASE_ALLOC_BLOCK_SPIN_LOCK() RELEASE_SPIN_LOCK(&gc_alloc_block_sync)
+
bdescr* allocGroup_sync(uint32_t n);
bdescr* allocGroupOnNode_sync(uint32_t node, uint32_t n);
diff --git a/rts/sm/HeapAlloc.h b/rts/sm/HeapAlloc.h
index f91795529d..b9f7c468be 100644
--- a/rts/sm/HeapAlloc.h
+++ b/rts/sm/HeapAlloc.h
@@ -209,9 +209,9 @@ StgBool HEAP_ALLOCED_GC(const void *p)
} else {
// putting the rest out of line turned out to be a slight
// performance improvement:
- ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
+ ACQUIRE_ALLOC_BLOCK_SPIN_LOCK();
b = HEAP_ALLOCED_miss(mblock,p);
- RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
+ RELEASE_ALLOC_BLOCK_SPIN_LOCK();
return b;
}
}
diff --git a/rts/sm/NonMoving.c b/rts/sm/NonMoving.c
index 046f2fa88c..d9a2b1021d 100644
--- a/rts/sm/NonMoving.c
+++ b/rts/sm/NonMoving.c
@@ -636,12 +636,12 @@ static struct NonmovingSegment *nonmovingAllocSegment(uint32_t node)
if (ret == NULL) {
// Take gc spinlock: another thread may be scavenging a moving
// generation and call `todo_block_full`
- ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
+ ACQUIRE_ALLOC_BLOCK_SPIN_LOCK();
bdescr *bd = allocAlignedGroupOnNode(node, NONMOVING_SEGMENT_BLOCKS);
// See Note [Live data accounting in nonmoving collector].
oldest_gen->n_blocks += bd->blocks;
oldest_gen->n_words += BLOCK_SIZE_W * bd->blocks;
- RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
+ RELEASE_ALLOC_BLOCK_SPIN_LOCK();
for (StgWord32 i = 0; i < bd->blocks; ++i) {
initBdescr(&bd[i], oldest_gen, oldest_gen);
diff --git a/rts/sm/NonMovingMark.c b/rts/sm/NonMovingMark.c
index 3bfff4a014..bd018f4ff7 100644
--- a/rts/sm/NonMovingMark.c
+++ b/rts/sm/NonMovingMark.c
@@ -493,13 +493,13 @@ markQueuePushClosureGC (MarkQueue *q, StgClosure *p)
if (q->top->head == MARK_QUEUE_BLOCK_ENTRIES) {
// Yes, this block is full.
// allocate a fresh block.
- ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
+ ACQUIRE_ALLOC_BLOCK_SPIN_LOCK();
bdescr *bd = allocGroup(MARK_QUEUE_BLOCKS);
bd->link = q->blocks;
q->blocks = bd;
q->top = (MarkQueueBlock *) bd->start;
q->top->head = 0;
- RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
+ RELEASE_ALLOC_BLOCK_SPIN_LOCK();
}
MarkQueueEnt ent = {
diff --git a/rts/sm/Storage.h b/rts/sm/Storage.h
index faec383c8f..d3bd9a8dd3 100644
--- a/rts/sm/Storage.h
+++ b/rts/sm/Storage.h
@@ -43,6 +43,15 @@ extern Mutex sm_mutex;
#define ASSERT_SM_LOCK()
#endif
+#if defined(THREADED_RTS)
+// needed for HEAP_ALLOCED below
+extern SpinLock gc_alloc_block_sync;
+#endif
+
+#define ACQUIRE_ALLOC_BLOCK_SPIN_LOCK() ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync)
+#define RELEASE_ALLOC_BLOCK_SPIN_LOCK() RELEASE_SPIN_LOCK(&gc_alloc_block_sync)
+
+
/* -----------------------------------------------------------------------------
The write barrier for MVARs and TVARs
-------------------------------------------------------------------------- */