summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBen Gamari <ben@smart-cactus.org>2022-10-22 16:22:47 +0000
committerMarge Bot <ben+marge-bot@smart-cactus.org>2023-03-08 15:02:30 -0500
commit5b7f65767fbc2967e01a13ee580598e976f5d225 (patch)
treec3af53445c80387bcefbdeaa63182ca3b36538e3
parentda7b2b941d235a284d5685829c235a9e671a0336 (diff)
downloadhaskell-5b7f65767fbc2967e01a13ee580598e976f5d225.tar.gz
rts/BlockAlloc: Allow disabling of internal assertions
These can be quite expensive and it is sometimes useful to compile a DEBUG RTS without them.
-rw-r--r--rts/sm/BlockAlloc.c22
1 file changed, 16 insertions, 6 deletions
diff --git a/rts/sm/BlockAlloc.c b/rts/sm/BlockAlloc.c
index 158698b08f..7f78bac04a 100644
--- a/rts/sm/BlockAlloc.c
+++ b/rts/sm/BlockAlloc.c
@@ -27,6 +27,16 @@
static void initMBlock(void *mblock, uint32_t node);
+/*
+ * By default the DEBUG RTS is built with block allocator assertions
+ * enabled. However, these are quite expensive and consequently it can
+ * sometimes be useful to disable them if debugging an issue known to be
+ * elsewhere
+ */
+#if defined(DEBUG)
+#define BLOCK_ALLOC_DEBUG
+#endif
+
/* -----------------------------------------------------------------------------
Implementation notes
@@ -246,7 +256,7 @@ initGroup(bdescr *head)
last->link = head;
}
-#if defined(DEBUG)
+#if defined(BLOCK_ALLOC_DEBUG)
for (uint32_t i=0; i < head->blocks; i++) {
head[i].flags = 0;
}
@@ -618,7 +628,7 @@ allocAlignedGroupOnNode (uint32_t node, W_ n)
ASSERT(slop_low_blocks + slop_high_blocks + n == num_blocks);
-#if defined(DEBUG)
+#if defined(BLOCK_ALLOC_DEBUG)
checkFreeListSanity();
W_ free_before = countFreeList();
#endif
@@ -628,7 +638,7 @@ allocAlignedGroupOnNode (uint32_t node, W_ n)
ASSERT(countBlocks(bd) == num_blocks - slop_low_blocks);
}
-#if defined(DEBUG)
+#if defined(BLOCK_ALLOC_DEBUG)
ASSERT(countFreeList() == free_before + slop_low_blocks);
checkFreeListSanity();
#endif
@@ -636,7 +646,7 @@ allocAlignedGroupOnNode (uint32_t node, W_ n)
// At this point the bd should be aligned, but we may have slop on the high side
ASSERT((uintptr_t)bd->start % group_size == 0);
-#if defined(DEBUG)
+#if defined(BLOCK_ALLOC_DEBUG)
free_before = countFreeList();
#endif
@@ -645,7 +655,7 @@ allocAlignedGroupOnNode (uint32_t node, W_ n)
ASSERT(bd->blocks == n);
}
-#if defined(DEBUG)
+#if defined(BLOCK_ALLOC_DEBUG)
ASSERT(countFreeList() == free_before + slop_high_blocks);
checkFreeListSanity();
#endif
@@ -928,7 +938,7 @@ freeGroup(bdescr *p)
ASSERT(RELAXED_LOAD(&p->free) != (P_)-1);
-#if defined(DEBUG)
+#if defined(BLOCK_ALLOC_DEBUG)
for (uint32_t i=0; i < p->blocks; i++) {
p[i].flags = 0;
}