-rw-r--r--  includes/Rts.h                        |   9
-rw-r--r--  includes/rts/Flags.h                  |   6
-rw-r--r--  includes/rts/storage/Block.h          |   7
-rw-r--r--  includes/rts/storage/GC.h             |   8
-rw-r--r--  includes/rts/storage/InfoTables.h     |   2
-rw-r--r--  includes/rts/storage/TSO.h            |   5
-rw-r--r--  rts/Capability.c                      |   5
-rw-r--r--  rts/PrimOps.cmm                       |   4
-rw-r--r--  rts/Threads.c                         |   4
-rw-r--r--  rts/sm/BlockAlloc.c                   | 144
-rw-r--r--  rts/sm/Evac.c                         |  15
-rw-r--r--  rts/sm/GC.c                           |  21
-rw-r--r--  rts/sm/Sanity.c                       |   8
-rw-r--r--  rts/sm/Storage.c                      |   4
-rw-r--r--  testsuite/tests/rts/testblockalloc.c  | 121
-rw-r--r--  utils/deriveConstants/Main.hs         |   3
16 files changed, 298 insertions, 68 deletions
diff --git a/includes/Rts.h b/includes/Rts.h
index dd60726c39..56642e14c5 100644
--- a/includes/Rts.h
+++ b/includes/Rts.h
@@ -281,26 +281,27 @@ TICK_VAR(2)
#define IF_RTSFLAGS(c,s) if (RtsFlags.c) { s; } doNothing()
#if defined(DEBUG)
+/* See Note [RtsFlags is a pointer in STG code] */
#if IN_STG_CODE
#define IF_DEBUG(c,s) if (RtsFlags[0].DebugFlags.c) { s; } doNothing()
#else
#define IF_DEBUG(c,s) if (RtsFlags.DebugFlags.c) { s; } doNothing()
-#endif
+#endif /* IN_STG_CODE */
#else
#define IF_DEBUG(c,s) doNothing()
-#endif
+#endif /* DEBUG */
#if defined(DEBUG)
#define DEBUG_ONLY(s) s
#else
#define DEBUG_ONLY(s) doNothing()
-#endif
+#endif /* DEBUG */
#if defined(DEBUG)
#define DEBUG_IS_ON 1
#else
#define DEBUG_IS_ON 0
-#endif
+#endif /* DEBUG */
/* -----------------------------------------------------------------------------
Useful macros and inline functions
diff --git a/includes/rts/Flags.h b/includes/rts/Flags.h
index b3caf13c1f..678d556bf1 100644
--- a/includes/rts/Flags.h
+++ b/includes/rts/Flags.h
@@ -268,7 +268,11 @@ typedef struct _RTS_FLAGS {
#if defined(COMPILING_RTS_MAIN)
extern DLLIMPORT RTS_FLAGS RtsFlags;
#elif IN_STG_CODE
-/* Hack because the C code generator can't generate '&label'. */
+/* Note [RtsFlags is a pointer in STG code]
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * When compiling with IN_STG_CODE the RtsFlags symbol is defined as a pointer.
+ * This is necessary because the C code generator can't generate '&label'.
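+ *
+ * For example, the IF_DEBUG macro in includes/Rts.h indexes
+ * RtsFlags[0].DebugFlags when compiled with IN_STG_CODE, and accesses
+ * RtsFlags.DebugFlags directly otherwise.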
+ */
extern RTS_FLAGS RtsFlags[];
#else
extern RTS_FLAGS RtsFlags;
diff --git a/includes/rts/storage/Block.h b/includes/rts/storage/Block.h
index ecd6bf5dd8..792a72d717 100644
--- a/includes/rts/storage/Block.h
+++ b/includes/rts/storage/Block.h
@@ -290,6 +290,13 @@ EXTERN_INLINE bdescr* allocBlock(void)
bdescr *allocGroupOnNode(uint32_t node, W_ n);
+// Allocate n blocks, aligned at an n-block boundary. The returned bdescr
+// satisfies the invariant
+//
+//     bdescr->start % (BLOCK_SIZE*n) == 0
+//
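+// For example (a sketch of the intended use, not an API provided here), a
+// pointer p into such a group can recover the group's start address with
+//
+//     (P_)((W_)p - ((W_)p % (BLOCK_SIZE*n)))
+//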
+bdescr *allocAlignedGroupOnNode(uint32_t node, W_ n);
+
EXTERN_INLINE bdescr* allocBlockOnNode(uint32_t node);
EXTERN_INLINE bdescr* allocBlockOnNode(uint32_t node)
{
diff --git a/includes/rts/storage/GC.h b/includes/rts/storage/GC.h
index 1571975852..77dbe60297 100644
--- a/includes/rts/storage/GC.h
+++ b/includes/rts/storage/GC.h
@@ -240,9 +240,17 @@ void dirty_MUT_VAR(StgRegTable *reg, StgClosure *p);
/* (needed when dynamic libraries are used). */
extern bool keepCAFs;
+#include "rts/Flags.h"
+
INLINE_HEADER void initBdescr(bdescr *bd, generation *gen, generation *dest)
{
bd->gen = gen;
bd->gen_no = gen->no;
bd->dest_no = dest->no;
+
+#if !IN_STG_CODE
+ /* See Note [RtsFlags is a pointer in STG code] */
+ ASSERT(gen->no < RtsFlags.GcFlags.generations);
+ ASSERT(dest->no < RtsFlags.GcFlags.generations);
+#endif
}
diff --git a/includes/rts/storage/InfoTables.h b/includes/rts/storage/InfoTables.h
index 4de5207b4d..b97e12982b 100644
--- a/includes/rts/storage/InfoTables.h
+++ b/includes/rts/storage/InfoTables.h
@@ -355,7 +355,7 @@ typedef struct StgConInfoTable_ {
*/
#if defined(TABLES_NEXT_TO_CODE)
#define GET_CON_DESC(info) \
- ((const char *)((StgWord)((info)+1) + (info->con_desc)))
+ ((const char *)((StgWord)((info)+1) + ((info)->con_desc)))
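+/* The added parentheses around (info) matter when the macro argument is an
+   expression such as a cast: without them, 'info->con_desc' would attach
+   '->con_desc' to only part of the expanded argument. */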
#else
#define GET_CON_DESC(info) ((const char *)(info)->con_desc)
#endif
diff --git a/includes/rts/storage/TSO.h b/includes/rts/storage/TSO.h
index 93018581fd..63d2a11e8e 100644
--- a/includes/rts/storage/TSO.h
+++ b/includes/rts/storage/TSO.h
@@ -185,6 +185,11 @@ typedef struct StgTSO_ {
} *StgTSOPtr; // StgTSO defined in rts/Types.h
+
+#define STACK_DIRTY 1
+// used by the sanity checker to verify that all dirty stacks are on the mutable list
+#define STACK_SANE 64
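+// Both are bits in the StgStack 'dirty' field: dirty_STACK() sets STACK_DIRTY
+// when a stack is first mutated, and the sanity checker (rts/sm/Sanity.c) sets
+// STACK_SANE for stacks found on the mutable list, clearing it again while
+// walking the global TSO list.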
+
typedef struct StgStack_ {
StgHeader header;
StgWord32 stack_size; // stack size in *words*
diff --git a/rts/Capability.c b/rts/Capability.c
index 33a94398cd..8b552e0b09 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -748,6 +748,8 @@ static Capability * waitForReturnCapability (Task *task)
* result of the external call back to the Haskell thread that
* made it.
*
+ * pCap is strictly an output.
+ *
* ------------------------------------------------------------------------- */
void waitForCapability (Capability **pCap, Task *task)
@@ -840,6 +842,9 @@ void waitForCapability (Capability **pCap, Task *task)
* SYNC_GC_PAR), either to do a sequential GC, forkProcess, or
* setNumCapabilities. We should give up the Capability temporarily.
*
+ * When yieldCapability returns, *pCap will have been updated to the new
+ * capability held by the caller.
+ *
* ------------------------------------------------------------------------- */
#if defined(THREADED_RTS)
diff --git a/rts/PrimOps.cmm b/rts/PrimOps.cmm
index d06cde05d9..a2ab3de586 100644
--- a/rts/PrimOps.cmm
+++ b/rts/PrimOps.cmm
@@ -1744,7 +1744,7 @@ loop:
// indicate that the MVar operation has now completed.
StgTSO__link(tso) = stg_END_TSO_QUEUE_closure;
- if (TO_W_(StgStack_dirty(stack)) == 0) {
+ if ((TO_W_(StgStack_dirty(stack)) & STACK_DIRTY) == 0) {
ccall dirty_STACK(MyCapability() "ptr", stack "ptr");
}
@@ -1829,7 +1829,7 @@ loop:
// indicate that the MVar operation has now completed.
StgTSO__link(tso) = stg_END_TSO_QUEUE_closure;
- if (TO_W_(StgStack_dirty(stack)) == 0) {
+ if ((TO_W_(StgStack_dirty(stack)) & STACK_DIRTY) == 0) {
ccall dirty_STACK(MyCapability() "ptr", stack "ptr");
}
diff --git a/rts/Threads.c b/rts/Threads.c
index 2bdcea1c00..3d5b463051 100644
--- a/rts/Threads.c
+++ b/rts/Threads.c
@@ -85,7 +85,7 @@ createThread(Capability *cap, W_ size)
SET_HDR(stack, &stg_STACK_info, cap->r.rCCCS);
stack->stack_size = stack_size - sizeofW(StgStack);
stack->sp = stack->stack + stack->stack_size;
- stack->dirty = 1;
+ stack->dirty = STACK_DIRTY;
tso = (StgTSO *)allocate(cap, sizeofW(StgTSO));
TICK_ALLOC_TSO();
@@ -804,7 +804,7 @@ loop:
// indicate that the MVar operation has now completed.
tso->_link = (StgTSO*)&stg_END_TSO_QUEUE_closure;
- if (stack->dirty == 0) {
+ if ((stack->dirty & STACK_DIRTY) == 0) {
dirty_STACK(cap, stack);
}
diff --git a/rts/sm/BlockAlloc.c b/rts/sm/BlockAlloc.c
index f9e3d11407..b3e1e2ce75 100644
--- a/rts/sm/BlockAlloc.c
+++ b/rts/sm/BlockAlloc.c
@@ -310,7 +310,7 @@ setup_tail (bdescr *bd)
// Take a free block group bd, and split off a group of size n from
// it. Adjust the free list as necessary, and return the new group.
static bdescr *
-split_free_block (bdescr *bd, uint32_t node, W_ n, uint32_t ln)
+split_free_block (bdescr *bd, uint32_t node, W_ n, uint32_t ln /* log_2_ceil(n) */)
{
bdescr *fg; // free group
@@ -325,6 +325,46 @@ split_free_block (bdescr *bd, uint32_t node, W_ n, uint32_t ln)
return fg;
}
+// Take n blocks off the end of the given group and free the rest.
+static bdescr *
+split_block_high (bdescr *bd, W_ n)
+{
+ ASSERT(bd->blocks > n);
+
+ bdescr* ret = bd + bd->blocks - n; // take n blocks off the end
+ ret->blocks = n;
+ ret->start = ret->free = bd->start + (bd->blocks - n)*BLOCK_SIZE_W;
+ ret->link = NULL;
+
+ bd->blocks -= n;
+
+ setup_tail(ret);
+ setup_tail(bd);
+ freeGroup(bd);
+
+ return ret;
+}
+
+// Like `split_block_high`, but takes n blocks off the beginning rather
+// than the end.
+static bdescr *
+split_block_low (bdescr *bd, W_ n)
+{
+ ASSERT(bd->blocks > n);
+
+ bdescr* bd_ = bd + n;
+ bd_->blocks = bd->blocks - n;
+ bd_->start = bd_->free = bd->start + n*BLOCK_SIZE_W;
+
+ bd->blocks = n;
+
+ setup_tail(bd_);
+ setup_tail(bd);
+ freeGroup(bd_);
+
+ return bd;
+}
+
/* Only initializes the start pointers on the first megablock and the
* blocks field of the first bdescr; callers are responsible for calling
* initGroup afterwards.
@@ -461,6 +501,108 @@ finish:
return bd;
}
+// Allocate `n` blocks aligned to `n` blocks, e.g. when n = 8, the blocks will
+// be aligned at `8 * BLOCK_SIZE`. For a group of `n` blocks this makes it easy
+// to find the beginning of the group from a location p inside the group:
+//
+//     p - (p % (BLOCK_SIZE*n))
+//
+// Used by the non-moving collector for allocating segments.
+//
+// Because the storage manager does not support aligned allocations, we have to
+// allocate `2*n - 1` blocks here to make sure we'll be able to find an aligned
+// region in the allocated blocks. After finding the aligned area we want to
+// free the slop on the low and high sides, but the block allocator doesn't
+// support freeing only some portion of a megablock (we can only free whole
+// megablocks). So we disallow allocating megablocks here, and allow allocating
+// at most `BLOCKS_PER_MBLOCK / 2` blocks.
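+//
+// A worked example with illustrative numbers (assuming the usual 4096-byte
+// BLOCK_SIZE): for n = 8, group_size = 0x8000 and we request 2*8 - 1 = 15
+// blocks. If the allocation happens to start at offset 0x5000 within its
+// alignment window, slop_low = 0x8000 - 0x5000 = 0x3000 (3 blocks), which
+// split_block_high splits off and frees; the group is then aligned, and the
+// remaining 15 - 3 - 8 = 4 blocks of slop_high are freed by split_block_low.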
+bdescr *
+allocAlignedGroupOnNode (uint32_t node, W_ n)
+{
+ // allocate enough blocks to have enough space aligned at n-block boundary
+ // free any slops on the low and high side of this space
+
+ // number of blocks to allocate to make sure we have enough aligned space
+ W_ num_blocks = 2*n - 1;
+
+ if (num_blocks >= BLOCKS_PER_MBLOCK) {
+ barf("allocAlignedGroupOnNode: allocating megablocks is not supported\n"
+ " requested blocks: %" FMT_Word "\n"
+ " required for alignment: %" FMT_Word "\n"
+ " megablock size (in blocks): %" FMT_Word,
+ n, num_blocks, (W_) BLOCKS_PER_MBLOCK);
+ }
+
+ W_ group_size = n * BLOCK_SIZE;
+
+ // To reduce splitting and fragmentation we use allocLargeChunkOnNode here.
+ // Tweak the max allocation to avoid allocating megablocks. Splitting slop
+ // below doesn't work with megablocks (freeGroup can't free only a portion
+ // of a megablock so we can't allocate megablocks and free some parts of
+ // them).
+ W_ max_blocks = stg_min(num_blocks * 3, BLOCKS_PER_MBLOCK - 1);
+ bdescr *bd = allocLargeChunkOnNode(node, num_blocks, max_blocks);
+ // We may allocate more than num_blocks, so update it
+ num_blocks = bd->blocks;
+
+ // slop on the low side
+ W_ slop_low = 0;
+ if ((uintptr_t)bd->start % group_size != 0) {
+ slop_low = group_size - ((uintptr_t)bd->start % group_size);
+ }
+
+ W_ slop_high = (num_blocks * BLOCK_SIZE) - group_size - slop_low;
+
+ ASSERT((slop_low % BLOCK_SIZE) == 0);
+ ASSERT((slop_high % BLOCK_SIZE) == 0);
+
+ W_ slop_low_blocks = slop_low / BLOCK_SIZE;
+ W_ slop_high_blocks = slop_high / BLOCK_SIZE;
+
+ ASSERT(slop_low_blocks + slop_high_blocks + n == num_blocks);
+
+#if defined(DEBUG)
+ checkFreeListSanity();
+ W_ free_before = countFreeList();
+#endif
+
+ if (slop_low_blocks != 0) {
+ bd = split_block_high(bd, num_blocks - slop_low_blocks);
+ ASSERT(countBlocks(bd) == num_blocks - slop_low_blocks);
+ }
+
+#if defined(DEBUG)
+ ASSERT(countFreeList() == free_before + slop_low_blocks);
+ checkFreeListSanity();
+#endif
+
+ // At this point the bd should be aligned, but we may have slop on the high side
+ ASSERT((uintptr_t)bd->start % group_size == 0);
+
+#if defined(DEBUG)
+ free_before = countFreeList();
+#endif
+
+ if (slop_high_blocks != 0) {
+ bd = split_block_low(bd, n);
+ ASSERT(bd->blocks == n);
+ }
+
+#if defined(DEBUG)
+ ASSERT(countFreeList() == free_before + slop_high_blocks);
+ checkFreeListSanity();
+#endif
+
+ // Should still be aligned
+ ASSERT((uintptr_t)bd->start % group_size == 0);
+
+ // Sanity check: the group's start address should map back to this bdescr
+ ASSERT(Bdescr(bd->start) == bd);
+
+ return bd;
+}
+
STATIC_INLINE
uint32_t nodeWithLeastBlocks (void)
{
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 53a473d26c..666daf0e32 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -39,7 +39,19 @@
copy_tag(p, info, src, size, stp, tag)
#endif
-/* Used to avoid long recursion due to selector thunks
+/* Note [Selector optimisation depth limit]
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * MAX_THUNK_SELECTOR_DEPTH is used to avoid long recursion of
+ * eval_thunk_selector due to nested selector thunks. Note that this *only*
+ * counts nested selector thunks, e.g. `fst (fst (... (fst x)))`. The collector
+ * will traverse interleaved selector-constructor pairs without limit, e.g.
+ *
+ * a = (fst b, _)
+ * b = (fst c, _)
+ * c = (fst d, _)
+ * d = (x, _)
+ *
*/
#define MAX_THUNK_SELECTOR_DEPTH 16
@@ -1257,6 +1269,7 @@ selector_loop:
// recursively evaluate this selector. We don't want to
// recurse indefinitely, so we impose a depth bound.
+ // See Note [Selector optimisation depth limit].
if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
goto bale_out;
}
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index 76237f35c2..3f301aeb53 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -1841,21 +1841,16 @@ resize_nursery (void)
#if defined(DEBUG)
-static void gcCAFs(void)
+void gcCAFs(void)
{
- StgIndStatic *p, *prev;
+ uint32_t i = 0;
+ StgIndStatic *prev = NULL;
- const StgInfoTable *info;
- uint32_t i;
-
- i = 0;
- p = debug_caf_list;
- prev = NULL;
-
- for (p = debug_caf_list; p != (StgIndStatic*)END_OF_CAF_LIST;
- p = (StgIndStatic*)p->saved_info) {
-
- info = get_itbl((StgClosure*)p);
+ for (StgIndStatic *p = debug_caf_list;
+ p != (StgIndStatic*) END_OF_CAF_LIST;
+ p = (StgIndStatic*) p->saved_info)
+ {
+ const StgInfoTable *info = get_itbl((StgClosure*)p);
ASSERT(info->type == IND_STATIC);
// See Note [STATIC_LINK fields] in Storage.h
diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c
index 3585bd93b4..289ac542b8 100644
--- a/rts/sm/Sanity.c
+++ b/rts/sm/Sanity.c
@@ -632,9 +632,9 @@ checkGlobalTSOList (bool checkTSOs)
stack = tso->stackobj;
while (1) {
- if (stack->dirty & 1) {
- ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & TSO_MARKED));
- stack->dirty &= ~TSO_MARKED;
+ if (stack->dirty & STACK_DIRTY) {
+ ASSERT(Bdescr((P_)stack)->gen_no == 0 || (stack->dirty & STACK_SANE));
+ stack->dirty &= ~STACK_SANE;
}
frame = (StgUnderflowFrame*) (stack->stack + stack->stack_size
- sizeofW(StgUnderflowFrame));
@@ -669,7 +669,7 @@ checkMutableList( bdescr *mut_bd, uint32_t gen )
((StgTSO *)p)->flags |= TSO_MARKED;
break;
case STACK:
- ((StgStack *)p)->dirty |= TSO_MARKED;
+ ((StgStack *)p)->dirty |= STACK_SANE;
break;
}
}
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 0130a08f7c..97c71478ed 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -1139,8 +1139,8 @@ dirty_TSO (Capability *cap, StgTSO *tso)
void
dirty_STACK (Capability *cap, StgStack *stack)
{
- if (stack->dirty == 0) {
- stack->dirty = 1;
+ if (! (stack->dirty & STACK_DIRTY)) {
+ stack->dirty = STACK_DIRTY;
recordClosureMutated(cap,(StgClosure*)stack);
}
}
diff --git a/testsuite/tests/rts/testblockalloc.c b/testsuite/tests/rts/testblockalloc.c
index 577245f45e..53eed24015 100644
--- a/testsuite/tests/rts/testblockalloc.c
+++ b/testsuite/tests/rts/testblockalloc.c
@@ -3,6 +3,7 @@
#include <stdio.h>
extern bdescr *allocGroup_lock_lock(uint32_t n);
+extern bdescr *allocAlignedGroupOnNode (uint32_t node, W_ n);
extern void freeGroup_lock(bdescr *p);
const int ARRSIZE = 256;
@@ -13,64 +14,110 @@ const int SEED = 0xf00f00;
extern StgWord mblocks_allocated;
-int main (int argc, char *argv[])
+static void test_random_alloc(void)
{
- int i, j, b;
-
bdescr *a[ARRSIZE];
- srand(SEED);
+ // repeatedly sweep through the array, allocating new random-sized
+ // objects and deallocating the old ones.
+ for (int i=0; i < LOOPS; i++)
+ {
+ for (int j=0; j < ARRSIZE; j++)
+ {
+ if (i > 0)
+ {
+ IF_DEBUG(block_alloc, debugBelch("A%d: freeing %p, %d blocks @ %p\n", j, a[j], a[j]->blocks, a[j]->start));
+ freeGroup_lock(a[j]);
+ DEBUG_ONLY(checkFreeListSanity());
+ }
+
+ int b = (rand() % MAXALLOC) + 1;
+ a[j] = allocGroup_lock(b);
+ IF_DEBUG(block_alloc, debugBelch("A%d: allocated %p, %d blocks @ %p\n", j, a[j], b, a[j]->start));
+ // allocating zero blocks isn't allowed
+ DEBUG_ONLY(checkFreeListSanity());
+ }
+ }
+ for (int j=0; j < ARRSIZE; j++)
{
- RtsConfig conf = defaultRtsConfig;
- conf.rts_opts_enabled = RtsOptsAll;
- hs_init_ghc(&argc, &argv, conf);
+ freeGroup_lock(a[j]);
}
+}
+
+static void test_sequential_alloc(void)
+{
+ bdescr *a[ARRSIZE];
- // repeatedly sweep though the array, allocating new random-sized
- // objects and deallocating the old ones.
- for (i=0; i < LOOPS; i++)
- {
- for (j=0; j < ARRSIZE; j++)
- {
- if (i > 0)
- {
- IF_DEBUG(block_alloc, debugBelch("A%d: freeing %p, %d blocks @ %p\n", j, a[j], a[j]->blocks, a[j]->start));
- freeGroup_lock(a[j]);
- DEBUG_ONLY(checkFreeListSanity());
- }
- b = (rand() % MAXALLOC) + 1;
- a[j] = allocGroup_lock(b);
- IF_DEBUG(block_alloc, debugBelch("A%d: allocated %p, %d blocks @ %p\n", j, a[j], b, a[j]->start));
- // allocating zero blocks isn't allowed
- DEBUG_ONLY(checkFreeListSanity());
- }
- }
-
- for (j=0; j < ARRSIZE; j++)
- {
- freeGroup_lock(a[j]);
- }
-
// this time, sweep forwards allocating new blocks, and then
// backwards deallocating them.
- for (i=0; i < LOOPS; i++)
+ for (int i=0; i < LOOPS; i++)
{
- for (j=0; j < ARRSIZE; j++)
+ for (int j=0; j < ARRSIZE; j++)
{
- b = (rand() % MAXALLOC) + 1;
+ int b = (rand() % MAXALLOC) + 1;
a[j] = allocGroup_lock(b);
IF_DEBUG(block_alloc, debugBelch("B%d,%d: allocated %p, %d blocks @ %p\n", i, j, a[j], b, a[j]->start));
DEBUG_ONLY(checkFreeListSanity());
}
- for (j=ARRSIZE-1; j >= 0; j--)
+ for (int j=ARRSIZE-1; j >= 0; j--)
{
IF_DEBUG(block_alloc, debugBelch("B%d,%d: freeing %p, %d blocks @ %p\n", i, j, a[j], a[j]->blocks, a[j]->start));
freeGroup_lock(a[j]);
DEBUG_ONLY(checkFreeListSanity());
}
}
-
+}
+
+static void test_aligned_alloc(void)
+{
+ bdescr *a[ARRSIZE];
+
+ // sweep forwards allocating new aligned groups, and then backwards
+ // deallocating them.
+ for (int i=0; i < LOOPS; i++)
+ {
+ for (int j=0; j < ARRSIZE; j++)
+ {
+ // allocAlignedGroupOnNode does not support allocating more than
+ // BLOCKS_PER_MBLOCK/2 blocks.
+ int b = rand() % (BLOCKS_PER_MBLOCK / 2);
+ if (b == 0) { b = 1; }
+ a[j] = allocAlignedGroupOnNode(0, b);
+ if ((((W_)(a[j]->start)) % (b*BLOCK_SIZE)) != 0)
+ {
+ barf("%p is not aligned to allocation size %d", a[j], b);
+ }
+ IF_DEBUG(block_alloc, debugBelch("B%d,%d: allocated %p, %d blocks @ %p\n", i, j, a[j], b, a[j]->start));
+ DEBUG_ONLY(checkFreeListSanity());
+ }
+ for (int j=ARRSIZE-1; j >= 0; j--)
+ {
+ IF_DEBUG(block_alloc, debugBelch("B%d,%d: freeing %p, %d blocks @ %p\n", i, j, a[j], a[j]->blocks, a[j]->start));
+ freeGroup_lock(a[j]);
+ DEBUG_ONLY(checkFreeListSanity());
+ }
+ }
+}
+
+int main (int argc, char *argv[])
+{
+
+ srand(SEED);
+
+ {
+ RtsConfig conf = defaultRtsConfig;
+ conf.rts_opts_enabled = RtsOptsAll;
+ hs_init_ghc(&argc, &argv, conf);
+ }
+
+ test_random_alloc();
+ test_sequential_alloc();
+ test_aligned_alloc();
+
DEBUG_ONLY(checkFreeListSanity());
hs_exit(); // will do a memory leak test
diff --git a/utils/deriveConstants/Main.hs b/utils/deriveConstants/Main.hs
index 54533254dd..f6f590715b 100644
--- a/utils/deriveConstants/Main.hs
+++ b/utils/deriveConstants/Main.hs
@@ -307,6 +307,9 @@ wanteds os = concat
"sizeofW(StgHeader) - sizeofW(StgProfHeader)"
,constantWord Both "PROF_HDR_SIZE" "sizeofW(StgProfHeader)"
+ -- Stack flags for C--
+ ,constantWord C "STACK_DIRTY" "STACK_DIRTY"
+
-- Size of a storage manager block (in bytes).
,constantWord Both "BLOCK_SIZE" "BLOCK_SIZE"
,constantWord C "MBLOCK_SIZE" "MBLOCK_SIZE"