summaryrefslogtreecommitdiff
path: root/rts/Updates.h
diff options
context:
space:
mode:
author    Simon Marlow <marlowsd@gmail.com>  2010-12-15 12:08:43 +0000
committer Simon Marlow <marlowsd@gmail.com>  2010-12-15 12:08:43 +0000
commit    f30d527344db528618f64a25250a3be557d9f287 (patch)
tree      5b827afed254139a197cbdcdd37bebe8fa859d67 /rts/Updates.h
parent    99b6e6ac44c6c610b0d60e3b70a2341c83d23106 (diff)
download  haskell-f30d527344db528618f64a25250a3be557d9f287.tar.gz
Implement stack chunks and separate TSO/STACK objects
This patch makes two changes to the way stacks are managed:

1. The stack is now stored in a separate object from the TSO.

   This means that it is easier to replace the stack object for a thread
   when the stack overflows or underflows; we don't have to leave behind
   the old TSO as an indirection any more. Consequently, we can remove
   ThreadRelocated and deRefTSO(), which were a pain.

   This is obviously the right thing, but the last time I tried to do it
   it made performance worse. This time I seem to have cracked it.

2. Stacks are now represented as a chain of chunks, rather than a single
   monolithic object.

   The big advantage here is that individual chunks are marked clean or
   dirty according to whether they contain pointers to the young
   generation, and the GC can avoid traversing clean stack chunks during
   a young-generation collection. This means that programs with deep
   stacks will see a big saving in GC overhead when using the default GC
   settings.

   A secondary advantage is that there is much less copying involved as
   the stack grows. Programs that quickly grow a deep stack will see big
   improvements.

   In some ways the implementation is simpler, as nothing special needs
   to be done to reclaim stack as the stack shrinks (the GC just
   recovers the dead stack chunks). On the other hand, we have to manage
   stack underflow between chunks, so there's a new stack frame
   (UNDERFLOW_FRAME), and we now have separate TSO and STACK objects.
   The total amount of code is probably about the same as before.

   There are new RTS flags:

      -ki<size>  Sets the initial thread stack size (default 1k)
                 Egs: -ki4k -ki2m
      -kc<size>  Sets the stack chunk size (default 32k)
      -kb<size>  Sets the stack chunk buffer size (default 1k)

   -ki was previously called just -k, and the old name is still accepted
   for backwards compatibility. These new options are documented.
Diffstat (limited to 'rts/Updates.h')
-rw-r--r--  rts/Updates.h  97
1 file changed, 3 insertions, 94 deletions
diff --git a/rts/Updates.h b/rts/Updates.h
index 2258c988bb..954f02afe1 100644
--- a/rts/Updates.h
+++ b/rts/Updates.h
@@ -18,101 +18,12 @@
-------------------------------------------------------------------------- */
/* LDV profiling:
- * We call LDV_recordDead_FILL_SLOP_DYNAMIC(p1) regardless of the generation in
- * which p1 resides.
- *
- * Note:
* After all, we do *NOT* need to call LDV_RECORD_CREATE() for IND
* closures because they are inherently used. But, it corrupts
* the invariants that every closure keeps its creation time in the profiling
* field. So, we call LDV_RECORD_CREATE().
*/
-/* In the DEBUG case, we also zero out the slop of the old closure,
- * so that the sanity checker can tell where the next closure is.
- *
- * Two important invariants: we should never try to update a closure
- * to point to itself, and the closure being updated should not
- * already have been updated (the mutable list will get messed up
- * otherwise).
- *
- * NB. We do *not* do this in THREADED_RTS mode, because when we have the
- * possibility of multiple threads entering the same closure, zeroing
- * the slop in one of the threads would have a disastrous effect on
- * the other (seen in the wild!).
- */
-#ifdef CMINUSMINUS
-
-#define FILL_SLOP(p) \
- W_ inf; \
- W_ sz; \
- W_ i; \
- inf = %GET_STD_INFO(p); \
- if (%INFO_TYPE(inf) != HALF_W_(BLACKHOLE)) { \
- if (%INFO_TYPE(inf) == HALF_W_(THUNK_SELECTOR)) { \
- sz = BYTES_TO_WDS(SIZEOF_StgSelector_NoThunkHdr); \
- } else { \
- if (%INFO_TYPE(inf) == HALF_W_(AP_STACK)) { \
- sz = StgAP_STACK_size(p) + BYTES_TO_WDS(SIZEOF_StgAP_STACK_NoThunkHdr); \
- } else { \
- if (%INFO_TYPE(inf) == HALF_W_(AP)) { \
- sz = TO_W_(StgAP_n_args(p)) + BYTES_TO_WDS(SIZEOF_StgAP_NoThunkHdr); \
- } else { \
- sz = TO_W_(%INFO_PTRS(inf)) + TO_W_(%INFO_NPTRS(inf)); \
- } \
- } \
- } \
- i = 0; \
- for: \
- if (i < sz) { \
- StgThunk_payload(p,i) = 0; \
- i = i + 1; \
- goto for; \
- } \
- }
-
-#else /* !CMINUSMINUS */
-
-INLINE_HEADER void
-FILL_SLOP(StgClosure *p)
-{
- StgInfoTable *inf = get_itbl(p);
- nat i, sz;
-
- switch (inf->type) {
- case BLACKHOLE:
- goto no_slop;
- // we already filled in the slop when we overwrote the thunk
- // with BLACKHOLE, and also an evacuated BLACKHOLE is only the
- // size of an IND.
- case THUNK_SELECTOR:
- sz = sizeofW(StgSelector) - sizeofW(StgThunkHeader);
- break;
- case AP:
- sz = ((StgAP *)p)->n_args + sizeofW(StgAP) - sizeofW(StgThunkHeader);
- break;
- case AP_STACK:
- sz = ((StgAP_STACK *)p)->size + sizeofW(StgAP_STACK) - sizeofW(StgThunkHeader);
- break;
- default:
- sz = inf->layout.payload.ptrs + inf->layout.payload.nptrs;
- break;
- }
- for (i = 0; i < sz; i++) {
- ((StgThunk *)p)->payload[i] = 0;
- }
-no_slop:
- ;
-}
-
-#endif /* CMINUSMINUS */
-
-#if !defined(DEBUG) || defined(THREADED_RTS)
-#define DEBUG_FILL_SLOP(p) /* do nothing */
-#else
-#define DEBUG_FILL_SLOP(p) FILL_SLOP(p)
-#endif
-
/* We have two versions of this macro (sadly), one for use in C-- code,
* and the other for C.
*
@@ -128,9 +39,8 @@ no_slop:
#define updateWithIndirection(p1, p2, and_then) \
W_ bd; \
\
- DEBUG_FILL_SLOP(p1); \
- LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(p1); \
- StgInd_indirectee(p1) = p2; \
+ OVERWRITING_CLOSURE(p1); \
+ StgInd_indirectee(p1) = p2; \
prim %write_barrier() []; \
SET_INFO(p1, stg_BLACKHOLE_info); \
LDV_RECORD_CREATE(p1); \
@@ -155,8 +65,7 @@ INLINE_HEADER void updateWithIndirection (Capability *cap,
ASSERT( (P_)p1 != (P_)p2 );
/* not necessarily true: ASSERT( !closure_IND(p1) ); */
/* occurs in RaiseAsync.c:raiseAsync() */
- DEBUG_FILL_SLOP(p1);
- LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(p1);
+ OVERWRITING_CLOSURE(p1);
((StgInd *)p1)->indirectee = p2;
write_barrier();
SET_INFO(p1, &stg_BLACKHOLE_info);