summaryrefslogtreecommitdiff
path: root/rts/Updates.h
diff options
context:
space:
mode:
Diffstat (limited to 'rts/Updates.h')
-rw-r--r--  rts/Updates.h  97
1 file changed, 3 insertions(+), 94 deletions(-)
diff --git a/rts/Updates.h b/rts/Updates.h
index 2258c988bb..954f02afe1 100644
--- a/rts/Updates.h
+++ b/rts/Updates.h
@@ -18,101 +18,12 @@
-------------------------------------------------------------------------- */
/* LDV profiling:
- * We call LDV_recordDead_FILL_SLOP_DYNAMIC(p1) regardless of the generation in
- * which p1 resides.
- *
- * Note:
* After all, we do *NOT* need to call LDV_RECORD_CREATE() for IND
* closures because they are inherently used. But, it corrupts
* the invariants that every closure keeps its creation time in the profiling
* field. So, we call LDV_RECORD_CREATE().
*/
-/* In the DEBUG case, we also zero out the slop of the old closure,
- * so that the sanity checker can tell where the next closure is.
- *
- * Two important invariants: we should never try to update a closure
- * to point to itself, and the closure being updated should not
- * already have been updated (the mutable list will get messed up
- * otherwise).
- *
- * NB. We do *not* do this in THREADED_RTS mode, because when we have the
- * possibility of multiple threads entering the same closure, zeroing
- * the slop in one of the threads would have a disastrous effect on
- * the other (seen in the wild!).
- */
-#ifdef CMINUSMINUS
-
-#define FILL_SLOP(p) \
- W_ inf; \
- W_ sz; \
- W_ i; \
- inf = %GET_STD_INFO(p); \
- if (%INFO_TYPE(inf) != HALF_W_(BLACKHOLE)) { \
- if (%INFO_TYPE(inf) == HALF_W_(THUNK_SELECTOR)) { \
- sz = BYTES_TO_WDS(SIZEOF_StgSelector_NoThunkHdr); \
- } else { \
- if (%INFO_TYPE(inf) == HALF_W_(AP_STACK)) { \
- sz = StgAP_STACK_size(p) + BYTES_TO_WDS(SIZEOF_StgAP_STACK_NoThunkHdr); \
- } else { \
- if (%INFO_TYPE(inf) == HALF_W_(AP)) { \
- sz = TO_W_(StgAP_n_args(p)) + BYTES_TO_WDS(SIZEOF_StgAP_NoThunkHdr); \
- } else { \
- sz = TO_W_(%INFO_PTRS(inf)) + TO_W_(%INFO_NPTRS(inf)); \
- } \
- } \
- } \
- i = 0; \
- for: \
- if (i < sz) { \
- StgThunk_payload(p,i) = 0; \
- i = i + 1; \
- goto for; \
- } \
- }
-
-#else /* !CMINUSMINUS */
-
-INLINE_HEADER void
-FILL_SLOP(StgClosure *p)
-{
- StgInfoTable *inf = get_itbl(p);
- nat i, sz;
-
- switch (inf->type) {
- case BLACKHOLE:
- goto no_slop;
- // we already filled in the slop when we overwrote the thunk
- // with BLACKHOLE, and also an evacuated BLACKHOLE is only the
- // size of an IND.
- case THUNK_SELECTOR:
- sz = sizeofW(StgSelector) - sizeofW(StgThunkHeader);
- break;
- case AP:
- sz = ((StgAP *)p)->n_args + sizeofW(StgAP) - sizeofW(StgThunkHeader);
- break;
- case AP_STACK:
- sz = ((StgAP_STACK *)p)->size + sizeofW(StgAP_STACK) - sizeofW(StgThunkHeader);
- break;
- default:
- sz = inf->layout.payload.ptrs + inf->layout.payload.nptrs;
- break;
- }
- for (i = 0; i < sz; i++) {
- ((StgThunk *)p)->payload[i] = 0;
- }
-no_slop:
- ;
-}
-
-#endif /* CMINUSMINUS */
-
-#if !defined(DEBUG) || defined(THREADED_RTS)
-#define DEBUG_FILL_SLOP(p) /* do nothing */
-#else
-#define DEBUG_FILL_SLOP(p) FILL_SLOP(p)
-#endif
-
/* We have two versions of this macro (sadly), one for use in C-- code,
* and the other for C.
*
@@ -128,9 +39,8 @@ no_slop:
#define updateWithIndirection(p1, p2, and_then) \
W_ bd; \
\
- DEBUG_FILL_SLOP(p1); \
- LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(p1); \
- StgInd_indirectee(p1) = p2; \
+ OVERWRITING_CLOSURE(p1); \
+ StgInd_indirectee(p1) = p2; \
prim %write_barrier() []; \
SET_INFO(p1, stg_BLACKHOLE_info); \
LDV_RECORD_CREATE(p1); \
@@ -155,8 +65,7 @@ INLINE_HEADER void updateWithIndirection (Capability *cap,
ASSERT( (P_)p1 != (P_)p2 );
/* not necessarily true: ASSERT( !closure_IND(p1) ); */
/* occurs in RaiseAsync.c:raiseAsync() */
- DEBUG_FILL_SLOP(p1);
- LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(p1);
+ OVERWRITING_CLOSURE(p1);
((StgInd *)p1)->indirectee = p2;
write_barrier();
SET_INFO(p1, &stg_BLACKHOLE_info);