Diffstat (limited to 'rts')
-rw-r--r--  rts/ClosureFlags.c      |  5
-rw-r--r--  rts/HeapStackCheck.cmm  |  8
-rw-r--r--  rts/LdvProfile.c        |  3
-rw-r--r--  rts/Linker.c            |  3
-rw-r--r--  rts/PrimOps.cmm         | 65
-rw-r--r--  rts/Printer.c           |  3
-rw-r--r--  rts/ProfHeap.c          |  6
-rw-r--r--  rts/RaiseAsync.c        |  8
-rw-r--r--  rts/RetainerProfile.c   |  9
-rw-r--r--  rts/Sanity.c            |  3
-rw-r--r--  rts/StgMiscClosures.cmm |  8
-rw-r--r--  rts/sm/Compact.c        |  3
-rw-r--r--  rts/sm/Evac.c           |  3
-rw-r--r--  rts/sm/GC.c             |  5
-rw-r--r--  rts/sm/GC.h             |  2
-rw-r--r--  rts/sm/Scav.c           | 66
-rw-r--r--  rts/sm/Storage.c        | 24
17 files changed, 160 insertions, 64 deletions
diff --git a/rts/ClosureFlags.c b/rts/ClosureFlags.c
index 08b4dd3e27..12e6632f24 100644
--- a/rts/ClosureFlags.c
+++ b/rts/ClosureFlags.c
@@ -71,7 +71,8 @@ StgWord16 closure_flags[] = {
/* BLACKHOLE = */ ( _NS| _UPT ),
/* SE_BLACKHOLE = */ ( _NS| _UPT ),
/* SE_CAF_BLACKHOLE = */ ( _NS| _UPT ),
-/* MVAR = */ (_HNF| _NS| _MUT|_UPT ),
+/* MVAR_CLEAN = */ (_HNF| _NS| _MUT|_UPT ),
+/* MVAR_DIRTY = */ (_HNF| _NS| _MUT|_UPT ),
/* ARR_WORDS = */ (_HNF| _NS| _UPT ),
/* MUT_ARR_PTRS_CLEAN = */ (_HNF| _NS| _MUT|_UPT ),
/* MUT_ARR_PTRS_DIRTY = */ (_HNF| _NS| _MUT|_UPT ),
@@ -99,6 +100,6 @@ StgWord16 closure_flags[] = {
/* CATCH_STM_FRAME = */ ( _BTM )
};
-#if N_CLOSURE_TYPES != 71
+#if N_CLOSURE_TYPES != 72
#error Closure types changed: update ClosureFlags.c!
#endif
diff --git a/rts/HeapStackCheck.cmm b/rts/HeapStackCheck.cmm
index 5b21ee1da1..333d0c09e0 100644
--- a/rts/HeapStackCheck.cmm
+++ b/rts/HeapStackCheck.cmm
@@ -827,7 +827,9 @@ INFO_TABLE_RET( stg_block_takemvar, RET_SMALL, "ptr" W_ unused )
stg_block_takemvar_finally
{
#ifdef THREADED_RTS
- unlockClosure(R3, stg_EMPTY_MVAR_info);
+ unlockClosure(R3, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(R3, stg_MVAR_DIRTY_info);
#endif
jump StgReturn;
}
@@ -853,7 +855,9 @@ INFO_TABLE_RET( stg_block_putmvar, RET_SMALL, "ptr" W_ unused1, "ptr" W_ unused2
stg_block_putmvar_finally
{
#ifdef THREADED_RTS
- unlockClosure(R3, stg_FULL_MVAR_info);
+ unlockClosure(R3, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(R3, stg_MVAR_DIRTY_info);
#endif
jump StgReturn;
}
diff --git a/rts/LdvProfile.c b/rts/LdvProfile.c
index 193344e4f5..ecbba8b6ab 100644
--- a/rts/LdvProfile.c
+++ b/rts/LdvProfile.c
@@ -96,7 +96,8 @@ processHeapClosureForDead( StgClosure *c )
'inherently used' cases: do nothing.
*/
case TSO:
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
case MUT_ARR_PTRS_CLEAN:
case MUT_ARR_PTRS_DIRTY:
case MUT_ARR_PTRS_FROZEN:
diff --git a/rts/Linker.c b/rts/Linker.c
index 853bf77dd2..e86efd393a 100644
--- a/rts/Linker.c
+++ b/rts/Linker.c
@@ -646,7 +646,8 @@ typedef struct _RtsSymbolVal {
SymX(stg_CAF_BLACKHOLE_info) \
SymX(awakenBlockedQueue) \
SymX(stg_CHARLIKE_closure) \
- SymX(stg_EMPTY_MVAR_info) \
+ SymX(stg_MVAR_CLEAN_info) \
+ SymX(stg_MVAR_DIRTY_info) \
SymX(stg_IND_STATIC_info) \
SymX(stg_INTLIKE_closure) \
SymX(stg_MUT_ARR_PTRS_DIRTY_info) \
diff --git a/rts/PrimOps.cmm b/rts/PrimOps.cmm
index 67227d05af..04a753c51b 100644
--- a/rts/PrimOps.cmm
+++ b/rts/PrimOps.cmm
@@ -1445,7 +1445,7 @@ isEmptyMVarzh_fast
{
/* args: R1 = MVar closure */
- if (GET_INFO(R1) == stg_EMPTY_MVAR_info) {
+ if (StgMVar_value(R1) == stg_END_TSO_QUEUE_closure) {
RET_N(1);
} else {
RET_N(0);
@@ -1460,7 +1460,8 @@ newMVarzh_fast
ALLOC_PRIM ( SIZEOF_StgMVar, NO_PTRS, newMVarzh_fast );
mvar = Hp - SIZEOF_StgMVar + WDS(1);
- SET_HDR(mvar,stg_EMPTY_MVAR_info,W_[CCCS]);
+ SET_HDR(mvar,stg_MVAR_DIRTY_info,W_[CCCS]);
+ // MVARs start dirty: generation 0 has no mutable list
StgMVar_head(mvar) = stg_END_TSO_QUEUE_closure;
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
@@ -1495,11 +1496,15 @@ takeMVarzh_fast
#else
info = GET_INFO(mvar);
#endif
+
+ if (info == stg_MVAR_CLEAN_info) {
+ foreign "C" dirty_MVAR(BaseReg "ptr", mvar);
+ }
/* If the MVar is empty, put ourselves on its blocking queue,
* and wait until we're woken up.
*/
- if (info == stg_EMPTY_MVAR_info) {
+ if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_head(mvar) = CurrentTSO;
} else {
@@ -1543,7 +1548,9 @@ takeMVarzh_fast
}
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_FULL_MVAR_info);
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
RET_P(val);
}
@@ -1553,9 +1560,9 @@ takeMVarzh_fast
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_EMPTY_MVAR_info);
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
- SET_INFO(mvar,stg_EMPTY_MVAR_info);
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
RET_P(val);
@@ -1577,9 +1584,9 @@ tryTakeMVarzh_fast
info = GET_INFO(mvar);
#endif
- if (info == stg_EMPTY_MVAR_info) {
+ if (StgMVar_value(mvar) == stg_END_TSO_QUEUE_closure) {
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_EMPTY_MVAR_info);
+ unlockClosure(mvar, info);
#endif
/* HACK: we need a pointer to pass back,
* so we abuse NO_FINALIZER_closure
@@ -1587,6 +1594,10 @@ tryTakeMVarzh_fast
RET_NP(0, stg_NO_FINALIZER_closure);
}
+ if (info == stg_MVAR_CLEAN_info) {
+ foreign "C" dirty_MVAR(BaseReg "ptr", mvar);
+ }
+
/* we got the value... */
val = StgMVar_value(mvar);
@@ -1616,7 +1627,9 @@ tryTakeMVarzh_fast
StgMVar_tail(mvar) = stg_END_TSO_QUEUE_closure;
}
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_FULL_MVAR_info);
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
}
else
@@ -1624,9 +1637,9 @@ tryTakeMVarzh_fast
/* No further putMVars, MVar is now empty */
StgMVar_value(mvar) = stg_END_TSO_QUEUE_closure;
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_EMPTY_MVAR_info);
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
- SET_INFO(mvar,stg_EMPTY_MVAR_info);
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
}
@@ -1647,7 +1660,11 @@ putMVarzh_fast
info = GET_INFO(mvar);
#endif
- if (info == stg_FULL_MVAR_info) {
+ if (info == stg_MVAR_CLEAN_info) {
+ foreign "C" dirty_MVAR(BaseReg "ptr", mvar);
+ }
+
+ if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_head(mvar) = CurrentTSO;
} else {
@@ -1686,7 +1703,9 @@ putMVarzh_fast
}
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_EMPTY_MVAR_info);
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
jump %ENTRY_CODE(Sp(0));
}
@@ -1696,9 +1715,9 @@ putMVarzh_fast
StgMVar_value(mvar) = R2;
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_FULL_MVAR_info);
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
- SET_INFO(mvar,stg_FULL_MVAR_info);
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
jump %ENTRY_CODE(Sp(0));
}
@@ -1720,13 +1739,17 @@ tryPutMVarzh_fast
info = GET_INFO(mvar);
#endif
- if (info == stg_FULL_MVAR_info) {
+ if (StgMVar_value(mvar) != stg_END_TSO_QUEUE_closure) {
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_FULL_MVAR_info);
+ unlockClosure(mvar, info);
#endif
RET_N(0);
}
+ if (info == stg_MVAR_CLEAN_info) {
+ foreign "C" dirty_MVAR(BaseReg "ptr", mvar);
+ }
+
if (StgMVar_head(mvar) != stg_END_TSO_QUEUE_closure) {
/* There are takeMVar(s) waiting: wake up the first one
@@ -1752,7 +1775,9 @@ tryPutMVarzh_fast
}
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_EMPTY_MVAR_info);
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
+#else
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
}
else
@@ -1761,9 +1786,9 @@ tryPutMVarzh_fast
StgMVar_value(mvar) = R2;
#if defined(THREADED_RTS)
- unlockClosure(mvar, stg_FULL_MVAR_info);
+ unlockClosure(mvar, stg_MVAR_DIRTY_info);
#else
- SET_INFO(mvar,stg_FULL_MVAR_info);
+ SET_INFO(mvar,stg_MVAR_DIRTY_info);
#endif
}
diff --git a/rts/Printer.c b/rts/Printer.c
index d46283cbb3..3e80bd1a6f 100644
--- a/rts/Printer.c
+++ b/rts/Printer.c
@@ -340,7 +340,8 @@ printClosure( StgClosure *obj )
debugBelch("MUT_ARR_PTRS_FROZEN(size=%lu)\n", (lnat)((StgMutArrPtrs *)obj)->ptrs);
break;
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
{
StgMVar* mv = (StgMVar*)obj;
debugBelch("MVAR(head=%p, tail=%p, value=%p)\n", mv->head, mv->tail, mv->value);
diff --git a/rts/ProfHeap.c b/rts/ProfHeap.c
index db9e41fa18..08597b1d98 100644
--- a/rts/ProfHeap.c
+++ b/rts/ProfHeap.c
@@ -146,7 +146,8 @@ static char *type_names[] = {
"BLACKHOLE",
"SE_BLACKHOLE",
"SE_CAF_BLACKHOLE",
- "MVAR",
+ "MVAR_CLEAN",
+ "MVAR_DIRTY",
"ARR_WORDS",
"MUT_ARR_PTRS_CLEAN",
"MUT_ARR_PTRS_DIRTY",
@@ -974,7 +975,8 @@ heapCensusChain( Census *census, bdescr *bd )
size = bco_sizeW((StgBCO *)p);
break;
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
case WEAK:
case STABLE_NAME:
case MUT_VAR_CLEAN:
diff --git a/rts/RaiseAsync.c b/rts/RaiseAsync.c
index b71e126f04..bb244d883d 100644
--- a/rts/RaiseAsync.c
+++ b/rts/RaiseAsync.c
@@ -282,7 +282,13 @@ check_target:
// ASSUMPTION: tso->block_info must always point to a
// closure. In the threaded RTS it does.
- if (get_itbl(mvar)->type != MVAR) goto retry;
+ switch (get_itbl(mvar)->type) {
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
+ break;
+ default:
+ goto retry;
+ }
info = lockClosure((StgClosure *)mvar);
diff --git a/rts/RetainerProfile.c b/rts/RetainerProfile.c
index 036eacf08b..745b8e75db 100644
--- a/rts/RetainerProfile.c
+++ b/rts/RetainerProfile.c
@@ -491,7 +491,8 @@ push( StgClosure *c, retainer c_child_r, StgClosure **first_child )
// three children (fixed), no SRT
// need to push a stackElement
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
// head must be TSO and the head of a linked list of TSOs.
// Should it be a child? Seems to be yes.
*first_child = (StgClosure *)((StgMVar *)c)->head;
@@ -804,7 +805,8 @@ pop( StgClosure **c, StgClosure **cp, retainer *r )
// three children (fixed), no SRT
// need to push a stackElement
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
if (se->info.next.step == 2) {
*c = (StgClosure *)((StgMVar *)se->c)->tail;
se->info.next.step++; // move to the next step
@@ -1057,7 +1059,8 @@ isRetainer( StgClosure *c )
case TSO:
// mutable objects
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
case MUT_ARR_PTRS_CLEAN:
diff --git a/rts/Sanity.c b/rts/Sanity.c
index a2ddff87d6..dcb6e5b5ea 100644
--- a/rts/Sanity.c
+++ b/rts/Sanity.c
@@ -256,7 +256,8 @@ checkClosure( StgClosure* p )
info = get_itbl(p);
switch (info->type) {
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
{
StgMVar *mvar = (StgMVar *)p;
ASSERT(LOOKS_LIKE_CLOSURE_PTR(mvar->head));
diff --git a/rts/StgMiscClosures.cmm b/rts/StgMiscClosures.cmm
index d24eb63d9d..0a4dbdc561 100644
--- a/rts/StgMiscClosures.cmm
+++ b/rts/StgMiscClosures.cmm
@@ -467,11 +467,11 @@ INFO_TABLE(stg_STABLE_NAME,0,1,STABLE_NAME,"STABLE_NAME","STABLE_NAME")
and entry code for each type.
------------------------------------------------------------------------- */
-INFO_TABLE(stg_FULL_MVAR,3,0,MVAR,"MVAR","MVAR")
-{ foreign "C" barf("FULL_MVAR object entered!") never returns; }
+INFO_TABLE(stg_MVAR_CLEAN,3,0,MVAR_CLEAN,"MVAR","MVAR")
+{ foreign "C" barf("MVAR object entered!") never returns; }
-INFO_TABLE(stg_EMPTY_MVAR,3,0,MVAR,"MVAR","MVAR")
-{ foreign "C" barf("EMPTY_MVAR object entered!") never returns; }
+INFO_TABLE(stg_MVAR_DIRTY,3,0,MVAR_DIRTY,"MVAR","MVAR")
+{ foreign "C" barf("MVAR object entered!") never returns; }
/* -----------------------------------------------------------------------------
STM
diff --git a/rts/sm/Compact.c b/rts/sm/Compact.c
index 53eb2fbb86..b8a40d47e9 100644
--- a/rts/sm/Compact.c
+++ b/rts/sm/Compact.c
@@ -644,7 +644,8 @@ thread_obj (StgInfoTable *info, StgPtr p)
return p + sizeofW(StgWeak);
}
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
{
StgMVar *mvar = (StgMVar *)p;
thread_(&mvar->head);
diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index a0c2ae7a84..42b6b1f666 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -448,7 +448,8 @@ loop:
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY:
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
return copy(q,sizeW_fromITBL(info),stp);
case CONSTR_0_1:
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index e4b5098e24..47c30ae5e7 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -124,6 +124,7 @@ static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
#ifdef DEBUG
nat mutlist_MUTVARS,
mutlist_MUTARRS,
+ mutlist_MVARS,
mutlist_OTHERS;
#endif
@@ -637,9 +638,9 @@ GarbageCollect ( rtsBool force_major_gc )
copied += mut_list_size;
debugTrace(DEBUG_gc,
- "mut_list_size: %lu (%d vars, %d arrays, %d others)",
+ "mut_list_size: %lu (%d vars, %d arrays, %d MVARs, %d others)",
(unsigned long)(mut_list_size * sizeof(W_)),
- mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS);
+ mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS);
}
for (s = 0; s < generations[g].n_steps; s++) {
diff --git a/rts/sm/GC.h b/rts/sm/GC.h
index b95466edb9..d3ce8cf92d 100644
--- a/rts/sm/GC.h
+++ b/rts/sm/GC.h
@@ -36,7 +36,7 @@ extern lnat new_blocks; // blocks allocated during this GC
extern lnat new_scavd_blocks; // ditto, but depth-first blocks
#ifdef DEBUG
-extern nat mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS;
+extern nat mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS;
#endif
StgClosure * isAlive(StgClosure *p);
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index 00faff1d84..54fe9a472a 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -293,15 +293,23 @@ scavenge(step *stp)
q = p;
switch (info->type) {
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
{
+ rtsBool saved_eager_promotion = eager_promotion;
+
StgMVar *mvar = ((StgMVar *)p);
- evac_gen = 0;
+ eager_promotion = rtsFalse;
mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
mvar->value = evacuate((StgClosure *)mvar->value);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable.
+ eager_promotion = saved_eager_promotion;
+
+ if (failed_to_evac) {
+ mvar->header.info = &stg_MVAR_DIRTY_info;
+ } else {
+ mvar->header.info = &stg_MVAR_CLEAN_info;
+ }
p += sizeofW(StgMVar);
break;
}
@@ -696,17 +704,25 @@ linear_scan:
q = p;
switch (info->type) {
- case MVAR:
- {
- StgMVar *mvar = ((StgMVar *)p);
- evac_gen = 0;
- mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
- mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
- mvar->value = evacuate((StgClosure *)mvar->value);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable.
- break;
- }
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
+ {
+ rtsBool saved_eager_promotion = eager_promotion;
+
+ StgMVar *mvar = ((StgMVar *)p);
+ eager_promotion = rtsFalse;
+ mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
+ mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
+ mvar->value = evacuate((StgClosure *)mvar->value);
+ eager_promotion = saved_eager_promotion;
+
+ if (failed_to_evac) {
+ mvar->header.info = &stg_MVAR_DIRTY_info;
+ } else {
+ mvar->header.info = &stg_MVAR_CLEAN_info;
+ }
+ break;
+ }
case FUN_2_0:
scavenge_fun_srt(info);
@@ -1074,15 +1090,23 @@ scavenge_one(StgPtr p)
switch (info->type) {
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
{
+ rtsBool saved_eager_promotion = eager_promotion;
+
StgMVar *mvar = ((StgMVar *)p);
- evac_gen = 0;
+ eager_promotion = rtsFalse;
mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
mvar->value = evacuate((StgClosure *)mvar->value);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable.
+ eager_promotion = saved_eager_promotion;
+
+ if (failed_to_evac) {
+ mvar->header.info = &stg_MVAR_DIRTY_info;
+ } else {
+ mvar->header.info = &stg_MVAR_CLEAN_info;
+ }
break;
}
@@ -1409,6 +1433,10 @@ scavenge_mutable_list(generation *gen)
case MUT_ARR_PTRS_FROZEN:
case MUT_ARR_PTRS_FROZEN0:
mutlist_MUTARRS++; break;
+ case MVAR_CLEAN:
+ barf("MVAR_CLEAN on mutable list");
+ case MVAR_DIRTY:
+ mutlist_MVARS++; break;
default:
mutlist_OTHERS++; break;
}
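
/*
 * The three scavenging paths in Scav.c above repeat the same shape:
 * evacuate the MVar's head, tail and value with eager promotion switched
 * off, then mark the closure clean only if failed_to_evac stayed false,
 * i.e. only if no field still points into a younger generation.  A rough
 * C sketch of that decision follows, using simplified stand-ins
 * (scav_mvar, evacuate_field) rather than the real GC entry points.
 */

#include <stdbool.h>

typedef enum { MVAR_CLEAN_state, MVAR_DIRTY_state } mvar_state;

typedef struct {
    mvar_state state;
    void *head, *tail, *value;   /* the three pointer fields of an StgMVar */
} scav_mvar_model;

/* GC-global flags, as in the real RTS; evacuate_field is only a stub. */
static bool eager_promotion = true;
static bool failed_to_evac  = false;

static void *evacuate_field(void *p) {
    /* stub: the real evacuate() copies the closure and sets failed_to_evac
     * when the copy has to stay in a younger generation */
    return p;
}

static void scav_mvar(scav_mvar_model *m) {
    bool saved_eager_promotion = eager_promotion;

    failed_to_evac = false;
    eager_promotion = false;   /* fields of mutables are not promoted eagerly */
    m->head  = evacuate_field(m->head);
    m->tail  = evacuate_field(m->tail);
    m->value = evacuate_field(m->value);
    eager_promotion = saved_eager_promotion;

    /* dirty (kept on a mutable list) iff a field still points young */
    m->state = failed_to_evac ? MVAR_DIRTY_state : MVAR_CLEAN_state;
}
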
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index f9e32f288a..cd840dd428 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -781,12 +781,15 @@ allocatePinned( nat n )
}
/* -----------------------------------------------------------------------------
+ Write Barriers
+ -------------------------------------------------------------------------- */
+
+/*
This is the write barrier for MUT_VARs, a.k.a. IORefs. A
MUT_VAR_CLEAN object is not on the mutable list; a MUT_VAR_DIRTY
is. When written to, a MUT_VAR_CLEAN turns into a MUT_VAR_DIRTY
and is put on the mutable list.
- -------------------------------------------------------------------------- */
-
+*/
void
dirty_MUT_VAR(StgRegTable *reg, StgClosure *p)
{
@@ -799,6 +802,23 @@ dirty_MUT_VAR(StgRegTable *reg, StgClosure *p)
}
}
+/*
+ This is the write barrier for MVARs. An MVAR_CLEAN object is not
+ on the mutable list; an MVAR_DIRTY is. When written to, an
+ MVAR_CLEAN turns into an MVAR_DIRTY and is put on the mutable list.
+ The check for MVAR_CLEAN is inlined at the call site for speed;
+ this really does make a difference on concurrency-heavy benchmarks
+ such as Chameneos and cheap-concurrency.
+*/
+void
+dirty_MVAR(StgRegTable *reg, StgClosure *p)
+{
+ Capability *cap = regTableToCapability(reg);
+ bdescr *bd;
+ bd = Bdescr((StgPtr)p);
+ if (bd->gen_no > 0) recordMutableCap(p,cap,bd->gen_no);
+}
+
/* -----------------------------------------------------------------------------
Allocation functions for GMP.