author     Simon Marlow <simonmarhaskell@gmail.com>  2008-04-16 23:39:51 +0000
committer  Simon Marlow <simonmarhaskell@gmail.com>  2008-04-16 23:39:51 +0000
commit     04cddd339c000df6d02c90ce59dbffa58d2fe166 (patch)
tree       4ba138d182f71f2751daeb3cb77c0fc86cf1110f /rts
parent     9de1ad504a0a12dabd42b206f06ca04fa0e7009a (diff)
download   haskell-04cddd339c000df6d02c90ce59dbffa58d2fe166.tar.gz
Add a write barrier to the TSO link field (#1589)
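The idea of the patch: the TSO link field is renamed from `link` to `_link`, so that writes to it go through the new setTSOLink() in rts/sm/Storage.c. That function is a generational write barrier: the first time a clean old-generation TSO's link is mutated, the TSO is flagged TSO_LINK_DIRTY and recorded on the capability's mutable list, so a minor GC can find the old-to-young pointer without scanning every TSO. The stand-alone C sketch below models that invariant; Obj, write_with_barrier, and the fixed-size mutable_list are hypothetical model names, not RTS code.

#include <stddef.h>

/* Toy model of the invariant the barrier maintains: any old-generation
 * object that may point at younger data must be on the mutable list,
 * so a minor GC can treat it as a root without scanning every object. */
typedef struct Obj_ {
    int gen_no;          /* generation this object lives in (0 = youngest) */
    int link_dirty;      /* recorded on the mutable list since the last GC? */
    struct Obj_ *link;   /* the mutable field the barrier guards            */
} Obj;

static Obj   *mutable_list[1024];
static size_t n_mutable = 0;

/* Analogue of setTSOLink(): record the object on the first dirtying
 * write, then do the store. Later writes are barrier-free until a GC
 * marks the object clean again. */
static void write_with_barrier(Obj *src, Obj *target)
{
    if (src->gen_no > 0 && !src->link_dirty) {
        src->link_dirty = 1;
        mutable_list[n_mutable++] = src;
    }
    src->link = target;
}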
Diffstat (limited to 'rts')
 rts/PrimOps.cmm         | 51
 rts/RaiseAsync.c        | 54
 rts/RetainerProfile.c   |  2
 rts/Sanity.c            |  2
 rts/Schedule.c          | 36
 rts/Schedule.h          | 24
 rts/StgMiscClosures.cmm |  4
 rts/Threads.c           | 33
 rts/Threads.h           |  6
 rts/posix/Select.c      | 14
 rts/sm/Compact.c        |  2
 rts/sm/Evac.c-inc       |  4
 rts/sm/GCAux.c          |  2
 rts/sm/MarkWeak.c       | 10
 rts/sm/Scav.c           | 46
 rts/sm/Scav.c-inc       | 13
 rts/sm/Storage.c        | 29
17 files changed, 186 insertions, 146 deletions
diff --git a/rts/PrimOps.cmm b/rts/PrimOps.cmm
index c7c372756c..9216969bb6 100644
--- a/rts/PrimOps.cmm
+++ b/rts/PrimOps.cmm
@@ -1561,9 +1561,10 @@ takeMVarzh_fast
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_head(mvar) = CurrentTSO;
} else {
- StgTSO_link(StgMVar_tail(mvar)) = CurrentTSO;
+ foreign "C" setTSOLink(MyCapability() "ptr", StgMVar_tail(mvar),
+ CurrentTSO);
}
- StgTSO_link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
+ StgTSO__link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
StgTSO_block_info(CurrentTSO) = mvar;
StgMVar_tail(mvar) = CurrentTSO;
@@ -1584,15 +1585,18 @@ takeMVarzh_fast
/* actually perform the putMVar for the thread that we just woke up */
tso = StgMVar_head(mvar);
PerformPut(tso,StgMVar_value(mvar));
- dirtyTSO(tso);
+
+ if (StgTSO_flags(tso) & TSO_DIRTY == 0) {
+ foreign "C" dirty_TSO(MyCapability(), tso);
+ }
#if defined(GRAN) || defined(PAR)
/* ToDo: check 2nd arg (mvar) is right */
("ptr" tso) = foreign "C" unblockOne(StgMVar_head(mvar),mvar) [];
StgMVar_head(mvar) = tso;
#else
- ("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr",
- StgMVar_head(mvar) "ptr") [];
+ ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
+ StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
#endif
@@ -1664,15 +1668,17 @@ tryTakeMVarzh_fast
/* actually perform the putMVar for the thread that we just woke up */
tso = StgMVar_head(mvar);
PerformPut(tso,StgMVar_value(mvar));
- dirtyTSO(tso);
+ if (StgTSO_flags(tso) & TSO_DIRTY == 0) {
+ foreign "C" dirty_TSO(MyCapability(), tso);
+ }
#if defined(GRAN) || defined(PAR)
/* ToDo: check 2nd arg (mvar) is right */
("ptr" tso) = foreign "C" unblockOne(StgMVar_head(mvar) "ptr", mvar "ptr") [];
StgMVar_head(mvar) = tso;
#else
- ("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr",
- StgMVar_head(mvar) "ptr") [];
+ ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
+ StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
#endif
@@ -1721,9 +1727,10 @@ putMVarzh_fast
if (StgMVar_head(mvar) == stg_END_TSO_QUEUE_closure) {
StgMVar_head(mvar) = CurrentTSO;
} else {
- StgTSO_link(StgMVar_tail(mvar)) = CurrentTSO;
+ foreign "C" setTSOLink(MyCapability() "ptr", StgMVar_tail(mvar),
+ CurrentTSO);
}
- StgTSO_link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
+ StgTSO__link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
StgTSO_why_blocked(CurrentTSO) = BlockedOnMVar::I16;
StgTSO_block_info(CurrentTSO) = mvar;
StgMVar_tail(mvar) = CurrentTSO;
@@ -1740,14 +1747,17 @@ putMVarzh_fast
/* actually perform the takeMVar */
tso = StgMVar_head(mvar);
PerformTake(tso, R2);
- dirtyTSO(tso);
+ if (StgTSO_flags(tso) & TSO_DIRTY == 0) {
+ foreign "C" dirty_TSO(MyCapability(), tso);
+ }
#if defined(GRAN) || defined(PAR)
/* ToDo: check 2nd arg (mvar) is right */
("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr",mvar "ptr") [];
StgMVar_head(mvar) = tso;
#else
- ("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr") [];
+ ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
+ StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
#endif
@@ -1812,14 +1822,17 @@ tryPutMVarzh_fast
/* actually perform the takeMVar */
tso = StgMVar_head(mvar);
PerformTake(tso, R2);
- dirtyTSO(tso);
+ if (StgTSO_flags(tso) & TSO_DIRTY == 0) {
+ foreign "C" dirty_TSO(MyCapability(), tso);
+ }
#if defined(GRAN) || defined(PAR)
/* ToDo: check 2nd arg (mvar) is right */
("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr",mvar "ptr") [];
StgMVar_head(mvar) = tso;
#else
- ("ptr" tso) = foreign "C" unblockOne(MyCapability() "ptr", StgMVar_head(mvar) "ptr") [];
+ ("ptr" tso) = foreign "C" unblockOne_(MyCapability() "ptr",
+ StgMVar_head(mvar) "ptr", 1) [];
StgMVar_head(mvar) = tso;
#endif
@@ -2037,11 +2050,11 @@ for2:
* macro in Schedule.h).
*/
#define APPEND_TO_BLOCKED_QUEUE(tso) \
- ASSERT(StgTSO_link(tso) == END_TSO_QUEUE); \
+ ASSERT(StgTSO__link(tso) == END_TSO_QUEUE); \
if (W_[blocked_queue_hd] == END_TSO_QUEUE) { \
W_[blocked_queue_hd] = tso; \
} else { \
- StgTSO_link(W_[blocked_queue_tl]) = tso; \
+ foreign "C" setTSOLink(MyCapability() "ptr", W_[blocked_queue_tl], tso); \
} \
W_[blocked_queue_tl] = tso;
@@ -2137,15 +2150,15 @@ delayzh_fast
while:
if (t != END_TSO_QUEUE && StgTSO_block_info(t) < target) {
prev = t;
- t = StgTSO_link(t);
+ t = StgTSO__link(t);
goto while;
}
- StgTSO_link(CurrentTSO) = t;
+ StgTSO__link(CurrentTSO) = t;
if (prev == NULL) {
W_[sleeping_queue] = CurrentTSO;
} else {
- StgTSO_link(prev) = CurrentTSO;
+ foreign "C" setTSOLink(MyCapability() "ptr", prev, CurrentTSO) [];
}
jump stg_block_noregs;
#endif
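A note on the Cmm call sites above: the dirty bit is tested inline so that the foreign call into the RTS is taken at most once per TSO between GCs. Rendered in C against the toy model from the top of the page (a sketch; mark_dirty and wake_blocked are model names, not RTS functions):

/* Model analogue of dirty_TSO(): flag the object and record it; unlike
 * write_with_barrier() it does not touch the link field, because the
 * caller is about to mutate the whole object. */
static void mark_dirty(Obj *o)
{
    if (o->gen_no > 0 && !o->link_dirty) {
        o->link_dirty = 1;
        mutable_list[n_mutable++] = o;
    }
}

/* Call-site pattern from the Cmm above: test the flag inline so the
 * (foreign) call is taken at most once per GC cycle. In C the test
 * must be parenthesized: `flags & TSO_DIRTY == 0` would parse as
 * `flags & (TSO_DIRTY == 0)`, since == binds tighter than &. */
static void wake_blocked(Obj *tso)
{
    if (!tso->link_dirty) {
        mark_dirty(tso);   /* slow path, once per GC cycle */
    }
}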
diff --git a/rts/RaiseAsync.c b/rts/RaiseAsync.c
index 21bc78ec75..9d03d07ab4 100644
--- a/rts/RaiseAsync.c
+++ b/rts/RaiseAsync.c
@@ -30,7 +30,7 @@ static void raiseAsync (Capability *cap,
static void removeFromQueues(Capability *cap, StgTSO *tso);
-static void blockedThrowTo (StgTSO *source, StgTSO *target);
+static void blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target);
static void performBlockedException (Capability *cap,
StgTSO *source, StgTSO *target);
@@ -152,7 +152,7 @@ throwTo (Capability *cap, // the Capability we hold
// follow ThreadRelocated links in the target first
while (target->what_next == ThreadRelocated) {
- target = target->link;
+ target = target->_link;
// No, it might be a WHITEHOLE:
// ASSERT(get_itbl(target)->type == TSO);
}
@@ -261,10 +261,10 @@ check_target:
// just moved this TSO.
if (target->what_next == ThreadRelocated) {
unlockTSO(target);
- target = target->link;
+ target = target->_link;
goto retry;
}
- blockedThrowTo(source,target);
+ blockedThrowTo(cap,source,target);
*out = target;
return THROWTO_BLOCKED;
}
@@ -294,7 +294,7 @@ check_target:
info = lockClosure((StgClosure *)mvar);
if (target->what_next == ThreadRelocated) {
- target = target->link;
+ target = target->_link;
unlockClosure((StgClosure *)mvar,info);
goto retry;
}
@@ -309,12 +309,12 @@ check_target:
if ((target->flags & TSO_BLOCKEX) &&
((target->flags & TSO_INTERRUPTIBLE) == 0)) {
lockClosure((StgClosure *)target);
- blockedThrowTo(source,target);
+ blockedThrowTo(cap,source,target);
unlockClosure((StgClosure *)mvar, info);
*out = target;
return THROWTO_BLOCKED; // caller releases TSO
} else {
- removeThreadFromMVarQueue(mvar, target);
+ removeThreadFromMVarQueue(cap, mvar, target);
raiseAsync(cap, target, exception, rtsFalse, NULL);
unblockOne(cap, target);
unlockClosure((StgClosure *)mvar, info);
@@ -333,12 +333,12 @@ check_target:
if (target->flags & TSO_BLOCKEX) {
lockTSO(target);
- blockedThrowTo(source,target);
+ blockedThrowTo(cap,source,target);
RELEASE_LOCK(&sched_mutex);
*out = target;
return THROWTO_BLOCKED; // caller releases TSO
} else {
- removeThreadFromQueue(&blackhole_queue, target);
+ removeThreadFromQueue(cap, &blackhole_queue, target);
raiseAsync(cap, target, exception, rtsFalse, NULL);
unblockOne(cap, target);
RELEASE_LOCK(&sched_mutex);
@@ -373,12 +373,12 @@ check_target:
goto retry;
}
if (target->what_next == ThreadRelocated) {
- target = target->link;
+ target = target->_link;
unlockTSO(target2);
goto retry;
}
if (target2->what_next == ThreadRelocated) {
- target->block_info.tso = target2->link;
+ target->block_info.tso = target2->_link;
unlockTSO(target2);
goto retry;
}
@@ -397,12 +397,12 @@ check_target:
if ((target->flags & TSO_BLOCKEX) &&
((target->flags & TSO_INTERRUPTIBLE) == 0)) {
lockTSO(target);
- blockedThrowTo(source,target);
+ blockedThrowTo(cap,source,target);
unlockTSO(target2);
*out = target;
return THROWTO_BLOCKED;
} else {
- removeThreadFromQueue(&target2->blocked_exceptions, target);
+ removeThreadFromQueue(cap, &target2->blocked_exceptions, target);
raiseAsync(cap, target, exception, rtsFalse, NULL);
unblockOne(cap, target);
unlockTSO(target2);
@@ -419,7 +419,7 @@ check_target:
}
if ((target->flags & TSO_BLOCKEX) &&
((target->flags & TSO_INTERRUPTIBLE) == 0)) {
- blockedThrowTo(source,target);
+ blockedThrowTo(cap,source,target);
*out = target;
return THROWTO_BLOCKED;
} else {
@@ -436,7 +436,7 @@ check_target:
// thread is blocking exceptions, and block on its
// blocked_exception queue.
lockTSO(target);
- blockedThrowTo(source,target);
+ blockedThrowTo(cap,source,target);
*out = target;
return THROWTO_BLOCKED;
@@ -449,7 +449,7 @@ check_target:
#endif
if ((target->flags & TSO_BLOCKEX) &&
((target->flags & TSO_INTERRUPTIBLE) == 0)) {
- blockedThrowTo(source,target);
+ blockedThrowTo(cap,source,target);
return THROWTO_BLOCKED;
} else {
removeFromQueues(cap,target);
@@ -469,12 +469,12 @@ check_target:
// complex to achieve as there's no single lock on a TSO; see
// throwTo()).
static void
-blockedThrowTo (StgTSO *source, StgTSO *target)
+blockedThrowTo (Capability *cap, StgTSO *source, StgTSO *target)
{
debugTrace(DEBUG_sched, "throwTo: blocking on thread %lu", (unsigned long)target->id);
- source->link = target->blocked_exceptions;
+ setTSOLink(cap, source, target->blocked_exceptions);
target->blocked_exceptions = source;
- dirtyTSO(target); // we modified the blocked_exceptions queue
+ dirty_TSO(cap,target); // we modified the blocked_exceptions queue
source->block_info.tso = target;
write_barrier(); // throwTo_exception *must* be visible if BlockedOnException is.
@@ -748,11 +748,11 @@ removeFromQueues(Capability *cap, StgTSO *tso)
goto done;
case BlockedOnMVar:
- removeThreadFromMVarQueue((StgMVar *)tso->block_info.closure, tso);
+ removeThreadFromMVarQueue(cap, (StgMVar *)tso->block_info.closure, tso);
goto done;
case BlockedOnBlackHole:
- removeThreadFromQueue(&blackhole_queue, tso);
+ removeThreadFromQueue(cap, &blackhole_queue, tso);
goto done;
case BlockedOnException:
@@ -765,10 +765,10 @@ removeFromQueues(Capability *cap, StgTSO *tso)
// ASSERT(get_itbl(target)->type == TSO);
while (target->what_next == ThreadRelocated) {
- target = target->link;
+ target = target->_link;
}
- removeThreadFromQueue(&target->blocked_exceptions, tso);
+ removeThreadFromQueue(cap, &target->blocked_exceptions, tso);
goto done;
}
@@ -778,7 +778,7 @@ removeFromQueues(Capability *cap, StgTSO *tso)
#if defined(mingw32_HOST_OS)
case BlockedOnDoProc:
#endif
- removeThreadFromDeQueue(&blocked_queue_hd, &blocked_queue_tl, tso);
+ removeThreadFromDeQueue(cap, &blocked_queue_hd, &blocked_queue_tl, tso);
#if defined(mingw32_HOST_OS)
/* (Cooperatively) signal that the worker thread should abort
* the request.
@@ -788,7 +788,7 @@ removeFromQueues(Capability *cap, StgTSO *tso)
goto done;
case BlockedOnDelay:
- removeThreadFromQueue(&sleeping_queue, tso);
+ removeThreadFromQueue(cap, &sleeping_queue, tso);
goto done;
#endif
@@ -797,7 +797,7 @@ removeFromQueues(Capability *cap, StgTSO *tso)
}
done:
- tso->link = END_TSO_QUEUE;
+ tso->_link = END_TSO_QUEUE; // no write barrier reqd
tso->why_blocked = NotBlocked;
tso->block_info.closure = NULL;
appendToRunQueue(cap,tso);
@@ -871,7 +871,7 @@ raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception,
#endif
// mark it dirty; we're about to change its stack.
- dirtyTSO(tso);
+ dirty_TSO(cap, tso);
sp = tso->sp;
diff --git a/rts/RetainerProfile.c b/rts/RetainerProfile.c
index b17f24f7b4..b71b620d96 100644
--- a/rts/RetainerProfile.c
+++ b/rts/RetainerProfile.c
@@ -1635,7 +1635,7 @@ inner_loop:
#ifdef DEBUG_RETAINER
debugBelch("ThreadRelocated encountered in retainClosure()\n");
#endif
- c = (StgClosure *)((StgTSO *)c)->link;
+ c = (StgClosure *)((StgTSO *)c)->_link;
goto inner_loop;
}
break;
diff --git a/rts/Sanity.c b/rts/Sanity.c
index c9a0772543..e90a5736e5 100644
--- a/rts/Sanity.c
+++ b/rts/Sanity.c
@@ -652,7 +652,7 @@ checkTSO(StgTSO *tso)
StgPtr stack_end = stack + stack_size;
if (tso->what_next == ThreadRelocated) {
- checkTSO(tso->link);
+ checkTSO(tso->_link);
return;
}
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 5fa949cd4a..04ab41cdad 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -592,7 +592,7 @@ run_thread:
cap->in_haskell = rtsTrue;
- dirtyTSO(t);
+ dirty_TSO(cap,t);
#if defined(THREADED_RTS)
if (recent_activity == ACTIVITY_DONE_GC) {
@@ -768,7 +768,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
// Check whether we have more threads on our run queue, or sparks
// in our pool, that we could hand to another Capability.
- if ((emptyRunQueue(cap) || cap->run_queue_hd->link == END_TSO_QUEUE)
+ if ((emptyRunQueue(cap) || cap->run_queue_hd->_link == END_TSO_QUEUE)
&& sparkPoolSizeCap(cap) < 2) {
return;
}
@@ -809,21 +809,21 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
if (cap->run_queue_hd != END_TSO_QUEUE) {
prev = cap->run_queue_hd;
- t = prev->link;
- prev->link = END_TSO_QUEUE;
+ t = prev->_link;
+ prev->_link = END_TSO_QUEUE;
for (; t != END_TSO_QUEUE; t = next) {
- next = t->link;
- t->link = END_TSO_QUEUE;
+ next = t->_link;
+ t->_link = END_TSO_QUEUE;
if (t->what_next == ThreadRelocated
|| t->bound == task // don't move my bound thread
|| tsoLocked(t)) { // don't move a locked thread
- prev->link = t;
+ setTSOLink(cap, prev, t);
prev = t;
} else if (i == n_free_caps) {
pushed_to_all = rtsTrue;
i = 0;
// keep one for us
- prev->link = t;
+ setTSOLink(cap, prev, t);
prev = t;
} else {
debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
@@ -919,7 +919,7 @@ scheduleCheckWakeupThreads(Capability *cap USED_IF_THREADS)
cap->run_queue_hd = cap->wakeup_queue_hd;
cap->run_queue_tl = cap->wakeup_queue_tl;
} else {
- cap->run_queue_tl->link = cap->wakeup_queue_hd;
+ setTSOLink(cap, cap->run_queue_tl, cap->wakeup_queue_hd);
cap->run_queue_tl = cap->wakeup_queue_tl;
}
cap->wakeup_queue_hd = cap->wakeup_queue_tl = END_TSO_QUEUE;
@@ -1711,7 +1711,7 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
IF_DEBUG(sanity,
//debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
checkTSO(t));
- ASSERT(t->link == END_TSO_QUEUE);
+ ASSERT(t->_link == END_TSO_QUEUE);
// Shortcut if we're just switching evaluators: don't bother
// doing stack squeezing (which can be expensive), just run the
@@ -2019,7 +2019,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
for (t = all_threads; t != END_TSO_QUEUE; t = next) {
if (t->what_next == ThreadRelocated) {
- next = t->link;
+ next = t->_link;
} else {
next = t->global_link;
@@ -2182,7 +2182,7 @@ forkProcess(HsStablePtr *entry
for (t = all_threads; t != END_TSO_QUEUE; t = next) {
if (t->what_next == ThreadRelocated) {
- next = t->link;
+ next = t->_link;
} else {
next = t->global_link;
// don't allow threads to catch the ThreadKilled
@@ -2258,7 +2258,7 @@ deleteAllThreads ( Capability *cap )
debugTrace(DEBUG_sched,"deleting all threads");
for (t = all_threads; t != END_TSO_QUEUE; t = next) {
if (t->what_next == ThreadRelocated) {
- next = t->link;
+ next = t->_link;
} else {
next = t->global_link;
deleteThread(cap,t);
@@ -2417,7 +2417,7 @@ resumeThread (void *task_)
tso = task->suspended_tso;
task->suspended_tso = NULL;
- tso->link = END_TSO_QUEUE;
+ tso->_link = END_TSO_QUEUE; // no write barrier reqd
debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
if (tso->why_blocked == BlockedOnCCall) {
@@ -2436,7 +2436,7 @@ resumeThread (void *task_)
#endif
/* We might have GC'd, mark the TSO dirty again */
- dirtyTSO(tso);
+ dirty_TSO(cap,tso);
IF_DEBUG(sanity, checkTSO(tso));
@@ -2786,7 +2786,7 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
* dead TSO's stack.
*/
tso->what_next = ThreadRelocated;
- tso->link = dest;
+ setTSOLink(cap,tso,dest);
tso->sp = (P_)&(tso->stack[tso->stack_size]);
tso->why_blocked = NotBlocked;
@@ -2934,8 +2934,8 @@ checkBlackHoles (Capability *cap)
*prev = t;
any_woke_up = rtsTrue;
} else {
- prev = &t->link;
- t = t->link;
+ prev = &t->_link;
+ t = t->_link;
}
}
diff --git a/rts/Schedule.h b/rts/Schedule.h
index a4a95f3c34..32b7e59a5e 100644
--- a/rts/Schedule.h
+++ b/rts/Schedule.h
@@ -186,11 +186,11 @@ void print_bqe (StgBlockingQueueElement *bqe);
INLINE_HEADER void
appendToRunQueue (Capability *cap, StgTSO *tso)
{
- ASSERT(tso->link == END_TSO_QUEUE);
+ ASSERT(tso->_link == END_TSO_QUEUE);
if (cap->run_queue_hd == END_TSO_QUEUE) {
cap->run_queue_hd = tso;
} else {
- cap->run_queue_tl->link = tso;
+ setTSOLink(cap, cap->run_queue_tl, tso);
}
cap->run_queue_tl = tso;
}
@@ -202,7 +202,7 @@ appendToRunQueue (Capability *cap, StgTSO *tso)
INLINE_HEADER void
pushOnRunQueue (Capability *cap, StgTSO *tso)
{
- tso->link = cap->run_queue_hd;
+ setTSOLink(cap, tso, cap->run_queue_hd);
cap->run_queue_hd = tso;
if (cap->run_queue_tl == END_TSO_QUEUE) {
cap->run_queue_tl = tso;
@@ -216,8 +216,8 @@ popRunQueue (Capability *cap)
{
StgTSO *t = cap->run_queue_hd;
ASSERT(t != END_TSO_QUEUE);
- cap->run_queue_hd = t->link;
- t->link = END_TSO_QUEUE;
+ cap->run_queue_hd = t->_link;
+ t->_link = END_TSO_QUEUE; // no write barrier req'd
if (cap->run_queue_hd == END_TSO_QUEUE) {
cap->run_queue_tl = END_TSO_QUEUE;
}
@@ -230,11 +230,11 @@ popRunQueue (Capability *cap)
INLINE_HEADER void
appendToBlockedQueue(StgTSO *tso)
{
- ASSERT(tso->link == END_TSO_QUEUE);
+ ASSERT(tso->_link == END_TSO_QUEUE);
if (blocked_queue_hd == END_TSO_QUEUE) {
blocked_queue_hd = tso;
} else {
- blocked_queue_tl->link = tso;
+ setTSOLink(&MainCapability, blocked_queue_tl, tso);
}
blocked_queue_tl = tso;
}
@@ -244,11 +244,11 @@ appendToBlockedQueue(StgTSO *tso)
INLINE_HEADER void
appendToWakeupQueue (Capability *cap, StgTSO *tso)
{
- ASSERT(tso->link == END_TSO_QUEUE);
+ ASSERT(tso->_link == END_TSO_QUEUE);
if (cap->wakeup_queue_hd == END_TSO_QUEUE) {
cap->wakeup_queue_hd = tso;
} else {
- cap->wakeup_queue_tl->link = tso;
+ setTSOLink(cap, cap->wakeup_queue_tl, tso);
}
cap->wakeup_queue_tl = tso;
}
@@ -293,11 +293,5 @@ emptyThreadQueues(Capability *cap)
#endif /* !IN_STG_CODE */
-INLINE_HEADER void
-dirtyTSO (StgTSO *tso)
-{
- tso->flags |= TSO_DIRTY;
-}
-
#endif /* SCHEDULE_H */
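The Schedule.h helpers show where the barrier is and is not needed: appending to a non-empty queue writes the tail's link field (setTSOLink), while storing END_TSO_QUEUE or updating the head/tail roots creates no old-to-young pointer. In the toy model, with NULL standing in for END_TSO_QUEUE (a sketch, not RTS code):

/* Queue-append sketch, mirroring appendToRunQueue(): only the tail's
 * link write needs the barrier. Storing NULL never creates an
 * old-to-young pointer, and the head/tail roots are scanned anyway. */
typedef struct { Obj *hd; Obj *tl; } Queue;

static void append_to_queue(Queue *q, Obj *tso)
{
    tso->link = NULL;          /* like _link = END_TSO_QUEUE: no barrier */
    if (q->hd == NULL) {
        q->hd = tso;           /* root update: no barrier */
    } else {
        write_with_barrier(q->tl, tso);
    }
    q->tl = tso;
}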
diff --git a/rts/StgMiscClosures.cmm b/rts/StgMiscClosures.cmm
index f9076ca9a9..6a8f773586 100644
--- a/rts/StgMiscClosures.cmm
+++ b/rts/StgMiscClosures.cmm
@@ -309,7 +309,7 @@ INFO_TABLE(stg_BLACKHOLE,0,1,BLACKHOLE,"BLACKHOLE","BLACKHOLE")
#endif
/* Put ourselves on the blackhole queue */
- StgTSO_link(CurrentTSO) = W_[blackhole_queue];
+ StgTSO__link(CurrentTSO) = W_[blackhole_queue];
W_[blackhole_queue] = CurrentTSO;
/* jot down why and on what closure we are blocked */
@@ -374,7 +374,7 @@ INFO_TABLE(stg_CAF_BLACKHOLE,0,1,CAF_BLACKHOLE,"CAF_BLACKHOLE","CAF_BLACKHOLE")
#endif
/* Put ourselves on the blackhole queue */
- StgTSO_link(CurrentTSO) = W_[blackhole_queue];
+ StgTSO__link(CurrentTSO) = W_[blackhole_queue];
W_[blackhole_queue] = CurrentTSO;
/* jot down why and on what closure we are blocked */
diff --git a/rts/Threads.c b/rts/Threads.c
index d7b5f4168e..efdf772d23 100644
--- a/rts/Threads.c
+++ b/rts/Threads.c
@@ -119,7 +119,7 @@ createThread(Capability *cap, nat size)
/* put a stop frame on the stack */
tso->sp -= sizeofW(StgStopFrame);
SET_HDR((StgClosure*)tso->sp,(StgInfoTable *)&stg_stop_thread_info,CCS_SYSTEM);
- tso->link = END_TSO_QUEUE;
+ tso->_link = END_TSO_QUEUE;
// ToDo: check this
#if defined(GRAN)
@@ -292,17 +292,17 @@ rts_getThreadId(StgPtr tso)
-------------------------------------------------------------------------- */
void
-removeThreadFromQueue (StgTSO **queue, StgTSO *tso)
+removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso)
{
StgTSO *t, *prev;
prev = NULL;
- for (t = *queue; t != END_TSO_QUEUE; prev = t, t = t->link) {
+ for (t = *queue; t != END_TSO_QUEUE; prev = t, t = t->_link) {
if (t == tso) {
if (prev) {
- prev->link = t->link;
+ setTSOLink(cap,prev,t->_link);
} else {
- *queue = t->link;
+ *queue = t->_link;
}
return;
}
@@ -311,17 +311,18 @@ removeThreadFromQueue (StgTSO **queue, StgTSO *tso)
}
void
-removeThreadFromDeQueue (StgTSO **head, StgTSO **tail, StgTSO *tso)
+removeThreadFromDeQueue (Capability *cap,
+ StgTSO **head, StgTSO **tail, StgTSO *tso)
{
StgTSO *t, *prev;
prev = NULL;
- for (t = *head; t != END_TSO_QUEUE; prev = t, t = t->link) {
+ for (t = *head; t != END_TSO_QUEUE; prev = t, t = t->_link) {
if (t == tso) {
if (prev) {
- prev->link = t->link;
+ setTSOLink(cap,prev,t->_link);
} else {
- *head = t->link;
+ *head = t->_link;
}
if (*tail == tso) {
if (prev) {
@@ -337,9 +338,9 @@ removeThreadFromDeQueue (StgTSO **head, StgTSO **tail, StgTSO *tso)
}
void
-removeThreadFromMVarQueue (StgMVar *mvar, StgTSO *tso)
+removeThreadFromMVarQueue (Capability *cap, StgMVar *mvar, StgTSO *tso)
{
- removeThreadFromDeQueue (&mvar->head, &mvar->tail, tso);
+ removeThreadFromDeQueue (cap, &mvar->head, &mvar->tail, tso);
}
/* ----------------------------------------------------------------------------
@@ -489,8 +490,8 @@ unblockOne_ (Capability *cap, StgTSO *tso,
ASSERT(tso->why_blocked != NotBlocked);
tso->why_blocked = NotBlocked;
- next = tso->link;
- tso->link = END_TSO_QUEUE;
+ next = tso->_link;
+ tso->_link = END_TSO_QUEUE;
#if defined(THREADED_RTS)
if (tso->cap == cap || (!tsoLocked(tso) &&
@@ -792,7 +793,7 @@ printAllThreads(void)
for (i = 0; i < n_capabilities; i++) {
cap = &capabilities[i];
debugBelch("threads on capability %d:\n", cap->no);
- for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->link) {
+ for (t = cap->run_queue_hd; t != END_TSO_QUEUE; t = t->_link) {
printThreadStatus(t);
}
}
@@ -803,7 +804,7 @@ printAllThreads(void)
printThreadStatus(t);
}
if (t->what_next == ThreadRelocated) {
- next = t->link;
+ next = t->_link;
} else {
next = t->global_link;
}
@@ -815,7 +816,7 @@ void
printThreadQueue(StgTSO *t)
{
nat i = 0;
- for (; t != END_TSO_QUEUE; t = t->link) {
+ for (; t != END_TSO_QUEUE; t = t->_link) {
printThreadStatus(t);
i++;
}
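The Threads.c changes pass a Capability through the removal functions for the same reason: splicing a thread out of the middle of a queue writes the predecessor's link field. In the model (again a sketch; NULL stands for END_TSO_QUEUE):

/* Unlink sketch, mirroring removeThreadFromQueue(): the predecessor's
 * link store goes through the barrier; rewriting the head pointer is
 * a root update and stays barrier-free. */
static void remove_from_queue(Obj **queue, Obj *victim)
{
    Obj *t, *prev = NULL;
    for (t = *queue; t != NULL; prev = t, t = t->link) {
        if (t == victim) {
            if (prev) {
                write_with_barrier(prev, t->link);
            } else {
                *queue = t->link;
            }
            return;
        }
    }
}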
diff --git a/rts/Threads.h b/rts/Threads.h
index e331c50dae..541ca873fb 100644
--- a/rts/Threads.h
+++ b/rts/Threads.h
@@ -23,9 +23,9 @@ void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
void awakenBlockedQueue (Capability *cap, StgTSO *tso);
#endif
-void removeThreadFromMVarQueue (StgMVar *mvar, StgTSO *tso);
-void removeThreadFromQueue (StgTSO **queue, StgTSO *tso);
-void removeThreadFromDeQueue (StgTSO **head, StgTSO **tail, StgTSO *tso);
+void removeThreadFromMVarQueue (Capability *cap, StgMVar *mvar, StgTSO *tso);
+void removeThreadFromQueue (Capability *cap, StgTSO **queue, StgTSO *tso);
+void removeThreadFromDeQueue (Capability *cap, StgTSO **head, StgTSO **tail, StgTSO *tso);
StgBool isThreadBound (StgTSO* tso);
diff --git a/rts/posix/Select.c b/rts/posix/Select.c
index 8af57bac6f..ae9c717a9a 100644
--- a/rts/posix/Select.c
+++ b/rts/posix/Select.c
@@ -63,9 +63,9 @@ wakeUpSleepingThreads(lnat ticks)
while (sleeping_queue != END_TSO_QUEUE &&
(int)(ticks - sleeping_queue->block_info.target) >= 0) {
tso = sleeping_queue;
- sleeping_queue = tso->link;
+ sleeping_queue = tso->_link;
tso->why_blocked = NotBlocked;
- tso->link = END_TSO_QUEUE;
+ tso->_link = END_TSO_QUEUE;
IF_DEBUG(scheduler,debugBelch("Waking up sleeping thread %lu\n", (unsigned long)tso->id));
// MainCapability: this code is !THREADED_RTS
pushOnRunQueue(&MainCapability,tso);
@@ -139,7 +139,7 @@ awaitEvent(rtsBool wait)
FD_ZERO(&wfd);
for(tso = blocked_queue_hd; tso != END_TSO_QUEUE; tso = next) {
- next = tso->link;
+ next = tso->_link;
/* On FreeBSD FD_SETSIZE is unsigned. Cast it to signed int
* in order to switch off the 'comparison between signed and
@@ -243,7 +243,7 @@ awaitEvent(rtsBool wait)
prev = NULL;
if (select_succeeded || unblock_all) {
for(tso = blocked_queue_hd; tso != END_TSO_QUEUE; tso = next) {
- next = tso->link;
+ next = tso->_link;
switch (tso->why_blocked) {
case BlockedOnRead:
ready = unblock_all || FD_ISSET(tso->block_info.fd, &rfd);
@@ -258,13 +258,13 @@ awaitEvent(rtsBool wait)
if (ready) {
IF_DEBUG(scheduler,debugBelch("Waking up blocked thread %lu\n", (unsigned long)tso->id));
tso->why_blocked = NotBlocked;
- tso->link = END_TSO_QUEUE;
+ tso->_link = END_TSO_QUEUE;
pushOnRunQueue(&MainCapability,tso);
} else {
if (prev == NULL)
blocked_queue_hd = tso;
else
- prev->link = tso;
+ setTSOLink(&MainCapability, prev, tso);
prev = tso;
}
}
@@ -272,7 +272,7 @@ awaitEvent(rtsBool wait)
if (prev == NULL)
blocked_queue_hd = blocked_queue_tl = END_TSO_QUEUE;
else {
- prev->link = END_TSO_QUEUE;
+ prev->_link = END_TSO_QUEUE;
blocked_queue_tl = prev;
}
}
diff --git a/rts/sm/Compact.c b/rts/sm/Compact.c
index ced8df36ae..fa6efa9f4e 100644
--- a/rts/sm/Compact.c
+++ b/rts/sm/Compact.c
@@ -464,7 +464,7 @@ thread_AP_STACK (StgAP_STACK *ap)
static StgPtr
thread_TSO (StgTSO *tso)
{
- thread_(&tso->link);
+ thread_(&tso->_link);
thread_(&tso->global_link);
if ( tso->why_blocked == BlockedOnMVar
diff --git a/rts/sm/Evac.c-inc b/rts/sm/Evac.c-inc
index 16bd297afc..eabdcdcc1d 100644
--- a/rts/sm/Evac.c-inc
+++ b/rts/sm/Evac.c-inc
@@ -330,7 +330,7 @@ loop:
info = get_itbl(q);
if (info->type == TSO &&
((StgTSO *)q)->what_next == ThreadRelocated) {
- q = (StgClosure *)((StgTSO *)q)->link;
+ q = (StgClosure *)((StgTSO *)q)->_link;
*p = q;
goto loop;
}
@@ -537,7 +537,7 @@ loop:
/* Deal with redirected TSOs (a TSO that's had its stack enlarged).
*/
if (tso->what_next == ThreadRelocated) {
- q = (StgClosure *)tso->link;
+ q = (StgClosure *)tso->_link;
*p = q;
goto loop;
}
diff --git a/rts/sm/GCAux.c b/rts/sm/GCAux.c
index fccae2c71d..0fb61f16a2 100644
--- a/rts/sm/GCAux.c
+++ b/rts/sm/GCAux.c
@@ -88,7 +88,7 @@ isAlive(StgClosure *p)
case TSO:
if (((StgTSO *)q)->what_next == ThreadRelocated) {
- p = (StgClosure *)((StgTSO *)q)->link;
+ p = (StgClosure *)((StgTSO *)q)->_link;
continue;
}
return NULL;
diff --git a/rts/sm/MarkWeak.c b/rts/sm/MarkWeak.c
index 3faaf0edfe..9d47cdedeb 100644
--- a/rts/sm/MarkWeak.c
+++ b/rts/sm/MarkWeak.c
@@ -200,7 +200,7 @@ traverseWeakPtrList(void)
ASSERT(get_itbl(t)->type == TSO);
switch (t->what_next) {
case ThreadRelocated:
- next = t->link;
+ next = t->_link;
*prev = next;
continue;
case ThreadKilled:
@@ -258,7 +258,7 @@ traverseWeakPtrList(void)
*/
{
StgTSO **pt;
- for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
+ for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->_link)) {
*pt = (StgTSO *)isAlive((StgClosure *)*pt);
ASSERT(*pt != NULL);
}
@@ -291,7 +291,7 @@ traverseBlackholeQueue (void)
flag = rtsFalse;
prev = NULL;
- for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->link) {
+ for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->_link) {
// if the thread is not yet alive...
if (! (tmp = (StgTSO *)isAlive((StgClosure*)t))) {
// if the closure it is blocked on is either (a) a
@@ -305,7 +305,9 @@ traverseBlackholeQueue (void)
}
tmp = t;
evacuate((StgClosure **)&tmp);
- if (prev) prev->link = t;
+ if (prev) prev->_link = t;
+ // no write barrier when on the blackhole queue,
+ // because we traverse the whole queue on every GC.
flag = rtsTrue;
}
}
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index ea39ebdff6..b969de3a74 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -132,6 +132,17 @@ scavenge_fun_srt(const StgInfoTable *info)
Scavenge a TSO.
-------------------------------------------------------------------------- */
+STATIC_INLINE void
+scavenge_TSO_link (StgTSO *tso)
+{
+ // We don't always chase the link field: TSOs on the blackhole
+ // queue are not automatically alive, so the link field is a
+ // "weak" pointer in that case.
+ if (tso->why_blocked != BlockedOnBlackHole) {
+ evacuate((StgClosure **)&tso->_link);
+ }
+}
+
static void
scavengeTSO (StgTSO *tso)
{
@@ -156,13 +167,6 @@ scavengeTSO (StgTSO *tso)
}
evacuate((StgClosure **)&tso->blocked_exceptions);
- // We don't always chase the link field: TSOs on the blackhole
- // queue are not automatically alive, so the link field is a
- // "weak" pointer in that case.
- if (tso->why_blocked != BlockedOnBlackHole) {
- evacuate((StgClosure **)&tso->link);
- }
-
// scavange current transaction record
evacuate((StgClosure **)&tso->trec);
@@ -171,8 +175,15 @@ scavengeTSO (StgTSO *tso)
if (gct->failed_to_evac) {
tso->flags |= TSO_DIRTY;
+ scavenge_TSO_link(tso);
} else {
tso->flags &= ~TSO_DIRTY;
+ scavenge_TSO_link(tso);
+ if (gct->failed_to_evac) {
+ tso->flags |= TSO_LINK_DIRTY;
+ } else {
+ tso->flags &= ~TSO_LINK_DIRTY;
+ }
}
gct->eager_promotion = saved_eager;
@@ -517,7 +528,6 @@ linear_scan:
case TSO:
{
scavengeTSO((StgTSO*)p);
- gct->failed_to_evac = rtsTrue; // always on the mutable list
break;
}
@@ -837,7 +847,6 @@ scavenge_one(StgPtr p)
case TSO:
{
scavengeTSO((StgTSO*)p);
- gct->failed_to_evac = rtsTrue; // always on the mutable list
break;
}
@@ -1026,14 +1035,17 @@ scavenge_mutable_list(generation *gen)
case TSO: {
StgTSO *tso = (StgTSO *)p;
if ((tso->flags & TSO_DIRTY) == 0) {
- // A clean TSO: we don't have to traverse its
- // stack. However, we *do* follow the link field:
- // we don't want to have to mark a TSO dirty just
- // because we put it on a different queue.
- if (tso->why_blocked != BlockedOnBlackHole) {
- evacuate((StgClosure **)&tso->link);
- }
- recordMutableGen_GC((StgClosure *)p,gen);
+ // Must be on the mutable list because its link
+ // field is dirty.
+ ASSERT(tso->flags & TSO_LINK_DIRTY);
+
+ scavenge_TSO_link(tso);
+ if (gct->failed_to_evac) {
+ recordMutableGen_GC((StgClosure *)p,gen);
+ gct->failed_to_evac = rtsFalse;
+ } else {
+ tso->flags &= ~TSO_LINK_DIRTY;
+ }
continue;
}
}
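The Scav.c changes close the loop: a TSO that is clean apart from its link field (TSO_LINK_DIRTY) has only its link evacuated, and it drops off the mutable list once the link no longer points into a younger generation. A model of that cleaning pass (a sketch; link_points_young() is a hypothetical stand-in for evacuate() plus the failed_to_evac flag):

/* GC-side sketch: after evacuation, an object stays on the next cycle's
 * mutable list only if its link still points into a younger generation;
 * otherwise its dirty bit is cleared, and the next old-to-young write
 * pays the barrier again. */
static int link_points_young(const Obj *o)   /* stand-in for failed_to_evac */
{
    return o->link != NULL && o->link->gen_no < o->gen_no;
}

static void scavenge_mutable_list_model(void)
{
    size_t i, kept = 0;
    for (i = 0; i < n_mutable; i++) {
        Obj *o = mutable_list[i];
        if (link_points_young(o)) {
            mutable_list[kept++] = o;   /* keep: still an old-to-young root */
        } else {
            o->link_dirty = 0;          /* clean: off the list until redirtied */
        }
    }
    n_mutable = kept;
}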
diff --git a/rts/sm/Scav.c-inc b/rts/sm/Scav.c-inc
index bff193be7c..ae6a6bba59 100644
--- a/rts/sm/Scav.c-inc
+++ b/rts/sm/Scav.c-inc
@@ -327,18 +327,7 @@ scavenge_block (bdescr *bd)
case TSO:
{
StgTSO *tso = (StgTSO *)p;
-
- gct->eager_promotion = rtsFalse;
- scavengeTSO(tso);
- gct->eager_promotion = saved_eager_promotion;
-
- if (gct->failed_to_evac) {
- tso->flags |= TSO_DIRTY;
- } else {
- tso->flags &= ~TSO_DIRTY;
- }
-
- gct->failed_to_evac = rtsTrue; // always on the mutable list
+ scavengeTSO(tso);
p += tso_sizeW(tso);
break;
}
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 744bd5735f..bd321b331d 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -822,6 +822,35 @@ dirty_MUT_VAR(StgRegTable *reg, StgClosure *p)
}
}
+// Setting a TSO's link field with a write barrier.
+// It is *not* necessary to call this function when
+// * setting the link field to END_TSO_QUEUE
+// * putting a TSO on the blackhole_queue
+// * setting the link field of the currently running TSO, as it
+// will already be dirty.
+void
+setTSOLink (Capability *cap, StgTSO *tso, StgTSO *target)
+{
+ bdescr *bd;
+ if ((tso->flags & (TSO_DIRTY|TSO_LINK_DIRTY)) == 0) {
+ tso->flags |= TSO_LINK_DIRTY;
+ bd = Bdescr((StgPtr)tso);
+ if (bd->gen_no > 0) recordMutableCap((StgClosure*)tso,cap,bd->gen_no);
+ }
+ tso->_link = target;
+}
+
+void
+dirty_TSO (Capability *cap, StgTSO *tso)
+{
+ bdescr *bd;
+ if ((tso->flags & TSO_DIRTY) == 0) {
+ tso->flags |= TSO_DIRTY;
+ bd = Bdescr((StgPtr)tso);
+ if (bd->gen_no > 0) recordMutableCap((StgClosure*)tso,cap,bd->gen_no);
+ }
+}
+
/*
This is the write barrier for MVARs. An MVAR_CLEAN objects is not
on the mutable list; a MVAR_DIRTY is. When written to, a
MVAR_CLEAN turns into a MVAR_DIRTY and is put on the mutable list. */