summaryrefslogtreecommitdiff
path: root/rts
diff options
context:
space:
mode:
Diffstat (limited to 'rts')
-rw-r--r--rts/Capability.c1
-rw-r--r--rts/Capability.h1
-rw-r--r--rts/Exception.cmm5
-rw-r--r--rts/PrimOps.cmm102
-rw-r--r--rts/RtsSymbols.c1
-rw-r--r--rts/STM.c347
-rw-r--r--rts/STM.h13
-rw-r--r--rts/StgMiscClosures.cmm13
-rw-r--r--rts/sm/GC.c8
-rw-r--r--rts/sm/GC.h4
-rw-r--r--rts/sm/Scav.c4
11 files changed, 30 insertions, 469 deletions
diff --git a/rts/Capability.c b/rts/Capability.c
index f9141ee025..74f7a295e9 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -298,7 +298,6 @@ initCapability (Capability *cap, uint32_t i)
cap->weak_ptr_list_hd = NULL;
cap->weak_ptr_list_tl = NULL;
cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
- cap->free_invariant_check_queues = END_INVARIANT_CHECK_QUEUE;
cap->free_trec_chunks = END_STM_CHUNK_LIST;
cap->free_trec_headers = NO_TREC;
cap->transaction_tokens = 0;
diff --git a/rts/Capability.h b/rts/Capability.h
index 5ab693e516..e4df0b881f 100644
--- a/rts/Capability.h
+++ b/rts/Capability.h
@@ -154,7 +154,6 @@ struct Capability_ {
// Per-capability STM-related data
StgTVarWatchQueue *free_tvar_watch_queues;
- StgInvariantCheckQueue *free_invariant_check_queues;
StgTRecChunk *free_trec_chunks;
StgTRecHeader *free_trec_headers;
uint32_t transaction_tokens;
diff --git a/rts/Exception.cmm b/rts/Exception.cmm
index 8deecbb1e8..8ea94b19f2 100644
--- a/rts/Exception.cmm
+++ b/rts/Exception.cmm
@@ -489,11 +489,6 @@ retry_pop_stack:
ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
ccall stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr");
- if (outer != NO_TREC) {
- ccall stmAbortTransaction(MyCapability() "ptr", outer "ptr");
- ccall stmFreeAbortedTRec(MyCapability() "ptr", outer "ptr");
- }
-
StgTSO_trec(CurrentTSO) = NO_TREC;
if (r != 0) {
// Transaction was valid: continue searching for a catch frame
diff --git a/rts/PrimOps.cmm b/rts/PrimOps.cmm
index 8c2eeb1b98..293c4fea0c 100644
--- a/rts/PrimOps.cmm
+++ b/rts/PrimOps.cmm
@@ -1057,11 +1057,10 @@ INFO_TABLE_RET(stg_catch_retry_frame, CATCH_RETRY_FRAME,
// Atomically frame ------------------------------------------------------------
// This must match StgAtomicallyFrame in Closures.h
-#define ATOMICALLY_FRAME_FIELDS(w_,p_,info_ptr,p1,p2,code,next,result) \
+#define ATOMICALLY_FRAME_FIELDS(w_,p_,info_ptr,p1,p2,code,result) \
w_ info_ptr, \
PROF_HDR_FIELDS(w_,p1,p2) \
p_ code, \
- p_ next, \
p_ result
@@ -1070,67 +1069,36 @@ INFO_TABLE_RET(stg_atomically_frame, ATOMICALLY_FRAME,
ATOMICALLY_FRAME_FIELDS(W_,P_,
info_ptr, p1, p2,
code,
- next_invariant,
frame_result))
return (P_ result) // value returned to the frame
{
W_ valid;
- gcptr trec, outer, next_invariant, q;
+ gcptr trec, outer, q;
trec = StgTSO_trec(CurrentTSO);
outer = StgTRecHeader_enclosing_trec(trec);
- if (outer == NO_TREC) {
- /* First time back at the atomically frame -- pick up invariants */
- ("ptr" next_invariant) =
- ccall stmGetInvariantsToCheck(MyCapability() "ptr", trec "ptr");
- frame_result = result;
+ /* Back at the atomically frame */
+ frame_result = result;
+ /* try to commit */
+ (valid) = ccall stmCommitTransaction(MyCapability() "ptr", trec "ptr");
+ if (valid != 0) {
+ /* Transaction was valid: commit succeeded */
+ StgTSO_trec(CurrentTSO) = NO_TREC;
+ return (frame_result);
} else {
- /* Second/subsequent time back at the atomically frame -- abort the
- * tx that's checking the invariant and move on to the next one */
- StgTSO_trec(CurrentTSO) = outer;
- StgInvariantCheckQueue_my_execution(next_invariant) = trec;
- ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
- /* Don't free trec -- it's linked from q and will be stashed in the
- * invariant if we eventually commit. */
- next_invariant =
- StgInvariantCheckQueue_next_queue_entry(next_invariant);
- trec = outer;
- }
-
- if (next_invariant != END_INVARIANT_CHECK_QUEUE) {
- /* We can't commit yet: another invariant to check */
- ("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr", trec "ptr");
+ /* Transaction was not valid: try again */
+ ("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr",
+ NO_TREC "ptr");
StgTSO_trec(CurrentTSO) = trec;
- q = StgInvariantCheckQueue_invariant(next_invariant);
+
jump stg_ap_v_fast
+ // push the StgAtomicallyFrame again: the code generator is
+ // clever enough to only assign the fields that have changed.
(ATOMICALLY_FRAME_FIELDS(,,info_ptr,p1,p2,
- code,next_invariant,frame_result))
- (StgAtomicInvariant_code(q));
-
- } else {
-
- /* We've got no more invariants to check, try to commit */
- (valid) = ccall stmCommitTransaction(MyCapability() "ptr", trec "ptr");
- if (valid != 0) {
- /* Transaction was valid: commit succeeded */
- StgTSO_trec(CurrentTSO) = NO_TREC;
- return (frame_result);
- } else {
- /* Transaction was not valid: try again */
- ("ptr" trec) = ccall stmStartTransaction(MyCapability() "ptr",
- NO_TREC "ptr");
- StgTSO_trec(CurrentTSO) = trec;
- next_invariant = END_INVARIANT_CHECK_QUEUE;
-
- jump stg_ap_v_fast
- // push the StgAtomicallyFrame again: the code generator is
- // clever enough to only assign the fields that have changed.
- (ATOMICALLY_FRAME_FIELDS(,,info_ptr,p1,p2,
- code,next_invariant,frame_result))
- (code);
- }
+ code,frame_result))
+ (code);
}
}
@@ -1140,7 +1108,6 @@ INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
ATOMICALLY_FRAME_FIELDS(W_,P_,
info_ptr, p1, p2,
code,
- next_invariant,
frame_result))
return (/* no return values */)
{
@@ -1152,7 +1119,7 @@ INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
/* Previous attempt is still valid: no point trying again yet */
jump stg_block_noregs
(ATOMICALLY_FRAME_FIELDS(,,info_ptr, p1, p2,
- code,next_invariant,frame_result))
+ code,frame_result))
();
} else {
/* Previous attempt is no longer valid: try again */
@@ -1162,7 +1129,7 @@ INFO_TABLE_RET(stg_atomically_waiting_frame, ATOMICALLY_FRAME,
// change the frame header to stg_atomically_frame_info
jump stg_ap_v_fast
(ATOMICALLY_FRAME_FIELDS(,,stg_atomically_frame_info, p1, p2,
- code,next_invariant,frame_result))
+ code,frame_result))
(code);
}
}
@@ -1213,7 +1180,7 @@ stg_atomicallyzh (P_ stm)
{
P_ old_trec;
P_ new_trec;
- P_ code, next_invariant, frame_result;
+ P_ code, frame_result;
// stmStartTransaction may allocate
MAYBE_GC_P(stg_atomicallyzh, stm);
@@ -1228,7 +1195,6 @@ stg_atomicallyzh (P_ stm)
}
code = stm;
- next_invariant = END_INVARIANT_CHECK_QUEUE;
frame_result = NO_TREC;
/* Start the memory transaction */
@@ -1237,7 +1203,7 @@ stg_atomicallyzh (P_ stm)
jump stg_ap_v_fast
(ATOMICALLY_FRAME_FIELDS(,,stg_atomically_frame_info, CCCS, 0,
- code,next_invariant,frame_result))
+ code,frame_result))
(stm);
}
@@ -1340,16 +1306,6 @@ retry_pop_stack:
// We've reached the ATOMICALLY_FRAME: attempt to wait
ASSERT(frame_type == ATOMICALLY_FRAME);
- if (outer != NO_TREC) {
- // We called retry while checking invariants, so abort the current
- // invariant check (merging its TVar accesses into the parents read
- // set so we'll wait on them)
- ccall stmAbortTransaction(MyCapability() "ptr", trec "ptr");
- ccall stmFreeAbortedTRec(MyCapability() "ptr", trec "ptr");
- trec = outer;
- StgTSO_trec(CurrentTSO) = trec;
- outer = StgTRecHeader_enclosing_trec(trec);
- }
ASSERT(outer == NO_TREC);
(r) = ccall stmWait(MyCapability() "ptr", CurrentTSO "ptr", trec "ptr");
@@ -1369,20 +1325,6 @@ retry_pop_stack:
}
}
-stg_checkzh (P_ closure /* STM a */)
-{
- W_ trec;
-
- MAYBE_GC_P (stg_checkzh, closure);
-
- trec = StgTSO_trec(CurrentTSO);
- ccall stmAddInvariantToCheck(MyCapability() "ptr",
- trec "ptr",
- closure "ptr");
- return ();
-}
-
-
stg_newTVarzh (P_ init)
{
W_ tv;
diff --git a/rts/RtsSymbols.c b/rts/RtsSymbols.c
index 4952f013f7..783992b11a 100644
--- a/rts/RtsSymbols.c
+++ b/rts/RtsSymbols.c
@@ -558,7 +558,6 @@
SymI_HasProto(stg_catchzh) \
SymI_HasProto(stg_catchRetryzh) \
SymI_HasProto(stg_catchSTMzh) \
- SymI_HasProto(stg_checkzh) \
SymI_HasProto(stg_clearCCSzh) \
SymI_HasProto(stg_compactAddWithSharingzh) \
SymI_HasProto(stg_compactAddzh) \
diff --git a/rts/STM.c b/rts/STM.c
index 5c8fd4ff40..058eec7409 100644
--- a/rts/STM.c
+++ b/rts/STM.c
@@ -211,15 +211,6 @@ static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
return (result == expected);
}
-
-static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
- // Nothing -- uniproc
- return true;
-}
-
-static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
- // Nothing -- uniproc
-}
#endif
#if defined(STM_CG_LOCK) /*........................................*/
@@ -272,15 +263,6 @@ static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
TRACE("%p : %d", result ? "success" : "failure");
return (result == expected);
}
-
-static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
- // Nothing -- protected by STM lock
- return true;
-}
-
-static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
- // Nothing -- protected by STM lock
-}
#endif
#if defined(STM_FG_LOCKS) /*...................................*/
@@ -332,32 +314,10 @@ static StgBool cond_lock_tvar(StgTRecHeader *trec,
TRACE("%p : %s", trec, result ? "success" : "failure");
return (result == expected);
}
-
-static StgBool lock_inv(StgAtomicInvariant *inv) {
- return (cas(&(inv -> lock), 0, 1) == 0);
-}
-
-static void unlock_inv(StgAtomicInvariant *inv) {
- ASSERT(inv -> lock == 1);
- inv -> lock = 0;
-}
#endif
/*......................................................................*/
-static StgBool watcher_is_tso(StgTVarWatchQueue *q) {
- StgClosure *c = q -> closure;
- const StgInfoTable *info = get_itbl(c);
- return (info -> type) == TSO;
-}
-
-static StgBool watcher_is_invariant(StgTVarWatchQueue *q) {
- StgClosure *c = q -> closure;
- return (c->header.info == &stg_ATOMIC_INVARIANT_info);
-}
-
-/*......................................................................*/
-
// Helper functions for thread blocking and unblocking
static void park_tso(StgTSO *tso) {
@@ -406,9 +366,7 @@ static void unpark_waiters_on(Capability *cap, StgTVar *s) {
for (;
q != END_STM_WATCH_QUEUE;
q = q -> prev_queue_entry) {
- if (watcher_is_tso(q)) {
unpark_tso(cap, (StgTSO *)(q -> closure));
- }
}
}
@@ -416,16 +374,6 @@ static void unpark_waiters_on(Capability *cap, StgTVar *s) {
// Helper functions for downstream allocation and initialization
-static StgInvariantCheckQueue *new_stg_invariant_check_queue(Capability *cap,
- StgAtomicInvariant *invariant) {
- StgInvariantCheckQueue *result;
- result = (StgInvariantCheckQueue *)allocate(cap, sizeofW(StgInvariantCheckQueue));
- SET_HDR (result, &stg_INVARIANT_CHECK_QUEUE_info, CCS_SYSTEM);
- result -> invariant = invariant;
- result -> my_execution = NO_TREC;
- return result;
-}
-
static StgTVarWatchQueue *new_stg_tvar_watch_queue(Capability *cap,
StgClosure *closure) {
StgTVarWatchQueue *result;
@@ -452,7 +400,6 @@ static StgTRecHeader *new_stg_trec_header(Capability *cap,
result -> enclosing_trec = enclosing_trec;
result -> current_chunk = new_stg_trec_chunk(cap);
- result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;
if (enclosing_trec == NO_TREC) {
result -> state = TREC_ACTIVE;
@@ -470,20 +417,6 @@ static StgTRecHeader *new_stg_trec_header(Capability *cap,
// Allocation / deallocation functions that retain per-capability lists
// of closures that can be re-used
-static StgInvariantCheckQueue *alloc_stg_invariant_check_queue(Capability *cap,
- StgAtomicInvariant *invariant) {
- StgInvariantCheckQueue *result = NULL;
- if (cap -> free_invariant_check_queues == END_INVARIANT_CHECK_QUEUE) {
- result = new_stg_invariant_check_queue(cap, invariant);
- } else {
- result = cap -> free_invariant_check_queues;
- result -> invariant = invariant;
- result -> my_execution = NO_TREC;
- cap -> free_invariant_check_queues = result -> next_queue_entry;
- }
- return result;
-}
-
static StgTVarWatchQueue *alloc_stg_tvar_watch_queue(Capability *cap,
StgClosure *closure) {
StgTVarWatchQueue *result = NULL;
@@ -536,7 +469,6 @@ static StgTRecHeader *alloc_stg_trec_header(Capability *cap,
cap -> free_trec_headers = result -> enclosing_trec;
result -> enclosing_trec = enclosing_trec;
result -> current_chunk -> next_entry_idx = 0;
- result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;
if (enclosing_trec == NO_TREC) {
result -> state = TREC_ACTIVE;
} else {
@@ -1111,202 +1043,8 @@ static TRecEntry *get_entry_for(StgTRecHeader *trec, StgTVar *tvar, StgTRecHeade
/*......................................................................*/
-/*
- * Add/remove links between an invariant TVars. The caller must have
- * locked the TVars involved and the invariant.
- */
-
-static void disconnect_invariant(Capability *cap,
- StgAtomicInvariant *inv) {
- StgTRecHeader *last_execution = inv -> last_execution;
-
- TRACE("unhooking last execution inv=%p trec=%p", inv, last_execution);
-
- FOR_EACH_ENTRY(last_execution, e, {
- StgTVar *s = e -> tvar;
- StgTVarWatchQueue *q = s -> first_watch_queue_entry;
- DEBUG_ONLY( StgBool found = false );
- TRACE(" looking for trec on tvar=%p", s);
- for (q = s -> first_watch_queue_entry;
- q != END_STM_WATCH_QUEUE;
- q = q -> next_queue_entry) {
- if (q -> closure == (StgClosure*)inv) {
- StgTVarWatchQueue *pq;
- StgTVarWatchQueue *nq;
- nq = q -> next_queue_entry;
- pq = q -> prev_queue_entry;
- if (nq != END_STM_WATCH_QUEUE) {
- nq -> prev_queue_entry = pq;
- }
- if (pq != END_STM_WATCH_QUEUE) {
- pq -> next_queue_entry = nq;
- } else {
- ASSERT(s -> first_watch_queue_entry == q);
- s -> first_watch_queue_entry = nq;
- dirty_TVAR(cap,s); // we modified first_watch_queue_entry
- }
- TRACE(" found it in watch queue entry %p", q);
- free_stg_tvar_watch_queue(cap, q);
- DEBUG_ONLY( found = true );
- break;
- }
- }
- ASSERT(found);
- });
- inv -> last_execution = NO_TREC;
-}
-
-static void connect_invariant_to_trec(Capability *cap,
- StgAtomicInvariant *inv,
- StgTRecHeader *my_execution) {
- TRACE("connecting execution inv=%p trec=%p", inv, my_execution);
-
- ASSERT(inv -> last_execution == NO_TREC);
-
- FOR_EACH_ENTRY(my_execution, e, {
- StgTVar *s = e -> tvar;
- StgTVarWatchQueue *q = alloc_stg_tvar_watch_queue(cap, (StgClosure*)inv);
- StgTVarWatchQueue *fq = s -> first_watch_queue_entry;
-
- // We leave "last_execution" holding the values that will be
- // in the heap after the transaction we're in the process
- // of committing has finished.
- TRecEntry *entry = get_entry_for(my_execution -> enclosing_trec, s, NULL);
- if (entry != NULL) {
- e -> expected_value = entry -> new_value;
- e -> new_value = entry -> new_value;
- }
-
- TRACE(" linking trec on tvar=%p value=%p q=%p", s, e -> expected_value, q);
- q -> next_queue_entry = fq;
- q -> prev_queue_entry = END_STM_WATCH_QUEUE;
- if (fq != END_STM_WATCH_QUEUE) {
- fq -> prev_queue_entry = q;
- }
- s -> first_watch_queue_entry = q;
- dirty_TVAR(cap,s); // we modified first_watch_queue_entry
- });
-
- inv -> last_execution = my_execution;
-}
-
-/*
- * Add a new invariant to the trec's list of invariants to check on commit
- */
-void stmAddInvariantToCheck(Capability *cap,
- StgTRecHeader *trec,
- StgClosure *code) {
- StgAtomicInvariant *invariant;
- StgInvariantCheckQueue *q;
- TRACE("%p : stmAddInvariantToCheck closure=%p", trec, code);
- ASSERT(trec != NO_TREC);
- ASSERT(trec -> state == TREC_ACTIVE ||
- trec -> state == TREC_CONDEMNED);
-
-
- // 1. Allocate an StgAtomicInvariant, set last_execution to NO_TREC
- // to signal that this is a new invariant in the current atomic block
-
- invariant = (StgAtomicInvariant *) allocate(cap, sizeofW(StgAtomicInvariant));
- TRACE("%p : stmAddInvariantToCheck allocated invariant=%p", trec, invariant);
- SET_HDR (invariant, &stg_ATOMIC_INVARIANT_info, CCS_SYSTEM);
- invariant -> code = code;
- invariant -> last_execution = NO_TREC;
- invariant -> lock = 0;
-
- // 2. Allocate an StgInvariantCheckQueue entry, link it to the current trec
-
- q = alloc_stg_invariant_check_queue(cap, invariant);
- TRACE("%p : stmAddInvariantToCheck allocated q=%p", trec, q);
- q -> invariant = invariant;
- q -> my_execution = NO_TREC;
- q -> next_queue_entry = trec -> invariants_to_check;
- trec -> invariants_to_check = q;
-
- TRACE("%p : stmAddInvariantToCheck done", trec);
-}
-
-/*
- * Fill in the trec's list of invariants that might be violated by the
- * current transaction.
- */
-
-StgInvariantCheckQueue *stmGetInvariantsToCheck(Capability *cap, StgTRecHeader *trec) {
- StgTRecChunk *c;
- TRACE("%p : stmGetInvariantsToCheck, head was %p",
- trec,
- trec -> invariants_to_check);
-
- ASSERT(trec != NO_TREC);
- ASSERT((trec -> state == TREC_ACTIVE) ||
- (trec -> state == TREC_WAITING) ||
- (trec -> state == TREC_CONDEMNED));
- ASSERT(trec -> enclosing_trec == NO_TREC);
-
- lock_stm(trec);
- c = trec -> current_chunk;
- while (c != END_STM_CHUNK_LIST) {
- unsigned int i;
- for (i = 0; i < c -> next_entry_idx; i ++) {
- TRecEntry *e = &(c -> entries[i]);
- if (entry_is_update(e)) {
- StgTVar *s = e -> tvar;
- StgClosure *old = lock_tvar(trec, s);
-
- // Pick up any invariants on the TVar being updated
- // by entry "e"
-
- StgTVarWatchQueue *q;
- TRACE("%p : checking for invariants on %p", trec, s);
- for (q = s -> first_watch_queue_entry;
- q != END_STM_WATCH_QUEUE;
- q = q -> next_queue_entry) {
- if (watcher_is_invariant(q)) {
- StgBool found = false;
- StgInvariantCheckQueue *q2;
- TRACE("%p : Touching invariant %p", trec, q -> closure);
- for (q2 = trec -> invariants_to_check;
- q2 != END_INVARIANT_CHECK_QUEUE;
- q2 = q2 -> next_queue_entry) {
- if (q2 -> invariant == (StgAtomicInvariant*)(q -> closure)) {
- TRACE("%p : Already found %p", trec, q -> closure);
- found = true;
- break;
- }
- }
-
- if (!found) {
- StgInvariantCheckQueue *q3;
- TRACE("%p : Not already found %p", trec, q -> closure);
- q3 = alloc_stg_invariant_check_queue(cap,
- (StgAtomicInvariant*) q -> closure);
- q3 -> next_queue_entry = trec -> invariants_to_check;
- trec -> invariants_to_check = q3;
- }
- }
- }
-
- unlock_tvar(cap, trec, s, old, false);
- }
- }
- c = c -> prev_chunk;
- }
-
- unlock_stm(trec);
-
- TRACE("%p : stmGetInvariantsToCheck, head now %p",
- trec,
- trec -> invariants_to_check);
-
- return (trec -> invariants_to_check);
-}
-
-/*......................................................................*/
-
StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
StgInt64 max_commits_at_start = max_commits;
- StgBool touched_invariants;
- StgBool use_read_phase;
TRACE("%p : stmCommitTransaction()", trec);
ASSERT(trec != NO_TREC);
@@ -1317,69 +1055,15 @@ StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
ASSERT((trec -> state == TREC_ACTIVE) ||
(trec -> state == TREC_CONDEMNED));
- // touched_invariants is true if we've written to a TVar with invariants
- // attached to it, or if we're trying to add a new invariant to the system.
-
- touched_invariants = (trec -> invariants_to_check != END_INVARIANT_CHECK_QUEUE);
-
- // If we have touched invariants then (i) lock the invariant, and (ii) add
- // the invariant's read set to our own. Step (i) is needed to serialize
- // concurrent transactions that attempt to make conflicting updates
- // to the invariant's trec (suppose it read from t1 and t2, and that one
- // concurrent transcation writes only to t1, and a second writes only to
- // t2). Step (ii) is needed so that both transactions will lock t1 and t2
- // to gain access to their wait lists (and hence be able to unhook the
- // invariant from both tvars).
-
- if (touched_invariants) {
- StgInvariantCheckQueue *q = trec -> invariants_to_check;
- TRACE("%p : locking invariants", trec);
- while (q != END_INVARIANT_CHECK_QUEUE) {
- StgTRecHeader *inv_old_trec;
- StgAtomicInvariant *inv;
- TRACE("%p : locking invariant %p", trec, q -> invariant);
- inv = q -> invariant;
- if (!lock_inv(inv)) {
- TRACE("%p : failed to lock %p", trec, inv);
- trec -> state = TREC_CONDEMNED;
- break;
- }
-
- inv_old_trec = inv -> last_execution;
- if (inv_old_trec != NO_TREC) {
- StgTRecChunk *c = inv_old_trec -> current_chunk;
- while (c != END_STM_CHUNK_LIST) {
- unsigned int i;
- for (i = 0; i < c -> next_entry_idx; i ++) {
- TRecEntry *e = &(c -> entries[i]);
- TRACE("%p : ensuring we lock TVars for %p", trec, e -> tvar);
- merge_read_into (cap, trec, e -> tvar, e -> expected_value);
- }
- c = c -> prev_chunk;
- }
- }
- q = q -> next_queue_entry;
- }
- TRACE("%p : finished locking invariants", trec);
- }
-
// Use a read-phase (i.e. don't lock TVars we've read but not updated) if
- // (i) the configuration lets us use a read phase, and (ii) we've not
- // touched or introduced any invariants.
- //
- // In principle we could extend the implementation to support a read-phase
- // and invariants, but it complicates the logic: the links between
- // invariants and TVars are managed by the TVar watch queues which are
- // protected by the TVar's locks.
-
- use_read_phase = ((config_use_read_phase) && (!touched_invariants));
+ // the configuration lets us use a read phase.
- bool result = validate_and_acquire_ownership(cap, trec, (!use_read_phase), true);
+ bool result = validate_and_acquire_ownership(cap, trec, (!config_use_read_phase), true);
if (result) {
// We now know that all the updated locations hold their expected values.
ASSERT(trec -> state == TREC_ACTIVE);
- if (use_read_phase) {
+ if (config_use_read_phase) {
StgInt64 max_commits_at_end;
StgInt64 max_concurrent_commits;
TRACE("%p : doing read check", trec);
@@ -1399,32 +1083,11 @@ StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
// at the end of the call to validate_and_acquire_ownership. This forms the
// linearization point of the commit.
- // 1. If we have touched or introduced any invariants then unhook them
- // from the TVars they depended on last time they were executed
- // and hook them on the TVars that they now depend on.
- if (touched_invariants) {
- StgInvariantCheckQueue *q = trec -> invariants_to_check;
- while (q != END_INVARIANT_CHECK_QUEUE) {
- StgAtomicInvariant *inv = q -> invariant;
- if (inv -> last_execution != NO_TREC) {
- disconnect_invariant(cap, inv);
- }
-
- TRACE("%p : hooking up new execution trec=%p", trec, q -> my_execution);
- connect_invariant_to_trec(cap, inv, q -> my_execution);
-
- TRACE("%p : unlocking invariant %p", trec, inv);
- unlock_inv(inv);
-
- q = q -> next_queue_entry;
- }
- }
-
- // 2. Make the updates required by the transaction
+ // Make the updates required by the transaction.
FOR_EACH_ENTRY(trec, e, {
StgTVar *s;
s = e -> tvar;
- if ((!use_read_phase) || (e -> new_value != e -> expected_value)) {
+ if ((!config_use_read_phase) || (e -> new_value != e -> expected_value)) {
// Either the entry is an update or we're not using a read phase:
// write the value back to the TVar, unlocking it if necessary.
diff --git a/rts/STM.h b/rts/STM.h
index 2484c2f991..3d32daace2 100644
--- a/rts/STM.h
+++ b/rts/STM.h
@@ -138,18 +138,6 @@ StgBool stmValidateNestOfTransactions(Capability *cap, StgTRecHeader *trec);
*/
/*
- * Fill in the trec's list of invariants that might be violated by the current
- * transaction.
- */
-
-StgInvariantCheckQueue *stmGetInvariantsToCheck(Capability *cap,
- StgTRecHeader *trec);
-
-void stmAddInvariantToCheck(Capability *cap,
- StgTRecHeader *trec,
- StgClosure *code);
-
-/*
* Test whether the current transaction context is valid and, if so,
* commit its memory accesses to the heap. stmCommitTransaction must
* unblock any threads which are waiting on tvars that updates have
@@ -209,7 +197,6 @@ void stmWriteTVar(Capability *cap,
/* NULLs */
#define END_STM_WATCH_QUEUE ((StgTVarWatchQueue *)(void *)&stg_END_STM_WATCH_QUEUE_closure)
-#define END_INVARIANT_CHECK_QUEUE ((StgInvariantCheckQueue *)(void *)&stg_END_INVARIANT_CHECK_QUEUE_closure)
#define END_STM_CHUNK_LIST ((StgTRecChunk *)(void *)&stg_END_STM_CHUNK_LIST_closure)
#define NO_TREC ((StgTRecHeader *)(void *)&stg_NO_TREC_closure)
diff --git a/rts/StgMiscClosures.cmm b/rts/StgMiscClosures.cmm
index c307293cce..3add25e219 100644
--- a/rts/StgMiscClosures.cmm
+++ b/rts/StgMiscClosures.cmm
@@ -485,24 +485,15 @@ INFO_TABLE(stg_TVAR_DIRTY, 2, 1, TVAR, "TVAR", "TVAR")
INFO_TABLE(stg_TVAR_WATCH_QUEUE, 3, 0, MUT_PRIM, "TVAR_WATCH_QUEUE", "TVAR_WATCH_QUEUE")
{ foreign "C" barf("TVAR_WATCH_QUEUE object entered!") never returns; }
-INFO_TABLE(stg_ATOMIC_INVARIANT, 2, 1, MUT_PRIM, "ATOMIC_INVARIANT", "ATOMIC_INVARIANT")
-{ foreign "C" barf("ATOMIC_INVARIANT object entered!") never returns; }
-
-INFO_TABLE(stg_INVARIANT_CHECK_QUEUE, 3, 0, MUT_PRIM, "INVARIANT_CHECK_QUEUE", "INVARIANT_CHECK_QUEUE")
-{ foreign "C" barf("INVARIANT_CHECK_QUEUE object entered!") never returns; }
-
INFO_TABLE(stg_TREC_CHUNK, 0, 0, TREC_CHUNK, "TREC_CHUNK", "TREC_CHUNK")
{ foreign "C" barf("TREC_CHUNK object entered!") never returns; }
-INFO_TABLE(stg_TREC_HEADER, 3, 1, MUT_PRIM, "TREC_HEADER", "TREC_HEADER")
+INFO_TABLE(stg_TREC_HEADER, 2, 1, MUT_PRIM, "TREC_HEADER", "TREC_HEADER")
{ foreign "C" barf("TREC_HEADER object entered!") never returns; }
INFO_TABLE_CONSTR(stg_END_STM_WATCH_QUEUE,0,0,0,CONSTR_NOCAF,"END_STM_WATCH_QUEUE","END_STM_WATCH_QUEUE")
{ foreign "C" barf("END_STM_WATCH_QUEUE object entered!") never returns; }
-INFO_TABLE_CONSTR(stg_END_INVARIANT_CHECK_QUEUE,0,0,0,CONSTR_NOCAF,"END_INVARIANT_CHECK_QUEUE","END_INVARIANT_CHECK_QUEUE")
-{ foreign "C" barf("END_INVARIANT_CHECK_QUEUE object entered!") never returns; }
-
INFO_TABLE_CONSTR(stg_END_STM_CHUNK_LIST,0,0,0,CONSTR_NOCAF,"END_STM_CHUNK_LIST","END_STM_CHUNK_LIST")
{ foreign "C" barf("END_STM_CHUNK_LIST object entered!") never returns; }
@@ -511,8 +502,6 @@ INFO_TABLE_CONSTR(stg_NO_TREC,0,0,0,CONSTR_NOCAF,"NO_TREC","NO_TREC")
CLOSURE(stg_END_STM_WATCH_QUEUE_closure,stg_END_STM_WATCH_QUEUE);
-CLOSURE(stg_END_INVARIANT_CHECK_QUEUE_closure,stg_END_INVARIANT_CHECK_QUEUE);
-
CLOSURE(stg_END_STM_CHUNK_LIST_closure,stg_END_STM_CHUNK_LIST);
CLOSURE(stg_NO_TREC_closure,stg_NO_TREC);
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index 1e07948516..67eba93d52 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -120,8 +120,6 @@ uint32_t mutlist_MUTVARS,
mutlist_TVAR_WATCH_QUEUE,
mutlist_TREC_CHUNK,
mutlist_TREC_HEADER,
- mutlist_ATOMIC_INVARIANT,
- mutlist_INVARIANT_CHECK_QUEUE,
mutlist_OTHERS;
#endif
@@ -249,8 +247,6 @@ GarbageCollect (uint32_t collect_gen,
mutlist_TVAR_WATCH_QUEUE = 0;
mutlist_TREC_CHUNK = 0;
mutlist_TREC_HEADER = 0;
- mutlist_ATOMIC_INVARIANT = 0;
- mutlist_INVARIANT_CHECK_QUEUE = 0;
mutlist_OTHERS = 0;
#endif
@@ -554,13 +550,11 @@ GarbageCollect (uint32_t collect_gen,
copied += mut_list_size;
debugTrace(DEBUG_gc,
- "mut_list_size: %lu (%d vars, %d arrays, %d MVARs, %d TVARs, %d TVAR_WATCH_QUEUEs, %d TREC_CHUNKs, %d TREC_HEADERs, %d ATOMIC_INVARIANTs, %d INVARIANT_CHECK_QUEUEs, %d others)",
+ "mut_list_size: %lu (%d vars, %d arrays, %d MVARs, %d TVARs, %d TVAR_WATCH_QUEUEs, %d TREC_CHUNKs, %d TREC_HEADERs, %d others)",
(unsigned long)(mut_list_size * sizeof(W_)),
mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS,
mutlist_TVAR, mutlist_TVAR_WATCH_QUEUE,
mutlist_TREC_CHUNK, mutlist_TREC_HEADER,
- mutlist_ATOMIC_INVARIANT,
- mutlist_INVARIANT_CHECK_QUEUE,
mutlist_OTHERS);
}
diff --git a/rts/sm/GC.h b/rts/sm/GC.h
index af662859ff..437a25f8d9 100644
--- a/rts/sm/GC.h
+++ b/rts/sm/GC.h
@@ -42,9 +42,7 @@ extern uint32_t mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS,
mutlist_TVAR,
mutlist_TVAR_WATCH_QUEUE,
mutlist_TREC_CHUNK,
- mutlist_TREC_HEADER,
- mutlist_ATOMIC_INVARIANT,
- mutlist_INVARIANT_CHECK_QUEUE;
+ mutlist_TREC_HEADER;
#endif
#if defined(PROF_SPIN) && defined(THREADED_RTS)
diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index 770865ce9f..72411bc975 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -1578,10 +1578,6 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
mutlist_TVAR_WATCH_QUEUE++;
else if (((StgClosure*)p)->header.info == &stg_TREC_HEADER_info)
mutlist_TREC_HEADER++;
- else if (((StgClosure*)p)->header.info == &stg_ATOMIC_INVARIANT_info)
- mutlist_ATOMIC_INVARIANT++;
- else if (((StgClosure*)p)->header.info == &stg_INVARIANT_CHECK_QUEUE_info)
- mutlist_INVARIANT_CHECK_QUEUE++;
else
mutlist_OTHERS++;
break;