diff options
author | kcc <kcc@138bc75d-0d04-0410-961f-82ee72b054a4> | 2013-12-05 09:18:38 +0000 |
---|---|---|
committer | kcc <kcc@138bc75d-0d04-0410-961f-82ee72b054a4> | 2013-12-05 09:18:38 +0000 |
commit | 4fc7b5acfc1d42a0701c8fff726a3ebe7f563dd9 (patch) | |
tree | 20d85354103063e38b162a6a90b7ae51fb4b6104 /libsanitizer/tsan/tsan_interface_atomic.cc | |
parent | 50e6c257ee5ad435e3a736a1375ccc7639fd9244 (diff) | |
download | gcc-4fc7b5acfc1d42a0701c8fff726a3ebe7f563dd9.tar.gz |
libsanitizer merge from upstream r196090
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@205695 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libsanitizer/tsan/tsan_interface_atomic.cc')
-rw-r--r-- | libsanitizer/tsan/tsan_interface_atomic.cc | 33 |
1 file changed, 19 insertions, 14 deletions
diff --git a/libsanitizer/tsan/tsan_interface_atomic.cc b/libsanitizer/tsan/tsan_interface_atomic.cc index 02ebb47e6af..180d87b7993 100644 --- a/libsanitizer/tsan/tsan_interface_atomic.cc +++ b/libsanitizer/tsan/tsan_interface_atomic.cc @@ -249,11 +249,10 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, // Assume the access is atomic. if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) { MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>()); - return *a; + return *a; // as if atomic } SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false); - thr->clock.set(thr->tid, thr->fast_state.epoch()); - thr->clock.acquire(&s->clock); + AcquireImpl(thr, pc, &s->clock); T v = *a; s->mtx.ReadUnlock(); __sync_synchronize(); @@ -271,13 +270,15 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v, // Strictly saying even relaxed store cuts off release sequence, // so must reset the clock. if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) { - *a = v; + *a = v; // as if atomic return; } __sync_synchronize(); SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true); - thr->clock.set(thr->tid, thr->fast_state.epoch()); - thr->clock.ReleaseStore(&s->clock); + thr->fast_state.IncrementEpoch(); + // Can't increment epoch w/o writing to the trace as well. + TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); + ReleaseImpl(thr, pc, &s->clock); *a = v; s->mtx.Unlock(); // Trainling memory barrier to provide sequential consistency @@ -291,13 +292,15 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { SyncVar *s = 0; if (mo != mo_relaxed) { s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true); - thr->clock.set(thr->tid, thr->fast_state.epoch()); + thr->fast_state.IncrementEpoch(); + // Can't increment epoch w/o writing to the trace as well. 
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); if (IsAcqRelOrder(mo)) - thr->clock.acq_rel(&s->clock); + AcquireReleaseImpl(thr, pc, &s->clock); else if (IsReleaseOrder(mo)) - thr->clock.release(&s->clock); + ReleaseImpl(thr, pc, &s->clock); else if (IsAcquireOrder(mo)) - thr->clock.acquire(&s->clock); + AcquireImpl(thr, pc, &s->clock); } v = F(a, v); if (s) @@ -355,13 +358,15 @@ static bool AtomicCAS(ThreadState *thr, uptr pc, SyncVar *s = 0; if (mo != mo_relaxed) { s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true); - thr->clock.set(thr->tid, thr->fast_state.epoch()); + thr->fast_state.IncrementEpoch(); + // Can't increment epoch w/o writing to the trace as well. + TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); if (IsAcqRelOrder(mo)) - thr->clock.acq_rel(&s->clock); + AcquireReleaseImpl(thr, pc, &s->clock); else if (IsReleaseOrder(mo)) - thr->clock.release(&s->clock); + ReleaseImpl(thr, pc, &s->clock); else if (IsAcquireOrder(mo)) - thr->clock.acquire(&s->clock); + AcquireImpl(thr, pc, &s->clock); } T cc = *c; T pr = func_cas(a, cc, v); |