Diffstat (limited to 'libsanitizer/tsan/tsan_interface_atomic.cc')
-rw-r--r--  libsanitizer/tsan/tsan_interface_atomic.cc  33
1 file changed, 19 insertions, 14 deletions
diff --git a/libsanitizer/tsan/tsan_interface_atomic.cc b/libsanitizer/tsan/tsan_interface_atomic.cc
index 02ebb47e6af..180d87b7993 100644
--- a/libsanitizer/tsan/tsan_interface_atomic.cc
+++ b/libsanitizer/tsan/tsan_interface_atomic.cc
@@ -249,11 +249,10 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
// Assume the access is atomic.
if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
- return *a;
+ return *a; // as if atomic
}
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
- thr->clock.set(thr->tid, thr->fast_state.epoch());
- thr->clock.acquire(&s->clock);
+ AcquireImpl(thr, pc, &s->clock);
T v = *a;
s->mtx.ReadUnlock();
__sync_synchronize();
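
The two deleted lines (record the thread's own epoch, then acquire from the sync variable's clock) are folded into the AcquireImpl helper here, which presumably wraps that same pair of calls. Conceptually, an acquiring atomic load merges the sync variable's vector clock into the loading thread's clock, so the reader inherits everything the last releasing thread had observed. A minimal standalone sketch of that merge, using an illustrative VClock type rather than TSan's real ThreadClock/SyncClock:

#include <algorithm>
#include <cstdint>
#include <vector>

// Toy vector clock; TSan's real clocks are packed and cached, but the
// acquire rule is the same element-wise maximum.
struct VClock {
  std::vector<uint64_t> epoch;  // epoch[tid] = latest event observed from tid

  // Acquire: after the load, this thread must know everything the last
  // releasing thread knew when it updated the sync variable.
  void Acquire(const VClock &sync) {
    if (sync.epoch.size() > epoch.size())
      epoch.resize(sync.epoch.size(), 0);
    for (size_t i = 0; i < sync.epoch.size(); ++i)
      epoch[i] = std::max(epoch[i], sync.epoch[i]);
  }
};
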
@@ -271,13 +270,15 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
// Strictly speaking, even a relaxed store cuts off a release sequence,
// so we must reset the clock.
if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
- *a = v;
+ *a = v; // as if atomic
return;
}
__sync_synchronize();
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
- thr->clock.set(thr->tid, thr->fast_state.epoch());
- thr->clock.ReleaseStore(&s->clock);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseImpl(thr, pc, &s->clock);
*a = v;
s->mtx.Unlock();
// Trailing memory barrier to provide sequential consistency
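
Two things change in the store path: the epoch increment is now explicitly paired with a trace event (the in-line comment explains why: the epoch effectively indexes the per-thread trace, so bumping it without appending an event would leave a hole when a report is later reconstructed), and the clock bookkeeping moves behind ReleaseImpl. Conceptually, the release side is the mirror image of the acquire sketch above: the writer publishes its clock into the sync variable's clock so that a later acquiring load can pick it up. An illustrative sketch, again with a toy VClock rather than TSan's types:

#include <algorithm>
#include <cstdint>
#include <vector>

struct VClock {
  std::vector<uint64_t> epoch;  // epoch[tid] = latest event observed from tid
};

// Toy release: merge the writer's history into the sync variable's clock so
// a subsequent acquire (see the Acquire sketch above) can inherit it.
void Release(const VClock &thread_clock, VClock *sync_clock) {
  if (thread_clock.epoch.size() > sync_clock->epoch.size())
    sync_clock->epoch.resize(thread_clock.epoch.size(), 0);
  for (size_t i = 0; i < thread_clock.epoch.size(); ++i)
    sync_clock->epoch[i] =
        std::max(sync_clock->epoch[i], thread_clock.epoch[i]);
}
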
@@ -291,13 +292,15 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
SyncVar *s = 0;
if (mo != mo_relaxed) {
s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
if (IsAcqRelOrder(mo))
- thr->clock.acq_rel(&s->clock);
+ AcquireReleaseImpl(thr, pc, &s->clock);
else if (IsReleaseOrder(mo))
- thr->clock.release(&s->clock);
+ ReleaseImpl(thr, pc, &s->clock);
else if (IsAcquireOrder(mo))
- thr->clock.acquire(&s->clock);
+ AcquireImpl(thr, pc, &s->clock);
}
v = F(a, v);
if (s)
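
The RMW path now does the same epoch/trace pairing and then dispatches on the memory order: acq_rel and seq_cst operations take both clock directions, release-only orders take the release side, acquire-only orders the acquire side. The property being modeled is ordinary C++11 ordering, so a plain std::atomic example (nothing TSan-specific) illustrates the acq_rel case that the AcquireReleaseImpl branch has to cover:

#include <atomic>
#include <cassert>
#include <thread>

int data_a = 0, data_b = 0;
std::atomic<int> flag{0};

int main() {
  // A publishes data_a with a release store.
  std::thread a([] { data_a = 1; flag.store(1, std::memory_order_release); });
  // B consumes A's publication and republishes together with its own write:
  // the acq_rel RMW must act as acquire *and* release.
  std::thread b([] {
    while (flag.load(std::memory_order_acquire) != 1) {}
    data_b = 2;
    flag.fetch_add(1, std::memory_order_acq_rel);
  });
  // C acquires from B's RMW and must observe both plain writes.
  std::thread c([] {
    while (flag.load(std::memory_order_acquire) != 2) {}
    assert(data_a == 1 && data_b == 2);
  });
  a.join(); b.join(); c.join();
  return 0;
}
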
@@ -355,13 +358,15 @@ static bool AtomicCAS(ThreadState *thr, uptr pc,
SyncVar *s = 0;
if (mo != mo_relaxed) {
s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
- thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
if (IsAcqRelOrder(mo))
- thr->clock.acq_rel(&s->clock);
+ AcquireReleaseImpl(thr, pc, &s->clock);
else if (IsReleaseOrder(mo))
- thr->clock.release(&s->clock);
+ ReleaseImpl(thr, pc, &s->clock);
else if (IsAcquireOrder(mo))
- thr->clock.acquire(&s->clock);
+ AcquireImpl(thr, pc, &s->clock);
}
T cc = *c;
T pr = func_cas(a, cc, v);
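
The CAS path mirrors the RMW path above, and the last two context lines show the strong-CAS shape: read the comparand through c, run func_cas to get the previously stored value, and (presumably, in the lines cut off here) treat the operation as successful when that previous value equals the comparand, otherwise report the observed value back to the caller. A compilable sketch of that shape built on the GCC __sync_val_compare_and_swap builtin; the CasStrong name and the write-back behaviour are illustrative assumptions, not the exact TSan code:

#include <cstdio>

// Strong compare-and-swap: returns true and installs 'desired' if *addr still
// holds *expected; otherwise reports the value actually observed.
template <typename T>
bool CasStrong(volatile T *addr, T *expected, T desired) {
  T prev = __sync_val_compare_and_swap(addr, *expected, desired);
  if (prev == *expected)
    return true;                 // swap happened
  *expected = prev;              // hand the caller the conflicting value
  return false;
}

int main() {
  int x = 1, expected = 1;
  bool ok = CasStrong(&x, &expected, 5);
  std::printf("ok=%d x=%d expected=%d\n", ok, x, expected);  // ok=1 x=5 expected=1
  return 0;
}
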