Diffstat (limited to 'libsanitizer/tsan/tsan_rtl.cpp')
-rw-r--r--   libsanitizer/tsan/tsan_rtl.cpp   51
1 file changed, 10 insertions(+), 41 deletions(-)
diff --git a/libsanitizer/tsan/tsan_rtl.cpp b/libsanitizer/tsan/tsan_rtl.cpp
index 0efa99788ab..bcf489a71d5 100644
--- a/libsanitizer/tsan/tsan_rtl.cpp
+++ b/libsanitizer/tsan/tsan_rtl.cpp
@@ -77,7 +77,7 @@ void OnInitialize() {
}
#endif
-static char thread_registry_placeholder[sizeof(ThreadRegistry)];
+static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadContextBase *CreateThreadContext(u32 tid) {
// Map thread trace when context is created.
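The ALIGNED(64) addition matters because this buffer is the placement-new storage for the ThreadRegistry constructed in Context::Context below; a bare char array carries no alignment guarantee suitable for the object, and 64 bytes also keeps the registry on its own cache line. A minimal sketch of the same pattern, using standard alignas in place of the sanitizer-internal ALIGNED macro and a stand-in Registry type:

#include <new>

struct Registry {
  long counters[8];
};

// Static storage with explicit alignment; placement new requires the buffer
// to satisfy alignof(Registry), and 64 bytes matches a typical cache line.
alignas(64) static char registry_placeholder[sizeof(Registry)];

Registry *MakeRegistry() {
  // Construct the object in the preallocated buffer: no heap allocation,
  // usable before the allocator itself is initialized.
  return new (registry_placeholder) Registry();
}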
@@ -115,15 +115,15 @@ static const u32 kThreadQuarantineSize = 64;
Context::Context()
: initialized(),
- report_mtx(MutexTypeReport, StatMtxReport),
+ report_mtx(MutexTypeReport),
nreported(),
nmissed_expected(),
thread_registry(new (thread_registry_placeholder) ThreadRegistry(
CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse)),
- racy_mtx(MutexTypeRacy, StatMtxRacy),
+ racy_mtx(MutexTypeRacy),
racy_stacks(),
racy_addresses(),
- fired_suppressions_mtx(MutexTypeFired, StatMtxFired),
+ fired_suppressions_mtx(MutexTypeFired),
clock_alloc(LINKER_INITIALIZED, "clock allocator") {
fired_suppressions.reserve(8);
}
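Dropping the second constructor argument is part of this commit's removal of the TSAN_COLLECT_STATS machinery: internal mutexes no longer carry a per-mutex statistics counter and are identified by their MutexType tag alone. A rough sketch of the narrowed constructor shape (hypothetical class, not the actual sanitizer mutex):

enum MutexType { MutexTypeReport, MutexTypeRacy, MutexTypeFired };

class InternalMutex {
 public:
  // Only the type tag remains; the old (MutexType, StatType) pair is gone.
  explicit InternalMutex(MutexType type) : type_(type) {}

 private:
  MutexType type_;
};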
@@ -522,18 +522,14 @@ int Finalize(ThreadState *thr) {
failed = OnFinalize(failed);
-#if TSAN_COLLECT_STATS
- StatAggregate(ctx->stat, thr->stat);
- StatOutput(ctx->stat);
-#endif
-
return failed ? common_flags()->exitcode : 0;
}
#if !SANITIZER_GO
-void ForkBefore(ThreadState *thr, uptr pc) {
+void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
ctx->thread_registry->Lock();
ctx->report_mtx.Lock();
+ ScopedErrorReportLock::Lock();
// Suppress all reports in the pthread_atfork callbacks.
// Reports will deadlock on the report_mtx.
// We could ignore sync operations as well,
@@ -545,16 +541,18 @@ void ForkBefore(ThreadState *thr, uptr pc) {
thr->ignore_interceptors++;
}
-void ForkParentAfter(ThreadState *thr, uptr pc) {
+void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports--; // Enabled in ForkBefore.
thr->ignore_interceptors--;
+ ScopedErrorReportLock::Unlock();
ctx->report_mtx.Unlock();
ctx->thread_registry->Unlock();
}
-void ForkChildAfter(ThreadState *thr, uptr pc) {
+void ForkChildAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports--; // Enabled in ForkBefore.
thr->ignore_interceptors--;
+ ScopedErrorReportLock::Unlock();
ctx->report_mtx.Unlock();
ctx->thread_registry->Unlock();
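The three fork hooks follow the standard pthread_atfork discipline: acquire every lock that must be consistent across fork() in the prepare hook, then release in both the parent and the child, so the child never inherits a mutex held by a thread that no longer exists; the new ScopedErrorReportLock::Lock()/Unlock() pair extends this to the sanitizer-common report lock. The NO_THREAD_SAFETY_ANALYSIS annotations are needed because the lock/unlock pairs are split across functions, which Clang's analysis cannot follow. The same pattern in plain pthreads, as a minimal sketch:

#include <pthread.h>

static pthread_mutex_t report_mtx = PTHREAD_MUTEX_INITIALIZER;

// prepare: runs in the calling thread before fork(), so the lock is held
// (and therefore in a consistent state) when the address space is copied.
static void Prepare() { pthread_mutex_lock(&report_mtx); }

// parent/child: run after fork() in each process; both must release.
static void ReleaseLocks() { pthread_mutex_unlock(&report_mtx); }

void InstallForkHooks() {
  pthread_atfork(Prepare, ReleaseLocks, ReleaseLocks);
}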
@@ -693,9 +691,6 @@ ALWAYS_INLINE
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
u64 *shadow_mem, Shadow cur) {
- StatInc(thr, StatMop);
- StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
- StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
// This potentially can live in an MMX/SSE scratch register.
// The required intrinsics are:
@@ -752,7 +747,6 @@ void MemoryAccessImpl1(ThreadState *thr, uptr addr,
return;
// choose a random candidate slot and replace it
StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
- StatInc(thr, StatShadowReplace);
return;
RACE:
HandleRace(thr, shadow_mem, cur, old);
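The surviving StoreShadow line is the eviction policy for a full shadow cell: each application word has kShadowCnt shadow slots, and when none can absorb the new access, one slot is overwritten, indexed by the current epoch as a cheap pseudo-random choice. Sketched standalone (slot count assumed):

#include <stdint.h>

static const uint64_t kShadowCnt = 4;  // slots per application word (assumed)

void ReplaceRandomSlot(uint64_t *shadow_mem, uint64_t epoch,
                       uint64_t store_word) {
  // epoch % kShadowCnt walks the slots over time, approximating a random
  // victim without needing an RNG on the hot path.
  shadow_mem[epoch % kShadowCnt] = store_word;
}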
@@ -891,19 +885,11 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
// Access to .rodata section, no races here.
// Measurements show that it can be 10-20% of all memory accesses.
- StatInc(thr, StatMop);
- StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
- StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
- StatInc(thr, StatMopRodata);
return;
}
FastState fast_state = thr->fast_state;
if (UNLIKELY(fast_state.GetIgnoreBit())) {
- StatInc(thr, StatMop);
- StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
- StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
- StatInc(thr, StatMopIgnored);
return;
}
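Both early returns above are hot-path filters that now simply return instead of bumping statistics: a read whose shadow word equals the kShadowRodata marker is a read of immutable data, which per the comment covers 10-20% of all accesses, and an access with the thread's ignore bit set is excluded from analysis entirely. A hedged sketch of the rodata check (marker value assumed):

#include <stdint.h>

static const uint64_t kShadowRodata = ~0ull;  // assumed marker value

// Reads of .rodata cannot race: the loader maps it read-only, so the slow
// path can be skipped entirely whenever the shadow carries the marker.
bool IsRodataRead(const uint64_t *shadow_mem, bool is_write) {
  return !is_write && *shadow_mem == kShadowRodata;
}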
@@ -914,10 +900,6 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
thr->fast_synch_epoch, kAccessIsWrite))) {
- StatInc(thr, StatMop);
- StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
- StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
- StatInc(thr, StatMopSame);
return;
}
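ContainsSameAccess is the remaining fast path here and in MemoryAccessImpl below: if a shadow slot already records an equivalent access by this thread since its last synchronization point, repeating it cannot produce a new report, so the slow path is skipped. A simplified sketch of the idea (field layout invented; the real shadow is bit-packed into a single u64):

#include <stdint.h>
#include <stddef.h>

static const size_t kShadowCnt = 4;

struct ShadowSlot {
  uint32_t tid;
  uint64_t epoch;
  bool is_write;
};

// Returns true if the current access is already covered by a slot written
// by the same thread since its last synchronization point; a recorded write
// also covers a later read of the same location.
bool ContainsSameAccess(const ShadowSlot *slots, uint32_t tid,
                        uint64_t synch_epoch, bool is_write) {
  for (size_t i = 0; i < kShadowCnt; i++) {
    const ShadowSlot &s = slots[i];
    if (s.tid == tid && s.epoch >= synch_epoch && s.is_write >= is_write)
      return true;
  }
  return false;
}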
@@ -939,10 +921,6 @@ void MemoryAccessImpl(ThreadState *thr, uptr addr,
u64 *shadow_mem, Shadow cur) {
if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
thr->fast_synch_epoch, kAccessIsWrite))) {
- StatInc(thr, StatMop);
- StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
- StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
- StatInc(thr, StatMopSame);
return;
}
@@ -999,7 +977,6 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
// Reset middle part.
u64 *p1 = p;
p = RoundDown(end, kPageSize);
- UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
if (!MmapFixedSuperNoReserve((uptr)p1, (uptr)p - (uptr)p1))
Die();
// Set the ending.
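The deleted UnmapOrDie call was redundant: MmapFixedSuperNoReserve maps over the range with MAP_FIXED semantics, which atomically replaces whatever pages were there, so unmapping first only adds a window in which the shadow range is briefly absent. The underlying mmap behavior, sketched directly:

#include <sys/mman.h>
#include <stddef.h>

// MAP_FIXED discards any existing pages in [addr, addr+size) and installs
// fresh zero-filled ones in a single step, so no prior munmap is needed.
void *RemapZeroed(void *addr, size_t size) {
  return mmap(addr, size, PROT_READ | PROT_WRITE,
              MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
}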
@@ -1059,7 +1036,6 @@ void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
- StatInc(thr, StatFuncEnter);
DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
if (kCollectHistory) {
thr->fast_state.IncrementEpoch();
@@ -1081,7 +1057,6 @@ void FuncEntry(ThreadState *thr, uptr pc) {
ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
- StatInc(thr, StatFuncExit);
DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
if (kCollectHistory) {
thr->fast_state.IncrementEpoch();
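With the counters gone, FuncEntry and FuncExit reduce to their essential work: when history collection is enabled, each bumps the thread-local epoch and appends a trace event so a later report can reconstruct the call stack at the moment of a racing access. Schematically (hypothetical layout; the real trace is bit-packed and sized by runtime flags):

#include <stdint.h>
#include <stddef.h>

static const size_t kTraceSize = 1024;  // assumed ring-buffer length

struct ThreadTrace {
  uint64_t epoch;
  uint64_t events[kTraceSize];

  // One epoch tick per event; the ring buffer overwrites the oldest entry.
  void AddEvent(uint64_t pc) {
    ++epoch;
    events[epoch % kTraceSize] = pc;
  }
};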
@@ -1156,12 +1131,6 @@ void build_consistency_debug() {}
void build_consistency_release() {}
#endif
-#if TSAN_COLLECT_STATS
-void build_consistency_stats() {}
-#else
-void build_consistency_nostats() {}
-#endif
-
} // namespace __tsan
#if !SANITIZER_GO