Diffstat (limited to 'libsanitizer/tsan/tsan_rtl.h')
-rw-r--r--  libsanitizer/tsan/tsan_rtl.h  44
1 files changed, 41 insertions, 3 deletions
diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h
index b472c0f77df..e1e121e2ee0 100644
--- a/libsanitizer/tsan/tsan_rtl.h
+++ b/libsanitizer/tsan/tsan_rtl.h
@@ -235,7 +235,7 @@ struct ThreadState {
 } ALIGNED(SANITIZER_CACHE_LINE_SIZE);
 
 #if !SANITIZER_GO
-#if SANITIZER_MAC || SANITIZER_ANDROID
+#if SANITIZER_APPLE || SANITIZER_ANDROID
 ThreadState *cur_thread();
 void set_cur_thread(ThreadState *thr);
 void cur_thread_finalize();
@@ -256,7 +256,7 @@ inline void set_cur_thread(ThreadState *thr) {
   reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
 }
 inline void cur_thread_finalize() { }
-#  endif  // SANITIZER_MAC || SANITIZER_ANDROID
+#  endif  // SANITIZER_APPLE || SANITIZER_ANDROID
 #endif  // SANITIZER_GO
 
 class ThreadContext final : public ThreadContextBase {
@@ -314,9 +314,43 @@ struct Context {
 
   ThreadRegistry thread_registry;
 
+  // This is used to prevent a very unlikely but very pathological behavior.
+  // Since memory access handling is not synchronized with DoReset,
+  // a thread running concurrently with DoReset can leave a bogus shadow value
+  // that will be later falsely detected as a race. For such false races
+  // RestoreStack will return false and we will not report it.
+  // However, consider that a thread leaves a whole lot of such bogus values
+  // and these values are later read by a whole lot of threads.
+  // This will cause massive amounts of ReportRace calls and lots of
+  // serialization. In very pathological cases the resulting slowdown
+  // can be >100x. This is very unlikely, but it was presumably observed
+  // in practice: https://github.com/google/sanitizers/issues/1552
+  // If this happens, previous access sid+epoch will be the same for all of
+  // these false races b/c if the thread will try to increment epoch, it will
+  // notice that DoReset has happened and will stop producing bogus shadow
+  // values. So, last_spurious_race is used to remember the last sid+epoch
+  // for which RestoreStack returned false. Then it is used to filter out
+  // races with the same sid+epoch very early and quickly.
+  // It is of course possible that multiple threads left multiple bogus shadow
+  // values and all of them are read by lots of threads at the same time.
+  // In such case last_spurious_race will only be able to deduplicate a few
+  // races from one thread, then few from another and so on. An alternative
+  // would be to hold an array of such sid+epoch, but we consider such scenario
+  // as even less likely.
+  // Note: this can lead to some rare false negatives as well:
+  // 1. When a legit access with the same sid+epoch participates in a race
+  // as the "previous" memory access, it will be wrongly filtered out.
+  // 2. When RestoreStack returns false for a legit memory access because it
+  // was already evicted from the thread trace, we will still remember it in
+  // last_spurious_race. Then if there is another racing memory access from
+  // the same thread that happened in the same epoch, but was stored in the
+  // next thread trace part (which is still preserved in the thread trace),
+  // we will also wrongly filter it out while RestoreStack would actually
+  // succeed for that second memory access.
+  RawShadow last_spurious_race;
+
   Mutex racy_mtx;
   Vector<RacyStacks> racy_stacks;
-  Vector<RacyAddress> racy_addresses;
   // Number of fired suppressions may be large enough.
   Mutex fired_suppressions_mtx;
   InternalMmapVector<FiredSuppression> fired_suppressions;
@@ -338,6 +372,10 @@ struct Context {
   uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
   uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
   uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
+#if SANITIZER_GO
+  uptr mapped_shadow_begin;
+  uptr mapped_shadow_end;
+#endif
 };
 
 extern Context *ctx;  // The one and the only global runtime context.
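
The long comment added in the third hunk describes a simple deduplication scheme: remember the sid+epoch of the last access for which RestoreStack failed, and drop any later "race" against that same sid+epoch before doing any expensive reporting work. The standalone C++ sketch below only illustrates that filtering logic under simplified assumptions; SidEpoch, RestoreStackForReport and TryReportRace are hypothetical stand-ins invented for this illustration, not TSan's real types or functions.

// Minimal sketch of the last_spurious_race filtering idea (hypothetical names,
// not the actual TSan implementation).
#include <cstdio>

// Identifies the "previous" access of a potential race by slot id + epoch.
struct SidEpoch {
  unsigned sid;
  unsigned epoch;
  bool operator==(const SidEpoch &other) const {
    return sid == other.sid && epoch == other.epoch;
  }
};

// Stands in for Context::last_spurious_race.
static SidEpoch last_spurious_race = {0, 0};

// Stands in for RestoreStack(): pretend epoch 0 marks a bogus shadow value
// left behind by a thread racing with DoReset, for which no stack exists.
static bool RestoreStackForReport(SidEpoch prev) { return prev.epoch != 0; }

static void TryReportRace(SidEpoch prev) {
  // Cheap early filter: if we already failed to restore a stack for this
  // sid+epoch, every further "race" against it is the same spurious value,
  // so drop it before doing any expensive reporting work.
  if (prev == last_spurious_race) {
    std::printf("filtered spurious race sid=%u epoch=%u\n", prev.sid, prev.epoch);
    return;
  }
  if (!RestoreStackForReport(prev)) {
    // Remember this sid+epoch so later hits on the same bogus value are
    // filtered by the check above instead of retrying RestoreStack.
    last_spurious_race = prev;
    std::printf("no stack for sid=%u epoch=%u, remembered\n", prev.sid, prev.epoch);
    return;
  }
  std::printf("reporting race sid=%u epoch=%u\n", prev.sid, prev.epoch);
}

int main() {
  TryReportRace({1, 0});  // bogus value: RestoreStack fails, sid+epoch remembered
  TryReportRace({1, 0});  // same sid+epoch: filtered early
  TryReportRace({1, 7});  // legit value: reported as usual
  return 0;
}

As the added comment notes, a single remembered sid+epoch can only deduplicate one bogus value at a time; the patch accepts that trade-off rather than keeping an array of them, since the pathological case is already considered very unlikely.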