Diffstat (limited to 'libsanitizer/tsan')
 libsanitizer/tsan/tsan_dense_alloc.h                | 115
 libsanitizer/tsan/tsan_dispatch_defs.h              |   2
 libsanitizer/tsan/tsan_fd.cpp                       |  10
 libsanitizer/tsan/tsan_fd.h                         |   2
 libsanitizer/tsan/tsan_flags.inc                    |   8
 libsanitizer/tsan/tsan_interceptors_libdispatch.cpp |   4
 libsanitizer/tsan/tsan_interceptors_mac.cpp         |   4
 libsanitizer/tsan/tsan_interceptors_posix.cpp       |  76
 libsanitizer/tsan/tsan_malloc_mac.cpp               |   2
 libsanitizer/tsan/tsan_platform.h                   |   3
 libsanitizer/tsan/tsan_platform_linux.cpp           |   6
 libsanitizer/tsan/tsan_platform_mac.cpp             |  63
 libsanitizer/tsan/tsan_report.cpp                   |   7
 libsanitizer/tsan/tsan_report.h                     |   1
 libsanitizer/tsan/tsan_rtl.cpp                      |  99
 libsanitizer/tsan/tsan_rtl.h                        |  44
 libsanitizer/tsan/tsan_rtl_access.cpp               |   9
 libsanitizer/tsan/tsan_rtl_ppc64.S                  |   1
 libsanitizer/tsan/tsan_rtl_report.cpp               |  55
 libsanitizer/tsan/tsan_rtl_thread.cpp               |   2
 libsanitizer/tsan/tsan_shadow.h                     |  10
 21 files changed, 308 insertions(+), 215 deletions(-)
diff --git a/libsanitizer/tsan/tsan_dense_alloc.h b/libsanitizer/tsan/tsan_dense_alloc.h
index 7a39a39d51d..2eaff39057b 100644
--- a/libsanitizer/tsan/tsan_dense_alloc.h
+++ b/libsanitizer/tsan/tsan_dense_alloc.h
@@ -85,14 +85,7 @@ class DenseSlabAlloc {
}
void FlushCache(Cache *c) {
- if (!c->pos)
- return;
- SpinMutexLock lock(&mtx_);
- while (c->pos) {
- IndexT idx = c->cache[--c->pos];
- *(IndexT*)Map(idx) = freelist_;
- freelist_ = idx;
- }
+ while (c->pos) Drain(c);
}
void InitCache(Cache *c) {
@@ -106,7 +99,7 @@ class DenseSlabAlloc {
template <typename Func>
void ForEach(Func func) {
- SpinMutexLock lock(&mtx_);
+ Lock lock(&mtx_);
uptr fillpos = atomic_load_relaxed(&fillpos_);
for (uptr l1 = 0; l1 < fillpos; l1++) {
for (IndexT l2 = l1 == 0 ? 1 : 0; l2 < kL2Size; l2++) func(&map_[l1][l2]);
@@ -115,48 +108,86 @@ class DenseSlabAlloc {
private:
T *map_[kL1Size];
- SpinMutex mtx_;
- IndexT freelist_ = {0};
+ Mutex mtx_;
+ // The freelist is organized as a lock-free stack of batches of nodes.
+ // The stack itself uses Block::next links, while the batch within each
+ // stack node uses Block::batch links.
+ // The low 32 bits of freelist_ are the node index; the top 32 bits are the ABA counter.
+ atomic_uint64_t freelist_ = {0};
atomic_uintptr_t fillpos_ = {0};
const char *const name_;
- void Refill(Cache *c) {
- SpinMutexLock lock(&mtx_);
- if (freelist_ == 0) {
- uptr fillpos = atomic_load_relaxed(&fillpos_);
- if (fillpos == kL1Size) {
- Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
- name_, kL1Size, kL2Size);
- Die();
- }
- VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n", name_,
- fillpos, kL1Size, kL2Size);
- T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
- // Reserve 0 as invalid index.
- IndexT start = fillpos == 0 ? 1 : 0;
- for (IndexT i = start; i < kL2Size; i++) {
- new(batch + i) T;
- *(IndexT *)(batch + i) = i + 1 + fillpos * kL2Size;
- }
- *(IndexT*)(batch + kL2Size - 1) = 0;
- freelist_ = fillpos * kL2Size + start;
- map_[fillpos] = batch;
- atomic_store_relaxed(&fillpos_, fillpos + 1);
- }
- for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
- IndexT idx = freelist_;
+ struct Block {
+ IndexT next;
+ IndexT batch;
+ };
+
+ Block *MapBlock(IndexT idx) { return reinterpret_cast<Block *>(Map(idx)); }
+
+ static constexpr u64 kCounterInc = 1ull << 32;
+ static constexpr u64 kCounterMask = ~(kCounterInc - 1);
+
+ NOINLINE void Refill(Cache *c) {
+ // Pop 1 batch of nodes from the freelist.
+ IndexT idx;
+ u64 xchg;
+ u64 cmp = atomic_load(&freelist_, memory_order_acquire);
+ do {
+ idx = static_cast<IndexT>(cmp);
+ if (!idx)
+ return AllocSuperBlock(c);
+ Block *ptr = MapBlock(idx);
+ xchg = ptr->next | (cmp & kCounterMask);
+ } while (!atomic_compare_exchange_weak(&freelist_, &cmp, xchg,
+ memory_order_acq_rel));
+ // Unpack it into c->cache.
+ while (idx) {
c->cache[c->pos++] = idx;
- freelist_ = *(IndexT*)Map(idx);
+ idx = MapBlock(idx)->batch;
}
}
- void Drain(Cache *c) {
- SpinMutexLock lock(&mtx_);
- for (uptr i = 0; i < Cache::kSize / 2; i++) {
+ NOINLINE void Drain(Cache *c) {
+ // Build a batch of at most Cache::kSize / 2 nodes linked by Block::batch.
+ IndexT head_idx = 0;
+ for (uptr i = 0; i < Cache::kSize / 2 && c->pos; i++) {
IndexT idx = c->cache[--c->pos];
- *(IndexT*)Map(idx) = freelist_;
- freelist_ = idx;
+ Block *ptr = MapBlock(idx);
+ ptr->batch = head_idx;
+ head_idx = idx;
+ }
+ // Push it onto the freelist stack.
+ Block *head = MapBlock(head_idx);
+ u64 xchg;
+ u64 cmp = atomic_load(&freelist_, memory_order_acquire);
+ do {
+ head->next = static_cast<IndexT>(cmp);
+ xchg = head_idx | ((cmp & kCounterMask) + kCounterInc);
+ } while (!atomic_compare_exchange_weak(&freelist_, &cmp, xchg,
+ memory_order_acq_rel));
+ }
+
+ NOINLINE void AllocSuperBlock(Cache *c) {
+ Lock lock(&mtx_);
+ uptr fillpos = atomic_load_relaxed(&fillpos_);
+ if (fillpos == kL1Size) {
+ Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n", name_, kL1Size,
+ kL2Size);
+ Die();
+ }
+ VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n", name_,
+ fillpos, kL1Size, kL2Size);
+ T *batch = (T *)MmapOrDie(kL2Size * sizeof(T), name_);
+ map_[fillpos] = batch;
+ // Reserve 0 as invalid index.
+ for (IndexT i = fillpos ? 0 : 1; i < kL2Size; i++) {
+ new (batch + i) T;
+ c->cache[c->pos++] = i + fillpos * kL2Size;
+ if (c->pos == Cache::kSize)
+ Drain(c);
}
+ atomic_store_relaxed(&fillpos_, fillpos + 1);
+ CHECK(c->pos);
}
};
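
The rewritten DenseSlabAlloc freelist above is a lock-free Treiber stack over 32-bit node indices, with an ABA counter packed into the top 32 bits of the head word. A minimal standalone sketch of the same packing scheme follows (std::atomic stands in for the sanitizer atomics; the fixed-size `next` array and all names are illustrative, not TSan's):

    #include <atomic>
    #include <cstdint>

    // Head word layout, as in DenseSlabAlloc: low 32 bits = top node index
    // (0 = empty), high 32 bits = ABA counter bumped on every push.
    struct IndexStack {
      static constexpr uint64_t kCounterInc = 1ull << 32;
      static constexpr uint64_t kCounterMask = ~(kCounterInc - 1);
      std::atomic<uint64_t> head{0};
      uint32_t next[1024];  // next[i] links node i to its successor

      void push(uint32_t idx) {
        uint64_t cmp = head.load(std::memory_order_acquire);
        uint64_t xchg;
        do {
          next[idx] = static_cast<uint32_t>(cmp);  // link to current top
          // Bumping the counter makes a stale "pop" CAS fail even if the
          // same index comes back to the top (the classic ABA hazard).
          xchg = idx | ((cmp & kCounterMask) + kCounterInc);
        } while (!head.compare_exchange_weak(cmp, xchg,
                                             std::memory_order_acq_rel));
      }

      uint32_t pop() {  // returns 0 when empty; 0 is reserved as invalid
        uint64_t cmp = head.load(std::memory_order_acquire);
        uint64_t xchg;
        uint32_t idx;
        do {
          idx = static_cast<uint32_t>(cmp);
          if (!idx)
            return 0;
          xchg = next[idx] | (cmp & kCounterMask);  // counter unchanged on pop
        } while (!head.compare_exchange_weak(cmp, xchg,
                                             std::memory_order_acq_rel));
        return idx;
      }
    };

The real Refill/Drain go one step further: each stack node is the head of a batch linked through Block::batch, so a single CAS transfers up to Cache::kSize / 2 nodes at once.
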
diff --git a/libsanitizer/tsan/tsan_dispatch_defs.h b/libsanitizer/tsan/tsan_dispatch_defs.h
index 94e0b50fed3..54c0b0ba4b4 100644
--- a/libsanitizer/tsan/tsan_dispatch_defs.h
+++ b/libsanitizer/tsan/tsan_dispatch_defs.h
@@ -56,7 +56,7 @@ extern const dispatch_block_t _dispatch_data_destructor_munmap;
# define DISPATCH_NOESCAPE
#endif
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
# define SANITIZER_WEAK_IMPORT extern "C" __attribute((weak_import))
#else
# define SANITIZER_WEAK_IMPORT extern "C" __attribute((weak))
diff --git a/libsanitizer/tsan/tsan_fd.cpp b/libsanitizer/tsan/tsan_fd.cpp
index cf8f491fdbf..ab295a69dce 100644
--- a/libsanitizer/tsan/tsan_fd.cpp
+++ b/libsanitizer/tsan/tsan_fd.cpp
@@ -34,6 +34,7 @@ struct FdDesc {
atomic_uintptr_t aux_sync; // FdSync*
Tid creation_tid;
StackID creation_stack;
+ bool closed;
};
struct FdContext {
@@ -120,6 +121,7 @@ static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
}
d->creation_tid = thr->tid;
d->creation_stack = CurrentStackId(thr, pc);
+ d->closed = false;
// This prevents false positives on fd_close_norace3.cpp test.
// The mechanics of the false positive are not completely clear,
// but it happens only if global reset is enabled (flush_memory_ms=1)
@@ -155,7 +157,7 @@ void FdOnFork(ThreadState *thr, uptr pc) {
}
}
-bool FdLocation(uptr addr, int *fd, Tid *tid, StackID *stack) {
+bool FdLocation(uptr addr, int *fd, Tid *tid, StackID *stack, bool *closed) {
for (int l1 = 0; l1 < kTableSizeL1; l1++) {
FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
if (tab == 0)
@@ -166,6 +168,7 @@ bool FdLocation(uptr addr, int *fd, Tid *tid, StackID *stack) {
*fd = l1 * kTableSizeL1 + l2;
*tid = d->creation_tid;
*stack = d->creation_stack;
+ *closed = d->closed;
return true;
}
}
@@ -242,8 +245,9 @@ void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
reinterpret_cast<FdSync *>(
atomic_load(&d->aux_sync, memory_order_relaxed)));
atomic_store(&d->aux_sync, 0, memory_order_relaxed);
- d->creation_tid = kInvalidTid;
- d->creation_stack = kInvalidStackID;
+ d->closed = true;
+ d->creation_tid = thr->tid;
+ d->creation_stack = CurrentStackId(thr, pc);
}
void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
diff --git a/libsanitizer/tsan/tsan_fd.h b/libsanitizer/tsan/tsan_fd.h
index 92625dc4b4a..dddc1d2ab24 100644
--- a/libsanitizer/tsan/tsan_fd.h
+++ b/libsanitizer/tsan/tsan_fd.h
@@ -54,7 +54,7 @@ void FdSocketCreate(ThreadState *thr, uptr pc, int fd);
void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd);
void FdSocketConnecting(ThreadState *thr, uptr pc, int fd);
void FdSocketConnect(ThreadState *thr, uptr pc, int fd);
-bool FdLocation(uptr addr, int *fd, Tid *tid, StackID *stack);
+bool FdLocation(uptr addr, int *fd, Tid *tid, StackID *stack, bool *closed);
void FdOnFork(ThreadState *thr, uptr pc);
uptr File2addr(const char *path);
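
The FdClose/FdLocation changes above repurpose the descriptor slot on close: instead of resetting creation_tid/creation_stack to invalid values, they now record the closing thread, and the new `closed` flag tells the report printer whether to say "created" or "destroyed". A hedged sketch of that bookkeeping, with simplified stand-in types rather than the real FdDesc:

    #include <cstdint>

    using Tid = int;
    using StackID = uint32_t;

    struct FdProvenance {
      Tid tid = -1;       // thread that created or closed the fd
      StackID stack = 0;  // stack id of that event
      bool closed = false;
    };

    // On creation: remember who opened the descriptor and where.
    void OnFdCreate(FdProvenance &d, Tid tid, StackID stack) {
      d = {tid, stack, /*closed=*/false};
    }

    // On close: keep the slot alive but repoint it at the closer, so a
    // later use-after-close race can report "destroyed by thread T at:".
    void OnFdClose(FdProvenance &d, Tid tid, StackID stack) {
      d = {tid, stack, /*closed=*/true};
    }
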
diff --git a/libsanitizer/tsan/tsan_flags.inc b/libsanitizer/tsan/tsan_flags.inc
index 32cf3bbf152..731d776cc89 100644
--- a/libsanitizer/tsan/tsan_flags.inc
+++ b/libsanitizer/tsan/tsan_flags.inc
@@ -23,10 +23,6 @@ TSAN_FLAG(bool, enable_annotations, true,
TSAN_FLAG(bool, suppress_equal_stacks, true,
"Suppress a race report if we've already output another race report "
"with the same stack.")
-TSAN_FLAG(bool, suppress_equal_addresses, true,
- "Suppress a race report if we've already output another race report "
- "on the same address.")
-
TSAN_FLAG(bool, report_bugs, true,
"Turns off bug reporting entirely (useful for benchmarking).")
TSAN_FLAG(bool, report_thread_leaks, true, "Report thread leaks at exit?")
@@ -74,9 +70,9 @@ TSAN_FLAG(int, io_sync, 1,
TSAN_FLAG(bool, die_after_fork, true,
"Die after multi-threaded fork if the child creates new threads.")
TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
-TSAN_FLAG(bool, ignore_interceptors_accesses, SANITIZER_MAC ? true : false,
+TSAN_FLAG(bool, ignore_interceptors_accesses, SANITIZER_APPLE ? true : false,
"Ignore reads and writes from all interceptors.")
-TSAN_FLAG(bool, ignore_noninstrumented_modules, SANITIZER_MAC ? true : false,
+TSAN_FLAG(bool, ignore_noninstrumented_modules, SANITIZER_APPLE ? true : false,
"Interceptors should only detect races when called from instrumented "
"modules.")
TSAN_FLAG(bool, shared_ptr_interceptor, true,
diff --git a/libsanitizer/tsan/tsan_interceptors_libdispatch.cpp b/libsanitizer/tsan/tsan_interceptors_libdispatch.cpp
index cbbb7ecb239..88d5f0a4811 100644
--- a/libsanitizer/tsan/tsan_interceptors_libdispatch.cpp
+++ b/libsanitizer/tsan/tsan_interceptors_libdispatch.cpp
@@ -19,7 +19,7 @@
#include "BlocksRuntime/Block.h"
#include "tsan_dispatch_defs.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
# include <Availability.h>
#endif
@@ -225,7 +225,7 @@ DISPATCH_INTERCEPT(dispatch_barrier, true)
// dispatch_async_and_wait() and friends were introduced in macOS 10.14.
// Linking of these interceptors fails when using an older SDK.
-#if !SANITIZER_MAC || defined(__MAC_10_14)
+#if !SANITIZER_APPLE || defined(__MAC_10_14)
// macOS 10.14 is greater than our minimal deployment target. To ensure we
// generate a weak reference so the TSan dylib continues to work on older
// systems, we need to forward declare the intercepted functions as "weak
diff --git a/libsanitizer/tsan/tsan_interceptors_mac.cpp b/libsanitizer/tsan/tsan_interceptors_mac.cpp
index ed064150d00..1ee47bcd123 100644
--- a/libsanitizer/tsan/tsan_interceptors_mac.cpp
+++ b/libsanitizer/tsan/tsan_interceptors_mac.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "interception/interception.h"
#include "tsan_interceptors.h"
@@ -518,4 +518,4 @@ STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag,
} // namespace __tsan
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
diff --git a/libsanitizer/tsan/tsan_interceptors_posix.cpp b/libsanitizer/tsan/tsan_interceptors_posix.cpp
index 60ca9633868..17f6b1f472d 100644
--- a/libsanitizer/tsan/tsan_interceptors_posix.cpp
+++ b/libsanitizer/tsan/tsan_interceptors_posix.cpp
@@ -35,7 +35,7 @@
using namespace __tsan;
-#if SANITIZER_FREEBSD || SANITIZER_MAC
+#if SANITIZER_FREEBSD || SANITIZER_APPLE
#define stdout __stdoutp
#define stderr __stderrp
#endif
@@ -102,14 +102,14 @@ extern __sanitizer_FILE __sF[];
#else
extern __sanitizer_FILE *stdout, *stderr;
#endif
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
#else
const int PTHREAD_MUTEX_RECURSIVE = 2;
const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
#endif
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
const int EPOLL_CTL_ADD = 1;
#endif
const int SIGILL = 4;
@@ -119,7 +119,7 @@ const int SIGFPE = 8;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
const int SIGTERM = 15;
-#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
+#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
const int SIGBUS = 10;
const int SIGSYS = 12;
#else
@@ -129,7 +129,7 @@ const int SIGSYS = 31;
void *const MAP_FAILED = (void*)-1;
#if SANITIZER_NETBSD
const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
-#elif !SANITIZER_MAC
+#elif !SANITIZER_APPLE
const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
#endif
const int MAP_FIXED = 0x10;
@@ -142,7 +142,7 @@ typedef __sanitizer::u16 mode_t;
# define F_TLOCK 2 /* Test and lock a region for exclusive use. */
# define F_TEST 3 /* Test a region for other processes locks. */
-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
+#if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
const int SA_SIGINFO = 0x40;
const int SIG_SETMASK = 3;
#elif defined(__mips__)
@@ -189,7 +189,7 @@ struct InterceptorContext {
// in a single cache line if possible (it's accessed in every interceptor).
ALIGNED(64) LibIgnore libignore;
__sanitizer_sigaction sigactions[kSigCount];
-#if !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_APPLE && !SANITIZER_NETBSD
unsigned finalize_key;
#endif
@@ -461,7 +461,7 @@ static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
return res;
}
-#if !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_APPLE && !SANITIZER_NETBSD
static void on_exit_callback_installed_at(int status, void *arg) {
ThreadState *thr = cur_thread();
AtExitCtx *ctx = (AtExitCtx*)arg;
@@ -553,11 +553,11 @@ static void LongJmp(ThreadState *thr, uptr *env) {
// FIXME: put everything below into a common extern "C" block?
extern "C" void __tsan_setjmp(uptr sp) { SetJmp(cur_thread_init(), sp); }
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
TSAN_INTERCEPTOR(int, setjmp, void *env);
TSAN_INTERCEPTOR(int, _setjmp, void *env);
TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
-#else // SANITIZER_MAC
+#else // SANITIZER_APPLE
#if SANITIZER_NETBSD
#define setjmp_symname __setjmp14
@@ -619,7 +619,7 @@ DEFINE_REAL(int, sigsetjmp_symname, void *env)
#if !SANITIZER_NETBSD
DEFINE_REAL(int, __sigsetjmp, void *env)
#endif
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
#if SANITIZER_NETBSD
#define longjmp_symname __longjmp14
@@ -658,7 +658,7 @@ TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
}
#endif
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(void*, malloc, uptr size) {
if (in_symbolizer())
return InternalAlloc(size);
@@ -816,7 +816,7 @@ TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
#define TSAN_MAYBE_INTERCEPT_MEMALIGN
#endif
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
if (in_symbolizer())
return InternalAlloc(sz, nullptr, align);
@@ -847,7 +847,7 @@ TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
#define TSAN_MAYBE_INTERCEPT_PVALLOC
#endif
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
if (in_symbolizer()) {
void *p = InternalAlloc(sz, nullptr, align);
@@ -919,7 +919,7 @@ static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g,
// these interceptors with INTERFACE_ATTRIBUTE.
// On OS X, we don't support statically linking, so we just use a regular
// interceptor.
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
#else
#define STDCXX_INTERCEPTOR(rettype, name, ...) \
@@ -962,7 +962,7 @@ void PlatformCleanUpThreadState(ThreadState *thr) {
}
} // namespace __tsan
-#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
static void thread_finalize(void *v) {
uptr iter = (uptr)v;
if (iter > 1) {
@@ -994,7 +994,7 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
ThreadState *thr = cur_thread_init();
// Thread-local state is not initialized yet.
ScopedIgnoreInterceptors ignore;
-#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
ThreadIgnoreBegin(thr, 0);
if (pthread_setspecific(interceptor_ctx()->finalize_key,
(void *)GetPthreadDestructorIterations())) {
@@ -1102,7 +1102,7 @@ TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
{
SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
-#if !SANITIZER_MAC && !SANITIZER_ANDROID
+#if !SANITIZER_APPLE && !SANITIZER_ANDROID
CHECK_EQ(thr, &cur_thread_placeholder);
#endif
}
@@ -1271,7 +1271,7 @@ INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
#endif
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
void *reltime) {
void *cond = init_cond(c);
@@ -1348,7 +1348,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
return res;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
int res = REAL(pthread_mutex_timedlock)(m, abstime);
@@ -1359,7 +1359,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
}
#endif
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
int res = REAL(pthread_spin_init)(m, pshared);
@@ -1442,7 +1442,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
return res;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
@@ -1472,7 +1472,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
return res;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
@@ -1490,7 +1490,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
return res;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
@@ -1524,7 +1524,7 @@ TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
return errno_EINVAL;
atomic_uint32_t *a;
- if (SANITIZER_MAC)
+ if (SANITIZER_APPLE)
a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
else if (SANITIZER_NETBSD)
a = static_cast<atomic_uint32_t*>
@@ -1534,7 +1534,7 @@ TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
// Mac OS X appears to use pthread_once() where calling BlockingRegion hooks
// result in crashes due to too little stack space.
- if (guard_acquire(thr, pc, a, !SANITIZER_MAC)) {
+ if (guard_acquire(thr, pc, a, !SANITIZER_APPLE)) {
(*f)();
guard_release(thr, pc, a, kGuardDone);
}
@@ -1661,7 +1661,7 @@ TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
return newfd2;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
int newfd2 = REAL(dup3)(oldfd, newfd, flags);
@@ -1805,7 +1805,7 @@ TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
return res;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
int res = REAL(pipe2)(pipefd, flags);
@@ -2263,7 +2263,7 @@ TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
}
#endif
-#if !SANITIZER_MAC && !SANITIZER_ANDROID
+#if !SANITIZER_APPLE && !SANITIZER_ANDROID
typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
void *data);
struct dl_iterate_phdr_data {
@@ -2320,7 +2320,7 @@ struct TsanInterceptorContext {
const uptr pc;
};
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
static void HandleRecvmsg(ThreadState *thr, uptr pc,
__sanitizer_msghdr *msg) {
int fds[64];
@@ -2460,7 +2460,7 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
off); \
} while (false)
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
((TsanInterceptorContext *)ctx)->pc, msg)
@@ -2521,7 +2521,7 @@ int sigaction_impl(int sig, const __sanitizer_sigaction *act,
sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
sizeof(sigactions[sig].sa_mask));
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
sigactions[sig].sa_restorer = act->sa_restorer;
#endif
internal_memcpy(&newact, act, sizeof(newact));
@@ -2568,7 +2568,7 @@ struct ScopedSyscall {
}
};
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC
+#if !SANITIZER_FREEBSD && !SANITIZER_APPLE
static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
TSAN_SYSCALL();
MemoryAccessRange(thr, pc, p, s, write);
@@ -2772,7 +2772,7 @@ static void finalize(void *arg) {
Die();
}
-#if !SANITIZER_MAC && !SANITIZER_ANDROID
+#if !SANITIZER_APPLE && !SANITIZER_ANDROID
static void unreachable() {
Report("FATAL: ThreadSanitizer: unreachable called\n");
Die();
@@ -2783,7 +2783,7 @@ static void unreachable() {
SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
void InitializeInterceptors() {
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
// We need to setup it early, because functions like dlsym() can call it.
REAL(memset) = internal_memset;
REAL(memcpy) = internal_memcpy;
@@ -2795,7 +2795,7 @@ void InitializeInterceptors() {
InitializeSignalInterceptors();
InitializeLibdispatchInterceptors();
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
// We can not use TSAN_INTERCEPT to get setjmp addr,
// because it does &setjmp and setjmp is not present in some versions of libc.
using __interception::InterceptFunction;
@@ -2948,7 +2948,7 @@ void InitializeInterceptors() {
TSAN_MAYBE_INTERCEPT__LWP_EXIT;
TSAN_MAYBE_INTERCEPT_THR_EXIT;
-#if !SANITIZER_MAC && !SANITIZER_ANDROID
+#if !SANITIZER_APPLE && !SANITIZER_ANDROID
// Need to setup it, because interceptors check that the function is resolved.
// But atexit is emitted directly into the module, so can't be resolved.
REAL(atexit) = (int(*)(void(*)()))unreachable;
@@ -2963,7 +2963,7 @@ void InitializeInterceptors() {
Die();
}
-#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
Printf("ThreadSanitizer: failed to create thread key\n");
Die();
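
The pthread_once interception above (around the `guard_acquire(thr, pc, a, !SANITIZER_APPLE)` call) treats the platform's once-control word as an atomic guard with init/running/done states. A minimal sketch of that protocol, without TSan's happens-before annotations; the state encoding and all names are illustrative:

    #include <atomic>

    enum : uint32_t { kGuardInit = 0, kGuardRunning = 1, kGuardDone = 2 };

    inline void call_once_sketch(std::atomic<uint32_t> *g, void (*f)()) {
      if (g->load(std::memory_order_acquire) == kGuardDone)
        return;  // fast path: already initialized
      uint32_t expected = kGuardInit;
      if (g->compare_exchange_strong(expected, kGuardRunning,
                                     std::memory_order_acq_rel)) {
        f();  // we won the race: run the init function
        g->store(kGuardDone, std::memory_order_release);
      } else {
        // Someone else is running f(); wait until it publishes kGuardDone.
        while (g->load(std::memory_order_acquire) != kGuardDone) {
        }
      }
    }
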
diff --git a/libsanitizer/tsan/tsan_malloc_mac.cpp b/libsanitizer/tsan/tsan_malloc_mac.cpp
index 0e861bf1f96..ac844ae8a44 100644
--- a/libsanitizer/tsan/tsan_malloc_mac.cpp
+++ b/libsanitizer/tsan/tsan_malloc_mac.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "sanitizer_common/sanitizer_errno.h"
#include "tsan_interceptors.h"
diff --git a/libsanitizer/tsan/tsan_platform.h b/libsanitizer/tsan/tsan_platform.h
index 233bf0a39df..7c13c733513 100644
--- a/libsanitizer/tsan/tsan_platform.h
+++ b/libsanitizer/tsan/tsan_platform.h
@@ -394,6 +394,7 @@ struct MappingGo48 {
0300 0000 0000 - 0700 0000 0000: -
0700 0000 0000 - 0770 0000 0000: metainfo (memory blocks and sync objects)
07d0 0000 0000 - 8000 0000 0000: -
+PIE binaries are currently not supported, but it should be theoretically possible.
*/
struct MappingGoWindows {
@@ -587,7 +588,7 @@ ALWAYS_INLINE auto SelectMapping(Arg arg) {
#else // SANITIZER_GO
# if SANITIZER_IOS && !SANITIZER_IOSSIM
return Func::template Apply<MappingAppleAarch64>(arg);
-# elif defined(__x86_64__) || SANITIZER_MAC
+# elif defined(__x86_64__) || SANITIZER_APPLE
return Func::template Apply<Mapping48AddressSpace>(arg);
# elif defined(__aarch64__)
switch (vmaSize) {
diff --git a/libsanitizer/tsan/tsan_platform_linux.cpp b/libsanitizer/tsan/tsan_platform_linux.cpp
index 17dbdff8a53..807f6be2eee 100644
--- a/libsanitizer/tsan/tsan_platform_linux.cpp
+++ b/libsanitizer/tsan/tsan_platform_linux.cpp
@@ -402,7 +402,11 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) {
#elif defined(__powerpc__)
# define LONG_JMP_SP_ENV_SLOT 0
#elif SANITIZER_FREEBSD
-# define LONG_JMP_SP_ENV_SLOT 2
+# ifdef __aarch64__
+# define LONG_JMP_SP_ENV_SLOT 1
+# else
+# define LONG_JMP_SP_ENV_SLOT 2
+# endif
#elif SANITIZER_LINUX
# ifdef __aarch64__
# define LONG_JMP_SP_ENV_SLOT 13
diff --git a/libsanitizer/tsan/tsan_platform_mac.cpp b/libsanitizer/tsan/tsan_platform_mac.cpp
index 44b98d46cfb..1aac0fb2752 100644
--- a/libsanitizer/tsan/tsan_platform_mac.cpp
+++ b/libsanitizer/tsan/tsan_platform_mac.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
@@ -200,44 +200,26 @@ void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
# if !SANITIZER_GO
void InitializeShadowMemoryPlatform() { }
-// On OS X, GCD worker threads are created without a call to pthread_create. We
-// need to properly register these threads with ThreadCreate and ThreadStart.
-// These threads don't have a parent thread, as they are created "spuriously".
-// We're using a libpthread API that notifies us about a newly created thread.
-// The `thread == pthread_self()` check indicates this is actually a worker
-// thread. If it's just a regular thread, this hook is called on the parent
-// thread.
-typedef void (*pthread_introspection_hook_t)(unsigned int event,
- pthread_t thread, void *addr,
- size_t size);
-extern "C" pthread_introspection_hook_t pthread_introspection_hook_install(
- pthread_introspection_hook_t hook);
-static const uptr PTHREAD_INTROSPECTION_THREAD_CREATE = 1;
-static const uptr PTHREAD_INTROSPECTION_THREAD_TERMINATE = 3;
-static pthread_introspection_hook_t prev_pthread_introspection_hook;
-static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
- void *addr, size_t size) {
- if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
- if (thread == pthread_self()) {
- // The current thread is a newly created GCD worker thread.
- ThreadState *thr = cur_thread();
- Processor *proc = ProcCreate();
- ProcWire(proc, thr);
- ThreadState *parent_thread_state = nullptr; // No parent.
- Tid tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
- CHECK_NE(tid, kMainTid);
- ThreadStart(thr, tid, GetTid(), ThreadType::Worker);
- }
- } else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
- CHECK_EQ(thread, pthread_self());
+// Register GCD worker threads, which are created without an observable call to
+// pthread_create().
+static void ThreadCreateCallback(uptr thread, bool gcd_worker) {
+ if (gcd_worker) {
ThreadState *thr = cur_thread();
- if (thr->tctx) {
- DestroyThreadState();
- }
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
+ ThreadState *parent_thread_state = nullptr; // No parent.
+ Tid tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
+ CHECK_NE(tid, kMainTid);
+ ThreadStart(thr, tid, GetTid(), ThreadType::Worker);
}
+}
- if (prev_pthread_introspection_hook != nullptr)
- prev_pthread_introspection_hook(event, thread, addr, size);
+// Destroy thread state for *all* threads.
+static void ThreadTerminateCallback(uptr thread) {
+ ThreadState *thr = cur_thread();
+ if (thr->tctx) {
+ DestroyThreadState();
+ }
}
#endif
@@ -261,8 +243,11 @@ void InitializePlatform() {
InitializeThreadStateStorage();
- prev_pthread_introspection_hook =
- pthread_introspection_hook_install(&my_pthread_introspection_hook);
+ ThreadEventCallbacks callbacks = {
+ .create = ThreadCreateCallback,
+ .terminate = ThreadTerminateCallback,
+ };
+ InstallPthreadIntrospectionHook(callbacks);
#endif
if (GetMacosAlignedVersion() >= MacosVersion(10, 14)) {
@@ -316,4 +301,4 @@ int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
} // namespace __tsan
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
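
The hook-chaining logic removed from this file now lives behind the shared `InstallPthreadIntrospectionHook(callbacks)` API, but the underlying mechanism is unchanged. A sketch of that mechanism, using the libpthread introspection API exactly as declared in the removed code (macOS-only; the callback forwarding is elided):

    #include <pthread.h>
    #include <cstddef>

    typedef void (*pthread_introspection_hook_t)(unsigned int event,
                                                 pthread_t thread, void *addr,
                                                 size_t size);
    extern "C" pthread_introspection_hook_t pthread_introspection_hook_install(
        pthread_introspection_hook_t hook);

    static const unsigned PTHREAD_INTROSPECTION_THREAD_CREATE = 1;
    static const unsigned PTHREAD_INTROSPECTION_THREAD_TERMINATE = 3;

    static pthread_introspection_hook_t prev_hook;

    static void my_hook(unsigned int event, pthread_t thread, void *addr,
                        size_t size) {
      if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
        // `thread == pthread_self()` means the event fires on the new thread
        // itself, i.e. a GCD worker created without pthread_create(); for a
        // regular thread the hook runs on the parent.
        bool gcd_worker = (thread == pthread_self());
        (void)gcd_worker;  // ... forward to a create callback here ...
      } else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
        // ... forward to a terminate callback here ...
      }
      if (prev_hook)
        prev_hook(event, thread, addr, size);
    }

    void Install() { prev_hook = pthread_introspection_hook_install(&my_hook); }

Chaining to the previous hook matters because other tools may have installed their own introspection hooks before the sanitizer runtime.
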
diff --git a/libsanitizer/tsan/tsan_report.cpp b/libsanitizer/tsan/tsan_report.cpp
index 9f151279b60..9b03adc16b9 100644
--- a/libsanitizer/tsan/tsan_report.cpp
+++ b/libsanitizer/tsan/tsan_report.cpp
@@ -98,7 +98,7 @@ static const char *ReportTypeString(ReportType typ, uptr tag) {
UNREACHABLE("missing case");
}
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
static const char *const kInterposedFunctionPrefix = "wrap_";
#else
static const char *const kInterposedFunctionPrefix = "__interceptor_";
@@ -200,8 +200,9 @@ static void PrintLocation(const ReportLocation *loc) {
} else if (loc->type == ReportLocationTLS) {
Printf(" Location is TLS of %s.\n\n", thread_name(thrbuf, loc->tid));
} else if (loc->type == ReportLocationFD) {
- Printf(" Location is file descriptor %d created by %s at:\n",
- loc->fd, thread_name(thrbuf, loc->tid));
+ Printf(" Location is file descriptor %d %s by %s at:\n", loc->fd,
+ loc->fd_closed ? "destroyed" : "created",
+ thread_name(thrbuf, loc->tid));
print_stack = true;
}
Printf("%s", d.Default());
diff --git a/libsanitizer/tsan/tsan_report.h b/libsanitizer/tsan/tsan_report.h
index 718eacde7ec..3c88864af14 100644
--- a/libsanitizer/tsan/tsan_report.h
+++ b/libsanitizer/tsan/tsan_report.h
@@ -76,6 +76,7 @@ struct ReportLocation {
uptr external_tag = 0;
Tid tid = kInvalidTid;
int fd = 0;
+ bool fd_closed = false;
bool suppressable = false;
ReportStack *stack = nullptr;
};
diff --git a/libsanitizer/tsan/tsan_rtl.cpp b/libsanitizer/tsan/tsan_rtl.cpp
index 1d6fc725266..db3d94518b8 100644
--- a/libsanitizer/tsan/tsan_rtl.cpp
+++ b/libsanitizer/tsan/tsan_rtl.cpp
@@ -45,7 +45,7 @@ void (*on_initialize)(void);
int (*on_finalize)(int);
#endif
-#if !SANITIZER_GO && !SANITIZER_MAC
+#if !SANITIZER_GO && !SANITIZER_APPLE
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
SANITIZER_CACHE_LINE_SIZE);
@@ -197,30 +197,45 @@ static void DoResetImpl(uptr epoch) {
}
DPrintf("Resetting shadow...\n");
- if (!MmapFixedSuperNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
- "shadow")) {
+ auto shadow_begin = ShadowBeg();
+ auto shadow_end = ShadowEnd();
+#if SANITIZER_GO
+ CHECK_NE(0, ctx->mapped_shadow_begin);
+ shadow_begin = ctx->mapped_shadow_begin;
+ shadow_end = ctx->mapped_shadow_end;
+ VPrintf(2, "shadow_begin-shadow_end: (0x%zx-0x%zx)\n",
+ shadow_begin, shadow_end);
+#endif
+
+#if SANITIZER_WINDOWS
+ auto resetFailed =
+ !ZeroMmapFixedRegion(shadow_begin, shadow_end - shadow_begin);
+#else
+ auto resetFailed =
+ !MmapFixedSuperNoReserve(shadow_begin, shadow_end-shadow_begin, "shadow");
+#endif
+ if (resetFailed) {
Printf("failed to reset shadow memory\n");
Die();
}
DPrintf("Resetting meta shadow...\n");
ctx->metamap.ResetClocks();
+ StoreShadow(&ctx->last_spurious_race, Shadow::kEmpty);
ctx->resetting = false;
}
// Clang does not understand locking all slots in the loop:
// error: expecting mutex 'slot.mtx' to be held at start of each loop
void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
- {
- for (auto& slot : ctx->slots) {
- slot.mtx.Lock();
- if (UNLIKELY(epoch == 0))
- epoch = ctx->global_epoch;
- if (UNLIKELY(epoch != ctx->global_epoch)) {
- // Epoch can't change once we've locked the first slot.
- CHECK_EQ(slot.sid, 0);
- slot.mtx.Unlock();
- return;
- }
+ for (auto& slot : ctx->slots) {
+ slot.mtx.Lock();
+ if (UNLIKELY(epoch == 0))
+ epoch = ctx->global_epoch;
+ if (UNLIKELY(epoch != ctx->global_epoch)) {
+ // Epoch can't change once we've locked the first slot.
+ CHECK_EQ(slot.sid, 0);
+ slot.mtx.Unlock();
+ return;
}
}
DPrintf("#%d: DoReset epoch=%lu\n", thr ? thr->tid : -1, epoch);
@@ -370,7 +385,6 @@ Context::Context()
}),
racy_mtx(MutexTypeRacy),
racy_stacks(),
- racy_addresses(),
fired_suppressions_mtx(MutexTypeFired),
slot_mtx(MutexTypeSlots),
resetting() {
@@ -559,18 +573,50 @@ void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
#endif
void MapShadow(uptr addr, uptr size) {
+ // Ensure the thread registry lock is held, so as to synchronize
+ // with DoReset, which also accesses the mapped_shadow_* ctx fields.
+ ThreadRegistryLock lock0(&ctx->thread_registry);
+ static bool data_mapped = false;
+
+#if !SANITIZER_GO
// Global data is not 64K aligned, but there are no adjacent mappings,
// so we can get away with unaligned mapping.
// CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
const uptr kPageSize = GetPageSizeCached();
uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
- if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
- "shadow"))
+ if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
Die();
+#else
+ uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), (64 << 10));
+ uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), (64 << 10));
+ VPrintf(2, "MapShadow for (0x%zx-0x%zx), begin/end: (0x%zx-0x%zx)\n",
+ addr, addr + size, shadow_begin, shadow_end);
+
+ if (!data_mapped) {
+ // First call maps data+bss.
+ if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
+ Die();
+ } else {
+ VPrintf(2, "ctx->mapped_shadow_{begin,end} = (0x%zx-0x%zx)\n",
+ ctx->mapped_shadow_begin, ctx->mapped_shadow_end);
+ // Second and subsequent calls map heap.
+ if (shadow_end <= ctx->mapped_shadow_end)
+ return;
+ if (!ctx->mapped_shadow_begin || ctx->mapped_shadow_begin > shadow_begin)
+ ctx->mapped_shadow_begin = shadow_begin;
+ if (shadow_begin < ctx->mapped_shadow_end)
+ shadow_begin = ctx->mapped_shadow_end;
+ VPrintf(2, "MapShadow begin/end = (0x%zx-0x%zx)\n",
+ shadow_begin, shadow_end);
+ if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
+ "shadow"))
+ Die();
+ ctx->mapped_shadow_end = shadow_end;
+ }
+#endif
// Meta shadow is 2:1, so tread carefully.
- static bool data_mapped = false;
static uptr mapped_meta_end = 0;
uptr meta_begin = (uptr)MemToMeta(addr);
uptr meta_end = (uptr)MemToMeta(addr + size);
@@ -587,8 +633,7 @@ void MapShadow(uptr addr, uptr size) {
// Windows wants 64K alignment.
meta_begin = RoundDownTo(meta_begin, 64 << 10);
meta_end = RoundUpTo(meta_end, 64 << 10);
- if (meta_end <= mapped_meta_end)
- return;
+ CHECK_GT(meta_end, mapped_meta_end);
if (meta_begin < mapped_meta_end)
meta_begin = mapped_meta_end;
if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
@@ -651,9 +696,6 @@ void Initialize(ThreadState *thr) {
__tsan::InitializePlatformEarly();
#if !SANITIZER_GO
- // Re-exec ourselves if we need to set additional env or command line args.
- MaybeReexec();
-
InitializeAllocator();
ReplaceSystemMalloc();
#endif
@@ -722,8 +764,10 @@ void MaybeSpawnBackgroundThread() {
int Finalize(ThreadState *thr) {
bool failed = false;
+#if !SANITIZER_GO
if (common_flags()->print_module_map == 1)
DumpProcessMap();
+#endif
if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
internal_usleep(u64(flags()->atexit_sleep_ms) * 1000);
@@ -952,6 +996,15 @@ void TraceSwitchPartImpl(ThreadState* thr) {
TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
d.addr, d.stack_id);
}
+ // Callers of TraceSwitchPart expect that TraceAcquire will always succeed
+ // after the call. It's possible that TryTraceFunc/TraceMutexLock above
+ // filled the trace part exactly up to the TracePart::kAlignment gap
+ // and the next TraceAcquire won't succeed. Skip the gap to avoid that.
+ EventFunc *ev;
+ if (!TraceAcquire(thr, &ev)) {
+ CHECK(TraceSkipGap(thr));
+ CHECK(TraceAcquire(thr, &ev));
+ }
{
Lock lock(&ctx->slot_mtx);
// There is a small chance that the slot may be not queued at this point.
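
For SANITIZER_GO builds, MapShadow above now grows the shadow mapping incrementally and records the covered range in ctx->mapped_shadow_begin/end, so DoResetImpl can re-mmap only what was actually mapped. A sketch of just that bookkeeping, under assumed names (the real code holds the thread registry lock around it):

    #include <cstdint>

    using uptr = uintptr_t;

    struct MappedRange {
      uptr begin = 0, end = 0;
    };

    // Returns the sub-range that still needs mapping, or {0, 0} if the
    // request is already covered. Assumes serialized callers and requests
    // that only ever grow upward, as heap mappings do.
    MappedRange ExtendShadow(MappedRange &mapped, uptr shadow_begin,
                             uptr shadow_end) {
      if (shadow_end <= mapped.end)
        return {0, 0};                    // fully covered already
      if (!mapped.begin || mapped.begin > shadow_begin)
        mapped.begin = shadow_begin;      // track the lowest begin seen
      if (shadow_begin < mapped.end)
        shadow_begin = mapped.end;        // don't remap the covered prefix
      mapped.end = shadow_end;
      return {shadow_begin, shadow_end};  // caller mmaps this piece
    }
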
diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h
index b472c0f77df..e1e121e2ee0 100644
--- a/libsanitizer/tsan/tsan_rtl.h
+++ b/libsanitizer/tsan/tsan_rtl.h
@@ -235,7 +235,7 @@ struct ThreadState {
} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
#if !SANITIZER_GO
-#if SANITIZER_MAC || SANITIZER_ANDROID
+#if SANITIZER_APPLE || SANITIZER_ANDROID
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
@@ -256,7 +256,7 @@ inline void set_cur_thread(ThreadState *thr) {
reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
inline void cur_thread_finalize() { }
-# endif // SANITIZER_MAC || SANITIZER_ANDROID
+# endif // SANITIZER_APPLE || SANITIZER_ANDROID
#endif // SANITIZER_GO
class ThreadContext final : public ThreadContextBase {
@@ -314,9 +314,43 @@ struct Context {
ThreadRegistry thread_registry;
+ // This is used to prevent a very unlikely but very pathological behavior.
+ // Since memory access handling is not synchronized with DoReset,
+ // a thread running concurrently with DoReset can leave a bogus shadow value
+ // that will be later falsely detected as a race. For such false races
+ // RestoreStack will return false and we will not report it.
+ // However, consider that a thread leaves a whole lot of such bogus values
+ // and these values are later read by a whole lot of threads.
+ // This will cause massive amounts of ReportRace calls and lots of
+ // serialization. In very pathological cases the resulting slowdown
+ // can be >100x. This is very unlikely, but it was presumably observed
+ // in practice: https://github.com/google/sanitizers/issues/1552
+ // If this happens, the previous access sid+epoch will be the same for all of
+ // these false races: as soon as the thread tries to increment its epoch, it
+ // will notice that DoReset has happened and will stop producing bogus shadow
+ // values. So, last_spurious_race is used to remember the last sid+epoch
+ // for which RestoreStack returned false. Then it is used to filter out
+ // races with the same sid+epoch very early and quickly.
+ // It is of course possible that multiple threads left multiple bogus shadow
+ // values and all of them are read by lots of threads at the same time.
+ // In such a case last_spurious_race will only be able to deduplicate a few
+ // races from one thread, then a few from another, and so on. An alternative
+ // would be to hold an array of such sid+epoch pairs, but we consider that
+ // scenario even less likely.
+ // Note: this can lead to some rare false negatives as well:
+ // 1. When a legit access with the same sid+epoch participates in a race
+ // as the "previous" memory access, it will be wrongly filtered out.
+ // 2. When RestoreStack returns false for a legit memory access because it
+ // was already evicted from the thread trace, we will still remember it in
+ // last_spurious_race. Then if there is another racing memory access from
+ // the same thread that happened in the same epoch, but was stored in the
+ // next thread trace part (which is still preserved in the thread trace),
+ // we will also wrongly filter it out while RestoreStack would actually
+ // succeed for that second memory access.
+ RawShadow last_spurious_race;
+
Mutex racy_mtx;
Vector<RacyStacks> racy_stacks;
- Vector<RacyAddress> racy_addresses;
// Number of fired suppressions may be large enough.
Mutex fired_suppressions_mtx;
InternalMmapVector<FiredSuppression> fired_suppressions;
@@ -338,6 +372,10 @@ struct Context {
uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
+#if SANITIZER_GO
+ uptr mapped_shadow_begin;
+ uptr mapped_shadow_end;
+#endif
};
extern Context *ctx; // The one and the only global runtime context.
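
The last_spurious_race field described above is a single remembered sid+epoch pair, consulted with relaxed atomics on the hot path of ReportRace (see SpuriousRace/StoreShadow in the tsan_rtl_report.cpp diff below). A sketch of the filter, with illustrative field widths (TSan actually stores the whole raw shadow word):

    #include <atomic>
    #include <cstdint>

    struct SidEpoch {
      uint8_t sid;
      uint16_t epoch;
    };

    static std::atomic<uint32_t> last_spurious{0};

    static uint32_t Pack(SidEpoch s) {
      return uint32_t(s.sid) | (uint32_t(s.epoch) << 8);
    }

    // Hot path: drop a candidate race whose "old" access matches the
    // remembered spurious pair. Relaxed is enough; this is only a filter.
    bool IsSpurious(SidEpoch old_access) {
      return last_spurious.load(std::memory_order_relaxed) == Pack(old_access);
    }

    // Slow path: RestoreStack failed for this access; remember it so the
    // potentially massive stream of identical false races is dropped early.
    void RememberSpurious(SidEpoch old_access) {
      last_spurious.store(Pack(old_access), std::memory_order_relaxed);
    }
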
diff --git a/libsanitizer/tsan/tsan_rtl_access.cpp b/libsanitizer/tsan/tsan_rtl_access.cpp
index 7d771bfaad7..8b20984a010 100644
--- a/libsanitizer/tsan/tsan_rtl_access.cpp
+++ b/libsanitizer/tsan/tsan_rtl_access.cpp
@@ -145,15 +145,6 @@ void TraceTime(ThreadState* thr) {
TraceEvent(thr, ev);
}
-ALWAYS_INLINE RawShadow LoadShadow(RawShadow* p) {
- return static_cast<RawShadow>(
- atomic_load((atomic_uint32_t*)p, memory_order_relaxed));
-}
-
-ALWAYS_INLINE void StoreShadow(RawShadow* sp, RawShadow s) {
- atomic_store((atomic_uint32_t*)sp, static_cast<u32>(s), memory_order_relaxed);
-}
-
NOINLINE void DoReportRace(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
Shadow old,
AccessType typ) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
diff --git a/libsanitizer/tsan/tsan_rtl_ppc64.S b/libsanitizer/tsan/tsan_rtl_ppc64.S
index 9e533a71a9c..8285e21aa1e 100644
--- a/libsanitizer/tsan/tsan_rtl_ppc64.S
+++ b/libsanitizer/tsan/tsan_rtl_ppc64.S
@@ -1,6 +1,5 @@
#include "tsan_ppc_regs.h"
- .machine altivec
.section .text
.hidden __tsan_setjmp
.globl _setjmp
diff --git a/libsanitizer/tsan/tsan_rtl_report.cpp b/libsanitizer/tsan/tsan_rtl_report.cpp
index 4cf8816489d..c2cff60e2da 100644
--- a/libsanitizer/tsan/tsan_rtl_report.cpp
+++ b/libsanitizer/tsan/tsan_rtl_report.cpp
@@ -281,16 +281,16 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
int fd = -1;
Tid creat_tid = kInvalidTid;
StackID creat_stack = 0;
- if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
+ bool closed = false;
+ if (FdLocation(addr, &fd, &creat_tid, &creat_stack, &closed)) {
auto *loc = New<ReportLocation>();
loc->type = ReportLocationFD;
+ loc->fd_closed = closed;
loc->fd = fd;
loc->tid = creat_tid;
loc->stack = SymbolizeStackId(creat_stack);
rep_->locs.PushBack(loc);
- ThreadContext *tctx = FindThreadByTidLocked(creat_tid);
- if (tctx)
- AddThread(tctx);
+ AddThread(creat_tid);
return;
}
MBlock *b = 0;
@@ -312,8 +312,7 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
loc->tid = b->tid;
loc->stack = SymbolizeStackId(b->stk);
rep_->locs.PushBack(loc);
- if (ThreadContext *tctx = FindThreadByTidLocked(b->tid))
- AddThread(tctx);
+ AddThread(b->tid);
return;
}
bool is_stack = false;
@@ -629,35 +628,6 @@ static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
return false;
}
-static bool FindRacyAddress(const RacyAddress &ra0) {
- for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
- RacyAddress ra2 = ctx->racy_addresses[i];
- uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
- uptr minend = min(ra0.addr_max, ra2.addr_max);
- if (maxbeg < minend) {
- VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
- return true;
- }
- }
- return false;
-}
-
-static bool HandleRacyAddress(ThreadState *thr, uptr addr_min, uptr addr_max) {
- if (!flags()->suppress_equal_addresses)
- return false;
- RacyAddress ra0 = {addr_min, addr_max};
- {
- ReadLock lock(&ctx->racy_mtx);
- if (FindRacyAddress(ra0))
- return true;
- }
- Lock lock(&ctx->racy_mtx);
- if (FindRacyAddress(ra0))
- return true;
- ctx->racy_addresses.PushBack(ra0);
- return false;
-}
-
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
// These should have been checked in ShouldReport.
// It's too late to check them here, we have already taken locks.
@@ -730,6 +700,11 @@ static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
return false;
}
+static bool SpuriousRace(Shadow old) {
+ Shadow last(LoadShadow(&ctx->last_spurious_race));
+ return last.sid() == old.sid() && last.epoch() == old.epoch();
+}
+
void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
AccessType typ0) {
CheckedMutex::CheckNoLocks();
@@ -750,6 +725,8 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
((typ0 & kAccessAtomic) || (typ1 & kAccessAtomic)) &&
!(typ0 & kAccessFree) && !(typ1 & kAccessFree))
return;
+ if (SpuriousRace(old))
+ return;
const uptr kMop = 2;
Shadow s[kMop] = {cur, old};
@@ -761,8 +738,6 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
uptr addr_max = max(end0, end1);
if (IsExpectedReport(addr_min, addr_max - addr_min))
return;
- if (HandleRacyAddress(thr, addr_min, addr_max))
- return;
ReportType rep_typ = ReportTypeRace;
if ((typ0 & kAccessVptr) && (typ1 & kAccessFree))
@@ -791,9 +766,13 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
ThreadRegistryLock l0(&ctx->thread_registry);
Lock slots_lock(&ctx->slot_mtx);
+ if (SpuriousRace(old))
+ return;
if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
- size1, typ1, &tids[1], &traces[1], mset[1], &tags[1]))
+ size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
+ StoreShadow(&ctx->last_spurious_race, old.raw());
return;
+ }
if (IsFiredSuppression(ctx, rep_typ, traces[1]))
return;
diff --git a/libsanitizer/tsan/tsan_rtl_thread.cpp b/libsanitizer/tsan/tsan_rtl_thread.cpp
index 86c8b3764cc..77488f84328 100644
--- a/libsanitizer/tsan/tsan_rtl_thread.cpp
+++ b/libsanitizer/tsan/tsan_rtl_thread.cpp
@@ -53,7 +53,7 @@ static void CollectThreadLeaks(ThreadContextBase *tctx_base, void *arg) {
// Disabled on Mac because lldb test TestTsanBasic fails:
// https://reviews.llvm.org/D112603#3163158
-#if !SANITIZER_GO && !SANITIZER_MAC
+#if !SANITIZER_GO && !SANITIZER_APPLE
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
if (tctx->tid == kMainTid) {
Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
diff --git a/libsanitizer/tsan/tsan_shadow.h b/libsanitizer/tsan/tsan_shadow.h
index b222acf9e6c..6b8114ef513 100644
--- a/libsanitizer/tsan/tsan_shadow.h
+++ b/libsanitizer/tsan/tsan_shadow.h
@@ -178,6 +178,16 @@ class Shadow {
static_assert(sizeof(Shadow) == kShadowSize, "bad Shadow size");
+ALWAYS_INLINE RawShadow LoadShadow(RawShadow *p) {
+ return static_cast<RawShadow>(
+ atomic_load((atomic_uint32_t *)p, memory_order_relaxed));
+}
+
+ALWAYS_INLINE void StoreShadow(RawShadow *sp, RawShadow s) {
+ atomic_store((atomic_uint32_t *)sp, static_cast<u32>(s),
+ memory_order_relaxed);
+}
+
} // namespace __tsan
#endif
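
LoadShadow/StoreShadow moved into this header so that tsan_rtl.cpp and tsan_rtl_report.cpp can manipulate shadow words too; e.g. DoResetImpl clears the spurious-race filter with StoreShadow(&ctx->last_spurious_race, Shadow::kEmpty). A hypothetical usage sketch, assuming the declarations above and the __tsan namespace:

    // Zero a run of shadow cells with the relaxed helpers above. Relaxed
    // ordering suffices: shadow words are racy metadata by design and carry
    // no inter-thread synchronization themselves.
    inline void ClearShadowRange(RawShadow *begin, RawShadow *end) {
      for (RawShadow *p = begin; p != end; p++)
        StoreShadow(p, static_cast<RawShadow>(0));
    }
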