Diffstat (limited to 'libsanitizer/lsan/lsan_common.cc')
-rw-r--r-- | libsanitizer/lsan/lsan_common.cc | 48
1 file changed, 38 insertions, 10 deletions
diff --git a/libsanitizer/lsan/lsan_common.cc b/libsanitizer/lsan/lsan_common.cc
index 4afce9df071..a4424a8871a 100644
--- a/libsanitizer/lsan/lsan_common.cc
+++ b/libsanitizer/lsan/lsan_common.cc
@@ -13,14 +13,15 @@
 #include "lsan_common.h"

 #include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_flags.h"
 #include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
 #include "sanitizer_common/sanitizer_stacktrace.h"
 #include "sanitizer_common/sanitizer_suppressions.h"
-#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
 #include "sanitizer_common/sanitizer_tls_get_addr.h"

 #if CAN_SANITIZE_LEAKS
@@ -102,7 +103,7 @@ InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
 void InitializeRootRegions() {
   CHECK(!root_regions);
   ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
-  root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
+  root_regions = new (placeholder) InternalMmapVector<RootRegion>();  // NOLINT
 }

 const char *MaybeCallLsanDefaultOptions() {
@@ -212,9 +213,10 @@ void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
 // Scans thread data (stacks and TLS) for heap pointers.
 static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                            Frontier *frontier) {
-  InternalScopedBuffer<uptr> registers(suspended_threads.RegisterCount());
+  InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
   uptr registers_begin = reinterpret_cast<uptr>(registers.data());
-  uptr registers_end = registers_begin + registers.size();
+  uptr registers_end =
+      reinterpret_cast<uptr>(registers.data() + registers.size());
   for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
     tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
     LOG_THREADS("Processing thread %d.\n", os_id);
@@ -409,8 +411,9 @@ static void MarkInvalidPCCb(uptr chunk, void *arg) {
   }
 }

-// On Linux, handles dynamically allocated TLS blocks by treating all chunks
-// allocated from ld-linux.so as reachable.
+// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
+// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
+// modules accounting etc.
 // Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
 // They are allocated with a __libc_memalign() call in allocate_and_init()
 // (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
@@ -441,7 +444,7 @@ void ProcessPC(Frontier *frontier) {
 // Sets the appropriate tag on each chunk.
 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
   // Holds the flood fill frontier.
-  Frontier frontier(1);
+  Frontier frontier;

   ForEachChunk(CollectIgnoredCb, &frontier);
   ProcessGlobalRegions(&frontier);
@@ -503,7 +506,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
 }

 static void PrintMatchedSuppressions() {
-  InternalMmapVector<Suppression *> matched(1);
+  InternalMmapVector<Suppression *> matched;
   GetSuppressionContext()->GetMatched(&matched);
   if (!matched.size())
     return;
@@ -522,11 +525,36 @@ struct CheckForLeaksParam {
   LeakReport leak_report;
 };

+static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
+  const InternalMmapVector<tid_t> &suspended_threads =
+      *(const InternalMmapVector<tid_t> *)arg;
+  if (tctx->status == ThreadStatusRunning) {
+    uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
+                                tctx->os_id, CompareLess<int>());
+    if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
+      Report("Running thread %d was not suspended. False leaks are possible.\n",
+             tctx->os_id);
+  };
+}
+
+static void ReportUnsuspendedThreads(
+    const SuspendedThreadsList &suspended_threads) {
+  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
+  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
+    threads[i] = suspended_threads.GetThreadID(i);
+
+  Sort(threads.data(), threads.size());
+
+  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+      &ReportIfNotSuspended, &threads);
+}
+
 static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                   void *arg) {
   CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
   CHECK(param);
   CHECK(!param->success);
+  ReportUnsuspendedThreads(suspended_threads);
   ClassifyAllChunks(suspended_threads);
   ForEachChunk(CollectLeaksCb, &param->leak_report);
   // Clean up for subsequent leak checks. This assumes we did not overwrite any
@@ -681,7 +709,7 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
   uptr unsuppressed_count = UnsuppressedLeakCount();
   if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
     Printf("The %zu top leak(s):\n", num_leaks_to_report);
-  InternalSort(&leaks_, leaks_.size(), LeakComparator);
+  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
   uptr leaks_reported = 0;
   for (uptr i = 0; i < leaks_.size(); i++) {
     if (leaks_[i].is_suppressed) continue;
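
Note on the new ReportUnsuspendedThreads/ReportIfNotSuspended pair above: it collects the IDs of the threads the stop-the-world machinery actually suspended, sorts them once, and then walks the thread registry, warning about any thread still marked running whose ID is missing from the sorted list (found via a lower-bound binary search). Below is a minimal standalone sketch of that check, using the C++ standard library in place of the sanitizer-internal InternalMmapVector, Sort, and InternalLowerBound; the thread IDs and the IsSuspended helper are illustrative only and are not part of the patch.

// Illustrative sketch only: mirrors the sort + lower-bound lookup used by the
// patch's unsuspended-thread detection, with std containers instead of the
// sanitizer-internal ones. Thread IDs below are made up.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

using tid = uint64_t;

// Returns true if os_id appears in the sorted list of suspended thread IDs,
// analogous to the InternalLowerBound check in ReportIfNotSuspended.
static bool IsSuspended(const std::vector<tid> &suspended_sorted, tid os_id) {
  auto it = std::lower_bound(suspended_sorted.begin(), suspended_sorted.end(),
                             os_id);
  return it != suspended_sorted.end() && *it == os_id;
}

int main() {
  // Hypothetical data: threads the registry reports as running vs. the set
  // the stop-the-world pass actually managed to suspend.
  std::vector<tid> running = {101, 102, 103, 104};
  std::vector<tid> suspended = {103, 101, 104};  // thread 102 was missed

  std::sort(suspended.begin(), suspended.end());  // sort once, search per thread
  for (tid os_id : running)
    if (!IsSuspended(suspended, os_id))
      std::printf("Running thread %llu was not suspended. "
                  "False leaks are possible.\n",
                  static_cast<unsigned long long>(os_id));
  return 0;
}

Sorting the suspended-thread IDs once and binary-searching per registry entry keeps the whole check at roughly O((n + m) log n), which matters because the registry callback runs while the world is stopped.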