author    kcc <kcc@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-01-23 11:41:33 +0000
committer kcc <kcc@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-01-23 11:41:33 +0000
commit    4a2c1ffc3bc03c1f519fe1ef62cafeda13481fe2
tree      80989bd161e60d01560788cb7427eb644b227884
parent    bc5663df31f641cce031d61b31540dd88a473cb5
libsanitizer merge from upstream r173241
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@195404 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libsanitizer/sanitizer_common')
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator.h                   | 358
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_atomic_clang.h                |   2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h                 |   2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common.cc                     |  38
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_interceptors.h         |  77
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc       | 222
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc | 144
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_internal_defs.h               |   7
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_lfstack.h                     |  71
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux.cc                      |  68
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_list.h                        |   4
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mac.cc                        |  20
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mutex.h                       |  17
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h       |  21
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_quarantine.h                  | 170
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer.cc                 | 190
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer.h                  |  11
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_win.cc                        |  38
18 files changed, 1086 insertions, 374 deletions
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.h b/libsanitizer/sanitizer_common/sanitizer_allocator.h
index d0fc315b97e..1c9852e5003 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator.h
@@ -17,6 +17,7 @@
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
+#include "sanitizer_lfstack.h"
namespace __sanitizer {
@@ -62,7 +63,8 @@ namespace __sanitizer {
// c32 => s: 512 diff: +32 06% l 9 cached: 64 32768; id 32
-template <uptr kMaxSizeLog, uptr kMaxNumCached, uptr kMaxBytesCachedLog>
+template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog,
+ uptr kMinBatchClassT>
class SizeClassMap {
static const uptr kMinSizeLog = 3;
static const uptr kMidSizeLog = kMinSizeLog + 4;
@@ -73,6 +75,14 @@ class SizeClassMap {
static const uptr M = (1 << S) - 1;
public:
+ static const uptr kMaxNumCached = kMaxNumCachedT;
+ struct TransferBatch {
+ TransferBatch *next;
+ uptr count;
+ void *batch[kMaxNumCached];
+ };
+
+ static const uptr kMinBatchClass = kMinBatchClassT;
static const uptr kMaxSize = 1 << kMaxSizeLog;
static const uptr kNumClasses =
kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
@@ -148,44 +158,25 @@ class SizeClassMap {
if (c > 0)
CHECK_LT(Size(c-1), s);
}
- }
-};
-
-typedef SizeClassMap<15, 256, 16> DefaultSizeClassMap;
-typedef SizeClassMap<15, 64, 14> CompactSizeClassMap;
-
-struct AllocatorListNode {
- AllocatorListNode *next;
+ // TransferBatch for kMinBatchClass must fit into the block itself.
+ const uptr batch_size = sizeof(TransferBatch)
+ - sizeof(void*) // NOLINT
+ * (kMaxNumCached - MaxCached(kMinBatchClass));
+ CHECK_LE(batch_size, Size(kMinBatchClass));
+ // TransferBatch for kMinBatchClass-1 must not fit into the block itself.
+ const uptr batch_size1 = sizeof(TransferBatch)
+ - sizeof(void*) // NOLINT
+ * (kMaxNumCached - MaxCached(kMinBatchClass - 1));
+ CHECK_GT(batch_size1, Size(kMinBatchClass - 1));
+ }
};
-typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;
-
-// Move at most max_count chunks from allocate_from to allocate_to.
-// This function is better be a method of AllocatorFreeList, but we can't
-// inherit it from IntrusiveList as the ancient gcc complains about non-PODness.
-static inline uptr BulkMove(uptr max_count,
- AllocatorFreeList *allocate_from,
- AllocatorFreeList *allocate_to) {
- CHECK(!allocate_from->empty());
- CHECK(allocate_to->empty());
- uptr res = 0;
- if (allocate_from->size() <= max_count) {
- res = allocate_from->size();
- allocate_to->append_front(allocate_from);
- CHECK(allocate_from->empty());
- } else {
- for (uptr i = 0; i < max_count; i++) {
- AllocatorListNode *node = allocate_from->front();
- allocate_from->pop_front();
- allocate_to->push_front(node);
- }
- res = max_count;
- CHECK(!allocate_from->empty());
- }
- CHECK(!allocate_to->empty());
- return res;
-}
+typedef SizeClassMap<17, 256, 16, FIRST_32_SECOND_64(33, 36)>
+ DefaultSizeClassMap;
+typedef SizeClassMap<17, 64, 14, FIRST_32_SECOND_64(25, 28)>
+ CompactSizeClassMap;
+template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
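The batching scheme above replaces the old intrusive free list (AllocatorListNode + BulkMove): chunks now travel between thread caches and the central allocator in fixed-size TransferBatch units, one list operation per batch instead of one per chunk. For size classes at or above kMinBatchClass the batch header lives inside the first chunk of the batch itself, which is what the two new CHECKs in Validate() verify. A minimal standalone sketch of the idea (types simplified; the 256 capacity mirrors DefaultSizeClassMap's kMaxNumCached, the rest is illustrative):

struct TransferBatch {
  TransferBatch *next;  // intrusive link for LFStack / IntrusiveList
  uptr count;           // number of valid entries in batch[]
  void *batch[256];     // chunk pointers handed over as one unit
};

// Returning memory to the central free list is a single (lock-free) push
// of a whole batch, where BulkMove used to splice chunks one by one.
void give_back(LFStack<TransferBatch> *central, TransferBatch *b) {
  central->Push(b);
}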
// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
@@ -214,6 +205,11 @@ template <const uptr kSpaceBeg, const uptr kSpaceSize,
class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator64 {
public:
+ typedef typename SizeClassMap::TransferBatch Batch;
+ typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
+ SizeClassMap, MapUnmapCallback> ThisT;
+ typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
+
void Init() {
CHECK_EQ(kSpaceBeg,
reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize)));
@@ -235,36 +231,20 @@ class SizeClassAllocator64 {
alignment <= SizeClassMap::kMaxSize;
}
- void *Allocate(uptr size, uptr alignment) {
- if (size < alignment) size = alignment;
- CHECK(CanAllocate(size, alignment));
- return AllocateBySizeClass(ClassID(size));
- }
-
- void Deallocate(void *p) {
- CHECK(PointerIsMine(p));
- DeallocateBySizeClass(p, GetSizeClass(p));
- }
-
- // Allocate several chunks of the given class_id.
- void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
+ Batch *NOINLINE AllocateBatch(AllocatorCache *c, uptr class_id) {
CHECK_LT(class_id, kNumClasses);
RegionInfo *region = GetRegionInfo(class_id);
- SpinMutexLock l(&region->mutex);
- if (region->free_list.empty()) {
- PopulateFreeList(class_id, region);
- }
- region->n_allocated += BulkMove(SizeClassMap::MaxCached(class_id),
- &region->free_list, free_list);
+ Batch *b = region->free_list.Pop();
+ if (b == 0)
+ b = PopulateFreeList(c, class_id, region);
+ region->n_allocated += b->count;
+ return b;
}
- // Swallow the entire free_list for the given class_id.
- void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
- CHECK_LT(class_id, kNumClasses);
+ void NOINLINE DeallocateBatch(uptr class_id, Batch *b) {
RegionInfo *region = GetRegionInfo(class_id);
- SpinMutexLock l(&region->mutex);
- region->n_freed += free_list->size();
- region->free_list.append_front(free_list);
+ region->free_list.Push(b);
+ region->n_freed += b->count;
}
static bool PointerIsMine(void *p) {
@@ -352,15 +332,15 @@ class SizeClassAllocator64 {
COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
// Populate the free list with at most this number of bytes at once
// or with one element if its size is greater.
- static const uptr kPopulateSize = 1 << 15;
+ static const uptr kPopulateSize = 1 << 14;
// Call mmap for user memory with at least this size.
static const uptr kUserMapSize = 1 << 15;
// Call mmap for metadata memory with at least this size.
static const uptr kMetaMapSize = 1 << 16;
struct RegionInfo {
- SpinMutex mutex;
- AllocatorFreeList free_list;
+ BlockingMutex mutex;
+ LFStack<Batch> free_list;
uptr allocated_user; // Bytes allocated for user memory.
uptr allocated_meta; // Bytes allocated for metadata.
uptr mapped_user; // Bytes mapped for user memory.
@@ -388,11 +368,16 @@ class SizeClassAllocator64 {
return offset / (u32)size;
}
- void PopulateFreeList(uptr class_id, RegionInfo *region) {
- CHECK(region->free_list.empty());
+ Batch *NOINLINE PopulateFreeList(AllocatorCache *c, uptr class_id,
+ RegionInfo *region) {
+ BlockingMutexLock l(&region->mutex);
+ Batch *b = region->free_list.Pop();
+ if (b)
+ return b;
uptr size = SizeClassMap::Size(class_id);
+ uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
uptr beg_idx = region->allocated_user;
- uptr end_idx = beg_idx + kPopulateSize;
+ uptr end_idx = beg_idx + count * size;
uptr region_beg = kSpaceBeg + kRegionSize * class_id;
if (end_idx + size > region->mapped_user) {
// Do the mmap for the user memory.
@@ -403,17 +388,9 @@ class SizeClassAllocator64 {
MapWithCallback(region_beg + region->mapped_user, map_size);
region->mapped_user += map_size;
}
- uptr idx = beg_idx;
- uptr i = 0;
- do { // do-while loop because we need to put at least one item.
- uptr p = region_beg + idx;
- region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
- idx += size;
- i++;
- } while (idx < end_idx);
- region->allocated_user += idx - beg_idx;
- CHECK_LE(region->allocated_user, region->mapped_user);
- region->allocated_meta += i * kMetadataSize;
+ uptr total_count = (region->mapped_user - beg_idx - size)
+ / size / count * count;
+ region->allocated_meta += total_count * kMetadataSize;
if (region->allocated_meta > region->mapped_meta) {
uptr map_size = kMetaMapSize;
while (region->allocated_meta > region->mapped_meta + map_size)
@@ -431,27 +408,22 @@ class SizeClassAllocator64 {
kRegionSize / 1024 / 1024, size);
Die();
}
- }
-
- void *AllocateBySizeClass(uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *region = GetRegionInfo(class_id);
- SpinMutexLock l(&region->mutex);
- if (region->free_list.empty()) {
- PopulateFreeList(class_id, region);
+ for (;;) {
+ if (class_id < SizeClassMap::kMinBatchClass)
+ b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
+ else
+ b = (Batch*)(region_beg + beg_idx);
+ b->count = count;
+ for (uptr i = 0; i < count; i++)
+ b->batch[i] = (void*)(region_beg + beg_idx + i * size);
+ region->allocated_user += count * size;
+ CHECK_LE(region->allocated_user, region->mapped_user);
+ beg_idx += count * size;
+ if (beg_idx + count * size + size > region->mapped_user)
+ break;
+ region->free_list.Push(b);
}
- CHECK(!region->free_list.empty());
- AllocatorListNode *node = region->free_list.front();
- region->free_list.pop_front();
- region->n_allocated++;
- return reinterpret_cast<void*>(node);
- }
-
- void DeallocateBySizeClass(void *p, uptr class_id) {
- RegionInfo *region = GetRegionInfo(class_id);
- SpinMutexLock l(&region->mutex);
- region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
- region->n_freed++;
+ return b;
}
};
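A worked pass of the carving loop above for a 512-byte size class, assuming this patch's DefaultSizeClassMap parameters (kMaxNumCached = 256, kMaxBytesCachedLog = 16) and assuming MaxCached(c) = Min(kMaxNumCached, (1 << kMaxBytesCachedLog) / Size(c)), which is not shown in this hunk:

// count = MaxCached(class) = Min(256, (1 << 16) / 512) = 128 chunks
// end_idx - beg_idx = count * size = 128 * 512 = 64 KiB of user memory
// Each for(;;) iteration builds one 128-chunk Batch; every batch except
// the last is pushed onto region->free_list, and the last one is
// returned directly to the AllocateBatch() caller.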
@@ -480,6 +452,11 @@ template <const uptr kSpaceBeg, const u64 kSpaceSize,
class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
public:
+ typedef typename SizeClassMap::TransferBatch Batch;
+ typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
+ SizeClassMap, MapUnmapCallback> ThisT;
+ typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
+
void Init() {
state_ = reinterpret_cast<State *>(MapWithCallback(sizeof(State)));
}
@@ -500,17 +477,6 @@ class SizeClassAllocator32 {
alignment <= SizeClassMap::kMaxSize;
}
- void *Allocate(uptr size, uptr alignment) {
- if (size < alignment) size = alignment;
- CHECK(CanAllocate(size, alignment));
- return AllocateBySizeClass(ClassID(size));
- }
-
- void Deallocate(void *p) {
- CHECK(PointerIsMine(p));
- DeallocateBySizeClass(p, GetSizeClass(p));
- }
-
void *GetMetaData(void *p) {
CHECK(PointerIsMine(p));
uptr mem = reinterpret_cast<uptr>(p);
@@ -522,20 +488,23 @@ class SizeClassAllocator32 {
return reinterpret_cast<void*>(meta);
}
- // Allocate several chunks of the given class_id.
- void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
+ Batch *NOINLINE AllocateBatch(AllocatorCache *c, uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
SizeClassInfo *sci = GetSizeClassInfo(class_id);
SpinMutexLock l(&sci->mutex);
- EnsureSizeClassHasAvailableChunks(sci, class_id);
+ if (sci->free_list.empty())
+ PopulateFreeList(c, sci, class_id);
CHECK(!sci->free_list.empty());
- BulkMove(SizeClassMap::MaxCached(class_id), &sci->free_list, free_list);
+ Batch *b = sci->free_list.front();
+ sci->free_list.pop_front();
+ return b;
}
- // Swallow the entire free_list for the given class_id.
- void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
+ void NOINLINE DeallocateBatch(uptr class_id, Batch *b) {
+ CHECK_LT(class_id, kNumClasses);
SizeClassInfo *sci = GetSizeClassInfo(class_id);
SpinMutexLock l(&sci->mutex);
- sci->free_list.append_front(free_list);
+ sci->free_list.push_front(b);
}
bool PointerIsMine(void *p) {
@@ -593,8 +562,8 @@ class SizeClassAllocator32 {
struct SizeClassInfo {
SpinMutex mutex;
- AllocatorFreeList free_list;
- char padding[kCacheLineSize - sizeof(uptr) - sizeof(AllocatorFreeList)];
+ IntrusiveList<Batch> free_list;
+ char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
};
COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
@@ -624,31 +593,28 @@ class SizeClassAllocator32 {
return &state_->size_class_info_array[class_id];
}
- void EnsureSizeClassHasAvailableChunks(SizeClassInfo *sci, uptr class_id) {
- if (!sci->free_list.empty()) return;
+ void PopulateFreeList(AllocatorCache *c, SizeClassInfo *sci, uptr class_id) {
uptr size = SizeClassMap::Size(class_id);
uptr reg = AllocateRegion(class_id);
uptr n_chunks = kRegionSize / (size + kMetadataSize);
- for (uptr i = reg; i < reg + n_chunks * size; i += size)
- sci->free_list.push_back(reinterpret_cast<AllocatorListNode*>(i));
- }
-
- void *AllocateBySizeClass(uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- SizeClassInfo *sci = GetSizeClassInfo(class_id);
- SpinMutexLock l(&sci->mutex);
- EnsureSizeClassHasAvailableChunks(sci, class_id);
- CHECK(!sci->free_list.empty());
- AllocatorListNode *node = sci->free_list.front();
- sci->free_list.pop_front();
- return reinterpret_cast<void*>(node);
- }
-
- void DeallocateBySizeClass(void *p, uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- SizeClassInfo *sci = GetSizeClassInfo(class_id);
- SpinMutexLock l(&sci->mutex);
- sci->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
+ uptr max_count = SizeClassMap::MaxCached(class_id);
+ Batch *b = 0;
+ for (uptr i = reg; i < reg + n_chunks * size; i += size) {
+ if (b == 0) {
+ if (class_id < SizeClassMap::kMinBatchClass)
+ b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
+ else
+ b = (Batch*)i;
+ b->count = 0;
+ }
+ b->batch[b->count++] = (void*)i;
+ if (b->count == max_count) {
+ sci->free_list.push_back(b);
+ b = 0;
+ }
+ }
+ if (b)
+ sci->free_list.push_back(b);
}
struct State {
@@ -658,13 +624,14 @@ class SizeClassAllocator32 {
State *state_;
};
-// Objects of this type should be used as local caches for SizeClassAllocator64.
-// Since the typical use of this class is to have one object per thread in TLS,
-// is has to be POD.
+// Objects of this type should be used as local caches for SizeClassAllocator64
+// or SizeClassAllocator32. Since the typical use of this class is to have one
+// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
typedef SizeClassAllocator Allocator;
static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
+
// Don't need to call Init if the object is a global (i.e. zero-initialized).
void Init() {
internal_memset(this, 0, sizeof(*this));
@@ -673,46 +640,77 @@ struct SizeClassAllocatorLocalCache {
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
- AllocatorFreeList *free_list = &free_lists_[class_id];
- if (free_list->empty())
- allocator->BulkAllocate(class_id, free_list);
- CHECK(!free_list->empty());
- void *res = free_list->front();
- free_list->pop_front();
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == 0))
+ Refill(allocator, class_id);
+ void *res = c->batch[--c->count];
+ PREFETCH(c->batch[c->count - 1]);
return res;
}
void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
- AllocatorFreeList *free_list = &free_lists_[class_id];
- free_list->push_front(reinterpret_cast<AllocatorListNode*>(p));
- if (free_list->size() >= 2 * SizeClassMap::MaxCached(class_id))
- DrainHalf(allocator, class_id);
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == c->max_count))
+ Drain(allocator, class_id);
+ c->batch[c->count++] = p;
}
void Drain(SizeClassAllocator *allocator) {
- for (uptr i = 0; i < kNumClasses; i++) {
- allocator->BulkDeallocate(i, &free_lists_[i]);
- CHECK(free_lists_[i].empty());
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
+ PerClass *c = &per_class_[class_id];
+ while (c->count > 0)
+ Drain(allocator, class_id);
}
}
// private:
typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
- AllocatorFreeList free_lists_[kNumClasses];
-
- void DrainHalf(SizeClassAllocator *allocator, uptr class_id) {
- AllocatorFreeList *free_list = &free_lists_[class_id];
- AllocatorFreeList half;
- half.clear();
- const uptr count = free_list->size() / 2;
- for (uptr i = 0; i < count; i++) {
- AllocatorListNode *node = free_list->front();
- free_list->pop_front();
- half.push_front(node);
+ typedef typename SizeClassMap::TransferBatch Batch;
+ struct PerClass {
+ uptr count;
+ uptr max_count;
+ void *batch[2 * SizeClassMap::kMaxNumCached];
+ };
+ PerClass per_class_[kNumClasses];
+
+ void InitCache() {
+ if (per_class_[0].max_count)
+ return;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ c->max_count = 2 * SizeClassMap::MaxCached(i);
+ }
+ }
+
+ void NOINLINE Refill(SizeClassAllocator *allocator, uptr class_id) {
+ InitCache();
+ PerClass *c = &per_class_[class_id];
+ Batch *b = allocator->AllocateBatch(this, class_id);
+ for (uptr i = 0; i < b->count; i++)
+ c->batch[i] = b->batch[i];
+ c->count = b->count;
+ if (class_id < SizeClassMap::kMinBatchClass)
+ Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
+ }
+
+ void NOINLINE Drain(SizeClassAllocator *allocator, uptr class_id) {
+ InitCache();
+ PerClass *c = &per_class_[class_id];
+ Batch *b;
+ if (class_id < SizeClassMap::kMinBatchClass)
+ b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
+ else
+ b = (Batch*)c->batch[0];
+ uptr cnt = Min(c->max_count / 2, c->count);
+ for (uptr i = 0; i < cnt; i++) {
+ b->batch[i] = c->batch[i];
+ c->batch[i] = c->batch[i + c->max_count / 2];
}
- allocator->BulkDeallocate(class_id, &half);
+ b->count = cnt;
+ c->count -= cnt;
+ allocator->DeallocateBatch(class_id, b);
}
};
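The reworked cache above stores up to 2 * MaxCached(class_id) raw pointers per size class in a flat array instead of a linked list; sizing the array at twice the transfer batch keeps Refill and Drain from ping-ponging a full batch straight back to the allocator. A condensed sketch of the two fast paths (simplified, non-template, checks omitted):

struct PerClass {
  uptr count, max_count;        // max_count == 2 * MaxCached(class_id)
  void *batch[2 * 256];
};

void *CacheAllocate(PerClass *c) {
  // Refill() copies one whole TransferBatch into batch[] when empty.
  return c->batch[--c->count];  // plus a PREFETCH of the next entry
}

void CacheDeallocate(PerClass *c, void *p) {
  // Drain() packages half of batch[] into a TransferBatch and pushes it
  // to the central free list when the array is full.
  c->batch[c->count++] = p;
}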
@@ -726,6 +724,7 @@ class LargeMmapAllocator {
internal_memset(this, 0, sizeof(*this));
page_size_ = GetPageSizeCached();
}
+
void *Allocate(uptr size, uptr alignment) {
CHECK(IsPowerOfTwo(alignment));
uptr map_size = RoundUpMapSize(size);
@@ -745,6 +744,8 @@ class LargeMmapAllocator {
h->size = size;
h->map_beg = map_beg;
h->map_size = map_size;
+ uptr size_log = SANITIZER_WORDSIZE - __builtin_clzl(map_size) - 1;
+ CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
{
SpinMutexLock l(&mutex_);
uptr idx = n_chunks_++;
@@ -754,6 +755,7 @@ class LargeMmapAllocator {
stats.n_allocs++;
stats.currently_allocated += map_size;
stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
+ stats.by_size_log[size_log]++;
}
return reinterpret_cast<void*>(res);
}
@@ -825,9 +827,15 @@ class LargeMmapAllocator {
void PrintStats() {
Printf("Stats: LargeMmapAllocator: allocated %zd times, "
- "remains %zd (%zd K) max %zd M\n",
+ "remains %zd (%zd K) max %zd M; by size logs: ",
stats.n_allocs, stats.n_allocs - stats.n_frees,
stats.currently_allocated >> 10, stats.max_allocated >> 20);
+ for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
+ uptr c = stats.by_size_log[i];
+ if (!c) continue;
+ Printf("%zd:%zd; ", i, c);
+ }
+ Printf("\n");
}
private:
@@ -858,7 +866,7 @@ class LargeMmapAllocator {
Header *chunks_[kMaxNumChunks];
uptr n_chunks_;
struct Stats {
- uptr n_allocs, n_frees, currently_allocated, max_allocated;
+ uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats;
SpinMutex mutex_;
};
@@ -888,14 +896,10 @@ class CombinedAllocator {
if (alignment > 8)
size = RoundUpTo(size, alignment);
void *res;
- if (primary_.CanAllocate(size, alignment)) {
- if (cache) // Allocate from cache.
- res = cache->Allocate(&primary_, primary_.ClassID(size));
- else // No thread-local cache, allocate directly from primary allocator.
- res = primary_.Allocate(size, alignment);
- } else { // Secondary allocator does not use cache.
+ if (primary_.CanAllocate(size, alignment))
+ res = cache->Allocate(&primary_, primary_.ClassID(size));
+ else
res = secondary_.Allocate(size, alignment);
- }
if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
if (cleared && res)
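A sketch of how a tool wires the pieces together after this change; the address-space constants and helper names are invented for illustration, real tools choose shadow-compatible values:

typedef SizeClassAllocator64<0x700000000000ULL /* kSpaceBeg */,
                             0x010000000000ULL /* kSpaceSize */,
                             16 /* kMetadataSize */,
                             DefaultSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;

static PrimaryAllocator primary;
static THREADLOCAL AllocatorCache cache;   // POD: zero-init suffices

void *my_alloc(uptr size) {
  return cache.Allocate(&primary, PrimaryAllocator::ClassID(size));
}

void my_free(void *p) {
  cache.Deallocate(&primary, primary.GetSizeClass(p), p);
}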
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h b/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h
index 68e79f6a2f1..bb4611d51e6 100644
--- a/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic_clang.h
@@ -39,6 +39,7 @@ INLINE typename T::Type atomic_load(
| memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
+ // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
if (mo == memory_order_relaxed) {
v = a->val_dont_use;
} else {
@@ -54,6 +55,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
+ // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
if (mo == memory_order_relaxed) {
a->val_dont_use = v;
} else {
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h b/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
index 55e00e2204c..919e24f3b11 100644
--- a/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
@@ -70,6 +70,7 @@ INLINE typename T::Type atomic_load(
| memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
typename T::Type v;
+ // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
if (mo == memory_order_relaxed) {
v = a->val_dont_use;
} else {
@@ -85,6 +86,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
+ // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
if (mo == memory_order_relaxed) {
a->val_dont_use = v;
} else {
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.cc b/libsanitizer/sanitizer_common/sanitizer_common.cc
index 96e8808f6d1..9b70ee0eb54 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_common.cc
@@ -21,10 +21,16 @@ uptr GetPageSizeCached() {
return PageSize;
}
-// By default, dump to stderr. If report_fd is kInvalidFd, try to obtain file
-// descriptor by opening file in report_path.
+static bool log_to_file = false; // Set to true by __sanitizer_set_report_path
+
+// By default, dump to stderr. If |log_to_file| is true and |report_fd_pid|
+// isn't equal to the current PID, try to obtain a file descriptor by opening
+// file "report_path_prefix.<PID>".
static fd_t report_fd = kStderrFd;
-static char report_path[4096]; // Set via __sanitizer_set_report_path.
+static char report_path_prefix[4096]; // Set via __sanitizer_set_report_path.
+// PID of process that opened |report_fd|. If a fork() occurs, the PID of the
+// child process will be different from |report_fd_pid|.
+static int report_fd_pid = 0;
static void (*DieCallback)(void);
void SetDieCallback(void (*callback)(void)) {
@@ -48,21 +54,29 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
if (CheckFailedCallback) {
CheckFailedCallback(file, line, cond, v1, v2);
}
- Report("Sanitizer CHECK failed: %s:%d %s (%zd, %zd)\n", file, line, cond,
- v1, v2);
+ Report("Sanitizer CHECK failed: %s:%d %s (%lld, %lld)\n", file, line, cond,
+ v1, v2);
Die();
}
static void MaybeOpenReportFile() {
- if (report_fd != kInvalidFd)
- return;
- fd_t fd = internal_open(report_path, true);
+ if (!log_to_file || (report_fd_pid == GetPid())) return;
+ InternalScopedBuffer<char> report_path_full(4096);
+ internal_snprintf(report_path_full.data(), report_path_full.size(),
+ "%s.%d", report_path_prefix, GetPid());
+ fd_t fd = internal_open(report_path_full.data(), true);
if (fd == kInvalidFd) {
report_fd = kStderrFd;
- Report("ERROR: Can't open file: %s\n", report_path);
+ log_to_file = false;
+ Report("ERROR: Can't open file: %s\n", report_path_full.data());
Die();
}
+ if (report_fd != kInvalidFd) {
+ // We're in the child. Close the parent's log.
+ internal_close(report_fd);
+ }
report_fd = fd;
+ report_fd_pid = GetPid();
}
bool PrintsToTty() {
@@ -182,14 +196,16 @@ extern "C" {
void __sanitizer_set_report_path(const char *path) {
if (!path) return;
uptr len = internal_strlen(path);
- if (len > sizeof(report_path) - 100) {
+ if (len > sizeof(report_path_prefix) - 100) {
Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
path[0], path[1], path[2], path[3],
path[4], path[5], path[6], path[7]);
Die();
}
- internal_snprintf(report_path, sizeof(report_path), "%s.%d", path, GetPid());
+ internal_strncpy(report_path_prefix, path, sizeof(report_path_prefix));
+ report_path_prefix[len] = '\0';
report_fd = kInvalidFd;
+ log_to_file = true;
}
void __sanitizer_set_report_fd(int fd) {
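The effect of the reworked report-path logic above, as a usage timeline (the path and PIDs are illustrative):

// __sanitizer_set_report_path("/tmp/asan.log");  // stores the prefix only
// first Report() in pid 1234 -> opens /tmp/asan.log.1234
// fork(); first Report() in the child, pid 1240:
//   report_fd_pid (1234) != GetPid() (1240), so the child closes the
//   inherited descriptor and opens /tmp/asan.log.1240 instead.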
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.h b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.h
deleted file mode 100644
index 97c6b6f7beb..00000000000
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.h
+++ /dev/null
@@ -1,77 +0,0 @@
-//===-- sanitizer_common_interceptors.h -------------------------*- C++ -*-===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Common function interceptors for tools like AddressSanitizer,
-// ThreadSanitizer, MemorySanitizer, etc.
-//
-// This file should be included into the tool's interceptor file,
-// which has to define it's own macros:
-// COMMON_INTERCEPTOR_ENTER
-// COMMON_INTERCEPTOR_READ_RANGE
-// COMMON_INTERCEPTOR_WRITE_RANGE
-//
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_COMMON_INTERCEPTORS_H
-#define SANITIZER_COMMON_INTERCEPTORS_H
-
-#include "interception/interception.h"
-#include "sanitizer_platform_interceptors.h"
-
-#if SANITIZER_INTERCEPT_READ
-INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
- COMMON_INTERCEPTOR_ENTER(read, fd, ptr, count);
- SSIZE_T res = REAL(read)(fd, ptr, count);
- if (res > 0)
- COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
- return res;
-}
-#endif
-
-#if SANITIZER_INTERCEPT_PREAD
-INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
- COMMON_INTERCEPTOR_ENTER(pread, fd, ptr, count, offset);
- SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
- if (res > 0)
- COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
- return res;
-}
-#endif
-
-#if SANITIZER_INTERCEPT_PREAD64
-INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
- COMMON_INTERCEPTOR_ENTER(pread64, fd, ptr, count, offset);
- SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
- if (res > 0)
- COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
- return res;
-}
-#endif
-
-#if SANITIZER_INTERCEPT_READ
-# define INIT_READ INTERCEPT_FUNCTION(read)
-#else
-# define INIT_READ
-#endif
-
-#if SANITIZER_INTERCEPT_PREAD
-# define INIT_PREAD INTERCEPT_FUNCTION(pread)
-#else
-# define INIT_PREAD
-#endif
-
-#if SANITIZER_INTERCEPT_PREAD64
-# define INIT_PREAD64 INTERCEPT_FUNCTION(pread64)
-#else
-# define INIT_PREAD64
-#endif
-
-#define SANITIZER_COMMON_INTERCEPTORS_INIT \
- INIT_READ; \
- INIT_PREAD; \
- INIT_PREAD64; \
-
-#endif // SANITIZER_COMMON_INTERCEPTORS_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
new file mode 100644
index 00000000000..724a326ef4c
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
@@ -0,0 +1,222 @@
+//===-- sanitizer_common_interceptors.inc -----------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common function interceptors for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// This file should be included into the tool's interceptor file,
+// which has to define its own macros:
+// COMMON_INTERCEPTOR_ENTER
+// COMMON_INTERCEPTOR_READ_RANGE
+// COMMON_INTERCEPTOR_WRITE_RANGE
+// COMMON_INTERCEPTOR_FD_ACQUIRE
+// COMMON_INTERCEPTOR_FD_RELEASE
+// COMMON_INTERCEPTOR_SET_THREAD_NAME
+//===----------------------------------------------------------------------===//
+#include "interception/interception.h"
+#include "sanitizer_platform_interceptors.h"
+
+#include <stdarg.h>
+
+#if SANITIZER_INTERCEPT_READ
+INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, read, fd, ptr, count);
+ SSIZE_T res = REAL(read)(fd, ptr, count);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
+ if (res >= 0 && fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+# define INIT_READ INTERCEPT_FUNCTION(read)
+#else
+# define INIT_READ
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD
+INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pread, fd, ptr, count, offset);
+ SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
+ if (res >= 0 && fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+# define INIT_PREAD INTERCEPT_FUNCTION(pread)
+#else
+# define INIT_PREAD
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD64
+INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pread64, fd, ptr, count, offset);
+ SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
+ if (res >= 0 && fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+# define INIT_PREAD64 INTERCEPT_FUNCTION(pread64)
+#else
+# define INIT_PREAD64
+#endif
+
+#if SANITIZER_INTERCEPT_WRITE
+INTERCEPTOR(SSIZE_T, write, int fd, void *ptr, SIZE_T count) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, write, fd, ptr, count);
+ if (fd >= 0)
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(write)(fd, ptr, count);
+ if (res > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
+ return res;
+}
+# define INIT_WRITE INTERCEPT_FUNCTION(write)
+#else
+# define INIT_WRITE
+#endif
+
+#if SANITIZER_INTERCEPT_PWRITE
+INTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pwrite, fd, ptr, count);
+ if (fd >= 0)
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(pwrite)(fd, ptr, count);
+ if (res > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
+ return res;
+}
+# define INIT_PWRITE INTERCEPT_FUNCTION(pwrite)
+#else
+# define INIT_PWRITE
+#endif
+
+#if SANITIZER_INTERCEPT_PWRITE64
+INTERCEPTOR(SSIZE_T, pwrite64, int fd, void *ptr, OFF64_T count) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pwrite64, fd, ptr, count);
+ if (fd >= 0)
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(pwrite64)(fd, ptr, count);
+ if (res > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
+ return res;
+}
+# define INIT_PWRITE64 INTERCEPT_FUNCTION(pwrite64)
+#else
+# define INIT_PWRITE64
+#endif
+
+#if SANITIZER_INTERCEPT_PRCTL
+INTERCEPTOR(int, prctl, int option,
+ unsigned long arg2, unsigned long arg3, // NOLINT
+ unsigned long arg4, unsigned long arg5) { // NOLINT
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5);
+ static const int PR_SET_NAME = 15;
+ int res = REAL(prctl(option, arg2, arg3, arg4, arg5));
+ if (option == PR_SET_NAME) {
+ char buff[16];
+ internal_strncpy(buff, (char*)arg2, 15);
+ buff[15] = 0;
+ COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, buff);
+ }
+ return res;
+}
+# define INIT_PRCTL INTERCEPT_FUNCTION(prctl)
+#else
+# define INIT_PRCTL
+#endif // SANITIZER_INTERCEPT_PRCTL
+
+
+#if SANITIZER_INTERCEPT_SCANF
+
+#include "sanitizer_common_interceptors_scanf.inc"
+
+INTERCEPTOR(int, vscanf, const char *format, va_list ap) { // NOLINT
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, vscanf, format, ap);
+ scanf_common(ctx, format, ap);
+ int res = REAL(vscanf)(format, ap); // NOLINT
+ return res;
+}
+
+INTERCEPTOR(int, vsscanf, const char *str, const char *format, // NOLINT
+ va_list ap) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, vsscanf, str, format, ap);
+ scanf_common(ctx, format, ap);
+ int res = REAL(vsscanf)(str, format, ap); // NOLINT
+ // FIXME: read of str
+ return res;
+}
+
+INTERCEPTOR(int, vfscanf, void *stream, const char *format, // NOLINT
+ va_list ap) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, vfscanf, stream, format, ap);
+ scanf_common(ctx, format, ap);
+ int res = REAL(vfscanf)(stream, format, ap); // NOLINT
+ return res;
+}
+
+INTERCEPTOR(int, scanf, const char *format, ...) { // NOLINT
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, scanf, format);
+ va_list ap;
+ va_start(ap, format);
+ int res = vscanf(format, ap); // NOLINT
+ va_end(ap);
+ return res;
+}
+
+INTERCEPTOR(int, fscanf, void* stream, const char *format, ...) { // NOLINT
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fscanf, stream, format);
+ va_list ap;
+ va_start(ap, format);
+ int res = vfscanf(stream, format, ap); // NOLINT
+ va_end(ap);
+ return res;
+}
+
+INTERCEPTOR(int, sscanf, const char *str, const char *format, ...) { // NOLINT
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sscanf, str, format); // NOLINT
+ va_list ap;
+ va_start(ap, format);
+ int res = vsscanf(str, format, ap); // NOLINT
+ va_end(ap);
+ return res;
+}
+
+#define INIT_SCANF \
+ INTERCEPT_FUNCTION(scanf); \
+ INTERCEPT_FUNCTION(sscanf); /* NOLINT */ \
+ INTERCEPT_FUNCTION(fscanf); \
+ INTERCEPT_FUNCTION(vscanf); \
+ INTERCEPT_FUNCTION(vsscanf); \
+ INTERCEPT_FUNCTION(vfscanf)
+
+#else
+#define INIT_SCANF
+#endif
+
+#define SANITIZER_COMMON_INTERCEPTORS_INIT \
+ INIT_READ; \
+ INIT_PREAD; \
+ INIT_PREAD64; \
+ INIT_PRCTL; \
+ INIT_WRITE; \
+ INIT_SCANF;
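A sketch of the glue a tool supplies before including this file: the macro names are those listed in the header comment, while the tool_* helpers in the bodies are invented stand-ins.

#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
  do { ctx = 0; tool_ensure_inited(); } while (0)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
  tool_check_read(ptr, size)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
  tool_check_write(ptr, size)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd)  // happens-before edge, if any
#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd)
#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
  tool_set_thread_name(name)
#include "sanitizer_common_interceptors.inc"
// ...and in the tool's init function: SANITIZER_COMMON_INTERCEPTORS_INIT;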
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc
new file mode 100644
index 00000000000..f7cab5f0dbb
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_scanf.inc
@@ -0,0 +1,144 @@
+//===-- sanitizer_common_interceptors_scanf.inc -----------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Scanf implementation for use in *Sanitizer interceptors.
+//
+//===----------------------------------------------------------------------===//
+#include <stdarg.h>
+
+#ifdef _WIN32
+#define va_copy(dst, src) ((dst) = (src))
+#endif // _WIN32
+
+struct ScanfSpec {
+ char c;
+ unsigned size;
+};
+
+// One-letter specs.
+static const ScanfSpec scanf_specs[] = {
+ {'p', sizeof(void *)},
+ {'e', sizeof(float)},
+ {'E', sizeof(float)},
+ {'a', sizeof(float)},
+ {'f', sizeof(float)},
+ {'g', sizeof(float)},
+ {'d', sizeof(int)},
+ {'i', sizeof(int)},
+ {'o', sizeof(int)},
+ {'u', sizeof(int)},
+ {'x', sizeof(int)},
+ {'X', sizeof(int)},
+ {'n', sizeof(int)},
+ {'t', sizeof(PTRDIFF_T)},
+ {'z', sizeof(SIZE_T)},
+ {'j', sizeof(INTMAX_T)},
+ {'h', sizeof(short)}
+};
+
+static const unsigned scanf_specs_cnt =
+ sizeof(scanf_specs) / sizeof(scanf_specs[0]);
+
+// %ll?, %L?, %q? specs
+static const ScanfSpec scanf_llspecs[] = {
+ {'e', sizeof(long double)},
+ {'f', sizeof(long double)},
+ {'g', sizeof(long double)},
+ {'d', sizeof(long long)},
+ {'i', sizeof(long long)},
+ {'o', sizeof(long long)},
+ {'u', sizeof(long long)},
+ {'x', sizeof(long long)}
+};
+
+static const unsigned scanf_llspecs_cnt =
+ sizeof(scanf_llspecs) / sizeof(scanf_llspecs[0]);
+
+// %l? specs
+static const ScanfSpec scanf_lspecs[] = {
+ {'e', sizeof(double)},
+ {'f', sizeof(double)},
+ {'g', sizeof(double)},
+ {'d', sizeof(long)},
+ {'i', sizeof(long)},
+ {'o', sizeof(long)},
+ {'u', sizeof(long)},
+ {'x', sizeof(long)},
+ {'X', sizeof(long)},
+};
+
+static const unsigned scanf_lspecs_cnt =
+ sizeof(scanf_lspecs) / sizeof(scanf_lspecs[0]);
+
+static unsigned match_spec(const struct ScanfSpec *spec, unsigned n, char c) {
+ for (unsigned i = 0; i < n; ++i)
+ if (spec[i].c == c)
+ return spec[i].size;
+ return 0;
+}
+
+static void scanf_common(void *ctx, const char *format, va_list ap_const) {
+ va_list aq;
+ va_copy(aq, ap_const);
+
+ const char *p = format;
+ unsigned size;
+
+ while (*p) {
+ if (*p != '%') {
+ ++p;
+ continue;
+ }
+ ++p;
+ if (*p == '*' || *p == '%' || *p == 0) {
+ ++p;
+ continue;
+ }
+ if (*p == '0' || (*p >= '1' && *p <= '9')) {
+ size = internal_atoll(p);
+ // +1 for the \0 at the end
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size + 1);
+ ++p;
+ continue;
+ }
+
+ if (*p == 'L' || *p == 'q') {
+ ++p;
+ size = match_spec(scanf_llspecs, scanf_llspecs_cnt, *p);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
+ continue;
+ }
+
+ if (*p == 'l') {
+ ++p;
+ if (*p == 'l') {
+ ++p;
+ size = match_spec(scanf_llspecs, scanf_llspecs_cnt, *p);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
+ continue;
+ } else {
+ size = match_spec(scanf_lspecs, scanf_lspecs_cnt, *p);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
+ continue;
+ }
+ }
+
+ if (*p == 'h' && *(p + 1) == 'h') {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), sizeof(char));
+ p += 2;
+ continue;
+ }
+
+ size = match_spec(scanf_specs, scanf_specs_cnt, *p);
+ if (size) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size);
+ ++p;
+ continue;
+ }
+ }
+ va_end(aq);
+}
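A worked pass of scanf_common over one format string, with the behavior read straight from the parser above:

// scanf_common(ctx, "%d %10s %llx %*d", ap) marks the following writes:
//   "%d"   -> match_spec(scanf_specs, 'd')   -> sizeof(int) bytes
//   "%10s" -> digit branch: internal_atoll("10s...") == 10
//             -> 10 + 1 bytes (the +1 covers the terminating \0)
//   "%llx" -> "ll" branch -> match_spec(scanf_llspecs, 'x')
//             -> sizeof(long long) bytes
//   "%*d"  -> assignment suppressed: consumes no va_arg, marks nothing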
diff --git a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
index a6795c6720b..01f08f57801 100644
--- a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
+++ b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
@@ -36,6 +36,7 @@ using namespace __sanitizer; // NOLINT
# define UNLIKELY(x) (x)
# define UNUSED
# define USED
+# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */
#else // _MSC_VER
# define ALWAYS_INLINE __attribute__((always_inline))
# define ALIAS(x) __attribute__((alias(x)))
@@ -49,6 +50,12 @@ using namespace __sanitizer; // NOLINT
# define UNLIKELY(x) __builtin_expect(!!(x), 0)
# define UNUSED __attribute__((unused))
# define USED __attribute__((used))
+# if defined(__i386__) || defined(__x86_64__)
+// __builtin_prefetch(x) generates prefetcht0 on x86
+# define PREFETCH(x) __asm__("prefetchnta (%0)" : : "r" (x))
+# else
+# define PREFETCH(x) __builtin_prefetch(x)
+# endif
#endif // _MSC_VER
#if defined(_WIN32)
diff --git a/libsanitizer/sanitizer_common/sanitizer_lfstack.h b/libsanitizer/sanitizer_common/sanitizer_lfstack.h
new file mode 100644
index 00000000000..63fbf066943
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_lfstack.h
@@ -0,0 +1,71 @@
+//===-- sanitizer_lfstack.h --------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Lock-free stack.
+// Uses 32/17 bits as ABA-counter on 32/64-bit platforms.
+// The memory passed to Push() must never be munmap'ed.
+// The type T must contain T *next field.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LFSTACK_H
+#define SANITIZER_LFSTACK_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_common.h"
+#include "sanitizer_atomic.h"
+
+namespace __sanitizer {
+
+template<typename T>
+struct LFStack {
+ void Clear() {
+ atomic_store(&head_, 0, memory_order_relaxed);
+ }
+
+ bool Empty() const {
+ return (atomic_load(&head_, memory_order_relaxed) & kPtrMask) == 0;
+ }
+
+ void Push(T *p) {
+ u64 cmp = atomic_load(&head_, memory_order_relaxed);
+ for (;;) {
+ u64 cnt = (cmp & kCounterMask) + kCounterInc;
+ u64 xch = (u64)(uptr)p | cnt;
+ p->next = (T*)(uptr)(cmp & kPtrMask);
+ if (atomic_compare_exchange_weak(&head_, &cmp, xch,
+ memory_order_release))
+ break;
+ }
+ }
+
+ T *Pop() {
+ u64 cmp = atomic_load(&head_, memory_order_acquire);
+ for (;;) {
+ T *cur = (T*)(uptr)(cmp & kPtrMask);
+ if (cur == 0)
+ return 0;
+ T *nxt = cur->next;
+ u64 cnt = (cmp & kCounterMask);
+ u64 xch = (u64)(uptr)nxt | cnt;
+ if (atomic_compare_exchange_weak(&head_, &cmp, xch,
+ memory_order_acquire))
+ return cur;
+ }
+ }
+
+ // private:
+ static const int kCounterBits = FIRST_32_SECOND_64(32, 17);
+ static const u64 kPtrMask = ((u64)-1) >> kCounterBits;
+ static const u64 kCounterMask = ~kPtrMask;
+ static const u64 kCounterInc = kPtrMask + 1;
+
+ atomic_uint64_t head_;
+};
+}
+
+#endif // #ifndef SANITIZER_LFSTACK_H
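Usage sketch for the new stack; the element type is invented, while the requirements (an intrusive next field, memory that is never munmap'ed) come from the header comment:

struct WorkItem {
  WorkItem *next;    // required: LFStack links through this field
  void *payload;
};

static LFStack<WorkItem> free_items;   // all-zero state == empty stack

void put(WorkItem *w) { free_items.Push(w); }
WorkItem *get() { return free_items.Pop(); }  // returns 0 when empty

// On 64-bit targets the top 17 bits of head_ hold the ABA counter,
// leaving 47 bits for the pointer; on 32-bit the split is 32/32.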
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cc b/libsanitizer/sanitizer_common/sanitizer_linux.cc
index 1d0bf02192c..dc2148f7fc4 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_linux.cc
@@ -32,6 +32,7 @@
#include <unwind.h>
#include <errno.h>
#include <sys/prctl.h>
+#include <linux/futex.h>
// Are we using 32-bit or 64-bit syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
@@ -198,24 +199,31 @@ const char *GetEnv(const char *name) {
return 0; // Not found.
}
-void ReExec() {
- static const int kMaxArgv = 100;
- InternalScopedBuffer<char*> argv(kMaxArgv + 1);
- static char *buff;
+static void ReadNullSepFileToArray(const char *path, char ***arr,
+ int arr_size) {
+ char *buff;
uptr buff_size = 0;
- ReadFileToBuffer("/proc/self/cmdline", &buff, &buff_size, 1024 * 1024);
- argv[0] = buff;
- int argc, i;
- for (argc = 1, i = 1; ; i++) {
+ *arr = (char **)MmapOrDie(arr_size * sizeof(char *), "NullSepFileArray");
+ ReadFileToBuffer(path, &buff, &buff_size, 1024 * 1024);
+ (*arr)[0] = buff;
+ int count, i;
+ for (count = 1, i = 1; ; i++) {
if (buff[i] == 0) {
if (buff[i+1] == 0) break;
- argv[argc] = &buff[i+1];
- CHECK_LE(argc, kMaxArgv); // FIXME: make this more flexible.
- argc++;
+ (*arr)[count] = &buff[i+1];
+ CHECK_LE(count, arr_size - 1); // FIXME: make this more flexible.
+ count++;
}
}
- argv[argc] = 0;
- execv(argv[0], argv.data());
+ (*arr)[count] = 0;
+}
+
+void ReExec() {
+ static const int kMaxArgv = 100, kMaxEnvp = 1000;
+ char **argv, **envp;
+ ReadNullSepFileToArray("/proc/self/cmdline", &argv, kMaxArgv);
+ ReadNullSepFileToArray("/proc/self/environ", &envp, kMaxEnvp);
+ execve(argv[0], argv, envp);
}
void PrepareForSandboxing() {
@@ -366,16 +374,24 @@ bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
}
bool SanitizerSetThreadName(const char *name) {
+#ifdef PR_SET_NAME
return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); // NOLINT
+#else
+ return false;
+#endif
}
bool SanitizerGetThreadName(char *name, int max_len) {
+#ifdef PR_GET_NAME
char buff[17];
if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT
return false;
internal_strncpy(name, buff, max_len);
name[max_len] = 0;
return true;
+#else
+ return false;
+#endif
}
#ifndef SANITIZER_GO
@@ -434,6 +450,32 @@ void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
#endif // #ifndef SANITIZER_GO
+enum MutexState {
+ MtxUnlocked = 0,
+ MtxLocked = 1,
+ MtxSleeping = 2
+};
+
+BlockingMutex::BlockingMutex(LinkerInitialized) {
+ CHECK_EQ(owner_, 0);
+}
+
+void BlockingMutex::Lock() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
+ return;
+ while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked)
+ syscall(__NR_futex, m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
+}
+
+void BlockingMutex::Unlock() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
+ CHECK_NE(v, MtxUnlocked);
+ if (v == MtxSleeping)
+ syscall(__NR_futex, m, FUTEX_WAKE, 1, 0, 0, 0);
+}
+
} // namespace __sanitizer
#endif // __linux__
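The lock above is the classic three-state futex mutex; a summary of its transitions:

// MtxUnlocked -> MtxLocked    uncontended Lock(): one atomic_exchange,
//                             no syscall
// MtxLocked   -> MtxSleeping  contended Lock(): mark that waiters exist,
//                             then sleep in FUTEX_WAIT
// Unlock() exchanges MtxUnlocked in and issues FUTEX_WAKE only when the
// old value was MtxSleeping, so uncontended unlock is also syscall-free.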
diff --git a/libsanitizer/sanitizer_common/sanitizer_list.h b/libsanitizer/sanitizer_common/sanitizer_list.h
index 3df12f550a0..9692e01b8e0 100644
--- a/libsanitizer/sanitizer_common/sanitizer_list.h
+++ b/libsanitizer/sanitizer_common/sanitizer_list.h
@@ -70,6 +70,8 @@ struct IntrusiveList {
void append_front(IntrusiveList<Item> *l) {
CHECK_NE(this, l);
+ if (l->empty())
+ return;
if (empty()) {
*this = *l;
} else if (!l->empty()) {
@@ -82,6 +84,8 @@ struct IntrusiveList {
void append_back(IntrusiveList<Item> *l) {
CHECK_NE(this, l);
+ if (l->empty())
+ return;
if (empty()) {
*this = *l;
} else {
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.cc b/libsanitizer/sanitizer_common/sanitizer_mac.cc
index 0f64b306afb..76bf8670870 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.cc
@@ -28,6 +28,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
+#include <libkern/OSAtomic.h>
namespace __sanitizer {
@@ -265,6 +266,25 @@ bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
}
+BlockingMutex::BlockingMutex(LinkerInitialized) {
+ // We assume that OS_SPINLOCK_INIT is zero
+}
+
+void BlockingMutex::Lock() {
+ CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
+ CHECK(OS_SPINLOCK_INIT == 0);
+ CHECK(owner_ != (uptr)pthread_self());
+ OSSpinLockLock((OSSpinLock*)&opaque_storage_);
+ CHECK(!owner_);
+ owner_ = (uptr)pthread_self();
+}
+
+void BlockingMutex::Unlock() {
+ CHECK(owner_ == (uptr)pthread_self());
+ owner_ = 0;
+ OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
+}
+
} // namespace __sanitizer
#endif // __APPLE__
diff --git a/libsanitizer/sanitizer_common/sanitizer_mutex.h b/libsanitizer/sanitizer_common/sanitizer_mutex.h
index a38a49ae242..27009118e62 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mutex.h
+++ b/libsanitizer/sanitizer_common/sanitizer_mutex.h
@@ -25,11 +25,15 @@ class StaticSpinMutex {
}
void Lock() {
- if (atomic_exchange(&state_, 1, memory_order_acquire) == 0)
+ if (TryLock())
return;
LockSlow();
}
+ bool TryLock() {
+ return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
+ }
+
void Unlock() {
atomic_store(&state_, 0, memory_order_release);
}
@@ -61,6 +65,16 @@ class SpinMutex : public StaticSpinMutex {
void operator=(const SpinMutex&);
};
+class BlockingMutex {
+ public:
+ explicit BlockingMutex(LinkerInitialized);
+ void Lock();
+ void Unlock();
+ private:
+ uptr opaque_storage_[10];
+ uptr owner_; // for debugging
+};
+
template<typename MutexType>
class GenericScopedLock {
public:
@@ -100,6 +114,7 @@ class GenericScopedReadLock {
};
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
+typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
} // namespace __sanitizer
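Usage sketch for the additions above: BlockingMutex gets the same RAII wrapper as the spin mutexes, and TryLock() enables opportunistic single-worker patterns (as sanitizer_quarantine.h below does with recycle_mutex_):

static BlockingMutex blocking_mu(LINKER_INITIALIZED);

void blocking_section() {
  BlockingMutexLock l(&blocking_mu);  // sleeps in the kernel if contended
  // ... work that may itself block or call mmap ...
}

static SpinMutex once_mu;

void maybe_do_work() {
  if (!once_mu.TryLock())
    return;              // another thread is already doing the work
  // ... exclusive work ...
  once_mu.Unlock();
}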
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
index e32206cb6d4..0ca9444fcb8 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
@@ -13,15 +13,24 @@
#include "sanitizer_internal_defs.h"
#if !defined(_WIN32)
-# define SANITIZER_INTERCEPT_READ 1
-# define SANITIZER_INTERCEPT_PREAD 1
+# define SI_NOT_WINDOWS 1
#else
-# define SANITIZER_INTERCEPT_READ 0
-# define SANITIZER_INTERCEPT_PREAD 0
+# define SI_NOT_WINDOWS 0
#endif
#if defined(__linux__) && !defined(ANDROID)
-# define SANITIZER_INTERCEPT_PREAD64 1
+# define SI_LINUX_NOT_ANDROID 1
#else
-# define SANITIZER_INTERCEPT_PREAD64 0
+# define SI_LINUX_NOT_ANDROID 0
#endif
+
+# define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_PWRITE SI_NOT_WINDOWS
+
+# define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID
+# define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID
+# define SANITIZER_INTERCEPT_PRCTL SI_LINUX_NOT_ANDROID
+
+# define SANITIZER_INTERCEPT_SCANF 0
diff --git a/libsanitizer/sanitizer_common/sanitizer_quarantine.h b/libsanitizer/sanitizer_common/sanitizer_quarantine.h
new file mode 100644
index 00000000000..042fba7c1da
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_quarantine.h
@@ -0,0 +1,170 @@
+//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Memory quarantine for AddressSanitizer and potentially other tools.
+// Quarantine caches some specified amount of memory in per-thread caches,
+// then evicts to global FIFO queue. When the queue reaches specified threshold,
+// oldest memory is recycled.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_QUARANTINE_H
+#define SANITIZER_QUARANTINE_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_list.h"
+
+namespace __sanitizer {
+
+template<typename Node> class QuarantineCache;
+
+struct QuarantineBatch {
+ static const uptr kSize = 1024;
+ QuarantineBatch *next;
+ uptr size;
+ uptr count;
+ void *batch[kSize];
+};
+
+// The callback interface is:
+// void Callback::Recycle(Node *ptr);
+// void *cb.Allocate(uptr size);
+// void cb.Deallocate(void *ptr);
+template<typename Callback, typename Node>
+class Quarantine {
+ public:
+ typedef QuarantineCache<Callback> Cache;
+
+ explicit Quarantine(LinkerInitialized)
+ : cache_(LINKER_INITIALIZED) {
+ }
+
+ void Init(uptr size, uptr cache_size) {
+ max_size_ = size;
+ min_size_ = size / 10 * 9; // 90% of max size.
+ max_cache_size_ = cache_size;
+ }
+
+ void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
+ c->Enqueue(cb, ptr, size);
+ if (c->Size() > max_cache_size_)
+ Drain(c, cb);
+ }
+
+ void NOINLINE Drain(Cache *c, Callback cb) {
+ {
+ SpinMutexLock l(&cache_mutex_);
+ cache_.Transfer(c);
+ }
+ if (cache_.Size() > max_size_ && recycle_mutex_.TryLock())
+ Recycle(cb);
+ }
+
+ private:
+ // Read-only data.
+ char pad0_[kCacheLineSize];
+ uptr max_size_;
+ uptr min_size_;
+ uptr max_cache_size_;
+ char pad1_[kCacheLineSize];
+ SpinMutex cache_mutex_;
+ SpinMutex recycle_mutex_;
+ Cache cache_;
+ char pad2_[kCacheLineSize];
+
+ void NOINLINE Recycle(Callback cb) {
+ Cache tmp;
+ {
+ SpinMutexLock l(&cache_mutex_);
+ while (cache_.Size() > min_size_) {
+ QuarantineBatch *b = cache_.DequeueBatch();
+ tmp.EnqueueBatch(b);
+ }
+ }
+ recycle_mutex_.Unlock();
+ DoRecycle(&tmp, cb);
+ }
+
+ void NOINLINE DoRecycle(Cache *c, Callback cb) {
+ while (QuarantineBatch *b = c->DequeueBatch()) {
+ const uptr kPrefetch = 16;
+ for (uptr i = 0; i < kPrefetch; i++)
+ PREFETCH(b->batch[i]);
+ for (uptr i = 0; i < b->count; i++) {
+ PREFETCH(b->batch[i + kPrefetch]);
+ cb.Recycle((Node*)b->batch[i]);
+ }
+ cb.Deallocate(b);
+ }
+ }
+};
+
+// Per-thread cache of memory blocks.
+template<typename Callback>
+class QuarantineCache {
+ public:
+ explicit QuarantineCache(LinkerInitialized) {
+ }
+
+ QuarantineCache()
+ : size_() {
+ list_.clear();
+ }
+
+ uptr Size() const {
+ return atomic_load(&size_, memory_order_relaxed);
+ }
+
+ void Enqueue(Callback cb, void *ptr, uptr size) {
+ if (list_.empty() || list_.back()->count == QuarantineBatch::kSize)
+ AllocBatch(cb);
+ QuarantineBatch *b = list_.back();
+ b->batch[b->count++] = ptr;
+ b->size += size;
+ SizeAdd(size);
+ }
+
+ void Transfer(QuarantineCache *c) {
+ list_.append_back(&c->list_);
+ SizeAdd(c->Size());
+ atomic_store(&c->size_, 0, memory_order_relaxed);
+ }
+
+ void EnqueueBatch(QuarantineBatch *b) {
+ list_.push_back(b);
+ SizeAdd(b->size);
+ }
+
+ QuarantineBatch *DequeueBatch() {
+ if (list_.empty())
+ return 0;
+ QuarantineBatch *b = list_.front();
+ list_.pop_front();
+ SizeAdd(-b->size);
+ return b;
+ }
+
+ private:
+ IntrusiveList<QuarantineBatch> list_;
+ atomic_uintptr_t size_;
+
+ void SizeAdd(uptr add) {
+ atomic_store(&size_, Size() + add, memory_order_relaxed);
+ }
+
+ QuarantineBatch *NOINLINE AllocBatch(Callback cb) {
+ QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
+ b->count = 0;
+ b->size = 0;
+ list_.push_back(b);
+ return b;
+ }
+};
+}
+
+#endif // #ifndef SANITIZER_QUARANTINE_H
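Usage sketch for the quarantine; the callback type is invented, but its interface is exactly the one listed in the header comment:

struct RecycleCb {
  void Recycle(void *ptr);     // really free a quarantined block
  void *Allocate(uptr size);   // backing memory for QuarantineBatch
  void Deallocate(void *ptr);
};

typedef Quarantine<RecycleCb, void> AllocatorQuarantine;
static AllocatorQuarantine quarantine(LINKER_INITIALIZED);
static AllocatorQuarantine::Cache cache;  // one per thread in practice

void init_once() {
  quarantine.Init(/*size=*/ 1 << 26, /*cache_size=*/ 1 << 18);
}

void quarantine_block(void *p, uptr size) {
  // Enqueues locally; drains to the global queue past cache_size, and
  // once the global queue exceeds its max size, recycles the oldest
  // blocks down to 90% of it.
  quarantine.Put(&cache, RecycleCb(), p, size);
}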
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
index 11393e44503..f62acf35f8f 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
@@ -66,7 +66,18 @@ static const char *ExtractInt(const char *str, const char *delims,
char *buff;
const char *ret = ExtractToken(str, delims, &buff);
if (buff != 0) {
- *result = internal_atoll(buff);
+ *result = (int)internal_atoll(buff);
+ }
+ InternalFree(buff);
+ return ret;
+}
+
+static const char *ExtractUptr(const char *str, const char *delims,
+ uptr *result) {
+ char *buff;
+ const char *ret = ExtractToken(str, delims, &buff);
+ if (buff != 0) {
+ *result = (uptr)internal_atoll(buff);
}
InternalFree(buff);
return ret;
@@ -96,66 +107,15 @@ class ExternalSymbolizer {
CHECK_NE(output_fd_, kInvalidFd);
}
- // Returns the number of frames for a given address, or zero if
- // symbolization failed.
- uptr SymbolizeCode(uptr addr, const char *module_name, uptr module_offset,
- AddressInfo *frames, uptr max_frames) {
+ char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
CHECK(module_name);
- // FIXME: Make sure this buffer always has sufficient size to hold
- // large debug info.
- static const int kMaxBufferSize = 4096;
- InternalScopedBuffer<char> buffer(kMaxBufferSize);
- char *buffer_data = buffer.data();
- internal_snprintf(buffer_data, kMaxBufferSize, "%s 0x%zx\n",
- module_name, module_offset);
- if (!writeToSymbolizer(buffer_data, internal_strlen(buffer_data)))
+ internal_snprintf(buffer_, kBufferSize, "%s%s 0x%zx\n",
+ is_data ? "DATA " : "", module_name, module_offset);
+ if (!writeToSymbolizer(buffer_, internal_strlen(buffer_)))
return 0;
-
- if (!readFromSymbolizer(buffer_data, kMaxBufferSize))
+ if (!readFromSymbolizer(buffer_, kBufferSize))
return 0;
- const char *str = buffer_data;
- uptr frame_id;
- CHECK_GT(max_frames, 0);
- for (frame_id = 0; frame_id < max_frames; frame_id++) {
- AddressInfo *info = &frames[frame_id];
- char *function_name = 0;
- str = ExtractToken(str, "\n", &function_name);
- CHECK(function_name);
- if (function_name[0] == '\0') {
- // There are no more frames.
- break;
- }
- info->Clear();
- info->FillAddressAndModuleInfo(addr, module_name, module_offset);
- info->function = function_name;
- // Parse <file>:<line>:<column> buffer.
- char *file_line_info = 0;
- str = ExtractToken(str, "\n", &file_line_info);
- CHECK(file_line_info);
- const char *line_info = ExtractToken(file_line_info, ":", &info->file);
- line_info = ExtractInt(line_info, ":", &info->line);
- line_info = ExtractInt(line_info, "", &info->column);
- InternalFree(file_line_info);
-
- // Functions and filenames can be "??", in which case we write 0
- // to address info to mark that names are unknown.
- if (0 == internal_strcmp(info->function, "??")) {
- InternalFree(info->function);
- info->function = 0;
- }
- if (0 == internal_strcmp(info->file, "??")) {
- InternalFree(info->file);
- info->file = 0;
- }
- }
- if (frame_id == 0) {
- // Make sure we return at least one frame.
- AddressInfo *info = &frames[0];
- info->Clear();
- info->FillAddressAndModuleInfo(addr, module_name, module_offset);
- frame_id = 1;
- }
- return frame_id;
+ return buffer_;
}
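SendCommand folds the old code-only path and the new data path into one wire format: a single request line, "<module> 0x<offset>", prefixed with "DATA " for data queries. A sketch of that framing, assuming only the C library (FormatRequest is a made-up name):

#include <cstdio>
#include <cstring>

// Builds the one-line request the external symbolizer expects:
//   "foo.so 0x1234\n"        -- symbolize code at module offset 0x1234
//   "DATA foo.so 0x1234\n"   -- symbolize a data address instead
static size_t FormatRequest(char *buf, size_t cap, bool is_data,
                            const char *module, size_t offset) {
  snprintf(buf, cap, "%s%s 0x%zx\n", is_data ? "DATA " : "", module, offset);
  return strlen(buf);
}

int main() {
  char buf[256];
  FormatRequest(buf, sizeof buf, false, "libfoo.so", 0x1234);
  fputs(buf, stdout);  // libfoo.so 0x1234
  FormatRequest(buf, sizeof buf, true, "libfoo.so", 0x1234);
  fputs(buf, stdout);  // DATA libfoo.so 0x1234
}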
bool Restart() {
@@ -189,6 +149,7 @@ class ExternalSymbolizer {
}
return true;
}
+
bool writeToSymbolizer(const char *buffer, uptr length) {
if (length == 0)
return true;
@@ -204,6 +165,9 @@ class ExternalSymbolizer {
int input_fd_;
int output_fd_;
+ static const uptr kBufferSize = 16 * 1024;
+ char buffer_[kBufferSize];
+
static const uptr kMaxTimesRestarted = 5;
uptr times_restarted_;
};
@@ -220,30 +184,8 @@ class Symbolizer {
return 0;
const char *module_name = module->full_name();
uptr module_offset = addr - module->base_address();
- uptr actual_frames = 0;
- if (external_symbolizer_ == 0) {
- ReportExternalSymbolizerError(
- "WARNING: Trying to symbolize code, but external "
- "symbolizer is not initialized!\n");
- } else {
- while (true) {
- actual_frames = external_symbolizer_->SymbolizeCode(
- addr, module_name, module_offset, frames, max_frames);
- if (actual_frames > 0) {
- // Symbolization was successful.
- break;
- }
- // Try to restart symbolizer subprocess. If we don't succeed, forget
- // about it and don't try to use it later.
- if (!external_symbolizer_->Restart()) {
- ReportExternalSymbolizerError(
- "WARNING: Failed to use and restart external symbolizer!\n");
- external_symbolizer_ = 0;
- break;
- }
- }
- }
- if (external_symbolizer_ == 0) {
+ const char *str = SendCommand(false, module_name, module_offset);
+ if (str == 0) {
// External symbolizer was not initialized or failed. Fill only data
// about module name and offset.
AddressInfo *info = &frames[0];
@@ -251,17 +193,66 @@ class Symbolizer {
info->FillAddressAndModuleInfo(addr, module_name, module_offset);
return 1;
}
- // Otherwise, the data was filled by external symbolizer.
- return actual_frames;
+ uptr frame_id = 0;
+ for (frame_id = 0; frame_id < max_frames; frame_id++) {
+ AddressInfo *info = &frames[frame_id];
+ char *function_name = 0;
+ str = ExtractToken(str, "\n", &function_name);
+ CHECK(function_name);
+ if (function_name[0] == '\0') {
+ // There are no more frames.
+ break;
+ }
+ info->Clear();
+ info->FillAddressAndModuleInfo(addr, module_name, module_offset);
+ info->function = function_name;
+ // Parse <file>:<line>:<column> buffer.
+ char *file_line_info = 0;
+ str = ExtractToken(str, "\n", &file_line_info);
+ CHECK(file_line_info);
+ const char *line_info = ExtractToken(file_line_info, ":", &info->file);
+ line_info = ExtractInt(line_info, ":", &info->line);
+ line_info = ExtractInt(line_info, "", &info->column);
+ InternalFree(file_line_info);
+
+ // Functions and filenames can be "??", in which case we write 0
+ // to address info to mark that names are unknown.
+ if (0 == internal_strcmp(info->function, "??")) {
+ InternalFree(info->function);
+ info->function = 0;
+ }
+ if (0 == internal_strcmp(info->file, "??")) {
+ InternalFree(info->file);
+ info->file = 0;
+ }
+ }
+ if (frame_id == 0) {
+ // Make sure we return at least one frame.
+ AddressInfo *info = &frames[0];
+ info->Clear();
+ info->FillAddressAndModuleInfo(addr, module_name, module_offset);
+ frame_id = 1;
+ }
+ return frame_id;
}
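The parsing loop moved here from ExternalSymbolizer reads the reply as frame pairs: a function-name line, then a file:line:column line, ending at an empty function name, with "??" denoting unknown. A standalone parser sketch over the same format, using std::istringstream instead of the in-place ExtractToken walk (Frame and ParseReply are illustrative names):

#include <cstdio>
#include <sstream>
#include <string>
#include <vector>

struct Frame {
  std::string function, file;  // empty == unknown ("??")
  int line = 0, column = 0;
};

// Parses pairs of lines "function\nfile:line:column\n" until an empty
// function-name line, mirroring the loop in Symbolizer::SymbolizeCode.
// (Paths containing ':' would need more care than this sketch takes.)
static std::vector<Frame> ParseReply(const std::string &reply) {
  std::vector<Frame> frames;
  std::istringstream in(reply);
  std::string function, file_line;
  while (std::getline(in, function) && !function.empty()) {
    Frame f;
    f.function = (function == "??") ? "" : function;
    if (std::getline(in, file_line)) {
      std::istringstream fl(file_line);
      std::string file;
      std::getline(fl, file, ':');
      f.file = (file == "??") ? "" : file;
      char colon;
      fl >> f.line >> colon >> f.column;
    }
    frames.push_back(f);
  }
  return frames;
}

int main() {
  // Two inlined frames, then the empty terminator line.
  std::vector<Frame> fs =
      ParseReply("inlined_f\na.cc:10:3\nmain\na.cc:42:1\n\n");
  for (const Frame &f : fs)
    printf("%s at %s:%d:%d\n", f.function.c_str(), f.file.c_str(), f.line,
           f.column);
}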
- bool SymbolizeData(uptr addr, AddressInfo *frame) {
+ bool SymbolizeData(uptr addr, DataInfo *info) {
LoadedModule *module = FindModuleForAddress(addr);
if (module == 0)
return false;
const char *module_name = module->full_name();
uptr module_offset = addr - module->base_address();
- frame->FillAddressAndModuleInfo(addr, module_name, module_offset);
+ internal_memset(info, 0, sizeof(*info));
+ info->address = addr;
+ info->module = internal_strdup(module_name);
+ info->module_offset = module_offset;
+ const char *str = SendCommand(true, module_name, module_offset);
+ if (str == 0)
+ return true;
+ str = ExtractToken(str, "\n", &info->name);
+ str = ExtractUptr(str, " ", &info->start);
+ str = ExtractUptr(str, "\n", &info->size);
+ info->start += module->base_address();
return true;
}
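A DATA reply is just two lines: the symbol name, then "<start> <size>", with start given module-relative, hence the final addition of module->base_address(). A sketch, assuming decimal integers in the reply:

#include <cstdio>
#include <sstream>
#include <string>

// Parses the two-line DATA reply "name\nstart size\n" and rebases the
// module-relative start address, as SymbolizeData does above.
static bool ParseDataReply(const std::string &reply, unsigned long base,
                           std::string *name, unsigned long *start,
                           unsigned long *size) {
  std::istringstream in(reply);
  if (!std::getline(in, *name)) return false;
  if (!(in >> *start >> *size)) return false;
  *start += base;  // the reply carries a module offset, not an absolute address
  return true;
}

int main() {
  std::string name;
  unsigned long start, size;
  if (ParseDataReply("g_counter\n4096 8\n", 0x400000, &name, &start, &size))
    printf("%s @ 0x%lx, %lu bytes\n", name.c_str(), start, size);
}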
@@ -276,6 +267,29 @@ class Symbolizer {
}
private:
+ char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
+ if (external_symbolizer_ == 0) {
+ ReportExternalSymbolizerError(
+ "WARNING: Trying to symbolize code, but external "
+ "symbolizer is not initialized!\n");
+ return 0;
+ }
+ for (;;) {
+ char *reply = external_symbolizer_->SendCommand(is_data, module_name,
+ module_offset);
+ if (reply)
+ return reply;
+ // Try to restart symbolizer subprocess. If we don't succeed, forget
+ // about it and don't try to use it later.
+ if (!external_symbolizer_->Restart()) {
+ ReportExternalSymbolizerError(
+ "WARNING: Failed to use and restart external symbolizer!\n");
+ external_symbolizer_ = 0;
+ return 0;
+ }
+ }
+ }
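This private SendCommand centralizes the retry policy that SymbolizeCode previously inlined: retry while the subprocess can be restarted, and drop the external symbolizer for good the first time a restart fails. A generic sketch of that policy (QueryWithRestart is a made-up name):

#include <cstdio>
#include <functional>

// Retry policy mirroring Symbolizer::SendCommand: keep retrying while the
// backend can be restarted; disable the backend for good once restart fails.
template <typename T>
T *QueryWithRestart(std::function<T *()> query, std::function<bool()> restart,
                    bool *backend_alive) {
  if (!*backend_alive) return nullptr;
  for (;;) {
    if (T *reply = query()) return reply;
    if (!restart()) {
      fprintf(stderr, "WARNING: failed to restart backend, disabling it\n");
      *backend_alive = false;  // never consult this backend again
      return nullptr;
    }
  }
}

int main() {
  bool alive = true;
  int attempts = 0;
  char ok[] = "reply";
  // Fails twice, restarts succeed, third query answers.
  char *r = QueryWithRestart<char>(
      [&]() -> char * { return ++attempts < 3 ? nullptr : ok; },
      []() { return true; }, &alive);
  printf("got %s after %d attempts\n", r ? r : "nothing", attempts);
}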
+
LoadedModule *FindModuleForAddress(uptr address) {
if (modules_ == 0) {
modules_ = (LoadedModule*)(symbolizer_allocator.Allocate(
@@ -316,8 +330,8 @@ uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames) {
return symbolizer.SymbolizeCode(address, frames, max_frames);
}
-bool SymbolizeData(uptr address, AddressInfo *frame) {
- return symbolizer.SymbolizeData(address, frame);
+bool SymbolizeData(uptr address, DataInfo *info) {
+ return symbolizer.SymbolizeData(address, info);
}
bool InitializeExternalSymbolizer(const char *path_to_symbolizer) {
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
index 0714b3824fb..b88fa3f655a 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
@@ -51,12 +51,21 @@ struct AddressInfo {
}
};
+struct DataInfo {
+ uptr address;
+ char *module;
+ uptr module_offset;
+ char *name;
+ uptr start;
+ uptr size;
+};
+
// Fills at most "max_frames" elements of "frames" with descriptions
// for a given address (in all inlined functions). Returns the number
// of descriptions actually filled.
// This function should NOT be called from two threads simultaneously.
uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames);
-bool SymbolizeData(uptr address, AddressInfo *frame);
+bool SymbolizeData(uptr address, DataInfo *info);
// Attempts to demangle the provided C++ mangled name.
const char *Demangle(const char *Name);
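One way a caller might consume these declarations, sketched with toy stand-ins since the real entry point lives inside the sanitizer runtime: FakeSymbolizeData below only fabricates what SymbolizeData would fill in, and the heap-allocated strings model the internal_strdup/ExtractToken allocations the caller ends up owning.

#include <cstdio>
#include <cstdlib>
#include <cstring>

typedef unsigned long uptr;  // stand-in for __sanitizer::uptr

// Mirror of the DataInfo struct above; module and name are heap-allocated
// by the symbolizer, so the caller is responsible for releasing them.
struct DataInfo {
  uptr address;
  char *module;
  uptr module_offset;
  char *name;
  uptr start;
  uptr size;
};

static char *Dup(const char *s) {  // portable strdup
  size_t n = strlen(s) + 1;
  char *p = static_cast<char *>(malloc(n));
  memcpy(p, s, n);
  return p;
}

// Stub standing in for __sanitizer::SymbolizeData; it fabricates an answer
// instead of querying the external symbolizer.
static bool FakeSymbolizeData(uptr addr, DataInfo *info) {
  memset(info, 0, sizeof(*info));
  info->address = addr;
  info->module = Dup("libfoo.so");
  info->module_offset = addr - 0x400000;
  info->name = Dup("g_counter");
  info->start = addr & ~7ul;
  info->size = 8;
  return true;
}

int main() {
  DataInfo info;
  if (FakeSymbolizeData(0x401040, &info)) {
    printf("0x%lx is %s+%lu in %s (object starts at 0x%lx, %lu bytes)\n",
           info.address, info.name, info.address - info.start, info.module,
           info.start, info.size);
    free(info.module);
    free(info.name);
  }
}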
diff --git a/libsanitizer/sanitizer_common/sanitizer_win.cc b/libsanitizer/sanitizer_common/sanitizer_win.cc
index f7300a18b60..242b4429bd7 100644
--- a/libsanitizer/sanitizer_common/sanitizer_win.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_win.cc
@@ -18,6 +18,8 @@
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_mutex.h"
namespace __sanitizer {
@@ -224,6 +226,42 @@ int internal_sched_yield() {
return 0;
}
+// ---------------------- BlockingMutex ---------------- {{{1
+enum LockState {
+ LOCK_UNINITIALIZED = 0,
+ LOCK_READY = -1,
+};
+
+BlockingMutex::BlockingMutex(LinkerInitialized li) {
+ // FIXME: see comments in BlockingMutex::Lock() for the details.
+ CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED);
+
+ CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
+ InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
+ owner_ = LOCK_READY;
+}
+
+void BlockingMutex::Lock() {
+ if (owner_ == LOCK_UNINITIALIZED) {
+ // FIXME: hm, global BlockingMutex objects are not initialized?!?
+ // This might be a side effect of the clang+cl+link Frankenbuild...
+ new(this) BlockingMutex((LinkerInitialized)(LINKER_INITIALIZED + 1));
+
+ // FIXME: If it turns out the linker doesn't invoke our
+ // constructors, we should probably manually Lock/Unlock all the global
+ // locks while we're starting in one thread to avoid double-init races.
+ }
+ EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
+ CHECK(owner_ == LOCK_READY);
+ owner_ = GetThreadSelf();
+}
+
+void BlockingMutex::Unlock() {
+ CHECK(owner_ == GetThreadSelf());
+ owner_ = LOCK_READY;
+ LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
+}
+
} // namespace __sanitizer
#endif // _WIN32
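The Windows BlockingMutex above stores a CRITICAL_SECTION in opaque storage and reuses owner_ both as an initialization sentinel (LOCK_UNINITIALIZED/LOCK_READY) and as a reentrancy check. A Windows-only standalone sketch of the same idea; SimpleBlockingMutex is illustrative, and GetCurrentThreadId stands in for the sanitizer's GetThreadSelf:

#include <windows.h>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <new>

class SimpleBlockingMutex {
 public:
  enum : uintptr_t { kUninitialized = 0, kReady = (uintptr_t)-1 };

  SimpleBlockingMutex() {
    static_assert(sizeof(CRITICAL_SECTION) <= sizeof(storage_),
                  "opaque storage too small");
    InitializeCriticalSection(cs());
    owner_ = kReady;
  }

  void Lock() {
    // Lazily recover if a global instance was never constructed (the
    // situation the FIXME above worries about).
    if (owner_ == kUninitialized) new (this) SimpleBlockingMutex();
    EnterCriticalSection(cs());
    assert(owner_ == kReady);  // catches recursive locking by the owner
    owner_ = GetCurrentThreadId();
  }

  void Unlock() {
    assert(owner_ == GetCurrentThreadId());
    owner_ = kReady;
    LeaveCriticalSection(cs());
  }

 private:
  LPCRITICAL_SECTION cs() { return (LPCRITICAL_SECTION)storage_; }
  char storage_[64];  // opaque_storage_ analogue; a CRITICAL_SECTION fits
  uintptr_t owner_;
};

int main() {
  SimpleBlockingMutex mu;
  mu.Lock();
  puts("locked");
  mu.Unlock();
}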