path: root/libsanitizer/sanitizer_common
author     kcc <kcc@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-01-10 12:44:08 +0000
committer  kcc <kcc@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-01-10 12:44:08 +0000
commit     f5ed54288a2a1d1f8d99490f2529fc36b3d2c150 (patch)
tree       23cebf7ab15836f70e055aee309f853c0c377de6 /libsanitizer/sanitizer_common
parent     efc3a86d56685d9e49ef92d2bfb175c1e67f0476 (diff)
download   gcc-f5ed54288a2a1d1f8d99490f2529fc36b3d2c150.tar.gz
libsanitizer merge from upstream r171973
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@195083 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libsanitizer/sanitizer_common')
-rw-r--r--  libsanitizer/sanitizer_common/Makefile.am                        |   1
-rw-r--r--  libsanitizer/sanitizer_common/Makefile.in                        |   8
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator.h              | 656
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h            |  26
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common.cc                |  26
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common.h                 |  18
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_interceptors.h    |  77
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_libc.cc                  |  19
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_libc.h                   |   5
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux.cc                 |  80
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mac.cc                   |   4
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h  |  27
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_posix.cc                 |  19
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_printf.cc                |   2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_report_decorator.h       |  35
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stackdepot.cc            |  10
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stackdepot.h             |   7
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace.cc            |   9
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace.h             |   2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer.h             |   3
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_itanium.cc    |  40
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc        |   4
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_win.cc                   |  13
23 files changed, 950 insertions, 141 deletions
diff --git a/libsanitizer/sanitizer_common/Makefile.am b/libsanitizer/sanitizer_common/Makefile.am
index 708b2a4dc28..cc23dc2425c 100644
--- a/libsanitizer/sanitizer_common/Makefile.am
+++ b/libsanitizer/sanitizer_common/Makefile.am
@@ -22,6 +22,7 @@ sanitizer_common_files = \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_symbolizer.cc \
+ sanitizer_symbolizer_itanium.cc \
sanitizer_symbolizer_linux.cc \
sanitizer_symbolizer_mac.cc \
sanitizer_symbolizer_win.cc \
diff --git a/libsanitizer/sanitizer_common/Makefile.in b/libsanitizer/sanitizer_common/Makefile.in
index d52f42d2610..77b1f1e215d 100644
--- a/libsanitizer/sanitizer_common/Makefile.in
+++ b/libsanitizer/sanitizer_common/Makefile.in
@@ -59,9 +59,9 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_flags.lo sanitizer_libc.lo sanitizer_linux.lo \
sanitizer_mac.lo sanitizer_posix.lo sanitizer_printf.lo \
sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
- sanitizer_symbolizer.lo sanitizer_symbolizer_linux.lo \
- sanitizer_symbolizer_mac.lo sanitizer_symbolizer_win.lo \
- sanitizer_win.lo
+ sanitizer_symbolizer.lo sanitizer_symbolizer_itanium.lo \
+ sanitizer_symbolizer_linux.lo sanitizer_symbolizer_mac.lo \
+ sanitizer_symbolizer_win.lo sanitizer_win.lo
am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
DEFAULT_INCLUDES = -I.@am__isrc@
@@ -236,6 +236,7 @@ sanitizer_common_files = \
sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \
sanitizer_symbolizer.cc \
+ sanitizer_symbolizer_itanium.cc \
sanitizer_symbolizer_linux.cc \
sanitizer_symbolizer_mac.cc \
sanitizer_symbolizer_win.cc \
@@ -345,6 +346,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_itanium.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.h b/libsanitizer/sanitizer_common/sanitizer_allocator.h
index 63107bdbdb0..d0fc315b97e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator.h
@@ -20,76 +20,140 @@
namespace __sanitizer {
-// Maps size class id to size and back.
-template <uptr l0, uptr l1, uptr l2, uptr l3, uptr l4, uptr l5,
- uptr s0, uptr s1, uptr s2, uptr s3, uptr s4,
- uptr c0, uptr c1, uptr c2, uptr c3, uptr c4>
-class SplineSizeClassMap {
- private:
- // Here we use a spline composed of 5 polynomials of oder 1.
- // The first size class is l0, then the classes go with step s0
- // untill they reach l1, after which they go with step s1 and so on.
- // Steps should be powers of two for cheap division.
- // The size of the last size class should be a power of two.
- // There should be at most 256 size classes.
- static const uptr u0 = 0 + (l1 - l0) / s0;
- static const uptr u1 = u0 + (l2 - l1) / s1;
- static const uptr u2 = u1 + (l3 - l2) / s2;
- static const uptr u3 = u2 + (l4 - l3) / s3;
- static const uptr u4 = u3 + (l5 - l4) / s4;
+// SizeClassMap maps allocation sizes into size classes and back.
+// Class 0 corresponds to size 0.
+// Classes 1 - 16 correspond to sizes 8 - 128 (size = class_id * 8).
+// Next 8 classes: 128 + i * 16 (i = 1 to 8).
+// Next 8 classes: 256 + i * 32 (i = 1 to 8).
+// ...
+// Next 8 classes: 2^k + i * 2^(k-3) (i = 1 to 8).
+// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
+//
+// This structure of the size class map gives us:
+// - Efficient table-free class-to-size and size-to-class functions.
+// - The difference between two consecutive size classes is between 6% and 12%.
+//
+// This class also gives a hint to a thread-caching allocator about the number
+// of chunks that need to be cached per thread:
+// - kMaxNumCached is the maximal number of chunks per size class.
+// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
+//
+// Part of output of SizeClassMap::Print():
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 8 diff: +8 00% l 3 cached: 256 2048; id 1
+// c02 => s: 16 diff: +8 100% l 4 cached: 256 4096; id 2
+// ...
+// c07 => s: 56 diff: +8 16% l 5 cached: 256 14336; id 7
+//
+// c08 => s: 64 diff: +8 14% l 6 cached: 256 16384; id 8
+// ...
+// c15 => s: 120 diff: +8 07% l 6 cached: 256 30720; id 15
+//
+// c16 => s: 128 diff: +8 06% l 7 cached: 256 32768; id 16
+// c17 => s: 144 diff: +16 12% l 7 cached: 227 32688; id 17
+// ...
+// c23 => s: 240 diff: +16 07% l 7 cached: 136 32640; id 23
+//
+// c24 => s: 256 diff: +16 06% l 8 cached: 128 32768; id 24
+// c25 => s: 288 diff: +32 12% l 8 cached: 113 32544; id 25
+// ...
+// c31 => s: 480 diff: +32 07% l 8 cached: 68 32640; id 31
+//
+// c32 => s: 512 diff: +32 06% l 9 cached: 64 32768; id 32
- public:
- // The number of size classes should be a power of two for fast division.
- static const uptr kNumClasses = u4 + 1;
- static const uptr kMaxSize = l5;
- static const uptr kMinSize = l0;
- COMPILER_CHECK(kNumClasses <= 256);
- COMPILER_CHECK((kNumClasses & (kNumClasses - 1)) == 0);
- COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);
+template <uptr kMaxSizeLog, uptr kMaxNumCached, uptr kMaxBytesCachedLog>
+class SizeClassMap {
+ static const uptr kMinSizeLog = 3;
+ static const uptr kMidSizeLog = kMinSizeLog + 4;
+ static const uptr kMinSize = 1 << kMinSizeLog;
+ static const uptr kMidSize = 1 << kMidSizeLog;
+ static const uptr kMidClass = kMidSize / kMinSize;
+ static const uptr S = 3;
+ static const uptr M = (1 << S) - 1;
+
+ public:
+ static const uptr kMaxSize = 1 << kMaxSizeLog;
+ static const uptr kNumClasses =
+ kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
+ COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
+ static const uptr kNumClassesRounded =
+ kNumClasses == 32 ? 32 :
+ kNumClasses <= 64 ? 64 :
+ kNumClasses <= 128 ? 128 : 256;
static uptr Size(uptr class_id) {
- if (class_id <= u0) return l0 + s0 * (class_id - 0);
- if (class_id <= u1) return l1 + s1 * (class_id - u0);
- if (class_id <= u2) return l2 + s2 * (class_id - u1);
- if (class_id <= u3) return l3 + s3 * (class_id - u2);
- if (class_id <= u4) return l4 + s4 * (class_id - u3);
- return 0;
+ if (class_id <= kMidClass)
+ return kMinSize * class_id;
+ class_id -= kMidClass;
+ uptr t = kMidSize << (class_id >> S);
+ return t + (t >> S) * (class_id & M);
}
+
static uptr ClassID(uptr size) {
- if (size <= l1) return 0 + (size - l0 + s0 - 1) / s0;
- if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
- if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
- if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
- if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
- return 0;
+ if (size <= kMidSize)
+ return (size + kMinSize - 1) >> kMinSizeLog;
+ if (size > kMaxSize) return 0;
+ uptr l = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size);
+ uptr hbits = (size >> (l - S)) & M;
+ uptr lbits = size & ((1 << (l - S)) - 1);
+ uptr l1 = l - kMidSizeLog;
+ return kMidClass + (l1 << S) + hbits + (lbits > 0);
}
static uptr MaxCached(uptr class_id) {
- if (class_id <= u0) return c0;
- if (class_id <= u1) return c1;
- if (class_id <= u2) return c2;
- if (class_id <= u3) return c3;
- if (class_id <= u4) return c4;
- return 0;
+ if (class_id == 0) return 0;
+ uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
+ return Max(1UL, Min(kMaxNumCached, n));
}
-};
-class DefaultSizeClassMap: public SplineSizeClassMap<
- /* l: */1 << 4, 1 << 9, 1 << 12, 1 << 15, 1 << 18, 1 << 21,
- /* s: */1 << 4, 1 << 6, 1 << 9, 1 << 12, 1 << 15,
- /* c: */256, 64, 16, 4, 1> {
- private:
- COMPILER_CHECK(kNumClasses == 256);
+ static void Print() {
+ uptr prev_s = 0;
+ uptr total_cached = 0;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ uptr s = Size(i);
+ if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
+ Printf("\n");
+ uptr d = s - prev_s;
+ uptr p = prev_s ? (d * 100 / prev_s) : 0;
+ uptr l = SANITIZER_WORDSIZE - 1 - __builtin_clzl(s);
+ uptr cached = MaxCached(i) * s;
+ Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
+ "cached: %zd %zd; id %zd\n",
+ i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
+ total_cached += cached;
+ prev_s = s;
+ }
+ Printf("Total cached: %zd\n", total_cached);
+ }
+
+ static void Validate() {
+ for (uptr c = 1; c < kNumClasses; c++) {
+ // Printf("Validate: c%zd\n", c);
+ uptr s = Size(c);
+ CHECK_EQ(ClassID(s), c);
+ if (c != kNumClasses - 1)
+ CHECK_EQ(ClassID(s + 1), c + 1);
+ CHECK_EQ(ClassID(s - 1), c);
+ if (c)
+ CHECK_GT(Size(c), Size(c-1));
+ }
+ CHECK_EQ(ClassID(kMaxSize + 1), 0);
+
+ for (uptr s = 1; s <= kMaxSize; s++) {
+ uptr c = ClassID(s);
+ // Printf("s%zd => c%zd\n", s, c);
+ CHECK_LT(c, kNumClasses);
+ CHECK_GE(Size(c), s);
+ if (c > 0)
+ CHECK_LT(Size(c-1), s);
+ }
+ }
};
-class CompactSizeClassMap: public SplineSizeClassMap<
- /* l: */1 << 3, 1 << 4, 1 << 7, 1 << 8, 1 << 12, 1 << 15,
- /* s: */1 << 3, 1 << 4, 1 << 7, 1 << 8, 1 << 12,
- /* c: */256, 64, 16, 4, 1> {
- private:
- COMPILER_CHECK(kNumClasses <= 32);
-};
+typedef SizeClassMap<15, 256, 16> DefaultSizeClassMap;
+typedef SizeClassMap<15, 64, 14> CompactSizeClassMap;
+
struct AllocatorListNode {
AllocatorListNode *next;
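
The class-to-size arithmetic in the hunk above is compact enough to check in isolation. A minimal standalone sketch of the same mapping with the default parameters (kMinSizeLog = 3, kMidSizeLog = 7, S = 3; plain stdint types stand in for uptr, and the expected values follow the Print() table above):

// Sketch only: mirrors the SizeClassMap math from the hunk above.
#include <cstdint>
#include <cstdio>

static const uint64_t kMinSizeLog = 3, kMidSizeLog = 7, S = 3;
static const uint64_t kMinSize  = 1ULL << kMinSizeLog;   // 8
static const uint64_t kMidSize  = 1ULL << kMidSizeLog;   // 128
static const uint64_t kMidClass = kMidSize / kMinSize;   // 16
static const uint64_t M = (1ULL << S) - 1;

uint64_t Size(uint64_t class_id) {
  if (class_id <= kMidClass) return kMinSize * class_id;  // linear region
  class_id -= kMidClass;
  uint64_t t = kMidSize << (class_id >> S);               // power-of-two base
  return t + (t >> S) * (class_id & M);                   // + i/8 fractions
}

uint64_t ClassID(uint64_t size) {
  if (size <= kMidSize) return (size + kMinSize - 1) >> kMinSizeLog;
  uint64_t l = 63 - __builtin_clzll(size);                // floor(log2(size))
  uint64_t hbits = (size >> (l - S)) & M;
  uint64_t lbits = size & ((1ULL << (l - S)) - 1);
  return kMidClass + ((l - kMidSizeLog) << S) + hbits + (lbits > 0);
}

int main() {
  printf("Size(17)     = %llu\n", (unsigned long long)Size(17));      // 144
  printf("ClassID(129) = %llu\n", (unsigned long long)ClassID(129));  // 17
}
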
@@ -97,11 +161,45 @@ struct AllocatorListNode {
typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;
+// Move at most max_count chunks from allocate_from to allocate_to.
+// This function would better be a method of AllocatorFreeList, but we can't
+// inherit it from IntrusiveList, as ancient gcc complains about non-PODness.
+static inline uptr BulkMove(uptr max_count,
+ AllocatorFreeList *allocate_from,
+ AllocatorFreeList *allocate_to) {
+ CHECK(!allocate_from->empty());
+ CHECK(allocate_to->empty());
+ uptr res = 0;
+ if (allocate_from->size() <= max_count) {
+ res = allocate_from->size();
+ allocate_to->append_front(allocate_from);
+ CHECK(allocate_from->empty());
+ } else {
+ for (uptr i = 0; i < max_count; i++) {
+ AllocatorListNode *node = allocate_from->front();
+ allocate_from->pop_front();
+ allocate_to->push_front(node);
+ }
+ res = max_count;
+ CHECK(!allocate_from->empty());
+ }
+ CHECK(!allocate_to->empty());
+ return res;
+}
+
+// Allocators call these callbacks on mmap/munmap.
+struct NoOpMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const { }
+ void OnUnmap(uptr p, uptr size) const { }
+};
+
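
Any class exposing OnMap/OnUnmap can be substituted for NoOpMapUnmapCallback via the MapUnmapCallback template parameter used below. A hypothetical logging callback, just to show the shape (the class name is invented):

// Hypothetical: logs every mapping the allocator performs.
struct LoggingMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {
    Printf("mapped   [%p, %p)\n", (void *)p, (void *)(p + size));
  }
  void OnUnmap(uptr p, uptr size) const {
    Printf("unmapped [%p, %p)\n", (void *)p, (void *)(p + size));
  }
};
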
// SizeClassAllocator64 -- allocator for 64-bit address space.
//
// Space: a portion of address space of kSpaceSize bytes starting at
// a fixed address (kSpaceBeg). Both constants are powers of two and
// kSpaceBeg is kSpaceSize-aligned.
+// At the beginning the entire space is mprotect-ed, then small parts of it
+// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
@@ -112,22 +210,35 @@ typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
template <const uptr kSpaceBeg, const uptr kSpaceSize,
- const uptr kMetadataSize, class SizeClassMap>
+ const uptr kMetadataSize, class SizeClassMap,
+ class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator64 {
public:
void Init() {
- CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
- AllocBeg(), AllocSize())));
+ CHECK_EQ(kSpaceBeg,
+ reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize)));
+ MapWithCallback(kSpaceEnd, AdditionalSize());
}
- bool CanAllocate(uptr size, uptr alignment) {
+ void MapWithCallback(uptr beg, uptr size) {
+ CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
+ MapUnmapCallback().OnMap(beg, size);
+ }
+
+ void UnmapWithCallback(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
return size <= SizeClassMap::kMaxSize &&
alignment <= SizeClassMap::kMaxSize;
}
void *Allocate(uptr size, uptr alignment) {
+ if (size < alignment) size = alignment;
CHECK(CanAllocate(size, alignment));
- return AllocateBySizeClass(SizeClassMap::ClassID(size));
+ return AllocateBySizeClass(ClassID(size));
}
void Deallocate(void *p) {
@@ -143,18 +254,8 @@ class SizeClassAllocator64 {
if (region->free_list.empty()) {
PopulateFreeList(class_id, region);
}
- CHECK(!region->free_list.empty());
- uptr count = SizeClassMap::MaxCached(class_id);
- if (region->free_list.size() <= count) {
- free_list->append_front(&region->free_list);
- } else {
- for (uptr i = 0; i < count; i++) {
- AllocatorListNode *node = region->free_list.front();
- region->free_list.pop_front();
- free_list->push_front(node);
- }
- }
- CHECK(!free_list->empty());
+ region->n_allocated += BulkMove(SizeClassMap::MaxCached(class_id),
+ &region->free_list, free_list);
}
// Swallow the entire free_list for the given class_id.
@@ -162,6 +263,7 @@ class SizeClassAllocator64 {
CHECK_LT(class_id, kNumClasses);
RegionInfo *region = GetRegionInfo(class_id);
SpinMutexLock l(&region->mutex);
+ region->n_freed += free_list->size();
region->free_list.append_front(free_list);
}
@@ -170,16 +272,20 @@ class SizeClassAllocator64 {
}
static uptr GetSizeClass(void *p) {
- return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
+ return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClassesRounded;
}
- static void *GetBlockBegin(void *p) {
+ void *GetBlockBegin(void *p) {
uptr class_id = GetSizeClass(p);
uptr size = SizeClassMap::Size(class_id);
uptr chunk_idx = GetChunkIdx((uptr)p, size);
uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
- uptr begin = reg_beg + chunk_idx * size;
- return (void*)begin;
+ uptr beg = chunk_idx * size;
+ uptr next_beg = beg + size;
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user >= next_beg)
+ return reinterpret_cast<void*>(reg_beg + beg);
+ return 0;
}
static uptr GetActuallyAllocatedSize(void *p) {
@@ -206,39 +312,66 @@ class SizeClassAllocator64 {
// Test-only.
void TestOnlyUnmap() {
- UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
+ UnmapWithCallback(kSpaceBeg, kSpaceSize + AdditionalSize());
+ }
+
+ void PrintStats() {
+ uptr total_mapped = 0;
+ uptr n_allocated = 0;
+ uptr n_freed = 0;
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ total_mapped += region->mapped_user;
+ n_allocated += region->n_allocated;
+ n_freed += region->n_freed;
+ }
+ Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
+ "remains %zd\n",
+ total_mapped >> 20, n_allocated, n_allocated - n_freed);
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user == 0) continue;
+ Printf(" %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
+ class_id,
+ SizeClassMap::Size(class_id),
+ region->mapped_user >> 10,
+ region->n_allocated,
+ region->n_allocated - region->n_freed);
+ }
}
- static uptr AllocBeg() { return kSpaceBeg; }
- static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }
-
typedef SizeClassMap SizeClassMapT;
- static const uptr kNumClasses = SizeClassMap::kNumClasses; // 2^k <= 256
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
private:
- static const uptr kRegionSize = kSpaceSize / kNumClasses;
+ static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
+ static const uptr kSpaceEnd = kSpaceBeg + kSpaceSize;
COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
- COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
// kRegionSize must be >= 2^32.
COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
// Populate the free list with at most this number of bytes at once
// or with one element if its size is greater.
- static const uptr kPopulateSize = 1 << 18;
+ static const uptr kPopulateSize = 1 << 15;
+ // Call mmap for user memory with at least this size.
+ static const uptr kUserMapSize = 1 << 15;
+ // Call mmap for metadata memory with at least this size.
+ static const uptr kMetaMapSize = 1 << 16;
struct RegionInfo {
SpinMutex mutex;
AllocatorFreeList free_list;
uptr allocated_user; // Bytes allocated for user memory.
uptr allocated_meta; // Bytes allocated for metadata.
- char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
+ uptr mapped_user; // Bytes mapped for user memory.
+ uptr mapped_meta; // Bytes mapped for metadata.
+ uptr n_allocated, n_freed; // Just stats.
};
- COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);
+ COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
static uptr AdditionalSize() {
- uptr PageSize = GetPageSizeCached();
- uptr res = Max(sizeof(RegionInfo) * kNumClasses, PageSize);
- CHECK_EQ(res % PageSize, 0);
- return res;
+ return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
+ GetPageSizeCached());
}
RegionInfo *GetRegionInfo(uptr class_id) {
@@ -256,11 +389,20 @@ class SizeClassAllocator64 {
}
void PopulateFreeList(uptr class_id, RegionInfo *region) {
+ CHECK(region->free_list.empty());
uptr size = SizeClassMap::Size(class_id);
uptr beg_idx = region->allocated_user;
uptr end_idx = beg_idx + kPopulateSize;
- region->free_list.clear();
uptr region_beg = kSpaceBeg + kRegionSize * class_id;
+ if (end_idx + size > region->mapped_user) {
+ // Do the mmap for the user memory.
+ uptr map_size = kUserMapSize;
+ while (end_idx + size > region->mapped_user + map_size)
+ map_size += kUserMapSize;
+ CHECK_GE(region->mapped_user + map_size, end_idx);
+ MapWithCallback(region_beg + region->mapped_user, map_size);
+ region->mapped_user += map_size;
+ }
uptr idx = beg_idx;
uptr i = 0;
do { // do-while loop because we need to put at least one item.
@@ -270,7 +412,19 @@ class SizeClassAllocator64 {
i++;
} while (idx < end_idx);
region->allocated_user += idx - beg_idx;
+ CHECK_LE(region->allocated_user, region->mapped_user);
region->allocated_meta += i * kMetadataSize;
+ if (region->allocated_meta > region->mapped_meta) {
+ uptr map_size = kMetaMapSize;
+ while (region->allocated_meta > region->mapped_meta + map_size)
+ map_size += kMetaMapSize;
+ // Do the mmap for the metadata.
+ CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
+ MapWithCallback(region_beg + kRegionSize -
+ region->mapped_meta - map_size, map_size);
+ region->mapped_meta += map_size;
+ }
+ CHECK_LE(region->allocated_meta, region->mapped_meta);
if (region->allocated_user + region->allocated_meta > kRegionSize) {
Printf("Out of memory. Dying.\n");
Printf("The process has exhausted %zuMB for size class %zu.\n",
@@ -289,6 +443,7 @@ class SizeClassAllocator64 {
CHECK(!region->free_list.empty());
AllocatorListNode *node = region->free_list.front();
region->free_list.pop_front();
+ region->n_allocated++;
return reinterpret_cast<void*>(node);
}
@@ -296,7 +451,211 @@ class SizeClassAllocator64 {
RegionInfo *region = GetRegionInfo(class_id);
SpinMutexLock l(&region->mutex);
region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
+ region->n_freed++;
+ }
+};
+
+// SizeClassAllocator32 -- allocator for 32-bit address space.
+// This allocator can theoretically be used on 64-bit arch, but there it is less
+// efficient than SizeClassAllocator64.
+//
+// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
+// be returned by MmapOrDie().
+//
+// Region:
+// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+// Since the regions are aligned by kRegionSize, there are exactly
+// kNumPossibleRegions possible regions in the address space and so we keep
+// a u8 array possible_regions[kNumPossibleRegions] to store the size classes.
+// 0 size class means the region is not used by the allocator.
+//
+// One Region is used to allocate chunks of a single size class.
+// A Region looks like this:
+// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
+//
+// In order to avoid false sharing, the objects of this class should be
+// cache-line aligned.
+template <const uptr kSpaceBeg, const u64 kSpaceSize,
+ const uptr kMetadataSize, class SizeClassMap,
+ class MapUnmapCallback = NoOpMapUnmapCallback>
+class SizeClassAllocator32 {
+ public:
+ void Init() {
+ state_ = reinterpret_cast<State *>(MapWithCallback(sizeof(State)));
+ }
+
+ void *MapWithCallback(uptr size) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ void *res = MmapOrDie(size, "SizeClassAllocator32");
+ MapUnmapCallback().OnMap((uptr)res, size);
+ return res;
+ }
+ void UnmapWithCallback(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ void *Allocate(uptr size, uptr alignment) {
+ if (size < alignment) size = alignment;
+ CHECK(CanAllocate(size, alignment));
+ return AllocateBySizeClass(ClassID(size));
+ }
+
+ void Deallocate(void *p) {
+ CHECK(PointerIsMine(p));
+ DeallocateBySizeClass(p, GetSizeClass(p));
+ }
+
+ void *GetMetaData(void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = SizeClassMap::Size(GetSizeClass(p));
+ u32 offset = mem - beg;
+ uptr n = offset / (u32)size; // 32-bit division
+ uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
+ return reinterpret_cast<void*>(meta);
+ }
+
+ // Allocate several chunks of the given class_id.
+ void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ EnsureSizeClassHasAvailableChunks(sci, class_id);
+ CHECK(!sci->free_list.empty());
+ BulkMove(SizeClassMap::MaxCached(class_id), &sci->free_list, free_list);
+ }
+
+ // Swallow the entire free_list for the given class_id.
+ void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ sci->free_list.append_front(free_list);
+ }
+
+ bool PointerIsMine(void *p) {
+ return GetSizeClass(p) != 0;
+ }
+
+ uptr GetSizeClass(void *p) {
+ return state_->possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
+ }
+
+ void *GetBlockBegin(void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = SizeClassMap::Size(GetSizeClass(p));
+ u32 offset = mem - beg;
+ u32 n = offset / (u32)size; // 32-bit division
+ uptr res = beg + (n * (u32)size);
+ return reinterpret_cast<void*>(res);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return SizeClassMap::Size(GetSizeClass(p));
+ }
+
+ uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ uptr TotalMemoryUsed() {
+ // No need to lock here.
+ uptr res = 0;
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (state_->possible_regions[i])
+ res += kRegionSize;
+ return res;
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (state_->possible_regions[i])
+ UnmapWithCallback((i * kRegionSize), kRegionSize);
+ UnmapWithCallback(reinterpret_cast<uptr>(state_), sizeof(State));
+ }
+
+ void PrintStats() {
+ }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+
+ private:
+ static const uptr kRegionSizeLog = SANITIZER_WORDSIZE == 64 ? 24 : 20;
+ static const uptr kRegionSize = 1 << kRegionSizeLog;
+ static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
+
+ struct SizeClassInfo {
+ SpinMutex mutex;
+ AllocatorFreeList free_list;
+ char padding[kCacheLineSize - sizeof(uptr) - sizeof(AllocatorFreeList)];
+ };
+ COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
+
+ uptr ComputeRegionId(uptr mem) {
+ uptr res = mem >> kRegionSizeLog;
+ CHECK_LT(res, kNumPossibleRegions);
+ return res;
+ }
+
+ uptr ComputeRegionBeg(uptr mem) {
+ return mem & ~(kRegionSize - 1);
+ }
+
+ uptr AllocateRegion(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
+ "SizeClassAllocator32"));
+ MapUnmapCallback().OnMap(res, kRegionSize);
+ CHECK_EQ(0U, (res & (kRegionSize - 1)));
+ CHECK_EQ(0U, state_->possible_regions[ComputeRegionId(res)]);
+ state_->possible_regions[ComputeRegionId(res)] = class_id;
+ return res;
+ }
+
+ SizeClassInfo *GetSizeClassInfo(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ return &state_->size_class_info_array[class_id];
+ }
+
+ void EnsureSizeClassHasAvailableChunks(SizeClassInfo *sci, uptr class_id) {
+ if (!sci->free_list.empty()) return;
+ uptr size = SizeClassMap::Size(class_id);
+ uptr reg = AllocateRegion(class_id);
+ uptr n_chunks = kRegionSize / (size + kMetadataSize);
+ for (uptr i = reg; i < reg + n_chunks * size; i += size)
+ sci->free_list.push_back(reinterpret_cast<AllocatorListNode*>(i));
+ }
+
+ void *AllocateBySizeClass(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ EnsureSizeClassHasAvailableChunks(sci, class_id);
+ CHECK(!sci->free_list.empty());
+ AllocatorListNode *node = sci->free_list.front();
+ sci->free_list.pop_front();
+ return reinterpret_cast<void*>(node);
}
+
+ void DeallocateBySizeClass(void *p, uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ sci->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
+ }
+
+ struct State {
+ u8 possible_regions[kNumPossibleRegions];
+ SizeClassInfo size_class_info_array[kNumClasses];
+ };
+ State *state_;
};
// Objects of this type should be used as local caches for SizeClassAllocator64.
@@ -312,6 +671,7 @@ struct SizeClassAllocatorLocalCache {
}
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+ CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
AllocatorFreeList *free_list = &free_lists_[class_id];
if (free_list->empty())
@@ -323,6 +683,7 @@ struct SizeClassAllocatorLocalCache {
}
void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+ CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
AllocatorFreeList *free_list = &free_lists_[class_id];
free_list->push_front(reinterpret_cast<AllocatorListNode*>(p));
@@ -358,6 +719,7 @@ struct SizeClassAllocatorLocalCache {
// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
+template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
public:
void Init() {
@@ -372,6 +734,7 @@ class LargeMmapAllocator {
if (map_size < size) return 0; // Overflow.
uptr map_beg = reinterpret_cast<uptr>(
MmapOrDie(map_size, "LargeMmapAllocator"));
+ MapUnmapCallback().OnMap(map_beg, map_size);
uptr map_end = map_beg + map_size;
uptr res = map_beg + page_size_;
if (res & (alignment - 1)) // Align.
@@ -384,11 +747,13 @@ class LargeMmapAllocator {
h->map_size = map_size;
{
SpinMutexLock l(&mutex_);
- h->next = list_;
- h->prev = 0;
- if (list_)
- list_->prev = h;
- list_ = h;
+ uptr idx = n_chunks_++;
+ CHECK_LT(idx, kMaxNumChunks);
+ h->chunk_idx = idx;
+ chunks_[idx] = h;
+ stats.n_allocs++;
+ stats.currently_allocated += map_size;
+ stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
}
return reinterpret_cast<void*>(res);
}
@@ -397,63 +762,81 @@ class LargeMmapAllocator {
Header *h = GetHeader(p);
{
SpinMutexLock l(&mutex_);
- Header *prev = h->prev;
- Header *next = h->next;
- if (prev)
- prev->next = next;
- if (next)
- next->prev = prev;
- if (h == list_)
- list_ = next;
+ uptr idx = h->chunk_idx;
+ CHECK_EQ(chunks_[idx], h);
+ CHECK_LT(idx, n_chunks_);
+ chunks_[idx] = chunks_[n_chunks_ - 1];
+ chunks_[idx]->chunk_idx = idx;
+ n_chunks_--;
+ stats.n_frees++;
+ stats.currently_allocated -= h->map_size;
}
+ MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
}
uptr TotalMemoryUsed() {
SpinMutexLock l(&mutex_);
uptr res = 0;
- for (Header *l = list_; l; l = l->next) {
- res += RoundUpMapSize(l->size);
+ for (uptr i = 0; i < n_chunks_; i++) {
+ Header *h = chunks_[i];
+ CHECK_EQ(h->chunk_idx, i);
+ res += RoundUpMapSize(h->size);
}
return res;
}
bool PointerIsMine(void *p) {
- // Fast check.
- if ((reinterpret_cast<uptr>(p) & (page_size_ - 1))) return false;
- SpinMutexLock l(&mutex_);
- for (Header *l = list_; l; l = l->next) {
- if (GetUser(l) == p) return true;
- }
- return false;
+ return GetBlockBegin(p) != 0;
}
uptr GetActuallyAllocatedSize(void *p) {
- return RoundUpMapSize(GetHeader(p)->size) - page_size_;
+ return RoundUpTo(GetHeader(p)->size, page_size_);
}
// At least page_size_/2 metadata bytes is available.
void *GetMetaData(void *p) {
+ // Too slow: CHECK_EQ(p, GetBlockBegin(p));
+ CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
return GetHeader(p) + 1;
}
- void *GetBlockBegin(void *p) {
+ void *GetBlockBegin(void *ptr) {
+ uptr p = reinterpret_cast<uptr>(ptr);
SpinMutexLock l(&mutex_);
- for (Header *l = list_; l; l = l->next) {
- void *b = GetUser(l);
- if (p >= b && p < (u8*)b + l->size)
- return b;
+ uptr nearest_chunk = 0;
+ // Cache-friendly linear search.
+ for (uptr i = 0; i < n_chunks_; i++) {
+ uptr ch = reinterpret_cast<uptr>(chunks_[i]);
+ if (p < ch) continue; // p is at left to this chunk, skip it.
+ if (p - ch < p - nearest_chunk)
+ nearest_chunk = ch;
}
- return 0;
+ if (!nearest_chunk)
+ return 0;
+ Header *h = reinterpret_cast<Header *>(nearest_chunk);
+ CHECK_GE(nearest_chunk, h->map_beg);
+ CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
+ CHECK_LE(nearest_chunk, p);
+ if (h->map_beg + h->map_size < p)
+ return 0;
+ return GetUser(h);
+ }
+
+ void PrintStats() {
+ Printf("Stats: LargeMmapAllocator: allocated %zd times, "
+ "remains %zd (%zd K) max %zd M\n",
+ stats.n_allocs, stats.n_allocs - stats.n_frees,
+ stats.currently_allocated >> 10, stats.max_allocated >> 20);
}
private:
+ static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
struct Header {
uptr map_beg;
uptr map_size;
uptr size;
- Header *next;
- Header *prev;
+ uptr chunk_idx;
};
Header *GetHeader(uptr p) {
@@ -472,7 +855,11 @@ class LargeMmapAllocator {
}
uptr page_size_;
- Header *list_;
+ Header *chunks_[kMaxNumChunks];
+ uptr n_chunks_;
+ struct Stats {
+ uptr n_allocs, n_frees, currently_allocated, max_allocated;
+ } stats;
SpinMutex mutex_;
};
@@ -501,10 +888,14 @@ class CombinedAllocator {
if (alignment > 8)
size = RoundUpTo(size, alignment);
void *res;
- if (primary_.CanAllocate(size, alignment))
- res = cache->Allocate(&primary_, primary_.ClassID(size));
- else
+ if (primary_.CanAllocate(size, alignment)) {
+ if (cache) // Allocate from cache.
+ res = cache->Allocate(&primary_, primary_.ClassID(size));
+ else // No thread-local cache, allocate directly from primary allocator.
+ res = primary_.Allocate(size, alignment);
+ } else { // Secondary allocator does not use cache.
res = secondary_.Allocate(size, alignment);
+ }
if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
if (cleared && res)
@@ -544,6 +935,10 @@ class CombinedAllocator {
return secondary_.PointerIsMine(p);
}
+ bool FromPrimary(void *p) {
+ return primary_.PointerIsMine(p);
+ }
+
void *GetMetaData(void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetMetaData(p);
@@ -572,6 +967,11 @@ class CombinedAllocator {
cache->Drain(&primary_);
}
+ void PrintStats() {
+ primary_.PrintStats();
+ secondary_.PrintStats();
+ }
+
private:
PrimaryAllocator primary_;
SecondaryAllocator secondary_;
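
For orientation, a tool wires these layers together roughly as follows. The address-space constants and the exact template-parameter lists of the cache and combined allocator here are illustrative, not taken from this patch:

// Sketch of a plausible instantiation (the constants are made up).
typedef SizeClassAllocator64<0x700000000000ULL /*kSpaceBeg*/,
                             0x40000000000ULL  /*kSpaceSize*/,
                             16 /*kMetadataSize*/,
                             DefaultSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator::kNumClasses,
                                     PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
                          SecondaryAllocator> Allocator;
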
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h b/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
index 2c02baa954a..55e00e2204c 100644
--- a/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
@@ -22,9 +22,31 @@ extern "C" void _mm_pause();
extern "C" long _InterlockedExchangeAdd( // NOLINT
long volatile * Addend, long Value); // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd)
-extern "C" void *InterlockedCompareExchangePointer(
+
+#ifdef _WIN64
+extern "C" void *_InterlockedCompareExchangePointer(
void *volatile *Destination,
void *Exchange, void *Comparand);
+#pragma intrinsic(_InterlockedCompareExchangePointer)
+#else
+// There's no _InterlockedCompareExchangePointer intrinsic on x86,
+// so call _InterlockedCompareExchange instead.
+extern "C"
+long __cdecl _InterlockedCompareExchange( // NOLINT
+ long volatile *Destination, // NOLINT
+ long Exchange, long Comparand); // NOLINT
+#pragma intrinsic(_InterlockedCompareExchange)
+
+inline static void *_InterlockedCompareExchangePointer(
+ void *volatile *Destination,
+ void *Exchange, void *Comparand) {
+ return reinterpret_cast<void*>(
+ _InterlockedCompareExchange(
+ reinterpret_cast<long volatile*>(Destination), // NOLINT
+ reinterpret_cast<long>(Exchange), // NOLINT
+ reinterpret_cast<long>(Comparand))); // NOLINT
+}
+#endif
namespace __sanitizer {
@@ -113,7 +135,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
uptr xchg,
memory_order mo) {
uptr cmpv = *cmp;
- uptr prev = (uptr)InterlockedCompareExchangePointer(
+ uptr prev = (uptr)_InterlockedCompareExchangePointer(
(void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);
if (prev == cmpv)
return true;
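
The emulated primitive keeps the usual compare-exchange contract: it returns the previous value, and on failure the wrapper stores that value back into *cmp. A typical retry loop built on top (sketch; ExchangeMax is a made-up helper):

// Sketch: atomically raise *a to at least v, returning the old value.
uptr ExchangeMax(volatile atomic_uintptr_t *a, uptr v) {
  uptr cmp = atomic_load(a, memory_order_relaxed);
  // On failure, atomic_compare_exchange_strong refreshes cmp with the
  // current value, so each iteration re-tests against fresh data.
  while (cmp < v &&
         !atomic_compare_exchange_strong(a, &cmp, v, memory_order_relaxed)) {
  }
  return cmp;
}
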
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.cc b/libsanitizer/sanitizer_common/sanitizer_common.cc
index 76a55c0f8b4..96e8808f6d1 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_common.cc
@@ -153,6 +153,27 @@ void SortArray(uptr *array, uptr size) {
}
}
+// We want to map a chunk of address space aligned to 'alignment'.
+// We do it by mapping a bit more and then unmapping the redundant pieces.
+// We could probably do this with fewer syscalls in some OS-dependent way.
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+ CHECK(IsPowerOfTwo(size));
+ CHECK(IsPowerOfTwo(alignment));
+ uptr map_size = size + alignment;
+ uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
+ uptr map_end = map_res + map_size;
+ uptr res = map_res;
+ if (res & (alignment - 1)) // Not aligned.
+ res = (map_res + alignment) & ~(alignment - 1);
+ uptr end = res + size;
+ if (res != map_res)
+ UnmapOrDie((void*)map_res, res - map_res);
+ if (end != map_end)
+ UnmapOrDie((void*)end, map_end - end);
+ return (void*)res;
+}
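
The same over-map-and-trim idea as a standalone POSIX sketch, independent of the runtime (the name and the null-on-failure policy are mine):

#include <sys/mman.h>
#include <stdint.h>

// Sketch: map 'size' bytes aligned to 'alignment' (both powers of two).
void *MapAligned(size_t size, size_t alignment) {
  size_t map_size = size + alignment;
  char *map = (char *)mmap(0, map_size, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (map == MAP_FAILED) return 0;
  uintptr_t beg = (uintptr_t)map;
  uintptr_t res = (beg + alignment - 1) & ~(uintptr_t)(alignment - 1);
  if (res != beg)                          // trim the misaligned head
    munmap(map, res - beg);
  if (res + size != beg + map_size)        // trim the unused tail
    munmap((char *)res + size, beg + map_size - (res + size));
  return (void *)res;
}
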
+
} // namespace __sanitizer
using namespace __sanitizer; // NOLINT
@@ -178,4 +199,9 @@ void __sanitizer_set_report_fd(int fd) {
internal_close(report_fd);
report_fd = fd;
}
+
+void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) {
+ (void)reserved;
+ PrepareForSandboxing();
+}
} // extern "C"
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.h b/libsanitizer/sanitizer_common/sanitizer_common.h
index 5639134b031..6b104884342 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.h
+++ b/libsanitizer/sanitizer_common/sanitizer_common.h
@@ -42,9 +42,13 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
void *MmapOrDie(uptr size, const char *mem_type);
void UnmapOrDie(void *addr, uptr size);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size);
+void *MmapFixedOrDie(uptr fixed_addr, uptr size);
void *Mprotect(uptr fixed_addr, uptr size);
+// Map aligned chunk of address space; size and alignment are powers of two.
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
+void FlushUnneededShadowMemory(uptr addr, uptr size);
// Internal allocator
void *InternalAlloc(uptr size);
@@ -119,6 +123,7 @@ const char *GetPwd();
void ReExec();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
+void PrepareForSandboxing();
// Other
void SleepForSeconds(int seconds);
@@ -133,6 +138,13 @@ void NORETURN Die();
void NORETURN SANITIZER_INTERFACE_ATTRIBUTE
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
+// Set the name of the current thread to 'name', return true on success.
+// The name may be truncated to a system-dependent limit.
+bool SanitizerSetThreadName(const char *name);
+// Get the name of the current thread (no more than max_len bytes),
+// return true on success. name should have space for at least max_len+1 bytes.
+bool SanitizerGetThreadName(char *name, int max_len);
+
// Specific tools may override behavior of "Die" and "CheckFailed" functions
// to do tool-specific job.
void SetDieCallback(void (*callback)(void));
@@ -148,6 +160,12 @@ INLINE uptr RoundUpTo(uptr size, uptr boundary) {
CHECK(IsPowerOfTwo(boundary));
return (size + boundary - 1) & ~(boundary - 1);
}
+INLINE uptr RoundDownTo(uptr x, uptr boundary) {
+ return x & ~(boundary - 1);
+}
+INLINE bool IsAligned(uptr a, uptr alignment) {
+ return (a & (alignment - 1)) == 0;
+}
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; }
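
All three rounding helpers assume a power-of-two boundary. A quick sanity check of the identities (assuming sanitizer_common.h is on the include path):

#include <cassert>
int main() {
  assert(__sanitizer::RoundUpTo(13, 8) == 16);    // (13 + 7) & ~7
  assert(__sanitizer::RoundDownTo(13, 8) == 8);   // 13 & ~7
  assert(__sanitizer::IsAligned(4096, 4096));
  assert(!__sanitizer::IsAligned(4097, 4096));
}
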
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.h b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.h
new file mode 100644
index 00000000000..97c6b6f7beb
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.h
@@ -0,0 +1,77 @@
+//===-- sanitizer_common_interceptors.h -------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common function interceptors for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// This file should be included into the tool's interceptor file,
+// which has to define its own macros:
+// COMMON_INTERCEPTOR_ENTER
+// COMMON_INTERCEPTOR_READ_RANGE
+// COMMON_INTERCEPTOR_WRITE_RANGE
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_COMMON_INTERCEPTORS_H
+#define SANITIZER_COMMON_INTERCEPTORS_H
+
+#include "interception/interception.h"
+#include "sanitizer_platform_interceptors.h"
+
+#if SANITIZER_INTERCEPT_READ
+INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
+ COMMON_INTERCEPTOR_ENTER(read, fd, ptr, count);
+ SSIZE_T res = REAL(read)(fd, ptr, count);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
+ return res;
+}
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD
+INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
+ COMMON_INTERCEPTOR_ENTER(pread, fd, ptr, count, offset);
+ SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
+ return res;
+}
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD64
+INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
+ COMMON_INTERCEPTOR_ENTER(pread64, fd, ptr, count, offset);
+ SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
+ return res;
+}
+#endif
+
+#if SANITIZER_INTERCEPT_READ
+# define INIT_READ INTERCEPT_FUNCTION(read)
+#else
+# define INIT_READ
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD
+# define INIT_PREAD INTERCEPT_FUNCTION(pread)
+#else
+# define INIT_PREAD
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD64
+# define INIT_PREAD64 INTERCEPT_FUNCTION(pread64)
+#else
+# define INIT_PREAD64
+#endif
+
+#define SANITIZER_COMMON_INTERCEPTORS_INIT \
+ INIT_READ; \
+ INIT_PREAD; \
+ INIT_PREAD64; \
+
+#endif // SANITIZER_COMMON_INTERCEPTORS_H
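
A tool defines the three macros before including this header; a hypothetical minimal instantiation (the macro bodies and helper names below are invented for illustration, not any tool's real definitions):

#define COMMON_INTERCEPTOR_ENTER(func, ...) \
  EnsureToolInited()  // hypothetical lazy-init hook
#define COMMON_INTERCEPTOR_READ_RANGE(ptr, size) \
  ToolCheckRange(ptr, size, /*is_write=*/false)  // hypothetical checker
#define COMMON_INTERCEPTOR_WRITE_RANGE(ptr, size) \
  ToolCheckRange(ptr, size, /*is_write=*/true)
#include "sanitizer_common/sanitizer_common_interceptors.h"

void InitializeCommonInterceptors() {
  SANITIZER_COMMON_INTERCEPTORS_INIT;  // expands to INTERCEPT_FUNCTION(...)
}
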
diff --git a/libsanitizer/sanitizer_common/sanitizer_libc.cc b/libsanitizer/sanitizer_common/sanitizer_libc.cc
index 4d43cd7d013..b02cbd4aced 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libc.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_libc.cc
@@ -203,4 +203,23 @@ s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
}
}
+bool mem_is_zero(const char *beg, uptr size) {
+ CHECK_LE(size, 1UL << FIRST_32_SECOND_64(30, 40)); // Sanity check.
+ const char *end = beg + size;
+ uptr *aligned_beg = (uptr *)RoundUpTo((uptr)beg, sizeof(uptr));
+ uptr *aligned_end = (uptr *)RoundDownTo((uptr)end, sizeof(uptr));
+ uptr all = 0;
+ // Prologue.
+ for (const char *mem = beg; mem < (char*)aligned_beg && mem < end; mem++)
+ all |= *mem;
+ // Aligned loop.
+ for (; aligned_beg < aligned_end; aligned_beg++)
+ all |= *aligned_beg;
+ // Epilogue.
+ if ((char*)aligned_end >= beg)
+ for (const char *mem = (char*)aligned_end; mem < end; mem++)
+ all |= *mem;
+ return all == 0;
+}
+
} // namespace __sanitizer
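
Usage sketch: the function targets large, mostly-zero buffers (e.g. shadow memory), where the word-at-a-time middle loop does nearly all the work:

#include <cassert>
int main() {
  static char buf[1 << 20] = {};  // zero-initialized 1 MB buffer
  assert(__sanitizer::mem_is_zero(buf, sizeof(buf)));
  buf[123456] = 1;
  assert(!__sanitizer::mem_is_zero(buf, sizeof(buf)));
}
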
diff --git a/libsanitizer/sanitizer_common/sanitizer_libc.h b/libsanitizer/sanitizer_common/sanitizer_libc.h
index 4aa4a279d40..f193017f953 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libc.h
+++ b/libsanitizer/sanitizer_common/sanitizer_libc.h
@@ -45,6 +45,11 @@ char *internal_strstr(const char *haystack, const char *needle);
// Works only for base=10 and doesn't set errno.
s64 internal_simple_strtoll(const char *nptr, char **endptr, int base);
+// Return true if all bytes in [mem, mem+size) are zero.
+// Optimized for the case when the result is true.
+bool mem_is_zero(const char *mem, uptr size);
+
+
// Memory
void *internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset);
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cc b/libsanitizer/sanitizer_common/sanitizer_linux.cc
index 75f2ee18a67..1d0bf02192c 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_linux.cc
@@ -17,6 +17,7 @@
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
+#include "sanitizer_stacktrace.h"
#include <fcntl.h>
#include <pthread.h>
@@ -28,7 +29,9 @@
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
+#include <unwind.h>
#include <errno.h>
+#include <sys/prctl.h>
// Are we using 32-bit or 64-bit syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
@@ -215,6 +218,14 @@ void ReExec() {
execv(argv[0], argv.data());
}
+void PrepareForSandboxing() {
+  // Some kinds of sandboxes may forbid filesystem access, so we won't be able
+  // to read the file mappings from /proc/self/maps. Luckily, the process won't
+  // be able to load additional libraries either, so it's fine to use the
+  // cached mappings.
+ MemoryMappingLayout::CacheMemoryMappings();
+}
+
// ----------------- sanitizer_procmaps.h
// Linker initialized.
ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
@@ -354,6 +365,75 @@ bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
}
+bool SanitizerSetThreadName(const char *name) {
+ return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); // NOLINT
+}
+
+bool SanitizerGetThreadName(char *name, int max_len) {
+ char buff[17];
+ if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT
+ return false;
+ internal_strncpy(name, buff, max_len);
+ name[max_len] = 0;
+ return true;
+}
+
+#ifndef SANITIZER_GO
+//------------------------- SlowUnwindStack -----------------------------------
+#ifdef __arm__
+#define UNWIND_STOP _URC_END_OF_STACK
+#define UNWIND_CONTINUE _URC_NO_REASON
+#else
+#define UNWIND_STOP _URC_NORMAL_STOP
+#define UNWIND_CONTINUE _URC_NO_REASON
+#endif
+
+uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
+#ifdef __arm__
+ uptr val;
+ _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
+ 15 /* r15 = PC */, _UVRSD_UINT32, &val);
+ CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
+ // Clear the Thumb bit.
+ return val & ~(uptr)1;
+#else
+ return _Unwind_GetIP(ctx);
+#endif
+}
+
+_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
+ StackTrace *b = (StackTrace*)param;
+ CHECK(b->size < b->max_size);
+ uptr pc = Unwind_GetIP(ctx);
+ b->trace[b->size++] = pc;
+ if (b->size == b->max_size) return UNWIND_STOP;
+ return UNWIND_CONTINUE;
+}
+
+static bool MatchPc(uptr cur_pc, uptr trace_pc) {
+ return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64;
+}
+
+void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
+ this->size = 0;
+ this->max_size = max_depth;
+ if (max_depth > 1) {
+ _Unwind_Backtrace(Unwind_Trace, this);
+ // We need to pop a few frames so that pc is on top.
+ // trace[0] belongs to the current function so we always pop it.
+ int to_pop = 1;
+ /**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1;
+ else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2;
+ else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3;
+ else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4;
+ else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5;
+ this->PopStackFrames(to_pop);
+ }
+ this->trace[0] = pc;
+}
+
+#endif // #ifndef SANITIZER_GO
+
} // namespace __sanitizer
#endif // __linux__
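
The slow unwinder above is a thin wrapper over libgcc's unwind interface; the mechanism in isolation looks like this (standalone sketch, non-ARM path, names mine):

#include <unwind.h>
#include <stdint.h>
#include <stdio.h>

struct Trace { uintptr_t pc[64]; int n; };

static _Unwind_Reason_Code OnFrame(struct _Unwind_Context *ctx, void *arg) {
  Trace *t = (Trace *)arg;
  if (t->n >= 64) return _URC_NORMAL_STOP;
  t->pc[t->n++] = _Unwind_GetIP(ctx);  // return address in this frame
  return _URC_NO_REASON;
}

int main() {
  Trace t = {};
  _Unwind_Backtrace(OnFrame, &t);  // walks from the current frame outward
  for (int i = 0; i < t.n; i++)
    printf("#%d 0x%zx\n", i, (size_t)t.pc[i]);
}
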
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.cc b/libsanitizer/sanitizer_common/sanitizer_mac.cc
index 465d0a30121..0f64b306afb 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.cc
@@ -124,6 +124,10 @@ void ReExec() {
UNIMPLEMENTED();
}
+void PrepareForSandboxing() {
+ // Nothing here for now.
+}
+
// ----------------- sanitizer_procmaps.h
MemoryMappingLayout::MemoryMappingLayout() {
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
new file mode 100644
index 00000000000..e32206cb6d4
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
@@ -0,0 +1,27 @@
+//===-- sanitizer_platform_interceptors.h -----------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines macros telling whether sanitizer tools can/should intercept
+// given library functions on a given platform.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_internal_defs.h"
+
+#if !defined(_WIN32)
+# define SANITIZER_INTERCEPT_READ 1
+# define SANITIZER_INTERCEPT_PREAD 1
+#else
+# define SANITIZER_INTERCEPT_READ 0
+# define SANITIZER_INTERCEPT_PREAD 0
+#endif
+
+#if defined(__linux__) && !defined(ANDROID)
+# define SANITIZER_INTERCEPT_PREAD64 1
+#else
+# define SANITIZER_INTERCEPT_PREAD64 0
+#endif
diff --git a/libsanitizer/sanitizer_common/sanitizer_posix.cc b/libsanitizer/sanitizer_common/sanitizer_posix.cc
index b9601eaa943..17287cd950e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_posix.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_posix.cc
@@ -91,6 +91,21 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
return p;
}
+void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+ uptr PageSize = GetPageSizeCached();
+ void *p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
+ RoundUpTo(size, PageSize),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ -1, 0);
+ if (p == (void*)-1) {
+ Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
+ size, size, fixed_addr, errno);
+ CHECK("unable to mmap" && 0);
+ }
+ return p;
+}
+
void *Mprotect(uptr fixed_addr, uptr size) {
return internal_mmap((void*)fixed_addr, size,
PROT_NONE,
@@ -98,6 +113,10 @@ void *Mprotect(uptr fixed_addr, uptr size) {
-1, 0);
}
+void FlushUnneededShadowMemory(uptr addr, uptr size) {
+ madvise((void*)addr, size, MADV_DONTNEED);
+}
+
void *MapFileToMemory(const char *file_name, uptr *buff_size) {
fd_t fd = internal_open(file_name, false);
CHECK_NE(fd, kInvalidFd);
diff --git a/libsanitizer/sanitizer_common/sanitizer_printf.cc b/libsanitizer/sanitizer_common/sanitizer_printf.cc
index 5876fef04f3..7771e1d34a1 100644
--- a/libsanitizer/sanitizer_common/sanitizer_printf.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_printf.cc
@@ -92,7 +92,7 @@ static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
int VSNPrintf(char *buff, int buff_length,
const char *format, va_list args) {
static const char *kPrintfFormatsHelp =
- "Supported Printf formats: %%(0[0-9]*)?(z|ll)?{d,u,x}; %%p; %%s; %%c\n";
+ "Supported Printf formats: %(0[0-9]*)?(z|ll)?{d,u,x}; %p; %s; %c\n";
RAW_CHECK(format);
RAW_CHECK(buff_length > 0);
const char *buff_end = &buff[buff_length - 1];
diff --git a/libsanitizer/sanitizer_common/sanitizer_report_decorator.h b/libsanitizer/sanitizer_common/sanitizer_report_decorator.h
new file mode 100644
index 00000000000..17f0b2edd2f
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_report_decorator.h
@@ -0,0 +1,35 @@
+//===-- sanitizer_report_decorator.h ----------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Tags to decorate the sanitizer reports.
+// Currently supported tags:
+// * None.
+// * ANSI color sequences.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_REPORT_DECORATOR_H
+#define SANITIZER_REPORT_DECORATOR_H
+
+namespace __sanitizer {
+class AnsiColorDecorator {
+ public:
+ explicit AnsiColorDecorator(bool use_ansi_colors) : ansi_(use_ansi_colors) { }
+ const char *Black() { return ansi_ ? "\033[1m\033[30m" : ""; }
+ const char *Red() { return ansi_ ? "\033[1m\033[31m" : ""; }
+ const char *Green() { return ansi_ ? "\033[1m\033[32m" : ""; }
+ const char *Yellow() { return ansi_ ? "\033[1m\033[33m" : ""; }
+ const char *Blue() { return ansi_ ? "\033[1m\033[34m" : ""; }
+ const char *Magenta() { return ansi_ ? "\033[1m\033[35m" : ""; }
+ const char *Cyan() { return ansi_ ? "\033[1m\033[36m" : ""; }
+ const char *White() { return ansi_ ? "\033[1m\033[37m" : ""; }
+ const char *Default() { return ansi_ ? "\033[1m\033[0m" : ""; }
+ private:
+ bool ansi_;
+};
+} // namespace __sanitizer
+#endif // SANITIZER_REPORT_DECORATOR_H
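
Usage sketch (plain printf for illustration):

#include <cstdio>
int main() {
  __sanitizer::AnsiColorDecorator d(/*use_ansi_colors=*/true);
  std::printf("%sERROR%s: something went wrong\n", d.Red(), d.Default());
}
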
diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc b/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc
index d9c5b69c7a2..2e22155fa75 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc
@@ -40,6 +40,12 @@ static struct {
atomic_uint32_t seq[kPartCount]; // Unique id generators.
} depot;
+static StackDepotStats stats;
+
+StackDepotStats *StackDepotGetStats() {
+ return &stats;
+}
+
static u32 hash(const uptr *stack, uptr size) {
// murmur2
const u32 m = 0x5bd1e995;
@@ -75,7 +81,7 @@ static StackDesc *tryallocDesc(uptr memsz) {
}
static StackDesc *allocDesc(uptr size) {
- // Frist, try to allocate optimisitically.
+ // First, try to allocate optimistically.
uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr);
StackDesc *s = tryallocDesc(memsz);
if (s)
@@ -91,6 +97,7 @@ static StackDesc *allocDesc(uptr size) {
if (allocsz < memsz)
allocsz = memsz;
uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+ stats.mapped += allocsz;
atomic_store(&depot.region_end, mem + allocsz, memory_order_release);
atomic_store(&depot.region_pos, mem, memory_order_release);
}
@@ -154,6 +161,7 @@ u32 StackDepotPut(const uptr *stack, uptr size) {
}
uptr part = (h % kTabSize) / kPartSize;
id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1;
+ stats.n_uniq_ids++;
CHECK_LT(id, kMaxId);
id |= part << kPartShift;
CHECK_NE(id, 0);
diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepot.h b/libsanitizer/sanitizer_common/sanitizer_stackdepot.h
index c4c388aa74d..1e917eb53bb 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stackdepot.h
+++ b/libsanitizer/sanitizer_common/sanitizer_stackdepot.h
@@ -22,6 +22,13 @@ u32 StackDepotPut(const uptr *stack, uptr size);
// Retrieves a stored stack trace by the id.
const uptr *StackDepotGet(u32 id, uptr *size);
+struct StackDepotStats {
+ uptr n_uniq_ids;
+ uptr mapped;
+};
+
+StackDepotStats *StackDepotGetStats();
+
} // namespace __sanitizer
#endif // SANITIZER_STACKDEPOT_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
index 308c2d90731..59af1c35292 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
@@ -23,10 +23,7 @@ static const char *StripPathPrefix(const char *filepath,
}
// ----------------------- StackTrace ----------------------------- {{{1
-// PCs in stack traces are actually the return addresses, that is,
-// addresses of the next instructions after the call. That's why we
-// decrement them.
-static uptr patch_pc(uptr pc) {
+uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
#ifdef __arm__
// Cancel Thumb bit.
pc = pc & (~1);
@@ -69,7 +66,9 @@ void StackTrace::PrintStack(const uptr *addr, uptr size,
InternalScopedBuffer<AddressInfo> addr_frames(64);
uptr frame_num = 0;
for (uptr i = 0; i < size && addr[i]; i++) {
- uptr pc = patch_pc(addr[i]);
+ // PCs in stack traces are actually the return addresses, that is,
+ // addresses of the next instructions after the call.
+ uptr pc = GetPreviousInstructionPc(addr[i]);
uptr addr_frames_num = 0; // The number of stack frames for current
// instruction address.
if (symbolize_callback) {
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
index b36a1a082c5..c939644401c 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
@@ -42,10 +42,12 @@ struct StackTrace {
}
void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom);
+ void SlowUnwindStack(uptr pc, uptr max_depth);
void PopStackFrames(uptr count);
static uptr GetCurrentPc();
+ static uptr GetPreviousInstructionPc(uptr pc);
static uptr CompressStack(StackTrace *stack,
u32 *compressed, uptr size);
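
GetPreviousInstructionPc is the adjustment sanitizer_stacktrace.cc applies before symbolizing, since trace entries are return addresses. A rough per-architecture sketch of the idea; the offsets here are approximations, not the runtime's exact values:

uptr GetPreviousInstructionPcSketch(uptr pc) {
#if defined(__arm__)
  pc = pc & ~(uptr)1;  // cancel the Thumb bit first
  return pc - 4;       // step back into the (approximate) call instruction
#elif defined(__x86_64__) || defined(__i386__)
  return pc - 1;       // variable-length ISA: any byte inside the call works
#else
  return pc - 4;       // common fixed 4-byte instruction width
#endif
}
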
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
index 4d7ec17fe6a..0714b3824fb 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
@@ -58,6 +58,9 @@ struct AddressInfo {
uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames);
bool SymbolizeData(uptr address, AddressInfo *frame);
+// Attempts to demangle the provided C++ mangled name.
+const char *Demangle(const char *Name);
+
// Starts external symbolizer program in a subprocess. Sanitizer communicates
// with external symbolizer via pipes.
bool InitializeExternalSymbolizer(const char *path_to_symbolizer);
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_itanium.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_itanium.cc
new file mode 100644
index 00000000000..b356f9a09e3
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_itanium.cc
@@ -0,0 +1,40 @@
+//===-- sanitizer_symbolizer_itanium.cc -----------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between the sanitizer run-time libraries.
+// Itanium C++ ABI-specific implementation of symbolizer parts.
+//===----------------------------------------------------------------------===//
+#if defined(__APPLE__) || defined(__linux__)
+
+#include "sanitizer_symbolizer.h"
+
+#include <stdlib.h>
+
+// C++ demangling function, as required by Itanium C++ ABI. This is weak,
+// because we do not require a C++ ABI library to be linked to a program
+// using sanitizers; if it's not present, we'll just use the mangled name.
+namespace __cxxabiv1 {
+ extern "C" char *__cxa_demangle(const char *mangled, char *buffer,
+ size_t *length, int *status)
+ SANITIZER_WEAK_ATTRIBUTE;
+}
+
+const char *__sanitizer::Demangle(const char *MangledName) {
+ // FIXME: __cxa_demangle aggressively insists on allocating memory.
+ // There's not much we can do about that, short of providing our
+ // own demangler (libc++abi's implementation could be adapted so that
+ // it does not allocate). For now, we just call it anyway, and we leak
+ // the returned value.
+ if (__cxxabiv1::__cxa_demangle)
+ if (const char *Demangled =
+ __cxxabiv1::__cxa_demangle(MangledName, 0, 0, 0))
+ return Demangled;
+
+ return MangledName;
+}
+
+#endif // __APPLE__ || __linux__
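
Usage sketch: when no C++ ABI library is linked, the weak __cxa_demangle resolves to null and the mangled name comes back unchanged:

#include <cstdio>
int main() {
  const char *s = __sanitizer::Demangle("_ZN3foo3barEv");
  std::printf("%s\n", s);  // "foo::bar()" when a demangler is available
}
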
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
index 3b81e794e59..ad0053234f0 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
@@ -26,6 +26,10 @@ uptr GetListOfModules(LoadedModule *modules, uptr max_modules) {
UNIMPLEMENTED();
};
+const char *Demangle(const char *MangledName) {
+ return MangledName;
+}
+
} // namespace __sanitizer
#endif // _WIN32
diff --git a/libsanitizer/sanitizer_common/sanitizer_win.cc b/libsanitizer/sanitizer_common/sanitizer_win.cc
index 15ef7d96826..f7300a18b60 100644
--- a/libsanitizer/sanitizer_common/sanitizer_win.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_win.cc
@@ -13,6 +13,7 @@
#define WIN32_LEAN_AND_MEAN
#define NOGDI
#include <stdlib.h>
+#include <io.h>
#include <windows.h>
#include "sanitizer_common.h"
@@ -73,6 +74,8 @@ void UnmapOrDie(void *addr, uptr size) {
}
void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
+ // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
+ // but on Win64 it does.
void *p = VirtualAlloc((LPVOID)fixed_addr, size,
MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (p == 0)
@@ -81,6 +84,10 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
return p;
}
+void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+ return MmapFixedNoReserve(fixed_addr, size);
+}
+
void *Mprotect(uptr fixed_addr, uptr size) {
return VirtualAlloc((LPVOID)fixed_addr, size,
MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS);
@@ -127,6 +134,10 @@ void ReExec() {
UNIMPLEMENTED();
}
+void PrepareForSandboxing() {
+ // Nothing here for now.
+}
+
bool StackSizeIsUnlimited() {
UNIMPLEMENTED();
}
@@ -173,7 +184,7 @@ int internal_close(fd_t fd) {
}
int internal_isatty(fd_t fd) {
- UNIMPLEMENTED();
+ return _isatty(fd);
}
fd_t internal_open(const char *filename, bool write) {