author     Kostya Serebryany <kcc@google.com>   2018-08-31 03:18:31 +0000
committer  Kostya Serebryany <kcc@google.com>   2018-08-31 03:18:31 +0000
commit     9eba657efa3311c4b7f2a5deb6ad71fcdeb5711a
tree       2f39ae728472819016a661fb36ee2e10385f2656
parent     f19416e3ca507b6b9ebf9d110ba9e00804fb9509
download   compiler-rt-9eba657efa3311c4b7f2a5deb6ad71fcdeb5711a.tar.gz
[hwasan] properly report heap-buffer-overflow
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@341159 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  lib/hwasan/hwasan_allocator.cc                12
-rw-r--r--  lib/hwasan/hwasan_report.cc                   45
-rw-r--r--  test/hwasan/TestCases/heap-buffer-overflow.c  16
3 files changed, 67 insertions, 6 deletions
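
The effect of the change is easiest to see with the numbers from the new test below: a 30-byte allocation written at offset 40. A minimal standalone sketch of the offset arithmetic behind the new report line (plain C, offsets instead of real addresses; not the runtime code):

// Sketch only: reproduces the distance computation the new report performs,
// using the numbers from test/hwasan/TestCases/heap-buffer-overflow.c.
#include <stdio.h>

int main(void) {
  unsigned long region_beg = 0;                 // stand-in for chunk.Beg()
  unsigned long region_end = region_beg + 30;   // chunk.End() of a 30-byte region
  unsigned long access     = region_beg + 40;   // the faulting write, x[40]
  // The access lands past the end, so it is "to the right" of the region.
  printf("%lu bytes to the right of %lu-byte region\n",
         access - region_end, region_end - region_beg);  // prints 10 ... 30
  return 0;
}
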
diff --git a/lib/hwasan/hwasan_allocator.cc b/lib/hwasan/hwasan_allocator.cc
index 6b62b6a34..0c541679b 100644
--- a/lib/hwasan/hwasan_allocator.cc
+++ b/lib/hwasan/hwasan_allocator.cc
@@ -119,11 +119,11 @@ void HwasanThreadLocalMallocStorage::CommitBack() {
allocator.SwallowCache(GetAllocatorCache(this));
}
-static void *HwasanAllocate(StackTrace *stack, uptr size, uptr alignment,
- bool zeroise) {
- if (!size) return nullptr;
+static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
+ bool zeroise) {
+ if (!orig_size) return nullptr;
alignment = Max(alignment, kShadowAlignment);
- size = RoundUpTo(size, kShadowAlignment);
+ uptr size = RoundUpTo(orig_size, kShadowAlignment);
if (size > kMaxAllowedMallocSize) {
if (AllocatorMayReturnNull()) {
@@ -152,7 +152,7 @@ static void *HwasanAllocate(StackTrace *stack, uptr size, uptr alignment,
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
meta->state = CHUNK_ALLOCATED;
- meta->requested_size = static_cast<u32>(size);
+ meta->requested_size = static_cast<u32>(orig_size);
meta->alloc_context_id = StackDepotPut(*stack);
if (zeroise) {
internal_memset(allocated, 0, size);
@@ -204,7 +204,7 @@ void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
}
if (flags()->tag_in_free &&
atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
- TagMemoryAligned((uptr)untagged_ptr, size,
+ TagMemoryAligned((uptr)untagged_ptr, RoundUpTo(size, kShadowAlignment),
t ? t->GenerateRandomTag() : kFallbackFreeTag);
if (t) {
AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
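
In the allocator, the requested size is now kept separately from the size rounded up to the shadow granule: the metadata records what the caller asked for, while tagging (and re-tagging on free) covers whole granules. A minimal sketch of that relationship, assuming hwasan's usual 16-byte granule; round_up_to is illustrative, not the runtime's RoundUpTo:

// Sketch only: why the metadata must store the caller's size, not the
// granule-rounded one. The 16-byte granule is an assumption about
// kShadowAlignment, not taken from this diff.
#include <stdio.h>

static unsigned long round_up_to(unsigned long size, unsigned long align) {
  return (size + align - 1) & ~(align - 1);  // align must be a power of two
}

int main(void) {
  unsigned long requested = 30;                               // malloc(30)
  unsigned long granule   = 16;                               // assumed shadow granule
  unsigned long tagged    = round_up_to(requested, granule);  // 32 bytes carry the tag
  // Distances in the report are measured from the 30-byte region the user
  // asked for, so orig_size (30) goes into meta->requested_size, not 32.
  printf("requested=%lu bytes, tagged=%lu bytes\n", requested, tagged);
  return 0;
}
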
diff --git a/lib/hwasan/hwasan_report.cc b/lib/hwasan/hwasan_report.cc
index 807d73940..b1be72f6b 100644
--- a/lib/hwasan/hwasan_report.cc
+++ b/lib/hwasan/hwasan_report.cc
@@ -65,6 +65,49 @@ void PrintAddressDescription(uptr tagged_addr, uptr access_size) {
uptr untagged_addr = UntagAddr(tagged_addr);
Thread::VisitAllLiveThreads([&](Thread *t) {
Decorator d;
+ // Check if this looks like a heap buffer overflow by scanning
+ // the shadow left and right and looking for the first adjacent
+ // object with a different memory tag. If that tag matches addr_tag,
+ // ask the allocator whether it has a live chunk there.
+ tag_t addr_tag = GetTagFromPointer(tagged_addr);
+ tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
+ if (*tag_ptr != addr_tag) { // should be true usually.
+ tag_t *left = tag_ptr, *right = tag_ptr;
+ // scan left.
+ for (int i = 0; i < 1000 && *left == *tag_ptr; i++, left--){}
+ // scan right.
+ for (int i = 0; i < 1000 && *right == *tag_ptr; i++, right++){}
+ // Choose the object that has addr_tag and that is closer to addr.
+ tag_t *candidate = nullptr;
+ if (*right == addr_tag && *left == addr_tag)
+ candidate = right - tag_ptr < tag_ptr - left ? right : left;
+ else if (*right == addr_tag)
+ candidate = right;
+ else if (*left == addr_tag)
+ candidate = left;
+
+ if (candidate) {
+ uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
+ HwasanChunkView chunk = FindHeapChunkByAddress(mem);
+ if (chunk.IsAllocated()) {
+ Printf("%s", d.Location());
+ Printf(
+ "%p is located %zd bytes to the %s of %zd-byte region [%p,%p)\n",
+ untagged_addr,
+ candidate == left ? untagged_addr - chunk.End()
+ : chunk.Beg() - untagged_addr,
+ candidate == right ? "left" : "right", chunk.UsedSize(),
+ chunk.Beg(), chunk.End());
+ Printf("%s", d.Allocation());
+ Printf("allocated here:\n", t);
+ Printf("%s", d.Default());
+ GetStackTraceFromId(chunk.GetAllocStackId()).Print();
+ num_descriptions_printed++;
+ }
+ }
+ }
+
+ // Scan all threads' ring buffers to find if it's a heap-use-after-free.
HeapAllocationRecord har;
if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har)) {
Printf("%s", d.Location());
@@ -84,6 +127,8 @@ void PrintAddressDescription(uptr tagged_addr, uptr access_size) {
num_descriptions_printed++;
}
+
+ // Very basic check for stack memory.
if (t->AddrIsInStack(untagged_addr)) {
Printf("%s", d.Location());
Printf("Address %p is located in stack of thread %p\n", untagged_addr, t);
diff --git a/test/hwasan/TestCases/heap-buffer-overflow.c b/test/hwasan/TestCases/heap-buffer-overflow.c
new file mode 100644
index 000000000..36c09b294
--- /dev/null
+++ b/test/hwasan/TestCases/heap-buffer-overflow.c
@@ -0,0 +1,16 @@
+// RUN: %clang_hwasan %s -o %t && not %run %t 2>&1 | FileCheck %s
+
+// REQUIRES: stable-runtime
+// TODO: test more cases.
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <sanitizer/hwasan_interface.h>
+
+int main() {
+ __hwasan_enable_allocator_tagging();
+ char * volatile x = (char*)malloc(30);
+ x[40] = 42;
+// CHECK: is located 10 bytes to the right of 30-byte region
+ free(x);
+}
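
The test only covers an access past the end of the region; the TODO above asks for more cases. Below is a hypothetical companion test for the underflow side, which would take the "left" branch of the new report. The offset and the CHECK text are inferred from the Printf format in hwasan_report.cc, not taken from the commit, and whether the neighbouring granule really carries a different tag depends on allocator layout, so treat this as a sketch rather than a guaranteed-passing test:

// Hypothetical test, not part of this commit.
// RUN: %clang_hwasan %s -o %t && not %run %t 2>&1 | FileCheck %s

#include <stdlib.h>
#include <sanitizer/hwasan_interface.h>

int main() {
  __hwasan_enable_allocator_tagging();
  char * volatile x = (char*)malloc(30);
  x[-30] = 42;
// CHECK: is located 30 bytes to the left of 30-byte region
  free(x);
}
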