author     David L. Jones <dlj@google.com>    2017-11-10 01:07:01 +0000
committer  David L. Jones <dlj@google.com>    2017-11-10 01:07:01 +0000
commit     5cdb7458cb1d6fc8fc83dd5a177658f1284f5d29 (patch)
tree       1772d0d4f25219059bc98e9c65baeef15d268524 /lib/scudo/scudo_allocator_secondary.h
parent     97e140242d3085562afa1340578d5f531a82ad69 (diff)
parent     bcc227ee4af1ef3e63033b35dcb1d5627a3b2941 (diff)
download   compiler-rt-5cdb7458cb1d6fc8fc83dd5a177658f1284f5d29.tar.gz

Creating branches/google/testing and tags/google/testing/ from r317203

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/branches/google/testing@317856 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/scudo/scudo_allocator_secondary.h')
-rw-r--r--  lib/scudo/scudo_allocator_secondary.h  126
1 file changed, 38 insertions(+), 88 deletions(-)
diff --git a/lib/scudo/scudo_allocator_secondary.h b/lib/scudo/scudo_allocator_secondary.h
index b984f0db4..dbfb22565 100644
--- a/lib/scudo/scudo_allocator_secondary.h
+++ b/lib/scudo/scudo_allocator_secondary.h
@@ -24,26 +24,24 @@
class ScudoLargeMmapAllocator {
public:
- void Init(bool AllocatorMayReturnNull) {
+ void Init() {
PageSize = GetPageSizeCached();
- atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_relaxed);
}
void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
+ uptr UserSize = Size - AlignedChunkHeaderSize;
// The Scudo frontend prevents us from allocating more than
// MaxAllowedMallocSize, so integer overflow checks would be superfluous.
uptr MapSize = Size + SecondaryHeaderSize;
+ if (Alignment > MinAlignment)
+ MapSize += Alignment;
MapSize = RoundUpTo(MapSize, PageSize);
// Account for 2 guard pages, one before and one after the chunk.
MapSize += 2 * PageSize;
- // The size passed to the Secondary comprises the alignment, if large
- // enough. Subtract it here to get the requested size, including header.
- if (Alignment > MinAlignment)
- Size -= Alignment;
uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
if (MapBeg == ~static_cast<uptr>(0))
- return ReturnNullOrDieOnOOM();
+ return ReturnNullOrDieOnFailure::OnOOM();
// A page-aligned pointer is assumed after that, so check it now.
CHECK(IsAligned(MapBeg, PageSize));
uptr MapEnd = MapBeg + MapSize;
@@ -51,32 +49,32 @@ class ScudoLargeMmapAllocator {
// initial guard page, and both headers. This is the pointer that has to
// abide by alignment requirements.
uptr UserBeg = MapBeg + PageSize + HeadersSize;
+ uptr UserEnd = UserBeg + UserSize;
// In the rare event of larger alignments, we will attempt to fit the mmap
// area better and unmap extraneous memory. This will also ensure that the
// offset and unused bytes field of the header stay small.
if (Alignment > MinAlignment) {
- if (UserBeg & (Alignment - 1))
- UserBeg += Alignment - (UserBeg & (Alignment - 1));
- CHECK_GE(UserBeg, MapBeg);
- uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) - PageSize;
- CHECK_GE(NewMapBeg, MapBeg);
- uptr NewMapEnd = RoundUpTo(UserBeg + (Size - AlignedChunkHeaderSize),
- PageSize) + PageSize;
- CHECK_LE(NewMapEnd, MapEnd);
- // Unmap the extra memory if it's large enough, on both sides.
- uptr Diff = NewMapBeg - MapBeg;
- if (Diff > PageSize)
- UnmapOrDie(reinterpret_cast<void *>(MapBeg), Diff);
- Diff = MapEnd - NewMapEnd;
- if (Diff > PageSize)
- UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), Diff);
- MapBeg = NewMapBeg;
- MapEnd = NewMapEnd;
- MapSize = NewMapEnd - NewMapBeg;
+ if (!IsAligned(UserBeg, Alignment)) {
+ UserBeg = RoundUpTo(UserBeg, Alignment);
+ CHECK_GE(UserBeg, MapBeg);
+ uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) -
+ PageSize;
+ CHECK_GE(NewMapBeg, MapBeg);
+ if (NewMapBeg != MapBeg) {
+ UnmapOrDie(reinterpret_cast<void *>(MapBeg), NewMapBeg - MapBeg);
+ MapBeg = NewMapBeg;
+ }
+ UserEnd = UserBeg + UserSize;
+ }
+ uptr NewMapEnd = RoundUpTo(UserEnd, PageSize) + PageSize;
+ if (NewMapEnd != MapEnd) {
+ UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd);
+ MapEnd = NewMapEnd;
+ }
+ MapSize = MapEnd - MapBeg;
}
- uptr UserEnd = UserBeg + (Size - AlignedChunkHeaderSize);
CHECK_LE(UserEnd, MapEnd - PageSize);
// Actually mmap the memory, preserving the guard pages on either side.
CHECK_EQ(MapBeg + PageSize, reinterpret_cast<uptr>(
@@ -88,83 +86,35 @@ class ScudoLargeMmapAllocator {
// The primary adds the whole class size to the stats when allocating a
// chunk, so we will do something similar here. But we will not account for
// the guard pages.
- Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize);
- Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
-
- return reinterpret_cast<void *>(UserBeg);
- }
-
- void *ReturnNullOrDieOnBadRequest() {
- if (atomic_load(&MayReturnNull, memory_order_acquire))
- return nullptr;
- ReportAllocatorCannotReturnNull(false);
- }
-
- void *ReturnNullOrDieOnOOM() {
- if (atomic_load(&MayReturnNull, memory_order_acquire))
- return nullptr;
- ReportAllocatorCannotReturnNull(true);
- }
+ {
+ SpinMutexLock l(&StatsMutex);
+ Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize);
+ Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
+ }
- void SetMayReturnNull(bool AllocatorMayReturnNull) {
- atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_release);
+ return reinterpret_cast<void *>(Ptr);
}
void Deallocate(AllocatorStats *Stats, void *Ptr) {
SecondaryHeader *Header = getHeader(Ptr);
- Stats->Sub(AllocatorStatAllocated, Header->MapSize - 2 * PageSize);
- Stats->Sub(AllocatorStatMapped, Header->MapSize - 2 * PageSize);
+ {
+ SpinMutexLock l(&StatsMutex);
+ Stats->Sub(AllocatorStatAllocated, Header->MapSize - 2 * PageSize);
+ Stats->Sub(AllocatorStatMapped, Header->MapSize - 2 * PageSize);
+ }
UnmapOrDie(reinterpret_cast<void *>(Header->MapBeg), Header->MapSize);
}
- uptr TotalMemoryUsed() {
- UNIMPLEMENTED();
- }
-
- bool PointerIsMine(const void *Ptr) {
- UNIMPLEMENTED();
- }
-
uptr GetActuallyAllocatedSize(void *Ptr) {
SecondaryHeader *Header = getHeader(Ptr);
- // Deduct PageSize as MapEnd includes the trailing guard page.
+ // Deduct PageSize as MapSize includes the trailing guard page.
uptr MapEnd = Header->MapBeg + Header->MapSize - PageSize;
return MapEnd - reinterpret_cast<uptr>(Ptr);
}
- void *GetMetaData(const void *Ptr) {
- UNIMPLEMENTED();
- }
-
- void *GetBlockBegin(const void *Ptr) {
- UNIMPLEMENTED();
- }
-
- void *GetBlockBeginFastLocked(void *Ptr) {
- UNIMPLEMENTED();
- }
-
- void PrintStats() {
- UNIMPLEMENTED();
- }
-
- void ForceLock() {
- UNIMPLEMENTED();
- }
-
- void ForceUnlock() {
- UNIMPLEMENTED();
- }
-
- void ForEachChunk(ForEachChunkCallback Callback, void *Arg) {
- UNIMPLEMENTED();
- }
-
private:
// A Secondary allocated chunk header contains the base of the mapping and
- // its size. Currently, the base is always a page before the header, but
- // we might want to extend that number in the future based on the size of
- // the allocation.
+ // its size, which comprises the guard pages.
struct SecondaryHeader {
uptr MapBeg;
uptr MapSize;
@@ -182,7 +132,7 @@ class ScudoLargeMmapAllocator {
const uptr SecondaryHeaderSize = sizeof(SecondaryHeader);
const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
uptr PageSize;
- atomic_uint8_t MayReturnNull;
+ SpinMutex StatsMutex;
};
#endif // SCUDO_ALLOCATOR_SECONDARY_H_
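
As a rough illustration of the trimming arithmetic in the new Allocate path, the standalone sketch below walks the same steps with assumed values. It is not the Scudo implementation: RoundUpTo/RoundDownTo are local stand-ins for the sanitizer_common helpers, the page size, header sizes, alignment, and base address are illustrative 64-bit assumptions, and the MmapNoAccess/UnmapOrDie calls are replaced by plain address bookkeeping.

// Minimal sketch of the secondary's sizing, realignment, and trimming math.
// Assumptions: 64-bit, 4 KiB pages, 16-byte chunk and secondary headers,
// a 64 KiB alignment request; no actual mapping is performed.
#include <cstdint>
#include <cstdio>

using uptr = std::uintptr_t;

static uptr RoundUpTo(uptr X, uptr Boundary)   { return (X + Boundary - 1) & ~(Boundary - 1); }
static uptr RoundDownTo(uptr X, uptr Boundary) { return X & ~(Boundary - 1); }

int main() {
  const uptr PageSize = 4096;
  const uptr AlignedChunkHeaderSize = 16;  // assumed, for illustration
  const uptr SecondaryHeaderSize = 16;     // sizeof(SecondaryHeader) on 64-bit
  const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
  const uptr MinAlignment = 16;
  const uptr Alignment = 1 << 16;          // larger than MinAlignment
  const uptr Size = (1 << 20) + AlignedChunkHeaderSize;
  const uptr UserSize = Size - AlignedChunkHeaderSize;

  // Same sizing as the new code: over-allocate by Alignment, round up to a
  // page multiple, then add one guard page on each side.
  uptr MapSize = Size + SecondaryHeaderSize;
  if (Alignment > MinAlignment)
    MapSize += Alignment;
  MapSize = RoundUpTo(MapSize, PageSize);
  MapSize += 2 * PageSize;

  uptr MapBeg = 0x7f0000000000;            // pretend result of MmapNoAccess()
  uptr MapEnd = MapBeg + MapSize;

  // Place the user pointer past the front guard page and both headers.
  uptr UserBeg = MapBeg + PageSize + HeadersSize;
  uptr UserEnd = UserBeg + UserSize;

  // For larger alignments, realign the user pointer and trim the now-unused
  // pages on both sides, as the new code does before mapping the interior.
  if (Alignment > MinAlignment) {
    if (UserBeg & (Alignment - 1)) {
      UserBeg = RoundUpTo(UserBeg, Alignment);
      MapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) - PageSize;
      // The real code unmaps [old MapBeg, new MapBeg) here.
      UserEnd = UserBeg + UserSize;
    }
    MapEnd = RoundUpTo(UserEnd, PageSize) + PageSize;
    // The real code unmaps [new MapEnd, old MapEnd) here.
    MapSize = MapEnd - MapBeg;
  }

  printf("user [%#zx, %#zx)  map [%#zx, %#zx)  size %zu\n",
         (size_t)UserBeg, (size_t)UserEnd, (size_t)MapBeg, (size_t)MapEnd,
         (size_t)MapSize);
}

With these numbers the headers and the user chunk land inside [MapBeg + PageSize, MapEnd - PageSize), which is the property the CHECK_LE(UserEnd, MapEnd - PageSize) in the diff enforces: both guard pages survive the trimming.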