Diffstat (limited to 'src/page_heap.cc')
-rw-r--r--  src/page_heap.cc | 65
1 file changed, 17 insertions(+), 48 deletions(-)
diff --git a/src/page_heap.cc b/src/page_heap.cc
index 85849cd..44ad654 100644
--- a/src/page_heap.cc
+++ b/src/page_heap.cc
@@ -241,22 +241,12 @@ void PageHeap::CommitSpan(Span* span) {
   stats_.total_commit_bytes += (span->length << kPageShift);
 }
 
-bool PageHeap::TryDecommitWithoutLock(Span* span) {
+bool PageHeap::DecommitSpan(Span* span) {
   ++stats_.decommit_count;
 
-  ASSERT(span->location == Span::ON_NORMAL_FREELIST);
-  span->location = Span::IN_USE;
-  Static::pageheap_lock()->Unlock();
   bool rv = TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
                                    static_cast<size_t>(span->length << kPageShift));
-  Static::pageheap_lock()->Lock();
-
-  // We're not really on any free list at this point, but lets
-  // indicate if we're returned or not.
-  span->location = Span::ON_NORMAL_FREELIST;
-
   if (rv) {
-    span->location = Span::ON_RETURNED_FREELIST;
     stats_.committed_bytes -= span->length << kPageShift;
     stats_.total_decommit_bytes += (span->length << kPageShift);
   }
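
Note: reading just the new side of this hunk, the simplified DecommitSpan comes out as below. This is assembled from the hunk's `+` and context lines; the trailing return rv; and closing brace fall outside the hunk's context and are assumed from the callers, and the comments are editorial:

bool PageHeap::DecommitSpan(Span* span) {
  // Count every decommit attempt, successful or not.
  ++stats_.decommit_count;

  // The release syscall now runs with pageheap_lock held; the old
  // TryDecommitWithoutLock dropped and re-took the lock around it.
  bool rv = TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
                                   static_cast<size_t>(span->length << kPageShift));
  if (rv) {
    // Only successful releases adjust the committed-byte accounting.
    stats_.committed_bytes -= span->length << kPageShift;
    stats_.total_decommit_bytes += (span->length << kPageShift);
  }

  return rv;  // assumed: lies outside the hunk shown above
}

With the lock held for the whole call, the span no longer needs to be parked as IN_USE and taken off its free list for the duration, which is what the deleted location juggling was for.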
@@ -330,19 +320,15 @@ Span* PageHeap::CheckAndHandlePreMerge(Span* span, Span* other) {
   if (other == NULL) {
     return other;
   }
-
-  // If we're in aggressive decommit mode and span is decommitted,
-  // then we try to decommit adjacent span. We also remove from it's
-  // free list as part of that.
+  // if we're in aggressive decommit mode and span is decommitted,
+  // then we try to decommit adjacent span.
   if (aggressive_decommit_ && other->location == Span::ON_NORMAL_FREELIST
       && span->location == Span::ON_RETURNED_FREELIST) {
-    if (ReleaseSpan(other) != 0) {
-      return other;
+    bool worked = DecommitSpan(other);
+    if (!worked) {
+      return NULL;
     }
-    return NULL;
-  }
-
-  if (other->location != span->location) {
+  } else if (other->location != span->location) {
     return NULL;
   }
 
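
Note: the semantics shift slightly here. Previously the aggressive-decommit path delegated to ReleaseSpan(other), which (per the deleted comment) also removed other from its free list; now it merely decommits other in place, and the rest of the function handles the free-list bookkeeping for the merge. A sketch of the resulting shape, with the tail outside this hunk elided and the inline comments editorial:

Span* PageHeap::CheckAndHandlePreMerge(Span* span, Span* other) {
  if (other == NULL) {
    return other;
  }
  // if we're in aggressive decommit mode and span is decommitted,
  // then we try to decommit adjacent span.
  if (aggressive_decommit_ && other->location == Span::ON_NORMAL_FREELIST
      && span->location == Span::ON_RETURNED_FREELIST) {
    bool worked = DecommitSpan(other);
    if (!worked) {
      return NULL;  // decommit failed: don't merge spans with mixed commit state
    }
  } else if (other->location != span->location) {
    return NULL;  // locations differ: not mergeable
  }
  // ... tail outside this hunk: presumably unlinks other and returns it
}

Note that DecommitSpan no longer updates span->location itself (see the first hunk), so callers are responsible for any location bookkeeping after a successful decommit.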
@@ -374,7 +360,7 @@ void PageHeap::MergeIntoFreeList(Span* span) {
   const Length n = span->length;
 
   if (aggressive_decommit_ && span->location == Span::ON_NORMAL_FREELIST) {
-    if (TryDecommitWithoutLock(span)) {
+    if (DecommitSpan(span)) {
       span->location = Span::ON_RETURNED_FREELIST;
     }
   }
@@ -481,35 +467,18 @@ void PageHeap::IncrementalScavenge(Length n) {
   }
 }
 
-Length PageHeap::ReleaseSpan(Span* span) {
-  // We're dropping very important and otherwise contended
-  // pageheap_lock around call to potentially very slow syscall to
-  // release pages. Those syscalls can be slow even with "advanced"
-  // things such as MADV_FREE and its better equivalents, because they
-  // have to walk actual page tables, and we sometimes deal with large
-  // spans, which sometimes takes lots of time to walk. Plus Linux
-  // grabs per-address space mmap_sem lock which could be extremely
-  // contended at times. So it is best if we avoid holding one
-  // contended lock while waiting for another.
-  //
-  // Note, we set span location to in-use, because our span could be found via
-  // pagemap in e.g. MergeIntoFreeList while we're not holding the lock. By
-  // marking it in-use we prevent this possibility. So span is removed from free
-  // list and marked "unmergable" and that guarantees safety during unlock-ful
-  // release.
-  ASSERT(span->location == Span::ON_NORMAL_FREELIST);
-  RemoveFromFreeList(span);
+Length PageHeap::ReleaseSpan(Span* s) {
+  ASSERT(s->location == Span::ON_NORMAL_FREELIST);
 
-  Length n = span->length;
-  if (TryDecommitWithoutLock(span)) {
-    span->location = Span::ON_RETURNED_FREELIST;
-  } else {
-    n = 0; // Mark that we failed to return.
-    span->location = Span::ON_NORMAL_FREELIST;
+  if (DecommitSpan(s)) {
+    RemoveFromFreeList(s);
+    const Length n = s->length;
+    s->location = Span::ON_RETURNED_FREELIST;
+    MergeIntoFreeList(s); // Coalesces if possible.
+    return n;
   }
 
-  MergeIntoFreeList(span); // Coalesces if possible.
-  return n;
+  return 0;
 }
 
 Length PageHeap::ReleaseAtLeastNPages(Length num_pages) {
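
Note: the rewritten ReleaseSpan is visible in full on the new side of this hunk; only the comments below are editorial. The order of operations is inverted relative to the old code: decommit first, and touch free-list state only on success:

Length PageHeap::ReleaseSpan(Span* s) {
  ASSERT(s->location == Span::ON_NORMAL_FREELIST);

  if (DecommitSpan(s)) {
    RemoveFromFreeList(s);  // unlink only once the pages are actually returned
    const Length n = s->length;
    s->location = Span::ON_RETURNED_FREELIST;
    MergeIntoFreeList(s);  // Coalesces if possible.
    return n;
  }

  return 0;  // failure path is now a no-op: span stays on its normal free list
}

The old version had to unlink the span and mark it IN_USE before decommitting, because TryDecommitWithoutLock dropped pageheap_lock and a concurrent caller such as MergeIntoFreeList could otherwise find the span via the pagemap mid-release. With the lock held throughout, that defensive shuffle (and the re-merge on the failure path) is unnecessary.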