summaryrefslogtreecommitdiff
path: root/deps/v8/src/heap/new-spaces.cc
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/src/heap/new-spaces.cc')
-rw-r--r--deps/v8/src/heap/new-spaces.cc257
1 files changed, 124 insertions, 133 deletions
diff --git a/deps/v8/src/heap/new-spaces.cc b/deps/v8/src/heap/new-spaces.cc
index da2abbe95d..8358f3d3f9 100644
--- a/deps/v8/src/heap/new-spaces.cc
+++ b/deps/v8/src/heap/new-spaces.cc
@@ -9,6 +9,7 @@
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap-verifier.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-state-inl.h"
@@ -357,7 +358,7 @@ void SemiSpace::Print() {}
#endif
#ifdef VERIFY_HEAP
-void SemiSpace::Verify() const {
+void SemiSpace::VerifyPageMetadata() const {
bool is_from_space = (id_ == kFromSpace);
size_t external_backing_store_bytes[kNumTypes];
@@ -436,10 +437,6 @@ NewSpace::NewSpace(Heap* heap, LinearAllocationArea& allocation_info)
allocation_counter_, allocation_info,
linear_area_original_data_) {}
-void NewSpace::ResetParkedAllocationBuffers() {
- parked_allocation_buffers_.clear();
-}
-
void NewSpace::MaybeFreeUnusedLab(LinearAllocationArea info) {
if (allocation_info_.MergeIfAdjacent(info)) {
linear_area_original_data_.set_original_top_release(allocation_info_.top());
@@ -465,82 +462,6 @@ void NewSpace::VerifyTop() const {
}
#endif // DEBUG
-#ifdef VERIFY_HEAP
-// We do not use the SemiSpaceObjectIterator because verification doesn't assume
-// that it works (it depends on the invariants we are checking).
-void NewSpace::VerifyImpl(Isolate* isolate, const Page* current_page,
- Address current_address) const {
- DCHECK(current_page->ContainsLimit(current_address));
-
- size_t external_space_bytes[kNumTypes];
- for (int i = 0; i < kNumTypes; i++) {
- external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
- }
-
- CHECK(!current_page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
- CHECK(!current_page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
-
- PtrComprCageBase cage_base(isolate);
- VerifyPointersVisitor visitor(heap());
- const Page* page = current_page;
- while (true) {
- if (!Page::IsAlignedToPageSize(current_address)) {
- HeapObject object = HeapObject::FromAddress(current_address);
-
- // The first word should be a map, and we expect all map pointers to
- // be in map space or read-only space.
- Map map = object.map(cage_base);
- CHECK(map.IsMap(cage_base));
- CHECK(ReadOnlyHeap::Contains(map) ||
- isolate->heap()->old_space()->Contains(map));
-
- // The object should not be code or a map.
- CHECK(!object.IsMap(cage_base));
- CHECK(!object.IsAbstractCode(cage_base));
-
- // The object itself should look OK.
- object.ObjectVerify(isolate);
-
- // All the interior pointers should be contained in the heap.
- int size = object.Size(cage_base);
- object.IterateBody(map, size, &visitor);
-
- if (object.IsExternalString(cage_base)) {
- ExternalString external_string = ExternalString::cast(object);
- size_t string_size = external_string.ExternalPayloadSize();
- external_space_bytes[ExternalBackingStoreType::kExternalString] +=
- string_size;
- }
-
- current_address += ALIGN_TO_ALLOCATION_ALIGNMENT(size);
- } else {
- // At end of page, switch to next page.
- page = page->next_page();
- if (!page) break;
- CHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
- CHECK(!page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
- current_address = page->area_start();
- }
- }
-
- for (int i = 0; i < kNumTypes; i++) {
- if (i == ExternalBackingStoreType::kArrayBuffer) continue;
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
- }
-
- if (!v8_flags.concurrent_array_buffer_sweeping) {
- size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
- CHECK_EQ(bytes,
- ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
- }
-
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- current_page->object_start_bitmap()->Verify();
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-}
-#endif // VERIFY_HEAP
-
void NewSpace::PromotePageToOldSpace(Page* page) {
DCHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
DCHECK(page->InYoungGeneration());
@@ -548,7 +469,7 @@ void NewSpace::PromotePageToOldSpace(Page* page) {
RemovePage(page);
Page* new_page = Page::ConvertNewToOld(page);
DCHECK(!new_page->InYoungGeneration());
- new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ USE(new_page);
}
// -----------------------------------------------------------------------------
@@ -650,7 +571,7 @@ void SemiSpaceNewSpace::UpdateLinearAllocationArea(Address known_top) {
to_space_.AddRangeToActiveSystemPages(top(), limit());
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- UpdateInlineAllocationLimit(0);
+ UpdateInlineAllocationLimit();
}
void SemiSpaceNewSpace::ResetLinearAllocationArea() {
@@ -665,7 +586,8 @@ void SemiSpaceNewSpace::ResetLinearAllocationArea() {
}
}
-void SemiSpaceNewSpace::UpdateInlineAllocationLimit(size_t min_size) {
+void SemiSpaceNewSpace::UpdateInlineAllocationLimitForAllocation(
+ size_t min_size) {
Address new_limit = ComputeLimit(top(), to_space_.page_high(),
ALIGN_TO_ALLOCATION_ALIGNMENT(min_size));
DCHECK_LE(top(), new_limit);
@@ -683,6 +605,10 @@ void SemiSpaceNewSpace::UpdateInlineAllocationLimit(size_t min_size) {
#endif
}
+void SemiSpaceNewSpace::UpdateInlineAllocationLimit() {
+ UpdateInlineAllocationLimitForAllocation(0);
+}
+
bool SemiSpaceNewSpace::AddFreshPage() {
Address top = allocation_info_.top();
DCHECK(!OldSpace::IsAtPageStart(top));
@@ -734,10 +660,14 @@ bool SemiSpaceNewSpace::AddParkedAllocationBuffer(
return false;
}
+void SemiSpaceNewSpace::ResetParkedAllocationBuffers() {
+ parked_allocation_buffers_.clear();
+}
+
void SemiSpaceNewSpace::FreeLinearAllocationArea() {
AdvanceAllocationObservers();
MakeLinearAllocationAreaIterable();
- UpdateInlineAllocationLimit(0);
+ UpdateInlineAllocationLimit();
}
#if DEBUG
@@ -753,22 +683,69 @@ void SemiSpaceNewSpace::VerifyTop() const {
#ifdef VERIFY_HEAP
// We do not use the SemiSpaceObjectIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
-void SemiSpaceNewSpace::Verify(Isolate* isolate) const {
+void SemiSpaceNewSpace::Verify(Isolate* isolate,
+ SpaceVerificationVisitor* visitor) const {
// The allocation pointer should be in the space or at the very end.
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- // There should be objects packed in from the low address up to the
- // allocation pointer.
- Address current = to_space_.first_page()->area_start();
- CHECK_EQ(current, to_space_.space_start());
-
- VerifyImpl(isolate, Page::FromAllocationAreaAddress(current), current);
+ VerifyObjects(isolate, visitor);
// Check semi-spaces.
CHECK_EQ(from_space_.id(), kFromSpace);
CHECK_EQ(to_space_.id(), kToSpace);
- from_space_.Verify();
- to_space_.Verify();
+ from_space_.VerifyPageMetadata();
+ to_space_.VerifyPageMetadata();
+}
+
+// We do not use the SemiSpaceObjectIterator because verification doesn't assume
+// that it works (it depends on the invariants we are checking).
+void SemiSpaceNewSpace::VerifyObjects(Isolate* isolate,
+ SpaceVerificationVisitor* visitor) const {
+ size_t external_space_bytes[kNumTypes];
+ for (int i = 0; i < kNumTypes; i++) {
+ external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
+ PtrComprCageBase cage_base(isolate);
+ for (const Page* page = to_space_.first_page(); page;
+ page = page->next_page()) {
+ visitor->VerifyPage(page);
+
+ Address current_address = page->area_start();
+
+ while (!Page::IsAlignedToPageSize(current_address)) {
+ HeapObject object = HeapObject::FromAddress(current_address);
+
+ // The first word should be a map, and we expect all map pointers to
+ // be in map space or read-only space.
+ int size = object.Size(cage_base);
+
+ visitor->VerifyObject(object);
+
+ if (object.IsExternalString(cage_base)) {
+ ExternalString external_string = ExternalString::cast(object);
+ size_t string_size = external_string.ExternalPayloadSize();
+ external_space_bytes[ExternalBackingStoreType::kExternalString] +=
+ string_size;
+ }
+
+ current_address += ALIGN_TO_ALLOCATION_ALIGNMENT(size);
+ }
+
+ visitor->VerifyPageDone(page);
+ }
+
+ for (int i = 0; i < kNumTypes; i++) {
+ if (i == ExternalBackingStoreType::kArrayBuffer) continue;
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
+ }
+
+ if (!v8_flags.concurrent_array_buffer_sweeping) {
+ size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
+ CHECK_EQ(bytes,
+ ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
+ }
}
#endif // VERIFY_HEAP
@@ -792,9 +769,11 @@ void SemiSpaceNewSpace::MakeUnusedPagesInToSpaceIterable() {
// Fix the current page, above the LAB.
DCHECK_NOT_NULL(*it);
- DCHECK((*it)->Contains(limit()));
- heap()->CreateFillerObjectAt(limit(),
- static_cast<int>((*it)->area_end() - limit()));
+ if (limit() != (*it)->area_end()) {
+ DCHECK((*it)->Contains(limit()));
+ heap()->CreateFillerObjectAt(limit(),
+ static_cast<int>((*it)->area_end() - limit()));
+ }
// Fix the remaining unused pages in the "to" semispace.
for (Page* page = *(++it); page != nullptr; page = *(++it)) {
@@ -803,15 +782,6 @@ void SemiSpaceNewSpace::MakeUnusedPagesInToSpaceIterable() {
}
}
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-void SemiSpaceNewSpace::ClearUnusedObjectStartBitmaps() {
- if (!IsFromSpaceCommitted()) return;
- for (Page* page : PageRange(from_space().first_page(), nullptr)) {
- page->object_start_bitmap()->Clear();
- }
-}
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
-
bool SemiSpaceNewSpace::ShouldBePromoted(Address address) const {
Page* page = Page::FromAddress(address);
Address current_age_mark = age_mark();
@@ -872,7 +842,7 @@ void SemiSpaceNewSpace::EvacuatePrologue() {
DCHECK_EQ(0u, Size());
}
-void SemiSpaceNewSpace::EvacuateEpilogue() { set_age_mark(top()); }
+void SemiSpaceNewSpace::GarbageCollectionEpilogue() { set_age_mark(top()); }
void SemiSpaceNewSpace::ZapUnusedMemory() {
if (!IsFromSpaceCommitted()) return;
@@ -961,7 +931,7 @@ void PagedSpaceForNewSpace::Grow() {
}
bool PagedSpaceForNewSpace::StartShrinking() {
- DCHECK_EQ(current_capacity_, target_capacity_);
+ DCHECK_GE(current_capacity_, target_capacity_);
DCHECK(heap()->tracer()->IsInAtomicPause());
size_t new_target_capacity =
RoundUp(std::max(initial_capacity_, 2 * Size()), Page::kPageSize);
@@ -985,13 +955,14 @@ void PagedSpaceForNewSpace::FinishShrinking() {
}
}
-void PagedSpaceForNewSpace::UpdateInlineAllocationLimit(size_t size_in_bytes) {
- PagedSpaceBase::UpdateInlineAllocationLimit(size_in_bytes);
+void PagedSpaceForNewSpace::UpdateInlineAllocationLimit() {
+ PagedSpaceBase::UpdateInlineAllocationLimit();
}
size_t PagedSpaceForNewSpace::AddPage(Page* page) {
current_capacity_ += Page::kPageSize;
- DCHECK_LE(current_capacity_, target_capacity_);
+ DCHECK_IMPLIES(!force_allocation_success_,
+ UsableCapacity() <= TotalCapacity());
return PagedSpaceBase::AddPage(page);
}
@@ -1011,7 +982,7 @@ bool PagedSpaceForNewSpace::PreallocatePages() {
while (current_capacity_ < target_capacity_) {
if (!TryExpandImpl()) return false;
}
- DCHECK_EQ(current_capacity_, target_capacity_);
+ DCHECK_GE(current_capacity_, target_capacity_);
return true;
}
@@ -1030,18 +1001,50 @@ void PagedSpaceForNewSpace::FreeLinearAllocationArea() {
PagedSpaceBase::FreeLinearAllocationArea();
}
-#ifdef VERIFY_HEAP
-void PagedSpaceForNewSpace::Verify(Isolate* isolate,
- ObjectVisitor* visitor) const {
- PagedSpaceBase::Verify(isolate, visitor);
+bool PagedSpaceForNewSpace::ShouldReleaseEmptyPage() const {
+ return current_capacity_ > target_capacity_;
+}
- DCHECK_EQ(current_capacity_, target_capacity_);
- DCHECK_EQ(current_capacity_, Page::kPageSize * CountTotalPages());
+void PagedSpaceForNewSpace::RefillFreeList() {
+  // New space is not used for concurrent allocations or allocations during
+ // evacuation.
+ DCHECK(heap_->IsMainThread() ||
+ (heap_->IsSharedMainThread() &&
+ !heap_->isolate()->is_shared_space_isolate()));
+ DCHECK(!is_compaction_space());
+
+ Sweeper* sweeper = heap()->sweeper();
+
+ Sweeper::SweptList swept_pages = sweeper->GetAllSweptPagesSafe(this);
+ if (swept_pages.empty()) return;
+
+ base::MutexGuard guard(mutex());
+ for (Page* p : swept_pages) {
+ DCHECK(!p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE));
+ RefineAllocatedBytesAfterSweeping(p);
+ RelinkFreeListCategories(p);
+ }
}
-#endif // VERIFY_HEAP
-bool PagedSpaceForNewSpace::ShouldReleasePage() const {
- return current_capacity_ > target_capacity_;
+bool PagedSpaceForNewSpace::AddPageBeyondCapacity(int size_in_bytes,
+ AllocationOrigin origin) {
+ DCHECK(heap()->sweeper()->IsSweepingDoneForSpace(NEW_SPACE));
+ if (!force_allocation_success_ &&
+ ((UsableCapacity() >= TotalCapacity()) ||
+ (TotalCapacity() - UsableCapacity() < Page::kPageSize)))
+ return false;
+ if (!heap()->CanExpandOldGeneration(Size() + heap()->new_lo_space()->Size() +
+ Page::kPageSize)) {
+    // Assuming all of new space is alive, doing a full GC and promoting all
+ // objects should still succeed. Don't let new space grow if it means it
+ // will exceed the available size of old space.
+ return false;
+ }
+ DCHECK_IMPLIES(heap()->incremental_marking()->IsMarking(),
+ heap()->incremental_marking()->IsMajorMarking());
+ if (!TryExpandImpl()) return false;
+ return TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
+ origin);
}
// -----------------------------------------------------------------------------
@@ -1062,17 +1065,5 @@ PagedNewSpace::~PagedNewSpace() {
paged_space_.TearDown();
}
-#ifdef VERIFY_HEAP
-void PagedNewSpace::Verify(Isolate* isolate) const {
- const Page* first_page = paged_space_.first_page();
-
- if (first_page) VerifyImpl(isolate, first_page, first_page->area_start());
-
- // Check paged-spaces.
- VerifyPointersVisitor visitor(heap());
- paged_space_.Verify(isolate, &visitor);
-}
-#endif // VERIFY_HEAP
-
} // namespace internal
} // namespace v8