author     Allan Sandfeld Jensen <allan.jensen@qt.io>    2020-10-12 14:27:29 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>    2020-10-13 09:35:20 +0000
commit     c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree       e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/v8/src/heap/spaces-inl.h
parent     7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
download   qtwebengine-chromium-85-based.tar.gz
BASELINE: Update Chromium to 85.0.4183.140 (85-based)
Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8/src/heap/spaces-inl.h')
-rw-r--r--    chromium/v8/src/heap/spaces-inl.h    405
1 file changed, 2 insertions(+), 403 deletions(-)
diff --git a/chromium/v8/src/heap/spaces-inl.h b/chromium/v8/src/heap/spaces-inl.h
index cb8b0a54d74..b54b6ac1150 100644
--- a/chromium/v8/src/heap/spaces-inl.h
+++ b/chromium/v8/src/heap/spaces-inl.h
@@ -6,15 +6,15 @@
#define V8_HEAP_SPACES_INL_H_
#include "src/base/atomic-utils.h"
-#include "src/base/bounded-page-allocator.h"
#include "src/base/v8-fallthrough.h"
#include "src/common/globals.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/memory-chunk-inl.h"
+#include "src/heap/new-spaces.h"
+#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"
#include "src/objects/code-inl.h"
-#include "src/sanitizer/msan.h"
namespace v8 {
namespace internal {
@@ -42,63 +42,6 @@ PageRange::PageRange(Address start, Address limit)
#endif // DEBUG
}
-// -----------------------------------------------------------------------------
-// SemiSpaceObjectIterator
-
-HeapObject SemiSpaceObjectIterator::Next() {
- while (current_ != limit_) {
- if (Page::IsAlignedToPageSize(current_)) {
- Page* page = Page::FromAllocationAreaAddress(current_);
- page = page->next_page();
- DCHECK(page);
- current_ = page->area_start();
- if (current_ == limit_) return HeapObject();
- }
- HeapObject object = HeapObject::FromAddress(current_);
- current_ += object.Size();
- if (!object.IsFreeSpaceOrFiller()) {
- return object;
- }
- }
- return HeapObject();
-}
-
-// -----------------------------------------------------------------------------
-// PagedSpaceObjectIterator
-
-HeapObject PagedSpaceObjectIterator::Next() {
- do {
- HeapObject next_obj = FromCurrentPage();
- if (!next_obj.is_null()) return next_obj;
- } while (AdvanceToNextPage());
- return HeapObject();
-}
-
-HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
- while (cur_addr_ != cur_end_) {
- if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
- cur_addr_ = space_->limit();
- continue;
- }
- HeapObject obj = HeapObject::FromAddress(cur_addr_);
- const int obj_size = obj.Size();
- cur_addr_ += obj_size;
- DCHECK_LE(cur_addr_, cur_end_);
- if (!obj.IsFreeSpaceOrFiller()) {
- if (obj.IsCode()) {
- DCHECK_IMPLIES(
- space_->identity() != CODE_SPACE,
- space_->identity() == RO_SPACE && Code::cast(obj).is_builtin());
- DCHECK_CODEOBJECT_SIZE(obj_size, space_);
- } else {
- DCHECK_OBJECT_SIZE(obj_size);
- }
- return obj;
- }
- }
- return HeapObject();
-}
-
void Space::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount) {
base::CheckedIncrement(&external_backing_store_bytes_[type], amount);
@@ -120,93 +63,6 @@ void Space::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
}
-// -----------------------------------------------------------------------------
-// SemiSpace
-
-bool SemiSpace::Contains(HeapObject o) {
- MemoryChunk* memory_chunk = MemoryChunk::FromHeapObject(o);
- if (memory_chunk->IsLargePage()) return false;
- return id_ == kToSpace ? memory_chunk->IsToPage()
- : memory_chunk->IsFromPage();
-}
-
-bool SemiSpace::Contains(Object o) {
- return o.IsHeapObject() && Contains(HeapObject::cast(o));
-}
-
-bool SemiSpace::ContainsSlow(Address a) {
- for (Page* p : *this) {
- if (p == MemoryChunk::FromAddress(a)) return true;
- }
- return false;
-}
-
-// --------------------------------------------------------------------------
-// NewSpace
-
-bool NewSpace::Contains(Object o) {
- return o.IsHeapObject() && Contains(HeapObject::cast(o));
-}
-
-bool NewSpace::Contains(HeapObject o) {
- return MemoryChunk::FromHeapObject(o)->InNewSpace();
-}
-
-bool NewSpace::ContainsSlow(Address a) {
- return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
-}
-
-bool NewSpace::ToSpaceContainsSlow(Address a) {
- return to_space_.ContainsSlow(a);
-}
-
-bool NewSpace::ToSpaceContains(Object o) { return to_space_.Contains(o); }
-bool NewSpace::FromSpaceContains(Object o) { return from_space_.Contains(o); }
-
-bool PagedSpace::Contains(Address addr) {
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- return true;
- }
- return Page::FromAddress(addr)->owner() == this;
-}
-
-bool PagedSpace::Contains(Object o) {
- if (!o.IsHeapObject()) return false;
- return Page::FromAddress(o.ptr())->owner() == this;
-}
-
-void PagedSpace::UnlinkFreeListCategories(Page* page) {
- DCHECK_EQ(this, page->owner());
- page->ForAllFreeListCategories([this](FreeListCategory* category) {
- free_list()->RemoveCategory(category);
- });
-}
-
-size_t PagedSpace::RelinkFreeListCategories(Page* page) {
- DCHECK_EQ(this, page->owner());
- size_t added = 0;
- page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
- added += category->available();
- category->Relink(free_list());
- });
-
- DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
- page->AvailableInFreeList() ==
- page->AvailableInFreeListFromAllocatedBytes());
- return added;
-}
-
-bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
- if (allocation_info_.top() != kNullAddress) {
- const Address object_address = object.address();
- if ((allocation_info_.top() - object_size) == object_address) {
- allocation_info_.set_top(object_address);
- return true;
- }
- }
- return false;
-}
-
void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner_identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
@@ -278,53 +134,6 @@ MemoryChunk* OldGenerationMemoryChunkIterator::next() {
UNREACHABLE();
}
-bool FreeListCategory::is_linked(FreeList* owner) const {
- return prev_ != nullptr || next_ != nullptr ||
- owner->categories_[type_] == this;
-}
-
-void FreeListCategory::UpdateCountersAfterAllocation(size_t allocation_size) {
- available_ -= allocation_size;
-}
-
-Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
- FreeListCategory* category_top = top(type);
- if (category_top != nullptr) {
- DCHECK(!category_top->top().is_null());
- return Page::FromHeapObject(category_top->top());
- } else {
- return nullptr;
- }
-}
-
-Page* FreeListLegacy::GetPageForSize(size_t size_in_bytes) {
- const int minimum_category =
- static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
- Page* page = GetPageForCategoryType(kHuge);
- if (!page && static_cast<int>(kLarge) >= minimum_category)
- page = GetPageForCategoryType(kLarge);
- if (!page && static_cast<int>(kMedium) >= minimum_category)
- page = GetPageForCategoryType(kMedium);
- if (!page && static_cast<int>(kSmall) >= minimum_category)
- page = GetPageForCategoryType(kSmall);
- if (!page && static_cast<int>(kTiny) >= minimum_category)
- page = GetPageForCategoryType(kTiny);
- if (!page && static_cast<int>(kTiniest) >= minimum_category)
- page = GetPageForCategoryType(kTiniest);
- return page;
-}
-
-Page* FreeListFastAlloc::GetPageForSize(size_t size_in_bytes) {
- const int minimum_category =
- static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
- Page* page = GetPageForCategoryType(kHuge);
- if (!page && static_cast<int>(kLarge) >= minimum_category)
- page = GetPageForCategoryType(kLarge);
- if (!page && static_cast<int>(kMedium) >= minimum_category)
- page = GetPageForCategoryType(kMedium);
- return page;
-}
-
AllocationResult LocalAllocationBuffer::AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
@@ -343,216 +152,6 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
return AllocationResult(HeapObject::FromAddress(current_top));
}
-bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin) {
- if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
- return true;
- }
- return SlowRefillLinearAllocationArea(size_in_bytes, origin);
-}
-
-HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
- Address current_top = allocation_info_.top();
- Address new_top = current_top + size_in_bytes;
- DCHECK_LE(new_top, allocation_info_.limit());
- allocation_info_.set_top(new_top);
- return HeapObject::FromAddress(current_top);
-}
-
-HeapObject PagedSpace::TryAllocateLinearlyAligned(
- int* size_in_bytes, AllocationAlignment alignment) {
- Address current_top = allocation_info_.top();
- int filler_size = Heap::GetFillToAlign(current_top, alignment);
-
- Address new_top = current_top + filler_size + *size_in_bytes;
- if (new_top > allocation_info_.limit()) return HeapObject();
-
- allocation_info_.set_top(new_top);
- if (filler_size > 0) {
- *size_in_bytes += filler_size;
- return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
- HeapObject::FromAddress(current_top),
- filler_size);
- }
-
- return HeapObject::FromAddress(current_top);
-}
-
-AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
- AllocationOrigin origin) {
- DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
- if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
- return AllocationResult::Retry(identity());
- }
- HeapObject object = AllocateLinearly(size_in_bytes);
- DCHECK(!object.is_null());
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- return object;
-}
-
-AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
- DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
- int allocation_size = size_in_bytes;
- HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
- if (object.is_null()) {
- // We don't know exactly how much filler we need to align until space is
- // allocated, so assume the worst case.
- int filler_size = Heap::GetMaximumFillToAlign(alignment);
- allocation_size += filler_size;
- if (!EnsureLinearAllocationArea(allocation_size, origin)) {
- return AllocationResult::Retry(identity());
- }
- allocation_size = size_in_bytes;
- object = TryAllocateLinearlyAligned(&allocation_size, alignment);
- DCHECK(!object.is_null());
- }
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- return object;
-}
-
-AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- if (top_on_previous_step_ && top() < top_on_previous_step_ &&
- SupportsInlineAllocation()) {
- // Generated code decreased the top() pointer to do folded allocations.
- // The top_on_previous_step_ can be one byte beyond the current page.
- DCHECK_NE(top(), kNullAddress);
- DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
- Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
- top_on_previous_step_ = top();
- }
- size_t bytes_since_last =
- top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
-
- DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
-#ifdef V8_HOST_ARCH_32_BIT
- AllocationResult result =
- alignment != kWordAligned
- ? AllocateRawAligned(size_in_bytes, alignment, origin)
- : AllocateRawUnaligned(size_in_bytes, origin);
-#else
- AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
-#endif
- HeapObject heap_obj;
- if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
- AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
- heap_obj.address(), size_in_bytes);
- StartNextInlineAllocationStep();
- DCHECK_IMPLIES(
- heap()->incremental_marking()->black_allocation(),
- heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
- }
- return result;
-}
-
-// -----------------------------------------------------------------------------
-// NewSpace
-
-AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- Address top = allocation_info_.top();
- int filler_size = Heap::GetFillToAlign(top, alignment);
- int aligned_size_in_bytes = size_in_bytes + filler_size;
-
- if (allocation_info_.limit() - top <
- static_cast<uintptr_t>(aligned_size_in_bytes)) {
- // See if we can create room.
- if (!EnsureAllocation(size_in_bytes, alignment)) {
- return AllocationResult::Retry();
- }
-
- top = allocation_info_.top();
- filler_size = Heap::GetFillToAlign(top, alignment);
- aligned_size_in_bytes = size_in_bytes + filler_size;
- }
-
- HeapObject obj = HeapObject::FromAddress(top);
- allocation_info_.set_top(top + aligned_size_in_bytes);
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
- if (filler_size > 0) {
- obj = Heap::PrecedeWithFiller(ReadOnlyRoots(heap()), obj, filler_size);
- }
-
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- return obj;
-}
-
-AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
- AllocationOrigin origin) {
- Address top = allocation_info_.top();
- if (allocation_info_.limit() < top + size_in_bytes) {
- // See if we can create room.
- if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
- return AllocationResult::Retry();
- }
-
- top = allocation_info_.top();
- }
-
- HeapObject obj = HeapObject::FromAddress(top);
- allocation_info_.set_top(top + size_in_bytes);
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- return obj;
-}
-
-AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- if (top() < top_on_previous_step_) {
- // Generated code decreased the top() pointer to do folded allocations
- DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
- Page::FromAllocationAreaAddress(top_on_previous_step_));
- top_on_previous_step_ = top();
- }
-#ifdef V8_HOST_ARCH_32_BIT
- return alignment != kWordAligned
- ? AllocateRawAligned(size_in_bytes, alignment, origin)
- : AllocateRawUnaligned(size_in_bytes, origin);
-#else
-#ifdef V8_COMPRESS_POINTERS
- // TODO(ishell, v8:8875): Consider using aligned allocations once the
- // allocation alignment inconsistency is fixed. For now we keep using
- // unaligned access since both x64 and arm64 architectures (where pointer
- // compression is supported) allow unaligned access to doubles and full words.
-#endif // V8_COMPRESS_POINTERS
- return AllocateRawUnaligned(size_in_bytes, origin);
-#endif
-}
-
-V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
- int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
- base::MutexGuard guard(&mutex_);
- return AllocateRaw(size_in_bytes, alignment, origin);
-}
-
LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
AllocationResult result,
intptr_t size) {