path: root/chromium/v8/src/heap/spaces.cc
author     Allan Sandfeld Jensen <allan.jensen@qt.io>    2020-10-12 14:27:29 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>    2020-10-13 09:35:20 +0000
commit     c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree       e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/v8/src/heap/spaces.cc
parent     7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
download   qtwebengine-chromium-85-based.tar.gz

BASELINE: Update Chromium to 85.0.4183.140 (85-based)

Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8/src/heap/spaces.cc')
-rw-r--r--   chromium/v8/src/heap/spaces.cc   3441
1 file changed, 26 insertions, 3415 deletions
diff --git a/chromium/v8/src/heap/spaces.cc b/chromium/v8/src/heap/spaces.cc
index 5e8874fafde..45c1de44c20 100644
--- a/chromium/v8/src/heap/spaces.cc
+++ b/chromium/v8/src/heap/spaces.cc
@@ -9,12 +9,9 @@
#include <utility>
#include "src/base/bits.h"
+#include "src/base/bounded-page-allocator.h"
#include "src/base/macros.h"
-#include "src/base/optional.h"
-#include "src/base/platform/semaphore.h"
#include "src/common/globals.h"
-#include "src/execution/vm-state-inl.h"
-#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
@@ -26,9 +23,8 @@
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact.h"
#include "src/heap/read-only-heap.h"
-#include "src/heap/remembered-set-inl.h"
+#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
-#include "src/heap/sweeper.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/objects/free-space-inl.h"
@@ -49,55 +45,6 @@ namespace internal {
STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
-// ----------------------------------------------------------------------------
-// PagedSpaceObjectIterator
-
-PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
- PagedSpace* space)
- : cur_addr_(kNullAddress),
- cur_end_(kNullAddress),
- space_(space),
- page_range_(space->first_page(), nullptr),
- current_page_(page_range_.begin()) {
- heap->mark_compact_collector()->EnsureSweepingCompleted();
-}
-
-PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
- PagedSpace* space,
- Page* page)
- : cur_addr_(kNullAddress),
- cur_end_(kNullAddress),
- space_(space),
- page_range_(page),
- current_page_(page_range_.begin()) {
- heap->mark_compact_collector()->EnsureSweepingCompleted();
-#ifdef DEBUG
- AllocationSpace owner = page->owner_identity();
- DCHECK(owner == RO_SPACE || owner == OLD_SPACE || owner == MAP_SPACE ||
- owner == CODE_SPACE);
-#endif // DEBUG
-}
-
-PagedSpaceObjectIterator::PagedSpaceObjectIterator(OffThreadSpace* space)
- : cur_addr_(kNullAddress),
- cur_end_(kNullAddress),
- space_(space),
- page_range_(space->first_page(), nullptr),
- current_page_(page_range_.begin()) {}
-
-// We have hit the end of the page and should advance to the next block of
-// objects. This happens at the end of the page.
-bool PagedSpaceObjectIterator::AdvanceToNextPage() {
- DCHECK_EQ(cur_addr_, cur_end_);
- if (current_page_ == page_range_.end()) return false;
- Page* cur_page = *(current_page_++);
-
- cur_addr_ = cur_page->area_start();
- cur_end_ = cur_page->area_end();
- DCHECK(cur_page->SweepingDone());
- return true;
-}
-
PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
: heap_(heap) {
DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
@@ -113,541 +60,6 @@ PauseAllocationObserversScope::~PauseAllocationObserversScope() {
}
}
-static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
- LAZY_INSTANCE_INITIALIZER;
-
-Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
- base::MutexGuard guard(&mutex_);
- auto it = recently_freed_.find(code_range_size);
- if (it == recently_freed_.end() || it->second.empty()) {
- return reinterpret_cast<Address>(GetRandomMmapAddr());
- }
- Address result = it->second.back();
- it->second.pop_back();
- return result;
-}
-
-void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
- size_t code_range_size) {
- base::MutexGuard guard(&mutex_);
- recently_freed_[code_range_size].push_back(code_range_start);
-}
-
-// -----------------------------------------------------------------------------
-// MemoryAllocator
-//
-
-MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
- size_t code_range_size)
- : isolate_(isolate),
- data_page_allocator_(isolate->page_allocator()),
- code_page_allocator_(nullptr),
- capacity_(RoundUp(capacity, Page::kPageSize)),
- size_(0),
- size_executable_(0),
- lowest_ever_allocated_(static_cast<Address>(-1ll)),
- highest_ever_allocated_(kNullAddress),
- unmapper_(isolate->heap(), this) {
- InitializeCodePageAllocator(data_page_allocator_, code_range_size);
-}
-
-void MemoryAllocator::InitializeCodePageAllocator(
- v8::PageAllocator* page_allocator, size_t requested) {
- DCHECK_NULL(code_page_allocator_instance_.get());
-
- code_page_allocator_ = page_allocator;
-
- if (requested == 0) {
- if (!isolate_->RequiresCodeRange()) return;
- // When a target requires the code range feature, we put all code objects
- // in a kMaximalCodeRangeSize range of virtual address space, so that
- // they can call each other with near calls.
- requested = kMaximalCodeRangeSize;
- } else if (requested <= kMinimumCodeRangeSize) {
- requested = kMinimumCodeRangeSize;
- }
-
- const size_t reserved_area =
- kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
- if (requested < (kMaximalCodeRangeSize - reserved_area)) {
- requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
- // Fullfilling both reserved pages requirement and huge code area
- // alignments is not supported (requires re-implementation).
- DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
- }
- DCHECK(!isolate_->RequiresCodeRange() || requested <= kMaximalCodeRangeSize);
-
- Address hint =
- RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
- page_allocator->AllocatePageSize());
- VirtualMemory reservation(
- page_allocator, requested, reinterpret_cast<void*>(hint),
- Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
- if (!reservation.IsReserved()) {
- V8::FatalProcessOutOfMemory(isolate_,
- "CodeRange setup: allocate virtual memory");
- }
- code_range_ = reservation.region();
- isolate_->AddCodeRange(code_range_.begin(), code_range_.size());
-
- // We are sure that we have mapped a block of requested addresses.
- DCHECK_GE(reservation.size(), requested);
- Address base = reservation.address();
-
- // On some platforms, specifically Win64, we need to reserve some pages at
- // the beginning of an executable space. See
- // https://cs.chromium.org/chromium/src/components/crash/content/
- // app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
- // for details.
- if (reserved_area > 0) {
- if (!reservation.SetPermissions(base, reserved_area,
- PageAllocator::kReadWrite))
- V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");
-
- base += reserved_area;
- }
- Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
- size_t size =
- RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
- MemoryChunk::kPageSize);
- DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));
-
- LOG(isolate_,
- NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
- requested));
-
- code_reservation_ = std::move(reservation);
- code_page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
- page_allocator, aligned_base, size,
- static_cast<size_t>(MemoryChunk::kAlignment));
- code_page_allocator_ = code_page_allocator_instance_.get();
-}
-
-void MemoryAllocator::TearDown() {
- unmapper()->TearDown();
-
- // Check that spaces were torn down before MemoryAllocator.
- DCHECK_EQ(size_, 0u);
- // TODO(gc) this will be true again when we fix FreeMemory.
- // DCHECK_EQ(0, size_executable_);
- capacity_ = 0;
-
- if (last_chunk_.IsReserved()) {
- last_chunk_.Free();
- }
-
- if (code_page_allocator_instance_.get()) {
- DCHECK(!code_range_.is_empty());
- code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
- code_range_.size());
- code_range_ = base::AddressRegion();
- code_page_allocator_instance_.reset();
- }
- code_page_allocator_ = nullptr;
- data_page_allocator_ = nullptr;
-}
-
-class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
- public:
- explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
- : CancelableTask(isolate),
- unmapper_(unmapper),
- tracer_(isolate->heap()->tracer()) {}
-
- private:
- void RunInternal() override {
- TRACE_BACKGROUND_GC(tracer_,
- GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
- unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
- unmapper_->active_unmapping_tasks_--;
- unmapper_->pending_unmapping_tasks_semaphore_.Signal();
- if (FLAG_trace_unmapper) {
- PrintIsolate(unmapper_->heap_->isolate(),
- "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
- }
- }
-
- Unmapper* const unmapper_;
- GCTracer* const tracer_;
- DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
-};
-
-void MemoryAllocator::Unmapper::FreeQueuedChunks() {
- if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
- if (!MakeRoomForNewTasks()) {
- // kMaxUnmapperTasks are already running. Avoid creating any more.
- if (FLAG_trace_unmapper) {
- PrintIsolate(heap_->isolate(),
- "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
- kMaxUnmapperTasks);
- }
- return;
- }
- auto task = std::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
- if (FLAG_trace_unmapper) {
- PrintIsolate(heap_->isolate(),
- "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
- task->id());
- }
- DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
- DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
- DCHECK_GE(active_unmapping_tasks_, 0);
- active_unmapping_tasks_++;
- task_ids_[pending_unmapping_tasks_++] = task->id();
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
- } else {
- PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
- }
-}
-
-void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
- for (int i = 0; i < pending_unmapping_tasks_; i++) {
- if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
- TryAbortResult::kTaskAborted) {
- pending_unmapping_tasks_semaphore_.Wait();
- }
- }
- pending_unmapping_tasks_ = 0;
- active_unmapping_tasks_ = 0;
-
- if (FLAG_trace_unmapper) {
- PrintIsolate(
- heap_->isolate(),
- "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
- }
-}
-
-void MemoryAllocator::Unmapper::PrepareForGC() {
- // Free non-regular chunks because they cannot be re-used.
- PerformFreeMemoryOnQueuedNonRegularChunks();
-}
-
-void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
- CancelAndWaitForPendingTasks();
- PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
-}
-
-bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
- DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
-
- if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
- // All previous unmapping tasks have been run to completion.
- // Finalize those tasks to make room for new ones.
- CancelAndWaitForPendingTasks();
- }
- return pending_unmapping_tasks_ != kMaxUnmapperTasks;
-}
-
-void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
- MemoryChunk* chunk = nullptr;
- while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
- allocator_->PerformFreeMemory(chunk);
- }
-}
-
-template <MemoryAllocator::Unmapper::FreeMode mode>
-void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
- MemoryChunk* chunk = nullptr;
- if (FLAG_trace_unmapper) {
- PrintIsolate(
- heap_->isolate(),
- "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
- NumberOfChunks());
- }
- // Regular chunks.
- while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
- bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
- allocator_->PerformFreeMemory(chunk);
- if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
- }
- if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
- // The previous loop uncommitted any pages marked as pooled and added them
- // to the pooled list. In case of kReleasePooled we need to free them
- // though.
- while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
- allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
- }
- }
- PerformFreeMemoryOnQueuedNonRegularChunks();
-}
-
-void MemoryAllocator::Unmapper::TearDown() {
- CHECK_EQ(0, pending_unmapping_tasks_);
- PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
- for (int i = 0; i < kNumberOfChunkQueues; i++) {
- DCHECK(chunks_[i].empty());
- }
-}
-
-size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
- base::MutexGuard guard(&mutex_);
- return chunks_[kRegular].size() + chunks_[kNonRegular].size();
-}
-
-int MemoryAllocator::Unmapper::NumberOfChunks() {
- base::MutexGuard guard(&mutex_);
- size_t result = 0;
- for (int i = 0; i < kNumberOfChunkQueues; i++) {
- result += chunks_[i].size();
- }
- return static_cast<int>(result);
-}
-
-size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
- base::MutexGuard guard(&mutex_);
-
- size_t sum = 0;
- // kPooled chunks are already uncommited. We only have to account for
- // kRegular and kNonRegular chunks.
- for (auto& chunk : chunks_[kRegular]) {
- sum += chunk->size();
- }
- for (auto& chunk : chunks_[kNonRegular]) {
- sum += chunk->size();
- }
- return sum;
-}
-
-bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
- Address base = reservation->address();
- size_t size = reservation->size();
- if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
- return false;
- }
- UpdateAllocatedSpaceLimits(base, base + size);
- return true;
-}
-
-bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
- size_t size = reservation->size();
- if (!reservation->SetPermissions(reservation->address(), size,
- PageAllocator::kNoAccess)) {
- return false;
- }
- return true;
-}
-
-void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
- Address base, size_t size) {
- CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
-}
-
-Address MemoryAllocator::AllocateAlignedMemory(
- size_t reserve_size, size_t commit_size, size_t alignment,
- Executability executable, void* hint, VirtualMemory* controller) {
- v8::PageAllocator* page_allocator = this->page_allocator(executable);
- DCHECK(commit_size <= reserve_size);
- VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
- if (!reservation.IsReserved()) return kNullAddress;
- Address base = reservation.address();
- size_ += reservation.size();
-
- if (executable == EXECUTABLE) {
- if (!CommitExecutableMemory(&reservation, base, commit_size,
- reserve_size)) {
- base = kNullAddress;
- }
- } else {
- if (reservation.SetPermissions(base, commit_size,
- PageAllocator::kReadWrite)) {
- UpdateAllocatedSpaceLimits(base, base + commit_size);
- } else {
- base = kNullAddress;
- }
- }
-
- if (base == kNullAddress) {
- // Failed to commit the body. Free the mapping and any partially committed
- // regions inside it.
- reservation.Free();
- size_ -= reserve_size;
- return kNullAddress;
- }
-
- *controller = std::move(reservation);
- return base;
-}
-
-void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
- auto result = code_object_registry_newly_allocated_.insert(code);
- USE(result);
- DCHECK(result.second);
-}
-
-void CodeObjectRegistry::RegisterAlreadyExistingCodeObject(Address code) {
- code_object_registry_already_existing_.push_back(code);
-}
-
-void CodeObjectRegistry::Clear() {
- code_object_registry_already_existing_.clear();
- code_object_registry_newly_allocated_.clear();
-}
-
-void CodeObjectRegistry::Finalize() {
- code_object_registry_already_existing_.shrink_to_fit();
-}
-
-bool CodeObjectRegistry::Contains(Address object) const {
- return (code_object_registry_newly_allocated_.find(object) !=
- code_object_registry_newly_allocated_.end()) ||
- (std::binary_search(code_object_registry_already_existing_.begin(),
- code_object_registry_already_existing_.end(),
- object));
-}
-
-Address CodeObjectRegistry::GetCodeObjectStartFromInnerAddress(
- Address address) const {
- // Let's first find the object which comes right before address in the vector
- // of already existing code objects.
- Address already_existing_set_ = 0;
- Address newly_allocated_set_ = 0;
- if (!code_object_registry_already_existing_.empty()) {
- auto it =
- std::upper_bound(code_object_registry_already_existing_.begin(),
- code_object_registry_already_existing_.end(), address);
- if (it != code_object_registry_already_existing_.begin()) {
- already_existing_set_ = *(--it);
- }
- }
-
- // Next, let's find the object which comes right before address in the set
- // of newly allocated code objects.
- if (!code_object_registry_newly_allocated_.empty()) {
- auto it = code_object_registry_newly_allocated_.upper_bound(address);
- if (it != code_object_registry_newly_allocated_.begin()) {
- newly_allocated_set_ = *(--it);
- }
- }
-
- // The code objects which contains address has to be in one of the two
- // data structures.
- DCHECK(already_existing_set_ != 0 || newly_allocated_set_ != 0);
-
- // The address which is closest to the given address is the code object.
- return already_existing_set_ > newly_allocated_set_ ? already_existing_set_
- : newly_allocated_set_;
-}
-
-namespace {
-
-PageAllocator::Permission DefaultWritableCodePermissions() {
- return FLAG_jitless ? PageAllocator::kReadWrite
- : PageAllocator::kReadWriteExecute;
-}
-
-} // namespace
-
-MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
- Address area_start, Address area_end,
- Executability executable, Space* owner,
- VirtualMemory reservation) {
- MemoryChunk* chunk = FromAddress(base);
- DCHECK_EQ(base, chunk->address());
- new (chunk) BasicMemoryChunk(size, area_start, area_end);
-
- chunk->heap_ = heap;
- chunk->set_owner(owner);
- chunk->InitializeReservedMemory();
- base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
- nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
- nullptr);
- chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
- chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
- chunk->progress_bar_ = 0;
- chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
- chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
- chunk->page_protection_change_mutex_ = new base::Mutex();
- chunk->write_unprotect_counter_ = 0;
- chunk->mutex_ = new base::Mutex();
- chunk->allocated_bytes_ = chunk->area_size();
- chunk->wasted_memory_ = 0;
- chunk->young_generation_bitmap_ = nullptr;
- chunk->local_tracker_ = nullptr;
-
- chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
- 0;
- chunk->external_backing_store_bytes_
- [ExternalBackingStoreType::kExternalString] = 0;
-
- chunk->categories_ = nullptr;
-
- heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
- 0);
- if (owner->identity() == RO_SPACE) {
- heap->incremental_marking()
- ->non_atomic_marking_state()
- ->bitmap(chunk)
- ->MarkAllBits();
- chunk->SetFlag(READ_ONLY_HEAP);
- }
-
- if (executable == EXECUTABLE) {
- chunk->SetFlag(IS_EXECUTABLE);
- if (heap->write_protect_code_memory()) {
- chunk->write_unprotect_counter_ =
- heap->code_space_memory_modification_scope_depth();
- } else {
- size_t page_size = MemoryAllocator::GetCommitPageSize();
- DCHECK(IsAligned(area_start, page_size));
- size_t area_size = RoundUp(area_end - area_start, page_size);
- CHECK(reservation.SetPermissions(area_start, area_size,
- DefaultWritableCodePermissions()));
- }
- }
-
- chunk->reservation_ = std::move(reservation);
-
- if (owner->identity() == CODE_SPACE) {
- chunk->code_object_registry_ = new CodeObjectRegistry();
- } else {
- chunk->code_object_registry_ = nullptr;
- }
-
- chunk->possibly_empty_buckets_.Initialize();
-
- return chunk;
-}
-
-Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
- Page* page = static_cast<Page*>(chunk);
- DCHECK_EQ(
- MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
- page->area_size());
- // Make sure that categories are initialized before freeing the area.
- page->ResetAllocationStatistics();
- page->SetOldGenerationPageFlags(!is_off_thread_space() &&
- heap()->incremental_marking()->IsMarking());
- page->AllocateFreeListCategories();
- page->InitializeFreeListCategories();
- page->list_node().Initialize();
- page->InitializationMemoryFence();
- return page;
-}
-
-Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
- bool in_to_space = (id() != kFromSpace);
- chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
- Page* page = static_cast<Page*>(chunk);
- page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
- page->AllocateLocalTracker();
- page->list_node().Initialize();
-#ifdef ENABLE_MINOR_MC
- if (FLAG_minor_mc) {
- page->AllocateYoungGenerationBitmap();
- heap()
- ->minor_mark_compact_collector()
- ->non_atomic_marking_state()
- ->ClearLiveness(page);
- }
-#endif // ENABLE_MINOR_MC
- page->InitializationMemoryFence();
- return page;
-}
-
void Page::AllocateFreeListCategories() {
DCHECK_NULL(categories_);
categories_ =
@@ -718,169 +130,6 @@ void Page::MergeOldToNewRememberedSets() {
sweeping_slot_set_ = nullptr;
}
-size_t MemoryChunk::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
- return size();
- return high_water_mark_;
-}
-
-bool MemoryChunk::InOldSpace() const { return owner_identity() == OLD_SPACE; }
-
-bool MemoryChunk::InLargeObjectSpace() const {
- return owner_identity() == LO_SPACE;
-}
-
-MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
- size_t commit_area_size,
- Executability executable,
- Space* owner) {
- DCHECK_LE(commit_area_size, reserve_area_size);
-
- size_t chunk_size;
- Heap* heap = isolate_->heap();
- Address base = kNullAddress;
- VirtualMemory reservation;
- Address area_start = kNullAddress;
- Address area_end = kNullAddress;
- void* address_hint =
- AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
-
- //
- // MemoryChunk layout:
- //
- // Executable
- // +----------------------------+<- base aligned with MemoryChunk::kAlignment
- // | Header |
- // +----------------------------+<- base + CodePageGuardStartOffset
- // | Guard |
- // +----------------------------+<- area_start_
- // | Area |
- // +----------------------------+<- area_end_ (area_start + commit_area_size)
- // | Committed but not used |
- // +----------------------------+<- aligned at OS page boundary
- // | Reserved but not committed |
- // +----------------------------+<- aligned at OS page boundary
- // | Guard |
- // +----------------------------+<- base + chunk_size
- //
- // Non-executable
- // +----------------------------+<- base aligned with MemoryChunk::kAlignment
- // | Header |
- // +----------------------------+<- area_start_ (base + area_start_)
- // | Area |
- // +----------------------------+<- area_end_ (area_start + commit_area_size)
- // | Committed but not used |
- // +----------------------------+<- aligned at OS page boundary
- // | Reserved but not committed |
- // +----------------------------+<- base + chunk_size
- //
-
- if (executable == EXECUTABLE) {
- chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
- reserve_area_size +
- MemoryChunkLayout::CodePageGuardSize(),
- GetCommitPageSize());
-
- // Size of header (not executable) plus area (executable).
- size_t commit_size = ::RoundUp(
- MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
- GetCommitPageSize());
- base =
- AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
- executable, address_hint, &reservation);
- if (base == kNullAddress) return nullptr;
- // Update executable memory size.
- size_executable_ += reservation.size();
-
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
- ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
- commit_area_size, kZapValue);
- }
-
- area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
- area_end = area_start + commit_area_size;
- } else {
- chunk_size = ::RoundUp(
- MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
- GetCommitPageSize());
- size_t commit_size = ::RoundUp(
- MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
- GetCommitPageSize());
- base =
- AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
- executable, address_hint, &reservation);
-
- if (base == kNullAddress) return nullptr;
-
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(
- base,
- MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
- kZapValue);
- }
-
- area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
- area_end = area_start + commit_area_size;
- }
-
- // Use chunk_size for statistics because we assume that treat reserved but
- // not-yet committed memory regions of chunks as allocated.
- LOG(isolate_,
- NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
-
- // We cannot use the last chunk in the address space because we would
- // overflow when comparing top and limit if this chunk is used for a
- // linear allocation area.
- if ((base + chunk_size) == 0u) {
- CHECK(!last_chunk_.IsReserved());
- last_chunk_ = std::move(reservation);
- UncommitMemory(&last_chunk_);
- size_ -= chunk_size;
- if (executable == EXECUTABLE) {
- size_executable_ -= chunk_size;
- }
- CHECK(last_chunk_.IsReserved());
- return AllocateChunk(reserve_area_size, commit_area_size, executable,
- owner);
- }
-
- MemoryChunk* chunk =
- MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
- executable, owner, std::move(reservation));
-
- if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
- return chunk;
-}
-
-void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
- if (is_marking) {
- SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- SetFlag(MemoryChunk::INCREMENTAL_MARKING);
- } else {
- ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
- }
-}
-
-void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
- SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- if (is_marking) {
- SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- SetFlag(MemoryChunk::INCREMENTAL_MARKING);
- } else {
- ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
- }
-}
-
-void Page::ResetAllocationStatistics() {
- allocated_bytes_ = area_size();
- wasted_memory_ = 0;
-}
-
void Page::AllocateLocalTracker() {
DCHECK_NULL(local_tracker_);
local_tracker_ = new LocalArrayBufferTracker(this);
@@ -972,6 +221,19 @@ void Page::CreateBlackArea(Address start, Address end) {
marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
}
+void Page::CreateBlackAreaBackground(Address start, Address end) {
+ DCHECK(heap()->incremental_marking()->black_allocation());
+ DCHECK_EQ(Page::FromAddress(start), this);
+ DCHECK_LT(start, end);
+ DCHECK_EQ(Page::FromAddress(end - 1), this);
+ IncrementalMarking::AtomicMarkingState* marking_state =
+ heap()->incremental_marking()->atomic_marking_state();
+ marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
+ AddressToMarkbitIndex(end));
+ heap()->incremental_marking()->IncrementLiveBytesBackground(
+ this, static_cast<intptr_t>(end - start));
+}
+
void Page::DestroyBlackArea(Address start, Address end) {
DCHECK(heap()->incremental_marking()->black_allocation());
DCHECK_EQ(Page::FromAddress(start), this);
@@ -984,441 +246,17 @@ void Page::DestroyBlackArea(Address start, Address end) {
marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
}
-void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
- size_t bytes_to_free,
- Address new_area_end) {
- VirtualMemory* reservation = chunk->reserved_memory();
- DCHECK(reservation->IsReserved());
- chunk->set_size(chunk->size() - bytes_to_free);
- chunk->set_area_end(new_area_end);
- if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
- // Add guard page at the end.
- size_t page_size = GetCommitPageSize();
- DCHECK_EQ(0, chunk->area_end() % static_cast<Address>(page_size));
- DCHECK_EQ(chunk->address() + chunk->size(),
- chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
- reservation->SetPermissions(chunk->area_end(), page_size,
- PageAllocator::kNoAccess);
- }
- // On e.g. Windows, a reservation may be larger than a page and releasing
- // partially starting at |start_free| will also release the potentially
- // unused part behind the current page.
- const size_t released_bytes = reservation->Release(start_free);
- DCHECK_GE(size_, released_bytes);
- size_ -= released_bytes;
-}
-
-void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
- DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
- VirtualMemory* reservation = chunk->reserved_memory();
- const size_t size =
- reservation->IsReserved() ? reservation->size() : chunk->size();
- DCHECK_GE(size_, static_cast<size_t>(size));
- size_ -= size;
- if (chunk->executable() == EXECUTABLE) {
- DCHECK_GE(size_executable_, size);
- size_executable_ -= size;
- }
-
- if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
- chunk->SetFlag(MemoryChunk::UNREGISTERED);
-}
-
-void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
- DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
- LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
- UnregisterMemory(chunk);
- isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
- chunk->IsEvacuationCandidate());
- chunk->SetFlag(MemoryChunk::PRE_FREED);
-}
-
-void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
- DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
- DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
- chunk->ReleaseAllAllocatedMemory();
-
- VirtualMemory* reservation = chunk->reserved_memory();
- if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
- UncommitMemory(reservation);
- } else {
- if (reservation->IsReserved()) {
- reservation->Free();
- } else {
- // Only read-only pages can have non-initialized reservation object.
- DCHECK_EQ(RO_SPACE, chunk->owner_identity());
- FreeMemory(page_allocator(chunk->executable()), chunk->address(),
- chunk->size());
- }
- }
-}
-
-template <MemoryAllocator::FreeMode mode>
-void MemoryAllocator::Free(MemoryChunk* chunk) {
- switch (mode) {
- case kFull:
- PreFreeMemory(chunk);
- PerformFreeMemory(chunk);
- break;
- case kAlreadyPooled:
- // Pooled pages cannot be touched anymore as their memory is uncommitted.
- // Pooled pages are not-executable.
- FreeMemory(data_page_allocator(), chunk->address(),
- static_cast<size_t>(MemoryChunk::kPageSize));
- break;
- case kPooledAndQueue:
- DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
- DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
- chunk->SetFlag(MemoryChunk::POOLED);
- V8_FALLTHROUGH;
- case kPreFreeAndQueue:
- PreFreeMemory(chunk);
- // The chunks added to this queue will be freed by a concurrent thread.
- unmapper()->AddMemoryChunkSafe(chunk);
- break;
- }
-}
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kFull>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
-
-template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
-Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
- Executability executable) {
- MemoryChunk* chunk = nullptr;
- if (alloc_mode == kPooled) {
- DCHECK_EQ(size, static_cast<size_t>(
- MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
- owner->identity())));
- DCHECK_EQ(executable, NOT_EXECUTABLE);
- chunk = AllocatePagePooled(owner);
- }
- if (chunk == nullptr) {
- chunk = AllocateChunk(size, size, executable, owner);
- }
- if (chunk == nullptr) return nullptr;
- return owner->InitializePage(chunk);
-}
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
- size_t size, PagedSpace* owner, Executability executable);
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-
-LargePage* MemoryAllocator::AllocateLargePage(size_t size,
- LargeObjectSpace* owner,
- Executability executable) {
- MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
- if (chunk == nullptr) return nullptr;
- return LargePage::Initialize(isolate_->heap(), chunk, executable);
-}
-
-template <typename SpaceType>
-MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
- MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
- if (chunk == nullptr) return nullptr;
- const int size = MemoryChunk::kPageSize;
- const Address start = reinterpret_cast<Address>(chunk);
- const Address area_start =
- start +
- MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
- const Address area_end = start + size;
- // Pooled pages are always regular data pages.
- DCHECK_NE(CODE_SPACE, owner->identity());
- VirtualMemory reservation(data_page_allocator(), start, size);
- if (!CommitMemory(&reservation)) return nullptr;
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(start, size, kZapValue);
- }
- MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
- NOT_EXECUTABLE, owner, std::move(reservation));
- size_ += size;
- return chunk;
-}
-
-void MemoryAllocator::ZapBlock(Address start, size_t size,
- uintptr_t zap_value) {
- DCHECK(IsAligned(start, kTaggedSize));
- DCHECK(IsAligned(size, kTaggedSize));
- MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(zap_value)),
- size >> kTaggedSizeLog2);
-}
-
-intptr_t MemoryAllocator::GetCommitPageSize() {
- if (FLAG_v8_os_page_size != 0) {
- DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
- return FLAG_v8_os_page_size * KB;
- } else {
- return CommitPageSize();
- }
-}
-
-base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
- size_t size) {
- size_t page_size = MemoryAllocator::GetCommitPageSize();
- if (size < page_size + FreeSpace::kSize) {
- return base::AddressRegion(0, 0);
- }
- Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
- Address discardable_end = RoundDown(addr + size, page_size);
- if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
- return base::AddressRegion(discardable_start,
- discardable_end - discardable_start);
-}
-
-bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
- size_t commit_size,
- size_t reserved_size) {
- const size_t page_size = GetCommitPageSize();
- // All addresses and sizes must be aligned to the commit page size.
- DCHECK(IsAligned(start, page_size));
- DCHECK_EQ(0, commit_size % page_size);
- DCHECK_EQ(0, reserved_size % page_size);
- const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
- const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
- const size_t code_area_offset =
- MemoryChunkLayout::ObjectStartOffsetInCodePage();
- // reserved_size includes two guard regions, commit_size does not.
- DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
- const Address pre_guard_page = start + pre_guard_offset;
- const Address code_area = start + code_area_offset;
- const Address post_guard_page = start + reserved_size - guard_size;
- // Commit the non-executable header, from start to pre-code guard page.
- if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
- // Create the pre-code guard page, following the header.
- if (vm->SetPermissions(pre_guard_page, page_size,
- PageAllocator::kNoAccess)) {
- // Commit the executable code body.
- if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
- PageAllocator::kReadWrite)) {
- // Create the post-code guard page.
- if (vm->SetPermissions(post_guard_page, page_size,
- PageAllocator::kNoAccess)) {
- UpdateAllocatedSpaceLimits(start, code_area + commit_size);
- return true;
- }
- vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
- }
- }
- vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
- }
- return false;
-}
-
-
-// -----------------------------------------------------------------------------
-// MemoryChunk implementation
-
-void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
- if (mutex_ != nullptr) {
- delete mutex_;
- mutex_ = nullptr;
- }
- if (page_protection_change_mutex_ != nullptr) {
- delete page_protection_change_mutex_;
- page_protection_change_mutex_ = nullptr;
- }
- if (code_object_registry_ != nullptr) {
- delete code_object_registry_;
- code_object_registry_ = nullptr;
- }
-
- possibly_empty_buckets_.Release();
- ReleaseSlotSet<OLD_TO_NEW>();
- ReleaseSweepingSlotSet();
- ReleaseSlotSet<OLD_TO_OLD>();
- ReleaseTypedSlotSet<OLD_TO_NEW>();
- ReleaseTypedSlotSet<OLD_TO_OLD>();
- ReleaseInvalidatedSlots<OLD_TO_NEW>();
- ReleaseInvalidatedSlots<OLD_TO_OLD>();
-
- if (local_tracker_ != nullptr) ReleaseLocalTracker();
- if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
-
- if (!IsLargePage()) {
- Page* page = static_cast<Page*>(this);
- page->ReleaseFreeListCategories();
- }
-}
-
-void MemoryChunk::ReleaseAllAllocatedMemory() {
- ReleaseAllocatedMemoryNeededForWritableChunk();
- if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
-}
-
-template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
-template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-SlotSet* MemoryChunk::AllocateSlotSet() {
- return AllocateSlotSet(&slot_set_[type]);
-}
-
-SlotSet* MemoryChunk::AllocateSweepingSlotSet() {
- return AllocateSlotSet(&sweeping_slot_set_);
-}
-
-SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
- SlotSet* new_slot_set = SlotSet::Allocate(buckets());
- SlotSet* old_slot_set = base::AsAtomicPointer::AcquireRelease_CompareAndSwap(
- slot_set, nullptr, new_slot_set);
- if (old_slot_set != nullptr) {
- SlotSet::Delete(new_slot_set, buckets());
- new_slot_set = old_slot_set;
- }
- DCHECK(new_slot_set);
- return new_slot_set;
-}
-
-template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
-template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-void MemoryChunk::ReleaseSlotSet() {
- ReleaseSlotSet(&slot_set_[type]);
-}
-
-void MemoryChunk::ReleaseSweepingSlotSet() {
- ReleaseSlotSet(&sweeping_slot_set_);
-}
-
-void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
- if (*slot_set) {
- SlotSet::Delete(*slot_set, buckets());
- *slot_set = nullptr;
- }
-}
-
-template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
-template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
- TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
- TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
- &typed_slot_set_[type], nullptr, typed_slot_set);
- if (old_value != nullptr) {
- delete typed_slot_set;
- typed_slot_set = old_value;
- }
- DCHECK(typed_slot_set);
- return typed_slot_set;
-}
-
-template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
-template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-void MemoryChunk::ReleaseTypedSlotSet() {
- TypedSlotSet* typed_slot_set = typed_slot_set_[type];
- if (typed_slot_set) {
- typed_slot_set_[type] = nullptr;
- delete typed_slot_set;
- }
-}
-
-template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_NEW>();
-template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
- DCHECK_NULL(invalidated_slots_[type]);
- invalidated_slots_[type] = new InvalidatedSlots();
- return invalidated_slots_[type];
-}
-
-template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_NEW>();
-template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-void MemoryChunk::ReleaseInvalidatedSlots() {
- if (invalidated_slots_[type]) {
- delete invalidated_slots_[type];
- invalidated_slots_[type] = nullptr;
- }
-}
-
-template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
-template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);
-
-template <RememberedSetType type>
-void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
- bool skip_slot_recording;
-
- if (type == OLD_TO_NEW) {
- skip_slot_recording = InYoungGeneration();
- } else {
- skip_slot_recording = ShouldSkipEvacuationSlotRecording();
- }
-
- if (skip_slot_recording) {
- return;
- }
-
- if (invalidated_slots<type>() == nullptr) {
- AllocateInvalidatedSlots<type>();
- }
-
- invalidated_slots<type>()->insert(object);
-}
-
-void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
- if (V8_DISABLE_WRITE_BARRIERS_BOOL) return;
- if (heap()->incremental_marking()->IsCompacting()) {
- // We cannot check slot_set_[OLD_TO_OLD] here, since the
- // concurrent markers might insert slots concurrently.
- RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
- }
-
- if (!FLAG_always_promote_young_mc || slot_set_[OLD_TO_NEW] != nullptr)
- RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
-}
-
-template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
- HeapObject object);
-template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(
- HeapObject object);
-
-template <RememberedSetType type>
-bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
- if (invalidated_slots<type>() == nullptr) {
- return false;
- }
- return invalidated_slots<type>()->find(object) !=
- invalidated_slots<type>()->end();
-}
-
-void MemoryChunk::ReleaseLocalTracker() {
- DCHECK_NOT_NULL(local_tracker_);
- delete local_tracker_;
- local_tracker_ = nullptr;
-}
-
-void MemoryChunk::AllocateYoungGenerationBitmap() {
- DCHECK_NULL(young_generation_bitmap_);
- young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
-}
-
-void MemoryChunk::ReleaseYoungGenerationBitmap() {
- DCHECK_NOT_NULL(young_generation_bitmap_);
- free(young_generation_bitmap_);
- young_generation_bitmap_ = nullptr;
+void Page::DestroyBlackAreaBackground(Address start, Address end) {
+ DCHECK(heap()->incremental_marking()->black_allocation());
+ DCHECK_EQ(Page::FromAddress(start), this);
+ DCHECK_LT(start, end);
+ DCHECK_EQ(Page::FromAddress(end - 1), this);
+ IncrementalMarking::AtomicMarkingState* marking_state =
+ heap()->incremental_marking()->atomic_marking_state();
+ marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
+ AddressToMarkbitIndex(end));
+ heap()->incremental_marking()->IncrementLiveBytesBackground(
+ this, -static_cast<intptr_t>(end - start));
}
// -----------------------------------------------------------------------------
@@ -1481,293 +319,6 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
return next_step;
}
-PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
- Executability executable, FreeList* free_list,
- LocalSpaceKind local_space_kind)
- : SpaceWithLinearArea(heap, space, free_list),
- executable_(executable),
- local_space_kind_(local_space_kind) {
- area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
- accounting_stats_.Clear();
-}
-
-void PagedSpace::TearDown() {
- while (!memory_chunk_list_.Empty()) {
- MemoryChunk* chunk = memory_chunk_list_.front();
- memory_chunk_list_.Remove(chunk);
- heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
- }
- accounting_stats_.Clear();
-}
-
-void PagedSpace::RefillFreeList() {
- // Any PagedSpace might invoke RefillFreeList. We filter all but our old
- // generation spaces out.
- if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
- identity() != MAP_SPACE && identity() != RO_SPACE) {
- return;
- }
- DCHECK_NE(local_space_kind(), LocalSpaceKind::kOffThreadSpace);
- DCHECK_IMPLIES(is_local_space(), is_compaction_space());
- DCHECK(!IsDetached());
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- size_t added = 0;
-
- {
- Page* p = nullptr;
- while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
- // We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
- // entries here to make them unavailable for allocations.
- if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
- p->ForAllFreeListCategories([this](FreeListCategory* category) {
- category->Reset(free_list());
- });
- }
-
- // Also merge old-to-new remembered sets if not scavenging because of
- // data races: One thread might iterate remembered set, while another
- // thread merges them.
- if (local_space_kind() != LocalSpaceKind::kCompactionSpaceForScavenge) {
- p->MergeOldToNewRememberedSets();
- }
-
- // Only during compaction pages can actually change ownership. This is
- // safe because there exists no other competing action on the page links
- // during compaction.
- if (is_compaction_space()) {
- DCHECK_NE(this, p->owner());
- PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
- base::MutexGuard guard(owner->mutex());
- owner->RefineAllocatedBytesAfterSweeping(p);
- owner->RemovePage(p);
- added += AddPage(p);
- } else {
- base::MutexGuard guard(mutex());
- DCHECK_EQ(this, p->owner());
- RefineAllocatedBytesAfterSweeping(p);
- added += RelinkFreeListCategories(p);
- }
- added += p->wasted_memory();
- if (is_compaction_space() && (added > kCompactionMemoryWanted)) break;
- }
- }
-}
-
-void OffThreadSpace::RefillFreeList() {
- // We should never try to refill the free list in off-thread space, because
- // we know it will always be fully linear.
- UNREACHABLE();
-}
-
-void PagedSpace::MergeLocalSpace(LocalSpace* other) {
- base::MutexGuard guard(mutex());
-
- DCHECK(identity() == other->identity());
-
- // Unmerged fields:
- // area_size_
- other->FreeLinearAllocationArea();
-
- for (int i = static_cast<int>(AllocationOrigin::kFirstAllocationOrigin);
- i <= static_cast<int>(AllocationOrigin::kLastAllocationOrigin); i++) {
- allocations_origins_[i] += other->allocations_origins_[i];
- }
-
- // The linear allocation area of {other} should be destroyed now.
- DCHECK_EQ(kNullAddress, other->top());
- DCHECK_EQ(kNullAddress, other->limit());
-
- bool merging_from_off_thread = other->is_off_thread_space();
-
- // Move over pages.
- for (auto it = other->begin(); it != other->end();) {
- Page* p = *(it++);
-
- if (merging_from_off_thread) {
- DCHECK_NULL(p->sweeping_slot_set());
- p->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
- if (heap()->incremental_marking()->black_allocation()) {
- p->CreateBlackArea(p->area_start(), p->HighWaterMark());
- }
- } else {
- p->MergeOldToNewRememberedSets();
- }
-
- // Relinking requires the category to be unlinked.
- other->RemovePage(p);
- AddPage(p);
- // These code pages were allocated by the CompactionSpace.
- if (identity() == CODE_SPACE) heap()->isolate()->AddCodeMemoryChunk(p);
- DCHECK_IMPLIES(
- !p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
- p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
-
- // TODO(leszeks): Here we should allocation step, but:
- // 1. Allocation groups are currently not handled properly by the sampling
- // allocation profiler, and
- // 2. Observers might try to take the space lock, which isn't reentrant.
- // We'll have to come up with a better solution for allocation stepping
- // before shipping, which will likely be using LocalHeap.
- }
-
- DCHECK_EQ(0u, other->Size());
- DCHECK_EQ(0u, other->Capacity());
-}
-
-size_t PagedSpace::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- size_t size = 0;
- for (Page* page : *this) {
- size += page->CommittedPhysicalMemory();
- }
- return size;
-}
-
-bool PagedSpace::ContainsSlow(Address addr) {
- Page* p = Page::FromAddress(addr);
- for (Page* page : *this) {
- if (page == p) return true;
- }
- return false;
-}
-
-void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
- CHECK(page->SweepingDone());
- auto marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
- // The live_byte on the page was accounted in the space allocated
- // bytes counter. After sweeping allocated_bytes() contains the
- // accurate live byte count on the page.
- size_t old_counter = marking_state->live_bytes(page);
- size_t new_counter = page->allocated_bytes();
- DCHECK_GE(old_counter, new_counter);
- if (old_counter > new_counter) {
- DecreaseAllocatedBytes(old_counter - new_counter, page);
- // Give the heap a chance to adjust counters in response to the
- // more precise and smaller old generation size.
- heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
- }
- marking_state->SetLiveBytes(page, 0);
-}
-
-Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
- base::MutexGuard guard(mutex());
- Page* page = free_list()->GetPageForSize(size_in_bytes);
- if (!page) return nullptr;
- RemovePage(page);
- return page;
-}
-
-size_t PagedSpace::AddPage(Page* page) {
- CHECK(page->SweepingDone());
- page->set_owner(this);
- memory_chunk_list_.PushBack(page);
- AccountCommitted(page->size());
- IncreaseCapacity(page->area_size());
- IncreaseAllocatedBytes(page->allocated_bytes(), page);
- for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
- }
- return RelinkFreeListCategories(page);
-}
-
-void PagedSpace::RemovePage(Page* page) {
- CHECK(page->SweepingDone());
- memory_chunk_list_.Remove(page);
- UnlinkFreeListCategories(page);
- DecreaseAllocatedBytes(page->allocated_bytes(), page);
- DecreaseCapacity(page->area_size());
- AccountUncommitted(page->size());
- for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
- }
-}
-
-size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
- size_t unused = page->ShrinkToHighWaterMark();
- accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
- AccountUncommitted(unused);
- return unused;
-}
-
-void PagedSpace::ResetFreeList() {
- for (Page* page : *this) {
- free_list_->EvictFreeListItems(page);
- }
- DCHECK(free_list_->IsEmpty());
-}
-
-void PagedSpace::ShrinkImmortalImmovablePages() {
- DCHECK(!heap()->deserialization_complete());
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- FreeLinearAllocationArea();
- ResetFreeList();
- for (Page* page : *this) {
- DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
- ShrinkPageToHighWaterMark(page);
- }
-}
-
-bool PagedSpace::Expand() {
- // Always lock against the main space as we can only adjust capacity and
- // pages concurrently for the main paged space.
- base::MutexGuard guard(heap()->paged_space(identity())->mutex());
-
- const int size = AreaSize();
-
- if (!heap()->CanExpandOldGeneration(size)) return false;
-
- Page* page =
- heap()->memory_allocator()->AllocatePage(size, this, executable());
- if (page == nullptr) return false;
- // Pages created during bootstrapping may contain immortal immovable objects.
- if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
- AddPage(page);
- // If this is a non-compaction code space, this is a previously unseen page.
- if (identity() == CODE_SPACE && !is_compaction_space()) {
- heap()->isolate()->AddCodeMemoryChunk(page);
- }
- Free(page->area_start(), page->area_size(),
- SpaceAccountingMode::kSpaceAccounted);
- heap()->NotifyOldGenerationExpansion();
- return true;
-}
-
-int PagedSpace::CountTotalPages() {
- int count = 0;
- for (Page* page : *this) {
- count++;
- USE(page);
- }
- return count;
-}
-
-void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
- SetTopAndLimit(top, limit);
- if (top != kNullAddress && top != limit && !is_off_thread_space() &&
- heap()->incremental_marking()->black_allocation()) {
- Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
- }
-}
-
-void PagedSpace::DecreaseLimit(Address new_limit) {
- Address old_limit = limit();
- DCHECK_LE(top(), new_limit);
- DCHECK_GE(old_limit, new_limit);
- if (new_limit != old_limit) {
- SetTopAndLimit(top(), new_limit);
- Free(new_limit, old_limit - new_limit,
- SpaceAccountingMode::kSpaceAccounted);
- if (heap()->incremental_marking()->black_allocation()) {
- Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
- old_limit);
- }
- }
-}
-
Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
size_t min_size) {
DCHECK_GE(end - start, min_size);
@@ -1802,560 +353,6 @@ void SpaceWithLinearArea::PrintAllocationsOrigins() {
allocations_origins_[2]);
}
-void PagedSpace::MarkLinearAllocationAreaBlack() {
- DCHECK(heap()->incremental_marking()->black_allocation());
- Address current_top = top();
- Address current_limit = limit();
- if (current_top != kNullAddress && current_top != current_limit) {
- Page::FromAllocationAreaAddress(current_top)
- ->CreateBlackArea(current_top, current_limit);
- }
-}
-
-void PagedSpace::UnmarkLinearAllocationArea() {
- Address current_top = top();
- Address current_limit = limit();
- if (current_top != kNullAddress && current_top != current_limit) {
- Page::FromAllocationAreaAddress(current_top)
- ->DestroyBlackArea(current_top, current_limit);
- }
-}
-
-void PagedSpace::FreeLinearAllocationArea() {
- // Mark the old linear allocation area with a free space map so it can be
- // skipped when scanning the heap.
- Address current_top = top();
- Address current_limit = limit();
- if (current_top == kNullAddress) {
- DCHECK_EQ(kNullAddress, current_limit);
- return;
- }
-
- if (!is_off_thread_space() &&
- heap()->incremental_marking()->black_allocation()) {
- Page* page = Page::FromAllocationAreaAddress(current_top);
-
- // Clear the bits in the unused black area.
- if (current_top != current_limit) {
- IncrementalMarking::MarkingState* marking_state =
- heap()->incremental_marking()->marking_state();
- marking_state->bitmap(page)->ClearRange(
- page->AddressToMarkbitIndex(current_top),
- page->AddressToMarkbitIndex(current_limit));
- marking_state->IncrementLiveBytes(
- page, -static_cast<int>(current_limit - current_top));
- }
- }
-
- InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
- SetTopAndLimit(kNullAddress, kNullAddress);
- DCHECK_GE(current_limit, current_top);
-
- // The code page of the linear allocation area needs to be unprotected
- // because we are going to write a filler into that memory area below.
- if (identity() == CODE_SPACE) {
- heap()->UnprotectAndRegisterMemoryChunk(
- MemoryChunk::FromAddress(current_top));
- }
- Free(current_top, current_limit - current_top,
- SpaceAccountingMode::kSpaceAccounted);
-}
-
-void PagedSpace::ReleasePage(Page* page) {
- DCHECK_EQ(
- 0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
- page));
- DCHECK_EQ(page->owner(), this);
-
- free_list_->EvictFreeListItems(page);
-
- if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
- DCHECK(!top_on_previous_step_);
- allocation_info_.Reset(kNullAddress, kNullAddress);
- }
-
- heap()->isolate()->RemoveCodeMemoryChunk(page);
-
- AccountUncommitted(page->size());
- accounting_stats_.DecreaseCapacity(page->area_size());
- heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
-}
-
-void PagedSpace::SetReadable() {
- DCHECK(identity() == CODE_SPACE);
- for (Page* page : *this) {
- CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadable();
- }
-}
-
-void PagedSpace::SetReadAndExecutable() {
- DCHECK(identity() == CODE_SPACE);
- for (Page* page : *this) {
- CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadAndExecutable();
- }
-}
-
-void PagedSpace::SetReadAndWritable() {
- DCHECK(identity() == CODE_SPACE);
- for (Page* page : *this) {
- CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadAndWritable();
- }
-}
-
-std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator(Heap* heap) {
- return std::unique_ptr<ObjectIterator>(
- new PagedSpaceObjectIterator(heap, this));
-}
-
-bool PagedSpace::RefillLinearAllocationAreaFromFreeList(
- size_t size_in_bytes, AllocationOrigin origin) {
- DCHECK(IsAligned(size_in_bytes, kTaggedSize));
- DCHECK_LE(top(), limit());
-#ifdef DEBUG
- if (top() != limit()) {
- DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
- }
-#endif
-  // Don't allocate from the free list if there is linear space available.
- DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
-
- // Mark the old linear allocation area with a free space map so it can be
- // skipped when scanning the heap. This also puts it back in the free list
- // if it is big enough.
- FreeLinearAllocationArea();
-
- if (!is_local_space()) {
- heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
- heap()->GCFlagsForIncrementalMarking(),
- kGCCallbackScheduleIdleGarbageCollection);
- }
-
- size_t new_node_size = 0;
- FreeSpace new_node =
- free_list_->Allocate(size_in_bytes, &new_node_size, origin);
- if (new_node.is_null()) return false;
- DCHECK_GE(new_node_size, size_in_bytes);
-
- // The old-space-step might have finished sweeping and restarted marking.
- // Verify that it did not turn the page of the new node into an evacuation
- // candidate.
- DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
-
- // Memory in the linear allocation area is counted as allocated. We may free
- // a little of this again immediately - see below.
- Page* page = Page::FromHeapObject(new_node);
- IncreaseAllocatedBytes(new_node_size, page);
-
- Address start = new_node.address();
- Address end = new_node.address() + new_node_size;
- Address limit = ComputeLimit(start, end, size_in_bytes);
- DCHECK_LE(limit, end);
- DCHECK_LE(size_in_bytes, limit - start);
- if (limit != end) {
- if (identity() == CODE_SPACE) {
- heap()->UnprotectAndRegisterMemoryChunk(page);
- }
- Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
- }
- SetLinearAllocationArea(start, limit);
-
- return true;
-}
-
-base::Optional<std::pair<Address, size_t>>
-PagedSpace::SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
- size_t min_size_in_bytes,
- size_t max_size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- DCHECK(!is_local_space() && identity() == OLD_SPACE);
- DCHECK_EQ(origin, AllocationOrigin::kRuntime);
- base::MutexGuard lock(&allocation_mutex_);
-
- auto result = TryAllocationFromFreeListBackground(
- min_size_in_bytes, max_size_in_bytes, alignment, origin);
- if (result) return result;
-
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- // Sweeping is still in progress.
- if (collector->sweeping_in_progress()) {
- // First try to refill the free-list, concurrent sweeper threads
- // may have freed some objects in the meantime.
- RefillFreeList();
-
- // Retry the free list allocation.
- auto result = TryAllocationFromFreeListBackground(
- min_size_in_bytes, max_size_in_bytes, alignment, origin);
- if (result) return result;
-
- Sweeper::FreeSpaceMayContainInvalidatedSlots
- invalidated_slots_in_free_space =
- Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
-
- const int kMaxPagesToSweep = 1;
- int max_freed = collector->sweeper()->ParallelSweepSpace(
- identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
- invalidated_slots_in_free_space);
- RefillFreeList();
- if (static_cast<size_t>(max_freed) >= min_size_in_bytes)
- return TryAllocationFromFreeListBackground(
- min_size_in_bytes, max_size_in_bytes, alignment, origin);
- }
-
- if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
- Expand()) {
- DCHECK((CountTotalPages() > 1) ||
- (min_size_in_bytes <= free_list_->Available()));
- return TryAllocationFromFreeListBackground(
- min_size_in_bytes, max_size_in_bytes, alignment, origin);
- }
-
- // TODO(dinfuehr): Complete sweeping here and try allocation again.
-
- return {};
-}
-
-base::Optional<std::pair<Address, size_t>>
-PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
- size_t max_size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
- DCHECK_EQ(identity(), OLD_SPACE);
-
- size_t new_node_size = 0;
- FreeSpace new_node =
- free_list_->Allocate(min_size_in_bytes, &new_node_size, origin);
- if (new_node.is_null()) return {};
- DCHECK_GE(new_node_size, min_size_in_bytes);
-
- // The old-space-step might have finished sweeping and restarted marking.
- // Verify that it did not turn the page of the new node into an evacuation
- // candidate.
- DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
-
- // Memory in the linear allocation area is counted as allocated. We may free
- // a little of this again immediately - see below.
- Page* page = Page::FromHeapObject(new_node);
- IncreaseAllocatedBytes(new_node_size, page);
-
- heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
-
- size_t used_size_in_bytes = Min(new_node_size, max_size_in_bytes);
-
- Address start = new_node.address();
- Address end = new_node.address() + new_node_size;
- Address limit = new_node.address() + used_size_in_bytes;
- DCHECK_LE(limit, end);
- DCHECK_LE(min_size_in_bytes, limit - start);
- if (limit != end) {
- Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
- }
-
- return std::make_pair(start, used_size_in_bytes);
-}
-
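As an aside on the background-allocation helper above: it carves at most max_size_in_bytes out of the free-list node it found and hands the unused tail back to the free list. A minimal standalone sketch of that carve-and-return arithmetic, using hypothetical plain-integer types rather than V8's Address/FreeSpace classes:

// Standalone sketch (hypothetical types): carving an allocation area out of a
// free-list node and returning the unused tail, as done above.
#include <algorithm>
#include <cstddef>
#include <cstdint>

using Address = std::uintptr_t;

struct CarveResult {
  Address start;          // beginning of the usable allocation area
  std::size_t used;       // bytes handed to the allocator
  Address tail_start;     // remainder returned to the free list
  std::size_t tail_size;  // 0 if the whole node was used
};

// node_start/node_size describe the free-list node; max_size is the most the
// caller wants to keep for its linear allocation area.
CarveResult CarveFromFreeListNode(Address node_start, std::size_t node_size,
                                  std::size_t max_size) {
  const std::size_t used = std::min(node_size, max_size);
  const Address limit = node_start + used;
  const Address end = node_start + node_size;
  return {node_start, used, limit, static_cast<std::size_t>(end - limit)};
}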
-#ifdef DEBUG
-void PagedSpace::Print() {}
-#endif
-
-#ifdef VERIFY_HEAP
-void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
- bool allocation_pointer_found_in_space =
- (allocation_info_.top() == allocation_info_.limit());
- size_t external_space_bytes[kNumTypes];
- size_t external_page_bytes[kNumTypes];
-
- for (int i = 0; i < kNumTypes; i++) {
- external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
- }
-
- for (Page* page : *this) {
-#ifdef V8_SHARED_RO_HEAP
- if (identity() == RO_SPACE) {
- CHECK_NULL(page->owner());
- } else {
- CHECK_EQ(page->owner(), this);
- }
-#else
- CHECK_EQ(page->owner(), this);
-#endif
-
- for (int i = 0; i < kNumTypes; i++) {
- external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
- }
-
- if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
- allocation_pointer_found_in_space = true;
- }
- CHECK(page->SweepingDone());
- PagedSpaceObjectIterator it(isolate->heap(), this, page);
- Address end_of_previous_object = page->area_start();
- Address top = page->area_end();
-
- for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
- CHECK(end_of_previous_object <= object.address());
-
- // The first word should be a map, and we expect all map pointers to
- // be in map space.
- Map map = object.map();
- CHECK(map.IsMap());
- CHECK(ReadOnlyHeap::Contains(map) ||
- isolate->heap()->map_space()->Contains(map));
-
- // Perform space-specific object verification.
- VerifyObject(object);
-
- // The object itself should look OK.
- object.ObjectVerify(isolate);
-
- if (!FLAG_verify_heap_skip_remembered_set) {
- isolate->heap()->VerifyRememberedSetFor(object);
- }
-
- // All the interior pointers should be contained in the heap.
- int size = object.Size();
- object.IterateBody(map, size, visitor);
- CHECK(object.address() + size <= top);
- end_of_previous_object = object.address() + size;
-
- if (object.IsExternalString()) {
- ExternalString external_string = ExternalString::cast(object);
- size_t size = external_string.ExternalPayloadSize();
- external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
- } else if (object.IsJSArrayBuffer()) {
- JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
- if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size =
- ArrayBufferTracker::Lookup(isolate->heap(), array_buffer)
- ->PerIsolateAccountingLength();
- external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
- }
- }
- }
- for (int i = 0; i < kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
- external_space_bytes[t] += external_page_bytes[t];
- }
- }
- for (int i = 0; i < kNumTypes; i++) {
- if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
- i == ExternalBackingStoreType::kArrayBuffer)
- continue;
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
- }
- CHECK(allocation_pointer_found_in_space);
-
- if (identity() == OLD_SPACE && V8_ARRAY_BUFFER_EXTENSION_BOOL) {
- size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
- CHECK_EQ(bytes,
- ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
- }
-
-#ifdef DEBUG
- VerifyCountersAfterSweeping(isolate->heap());
-#endif
-}
-
-void PagedSpace::VerifyLiveBytes() {
- DCHECK_NE(identity(), RO_SPACE);
- IncrementalMarking::MarkingState* marking_state =
- heap()->incremental_marking()->marking_state();
- for (Page* page : *this) {
- CHECK(page->SweepingDone());
- PagedSpaceObjectIterator it(heap(), this, page);
- int black_size = 0;
- for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
- // All the interior pointers should be contained in the heap.
- if (marking_state->IsBlack(object)) {
- black_size += object.Size();
- }
- }
- CHECK_LE(black_size, marking_state->live_bytes(page));
- }
-}
-#endif // VERIFY_HEAP
-
-#ifdef DEBUG
-void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) {
- size_t total_capacity = 0;
- size_t total_allocated = 0;
- for (Page* page : *this) {
- DCHECK(page->SweepingDone());
- total_capacity += page->area_size();
- PagedSpaceObjectIterator it(heap, this, page);
- size_t real_allocated = 0;
- for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
- if (!object.IsFreeSpaceOrFiller()) {
- real_allocated += object.Size();
- }
- }
- total_allocated += page->allocated_bytes();
-    // The real size can be smaller than the accounted size if array trimming
-    // or object slack tracking happened after sweeping.
- DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
- DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
- }
- DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
- DCHECK_EQ(total_allocated, accounting_stats_.Size());
-}
-
-void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
- // We need to refine the counters on pages that are already swept and have
- // not been moved over to the actual space. Otherwise, the AccountingStats
-  // are just an over-approximation.
- RefillFreeList();
-
- size_t total_capacity = 0;
- size_t total_allocated = 0;
- auto marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
- for (Page* page : *this) {
- size_t page_allocated =
- page->SweepingDone()
- ? page->allocated_bytes()
- : static_cast<size_t>(marking_state->live_bytes(page));
- total_capacity += page->area_size();
- total_allocated += page_allocated;
- DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
- }
- DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
- DCHECK_EQ(total_allocated, accounting_stats_.Size());
-}
-#endif
-
-// -----------------------------------------------------------------------------
-// NewSpace implementation
-
-NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
- size_t initial_semispace_capacity,
- size_t max_semispace_capacity)
- : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList()),
- to_space_(heap, kToSpace),
- from_space_(heap, kFromSpace) {
- DCHECK(initial_semispace_capacity <= max_semispace_capacity);
-
- to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
- from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
- if (!to_space_.Commit()) {
- V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
- }
- DCHECK(!from_space_.is_committed()); // No need to use memory yet.
- ResetLinearAllocationArea();
-}
-
-void NewSpace::TearDown() {
- allocation_info_.Reset(kNullAddress, kNullAddress);
-
- to_space_.TearDown();
- from_space_.TearDown();
-}
-
-void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
-
-
-void NewSpace::Grow() {
- // Double the semispace size but only up to maximum capacity.
- DCHECK(TotalCapacity() < MaximumCapacity());
- size_t new_capacity =
- Min(MaximumCapacity(),
- static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
- if (to_space_.GrowTo(new_capacity)) {
- // Only grow from space if we managed to grow to-space.
- if (!from_space_.GrowTo(new_capacity)) {
- // If we managed to grow to-space but couldn't grow from-space,
- // attempt to shrink to-space.
- if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
- // We are in an inconsistent state because we could not
- // commit/uncommit memory from new space.
- FATAL("inconsistent state");
- }
- }
- }
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-
-void NewSpace::Shrink() {
- size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
- size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
- if (rounded_new_capacity < TotalCapacity() &&
- to_space_.ShrinkTo(rounded_new_capacity)) {
- // Only shrink from-space if we managed to shrink to-space.
- from_space_.Reset();
- if (!from_space_.ShrinkTo(rounded_new_capacity)) {
- // If we managed to shrink to-space but couldn't shrink from
- // space, attempt to grow to-space again.
- if (!to_space_.GrowTo(from_space_.current_capacity())) {
- // We are in an inconsistent state because we could not
- // commit/uncommit memory from new space.
- FATAL("inconsistent state");
- }
- }
- }
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
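The Grow()/Shrink() pair above adjusts semispace capacity with simple arithmetic: multiply by a growth factor capped at the maximum, or round max(initial capacity, 2 * live size) up to whole pages. A minimal sketch of that arithmetic, assuming hypothetical constants for the page size and growth factor (V8 takes these from flags and the page layout):

// Standalone sketch of the capacity arithmetic used by Grow()/Shrink() above.
#include <algorithm>
#include <cstddef>

constexpr std::size_t kPageSize = 256 * 1024;  // assumed page granularity
constexpr std::size_t kGrowthFactor = 2;       // assumed growth factor

std::size_t RoundUpToPage(std::size_t bytes) {
  return ((bytes + kPageSize - 1) / kPageSize) * kPageSize;
}

// Grow: multiply the current capacity, but never past the maximum.
std::size_t GrownCapacity(std::size_t current, std::size_t maximum) {
  return std::min(maximum, kGrowthFactor * current);
}

// Shrink: keep at least the initial capacity and twice the live size,
// rounded up to whole pages.
std::size_t ShrunkCapacity(std::size_t initial, std::size_t live_size) {
  return RoundUpToPage(std::max(initial, 2 * live_size));
}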
-bool NewSpace::Rebalance() {
- // Order here is important to make use of the page pool.
- return to_space_.EnsureCurrentCapacity() &&
- from_space_.EnsureCurrentCapacity();
-}
-
-bool SemiSpace::EnsureCurrentCapacity() {
- if (is_committed()) {
- const int expected_pages =
- static_cast<int>(current_capacity_ / Page::kPageSize);
- MemoryChunk* current_page = first_page();
- int actual_pages = 0;
-
-    // First iterate through the page list, up to expected_pages entries, if
-    // that many pages exist.
- while (current_page != nullptr && actual_pages < expected_pages) {
- actual_pages++;
- current_page = current_page->list_node().next();
- }
-
- // Free all overallocated pages which are behind current_page.
- while (current_page) {
- MemoryChunk* next_current = current_page->list_node().next();
- memory_chunk_list_.Remove(current_page);
- // Clear new space flags to avoid this page being treated as a new
- // space page that is potentially being swept.
- current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
- current_page);
- current_page = next_current;
- }
-
- // Add more pages if we have less than expected_pages.
- IncrementalMarking::NonAtomicMarkingState* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
- while (actual_pages < expected_pages) {
- actual_pages++;
- current_page =
- heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
- NOT_EXECUTABLE);
- if (current_page == nullptr) return false;
- DCHECK_NOT_NULL(current_page);
- memory_chunk_list_.PushBack(current_page);
- marking_state->ClearLiveness(current_page);
- current_page->SetFlags(first_page()->GetFlags(),
- static_cast<uintptr_t>(Page::kCopyAllFlags));
- heap()->CreateFillerObjectAt(current_page->area_start(),
- static_cast<int>(current_page->area_size()),
- ClearRecordedSlots::kNo);
- }
- }
- return true;
-}
-
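EnsureCurrentCapacity() above trims the page list down to the expected count and then appends pages until the count is reached again. A rough sketch of that invariant with a std::list standing in for the intrusive chunk list; page allocation and teardown are reduced to list operations, so this is illustrative only:

// Standalone sketch of the "exact page count" invariant maintained above.
#include <cstddef>
#include <list>

// Always succeeds here because producing a "page" is just a list push; the
// real code can fail when page allocation fails.
bool EnsurePageCount(std::list<int>* pages, std::size_t expected_pages) {
  // Drop over-allocated pages from the tail.
  while (pages->size() > expected_pages) pages->pop_back();
  // Add pages until the expected count is reached.
  int next_id = pages->empty() ? 0 : pages->back() + 1;
  while (pages->size() < expected_pages) pages->push_back(next_id++);
  return true;
}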
LinearAllocationArea LocalAllocationBuffer::CloseAndMakeIterable() {
if (IsValid()) {
MakeIterable();
@@ -2400,110 +397,6 @@ LocalAllocationBuffer& LocalAllocationBuffer::operator=(
other.allocation_info_.Reset(kNullAddress, kNullAddress);
return *this;
}
-
-void NewSpace::UpdateLinearAllocationArea() {
-  // Make sure there are no unaccounted allocations.
- DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
-
- Address new_top = to_space_.page_low();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.Reset(new_top, to_space_.page_high());
- // The order of the following two stores is important.
- // See the corresponding loads in ConcurrentMarking::Run.
- original_limit_.store(limit(), std::memory_order_relaxed);
- original_top_.store(top(), std::memory_order_release);
- StartNextInlineAllocationStep();
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-void NewSpace::ResetLinearAllocationArea() {
- // Do a step to account for memory allocated so far before resetting.
- InlineAllocationStep(top(), top(), kNullAddress, 0);
- to_space_.Reset();
- UpdateLinearAllocationArea();
- // Clear all mark-bits in the to-space.
- IncrementalMarking::NonAtomicMarkingState* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
- for (Page* p : to_space_) {
- marking_state->ClearLiveness(p);
- // Concurrent marking may have local live bytes for this page.
- heap()->concurrent_marking()->ClearMemoryChunkData(p);
- }
-}
-
-void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
- Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
- allocation_info_.set_limit(new_limit);
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
- Address new_limit = ComputeLimit(top(), limit(), min_size);
- DCHECK_LE(new_limit, limit());
- DecreaseLimit(new_limit);
-}
-
-bool NewSpace::AddFreshPage() {
- Address top = allocation_info_.top();
- DCHECK(!OldSpace::IsAtPageStart(top));
-
- // Do a step to account for memory allocated on previous page.
- InlineAllocationStep(top, top, kNullAddress, 0);
-
- if (!to_space_.AdvancePage()) {
- // No more pages left to advance.
- return false;
- }
-
- // Clear remainder of current page.
- Address limit = Page::FromAllocationAreaAddress(top)->area_end();
- int remaining_in_page = static_cast<int>(limit - top);
- heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
- UpdateLinearAllocationArea();
-
- return true;
-}
-
-
-bool NewSpace::AddFreshPageSynchronized() {
- base::MutexGuard guard(&mutex_);
- return AddFreshPage();
-}
-
-
-bool NewSpace::EnsureAllocation(int size_in_bytes,
- AllocationAlignment alignment) {
- Address old_top = allocation_info_.top();
- Address high = to_space_.page_high();
- int filler_size = Heap::GetFillToAlign(old_top, alignment);
- int aligned_size_in_bytes = size_in_bytes + filler_size;
-
- if (old_top + aligned_size_in_bytes > high) {
- // Not enough room in the page, try to allocate a new one.
- if (!AddFreshPage()) {
- return false;
- }
-
- old_top = allocation_info_.top();
- high = to_space_.page_high();
- filler_size = Heap::GetFillToAlign(old_top, alignment);
- }
-
- DCHECK(old_top + aligned_size_in_bytes <= high);
-
- if (allocation_info_.limit() < high) {
- // Either the limit has been lowered because linear allocation was disabled
- // or because incremental marking wants to get a chance to do a step,
- // or because idle scavenge job wants to get a chance to post a task.
- // Set the new limit accordingly.
- Address new_top = old_top + aligned_size_in_bytes;
- Address soon_object = old_top + filler_size;
- InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
- UpdateInlineAllocationLimit(aligned_size_in_bytes);
- }
- return true;
-}
-
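EnsureAllocation() above is a bump-pointer check: compute the filler needed for alignment, then test whether the request still fits between top and the page end. A small sketch of that check under simplified assumptions (plain integer addresses, generic modulo-based alignment), not the V8 implementation:

// Standalone sketch of the bump-pointer fit test performed above.
#include <cstddef>
#include <cstdint>

using Address = std::uintptr_t;

// Bytes of filler needed so that allocation starts at an aligned address.
int FillToAlign(Address top, std::size_t alignment) {
  const std::size_t misalignment = top % alignment;
  return misalignment == 0 ? 0 : static_cast<int>(alignment - misalignment);
}

// Returns true if size_in_bytes (plus any alignment filler) fits in [top, high).
bool FitsInCurrentPage(Address top, Address high, int size_in_bytes,
                       std::size_t alignment) {
  const int filler = FillToAlign(top, alignment);
  return top + filler + size_in_bytes <= high;
}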
void SpaceWithLinearArea::StartNextInlineAllocationStep() {
if (heap()->allocation_step_in_progress()) {
// If we are mid-way through an existing step, don't start a new one.
@@ -2570,1043 +463,6 @@ void SpaceWithLinearArea::InlineAllocationStep(Address top,
}
}
-std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
- return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
-}
-
-#ifdef VERIFY_HEAP
-// We do not use the SemiSpaceObjectIterator because verification doesn't assume
-// that it works (it depends on the invariants we are checking).
-void NewSpace::Verify(Isolate* isolate) {
- // The allocation pointer should be in the space or at the very end.
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
- // There should be objects packed in from the low address up to the
- // allocation pointer.
- Address current = to_space_.first_page()->area_start();
- CHECK_EQ(current, to_space_.space_start());
-
- size_t external_space_bytes[kNumTypes];
- for (int i = 0; i < kNumTypes; i++) {
- external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
- }
-
- while (current != top()) {
- if (!Page::IsAlignedToPageSize(current)) {
- // The allocation pointer should not be in the middle of an object.
- CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
- current < top());
-
- HeapObject object = HeapObject::FromAddress(current);
-
- // The first word should be a map, and we expect all map pointers to
- // be in map space or read-only space.
- Map map = object.map();
- CHECK(map.IsMap());
- CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
-
- // The object should not be code or a map.
- CHECK(!object.IsMap());
- CHECK(!object.IsAbstractCode());
-
- // The object itself should look OK.
- object.ObjectVerify(isolate);
-
- // All the interior pointers should be contained in the heap.
- VerifyPointersVisitor visitor(heap());
- int size = object.Size();
- object.IterateBody(map, size, &visitor);
-
- if (object.IsExternalString()) {
- ExternalString external_string = ExternalString::cast(object);
- size_t size = external_string.ExternalPayloadSize();
- external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
- } else if (object.IsJSArrayBuffer()) {
- JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
- if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size = ArrayBufferTracker::Lookup(heap(), array_buffer)
- ->PerIsolateAccountingLength();
- external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
- }
- }
-
- current += size;
- } else {
- // At end of page, switch to next page.
- Page* page = Page::FromAllocationAreaAddress(current)->next_page();
- current = page->area_start();
- }
- }
-
- for (int i = 0; i < kNumTypes; i++) {
- if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
- i == ExternalBackingStoreType::kArrayBuffer)
- continue;
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
- }
-
- if (V8_ARRAY_BUFFER_EXTENSION_BOOL) {
- size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
- CHECK_EQ(bytes,
- ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
- }
-
- // Check semi-spaces.
- CHECK_EQ(from_space_.id(), kFromSpace);
- CHECK_EQ(to_space_.id(), kToSpace);
- from_space_.Verify();
- to_space_.Verify();
-}
-#endif
-
-// -----------------------------------------------------------------------------
-// SemiSpace implementation
-
-void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
- DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
- minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
- current_capacity_ = minimum_capacity_;
- maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
- committed_ = false;
-}
-
-
-void SemiSpace::TearDown() {
- // Properly uncommit memory to keep the allocator counters in sync.
- if (is_committed()) {
- Uncommit();
- }
- current_capacity_ = maximum_capacity_ = 0;
-}
-
-
-bool SemiSpace::Commit() {
- DCHECK(!is_committed());
- const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
- for (int pages_added = 0; pages_added < num_pages; pages_added++) {
-    // Pages in the new space can be moved to the old space by the full
- // collector. Therefore, they must be initialized with the same FreeList as
- // old pages.
- Page* new_page =
- heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
- NOT_EXECUTABLE);
- if (new_page == nullptr) {
- if (pages_added) RewindPages(pages_added);
- return false;
- }
- memory_chunk_list_.PushBack(new_page);
- }
- Reset();
- AccountCommitted(current_capacity_);
- if (age_mark_ == kNullAddress) {
- age_mark_ = first_page()->area_start();
- }
- committed_ = true;
- return true;
-}
-
-
-bool SemiSpace::Uncommit() {
- DCHECK(is_committed());
- while (!memory_chunk_list_.Empty()) {
- MemoryChunk* chunk = memory_chunk_list_.front();
- memory_chunk_list_.Remove(chunk);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
- }
- current_page_ = nullptr;
- AccountUncommitted(current_capacity_);
- committed_ = false;
- heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
- return true;
-}
-
-
-size_t SemiSpace::CommittedPhysicalMemory() {
- if (!is_committed()) return 0;
- size_t size = 0;
- for (Page* p : *this) {
- size += p->CommittedPhysicalMemory();
- }
- return size;
-}
-
-bool SemiSpace::GrowTo(size_t new_capacity) {
- if (!is_committed()) {
- if (!Commit()) return false;
- }
- DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
- DCHECK_LE(new_capacity, maximum_capacity_);
- DCHECK_GT(new_capacity, current_capacity_);
- const size_t delta = new_capacity - current_capacity_;
- DCHECK(IsAligned(delta, AllocatePageSize()));
- const int delta_pages = static_cast<int>(delta / Page::kPageSize);
- DCHECK(last_page());
- IncrementalMarking::NonAtomicMarkingState* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
- for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
- Page* new_page =
- heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
- NOT_EXECUTABLE);
- if (new_page == nullptr) {
- if (pages_added) RewindPages(pages_added);
- return false;
- }
- memory_chunk_list_.PushBack(new_page);
- marking_state->ClearLiveness(new_page);
-    // Duplicate the flags that were set on the old page.
- new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
- }
- AccountCommitted(delta);
- current_capacity_ = new_capacity;
- return true;
-}
-
-void SemiSpace::RewindPages(int num_pages) {
- DCHECK_GT(num_pages, 0);
- DCHECK(last_page());
- while (num_pages > 0) {
- MemoryChunk* last = last_page();
- memory_chunk_list_.Remove(last);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
- num_pages--;
- }
-}
-
-bool SemiSpace::ShrinkTo(size_t new_capacity) {
- DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
- DCHECK_GE(new_capacity, minimum_capacity_);
- DCHECK_LT(new_capacity, current_capacity_);
- if (is_committed()) {
- const size_t delta = current_capacity_ - new_capacity;
- DCHECK(IsAligned(delta, Page::kPageSize));
- int delta_pages = static_cast<int>(delta / Page::kPageSize);
- RewindPages(delta_pages);
- AccountUncommitted(delta);
- heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
- }
- current_capacity_ = new_capacity;
- return true;
-}
-
-void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
- for (Page* page : *this) {
- page->set_owner(this);
- page->SetFlags(flags, mask);
- if (id_ == kToSpace) {
- page->ClearFlag(MemoryChunk::FROM_PAGE);
- page->SetFlag(MemoryChunk::TO_PAGE);
- page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
- heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
- page, 0);
- } else {
- page->SetFlag(MemoryChunk::FROM_PAGE);
- page->ClearFlag(MemoryChunk::TO_PAGE);
- }
- DCHECK(page->InYoungGeneration());
- }
-}
-
-
-void SemiSpace::Reset() {
- DCHECK(first_page());
- DCHECK(last_page());
- current_page_ = first_page();
- pages_used_ = 0;
-}
-
-void SemiSpace::RemovePage(Page* page) {
- if (current_page_ == page) {
- if (page->prev_page()) {
- current_page_ = page->prev_page();
- }
- }
- memory_chunk_list_.Remove(page);
- for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
- }
-}
-
-void SemiSpace::PrependPage(Page* page) {
- page->SetFlags(current_page()->GetFlags(),
- static_cast<uintptr_t>(Page::kCopyAllFlags));
- page->set_owner(this);
- memory_chunk_list_.PushFront(page);
- pages_used_++;
- for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
- }
-}
-
-void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
- // We won't be swapping semispaces without data in them.
- DCHECK(from->first_page());
- DCHECK(to->first_page());
-
- intptr_t saved_to_space_flags = to->current_page()->GetFlags();
-
- // We swap all properties but id_.
- std::swap(from->current_capacity_, to->current_capacity_);
- std::swap(from->maximum_capacity_, to->maximum_capacity_);
- std::swap(from->minimum_capacity_, to->minimum_capacity_);
- std::swap(from->age_mark_, to->age_mark_);
- std::swap(from->committed_, to->committed_);
- std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
- std::swap(from->current_page_, to->current_page_);
- std::swap(from->external_backing_store_bytes_,
- to->external_backing_store_bytes_);
-
- to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
- from->FixPagesFlags(0, 0);
-}
-
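Swap() above flips every property of the two semispaces except their ids. A toy sketch of that flip, with a stand-in struct instead of the real SemiSpace:

// Standalone sketch of the semispace flip performed by SemiSpace::Swap().
#include <cstddef>
#include <utility>

struct ToySemiSpace {
  int id;               // kFromSpace / kToSpace -- never swapped
  std::size_t capacity;
  bool committed;
  void* first_page;     // stands in for the page list
};

void Flip(ToySemiSpace* from, ToySemiSpace* to) {
  std::swap(from->capacity, to->capacity);
  std::swap(from->committed, to->committed);
  std::swap(from->first_page, to->first_page);
  // from->id and to->id intentionally keep their values, matching the
  // "we swap all properties but id_" comment in the original code.
}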
-void SemiSpace::set_age_mark(Address mark) {
- DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
- age_mark_ = mark;
- // Mark all pages up to the one containing mark.
- for (Page* p : PageRange(space_start(), mark)) {
- p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
- }
-}
-
-std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator(Heap* heap) {
- // Use the NewSpace::NewObjectIterator to iterate the ToSpace.
- UNREACHABLE();
-}
-
-#ifdef DEBUG
-void SemiSpace::Print() {}
-#endif
-
-#ifdef VERIFY_HEAP
-void SemiSpace::Verify() {
- bool is_from_space = (id_ == kFromSpace);
- size_t external_backing_store_bytes[kNumTypes];
-
- for (int i = 0; i < kNumTypes; i++) {
- external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
- }
-
- for (Page* page : *this) {
- CHECK_EQ(page->owner(), this);
- CHECK(page->InNewSpace());
- CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
- : MemoryChunk::TO_PAGE));
- CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::TO_PAGE
- : MemoryChunk::FROM_PAGE));
- CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
- if (!is_from_space) {
- // The pointers-from-here-are-interesting flag isn't updated dynamically
- // on from-space pages, so it might be out of sync with the marking state.
- if (page->heap()->incremental_marking()->IsMarking()) {
- CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
- } else {
- CHECK(
- !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
- }
- }
- for (int i = 0; i < kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
- }
-
- CHECK_IMPLIES(page->list_node().prev(),
- page->list_node().prev()->list_node().next() == page);
- }
- for (int i = 0; i < kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
- }
-}
-#endif
-
-#ifdef DEBUG
-void SemiSpace::AssertValidRange(Address start, Address end) {
-  // Addresses must belong to the same semi-space.
- Page* page = Page::FromAllocationAreaAddress(start);
- Page* end_page = Page::FromAllocationAreaAddress(end);
- SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
- DCHECK_EQ(space, end_page->owner());
-  // The start address is before the end address, either on the same page or
-  // with the end address on a later page in the linked list of semi-space
-  // pages.
- if (page == end_page) {
- DCHECK_LE(start, end);
- } else {
- while (page != end_page) {
- page = page->next_page();
- }
- DCHECK(page);
- }
-}
-#endif
-
-// -----------------------------------------------------------------------------
-// SemiSpaceObjectIterator implementation.
-
-SemiSpaceObjectIterator::SemiSpaceObjectIterator(NewSpace* space) {
- Initialize(space->first_allocatable_address(), space->top());
-}
-
-void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
- SemiSpace::AssertValidRange(start, end);
- current_ = start;
- limit_ = end;
-}
-
-size_t NewSpace::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- size_t size = to_space_.CommittedPhysicalMemory();
- if (from_space_.is_committed()) {
- size += from_space_.CommittedPhysicalMemory();
- }
- return size;
-}
-
-
-// -----------------------------------------------------------------------------
-// Free lists for old object spaces implementation
-
-void FreeListCategory::Reset(FreeList* owner) {
- if (is_linked(owner) && !top().is_null()) {
- owner->DecreaseAvailableBytes(available_);
- }
- set_top(FreeSpace());
- set_prev(nullptr);
- set_next(nullptr);
- available_ = 0;
-}
-
-FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
- size_t* node_size) {
- FreeSpace node = top();
- DCHECK(!node.is_null());
- DCHECK(Page::FromHeapObject(node)->CanAllocate());
- if (static_cast<size_t>(node.Size()) < minimum_size) {
- *node_size = 0;
- return FreeSpace();
- }
- set_top(node.next());
- *node_size = node.Size();
- UpdateCountersAfterAllocation(*node_size);
- return node;
-}
-
-FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
- size_t* node_size) {
- FreeSpace prev_non_evac_node;
- for (FreeSpace cur_node = top(); !cur_node.is_null();
- cur_node = cur_node.next()) {
- DCHECK(Page::FromHeapObject(cur_node)->CanAllocate());
- size_t size = cur_node.size();
- if (size >= minimum_size) {
- DCHECK_GE(available_, size);
- UpdateCountersAfterAllocation(size);
- if (cur_node == top()) {
- set_top(cur_node.next());
- }
- if (!prev_non_evac_node.is_null()) {
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
- if (chunk->owner_identity() == CODE_SPACE) {
- chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
- }
- prev_non_evac_node.set_next(cur_node.next());
- }
- *node_size = size;
- return cur_node;
- }
-
- prev_non_evac_node = cur_node;
- }
- return FreeSpace();
-}
-
-void FreeListCategory::Free(Address start, size_t size_in_bytes, FreeMode mode,
- FreeList* owner) {
- FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
- free_space.set_next(top());
- set_top(free_space);
- available_ += size_in_bytes;
- if (mode == kLinkCategory) {
- if (is_linked(owner)) {
- owner->IncreaseAvailableBytes(size_in_bytes);
- } else {
- owner->AddCategory(this);
- }
- }
-}
-
-void FreeListCategory::RepairFreeList(Heap* heap) {
- Map free_space_map = ReadOnlyRoots(heap).free_space_map();
- FreeSpace n = top();
- while (!n.is_null()) {
- ObjectSlot map_slot = n.map_slot();
- if (map_slot.contains_value(kNullAddress)) {
- map_slot.store(free_space_map);
- } else {
- DCHECK(map_slot.contains_value(free_space_map.ptr()));
- }
- n = n.next();
- }
-}
-
-void FreeListCategory::Relink(FreeList* owner) {
- DCHECK(!is_linked(owner));
- owner->AddCategory(this);
-}
-
-// ------------------------------------------------
-// Generic FreeList methods (alloc/free related)
-
-FreeList* FreeList::CreateFreeList() {
- switch (FLAG_gc_freelist_strategy) {
- case 0:
- return new FreeListLegacy();
- case 1:
- return new FreeListFastAlloc();
- case 2:
- return new FreeListMany();
- case 3:
- return new FreeListManyCached();
- case 4:
- return new FreeListManyCachedFastPath();
- case 5:
- return new FreeListManyCachedOrigin();
- default:
- FATAL("Invalid FreeList strategy");
- }
-}
-
-FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
- size_t minimum_size, size_t* node_size) {
- FreeListCategory* category = categories_[type];
- if (category == nullptr) return FreeSpace();
- FreeSpace node = category->PickNodeFromList(minimum_size, node_size);
- if (!node.is_null()) {
- DecreaseAvailableBytes(*node_size);
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- }
- if (category->is_empty()) {
- RemoveCategory(category);
- }
- return node;
-}
-
-FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
- size_t minimum_size,
- size_t* node_size) {
- FreeListCategoryIterator it(this, type);
- FreeSpace node;
- while (it.HasNext()) {
- FreeListCategory* current = it.Next();
- node = current->SearchForNodeInList(minimum_size, node_size);
- if (!node.is_null()) {
- DecreaseAvailableBytes(*node_size);
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- if (current->is_empty()) {
- RemoveCategory(current);
- }
- return node;
- }
- }
- return node;
-}
-
-size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
- Page* page = Page::FromAddress(start);
- page->DecreaseAllocatedBytes(size_in_bytes);
-
- // Blocks have to be a minimum size to hold free list items.
- if (size_in_bytes < min_block_size_) {
- page->add_wasted_memory(size_in_bytes);
- wasted_bytes_ += size_in_bytes;
- return size_in_bytes;
- }
-
- // Insert other blocks at the head of a free list of the appropriate
- // magnitude.
- FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- page->free_list_category(type)->Free(start, size_in_bytes, mode, this);
- DCHECK_EQ(page->AvailableInFreeList(),
- page->AvailableInFreeListFromAllocatedBytes());
- return 0;
-}
-
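Free() above writes off blocks smaller than the minimum block size as waste and pushes larger blocks onto a size-segregated category list. A standalone sketch of that policy with hypothetical category thresholds (V8's real boundaries differ per FreeList implementation):

// Standalone sketch of a size-segregated free path with waste accounting.
#include <array>
#include <cstddef>
#include <forward_list>
#include <utility>

constexpr std::size_t kMinBlockSize = 3 * sizeof(void*);  // assumed minimum
constexpr std::array<std::size_t, 4> kCategoryMin = {24, 256, 2048, 16384};

struct ToyFreeList {
  // One list per size category; entries are (start, size) pairs.
  std::array<std::forward_list<std::pair<std::size_t, std::size_t>>, 4> lists;
  std::size_t wasted_bytes = 0;

  // Largest category whose minimum is <= size.
  int SelectCategory(std::size_t size) const {
    int cat = 0;
    for (std::size_t i = 0; i < kCategoryMin.size(); ++i) {
      if (size >= kCategoryMin[i]) cat = static_cast<int>(i);
    }
    return cat;
  }

  // Returns the number of bytes that could not be reused (waste).
  std::size_t Free(std::size_t start, std::size_t size) {
    if (size < kMinBlockSize) {
      wasted_bytes += size;
      return size;
    }
    lists[SelectCategory(size)].push_front({start, size});
    return 0;
  }
};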
-// ------------------------------------------------
-// FreeListLegacy implementation
-
-FreeListLegacy::FreeListLegacy() {
- // Initializing base (FreeList) fields
- number_of_categories_ = kHuge + 1;
- last_category_ = kHuge;
- min_block_size_ = kMinBlockSize;
- categories_ = new FreeListCategory*[number_of_categories_]();
-
- Reset();
-}
-
-FreeListLegacy::~FreeListLegacy() { delete[] categories_; }
-
-FreeSpace FreeListLegacy::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
- FreeSpace node;
- // First try the allocation fast path: try to allocate the minimum element
- // size of a free list category. This operation is constant time.
- FreeListCategoryType type =
- SelectFastAllocationFreeListCategoryType(size_in_bytes);
- for (int i = type; i < kHuge && node.is_null(); i++) {
- node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
- node_size);
- }
-
- if (node.is_null()) {
- // Next search the huge list for free list nodes. This takes linear time in
- // the number of huge elements.
- node = SearchForNodeInList(kHuge, size_in_bytes, node_size);
- }
-
- if (node.is_null() && type != kHuge) {
- // We didn't find anything in the huge list.
- type = SelectFreeListCategoryType(size_in_bytes);
-
- if (type == kTiniest) {
- // For this tiniest object, the tiny list hasn't been searched yet.
- // Now searching the tiny list.
- node = TryFindNodeIn(kTiny, size_in_bytes, node_size);
- }
-
- if (node.is_null()) {
- // Now search the best fitting free list for a node that has at least the
- // requested size.
- node = TryFindNodeIn(type, size_in_bytes, node_size);
- }
- }
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
-}
-
-// ------------------------------------------------
-// FreeListFastAlloc implementation
-
-FreeListFastAlloc::FreeListFastAlloc() {
- // Initializing base (FreeList) fields
- number_of_categories_ = kHuge + 1;
- last_category_ = kHuge;
- min_block_size_ = kMinBlockSize;
- categories_ = new FreeListCategory*[number_of_categories_]();
-
- Reset();
-}
-
-FreeListFastAlloc::~FreeListFastAlloc() { delete[] categories_; }
-
-FreeSpace FreeListFastAlloc::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
- FreeSpace node;
- // Try to allocate the biggest element possible (to make the most of later
- // bump-pointer allocations).
- FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- for (int i = kHuge; i >= type && node.is_null(); i--) {
- node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
- node_size);
- }
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
-}
-
-// ------------------------------------------------
-// FreeListMany implementation
-
-constexpr unsigned int FreeListMany::categories_min[kNumberOfCategories];
-
-FreeListMany::FreeListMany() {
- // Initializing base (FreeList) fields
- number_of_categories_ = kNumberOfCategories;
- last_category_ = number_of_categories_ - 1;
- min_block_size_ = kMinBlockSize;
- categories_ = new FreeListCategory*[number_of_categories_]();
-
- Reset();
-}
-
-FreeListMany::~FreeListMany() { delete[] categories_; }
-
-size_t FreeListMany::GuaranteedAllocatable(size_t maximum_freed) {
- if (maximum_freed < categories_min[0]) {
- return 0;
- }
- for (int cat = kFirstCategory + 1; cat <= last_category_; cat++) {
- if (maximum_freed < categories_min[cat]) {
- return categories_min[cat - 1];
- }
- }
- return maximum_freed;
-}
-
-Page* FreeListMany::GetPageForSize(size_t size_in_bytes) {
- FreeListCategoryType minimum_category =
- SelectFreeListCategoryType(size_in_bytes);
- Page* page = nullptr;
- for (int cat = minimum_category + 1; !page && cat <= last_category_; cat++) {
- page = GetPageForCategoryType(cat);
- }
- if (!page) {
- // Might return a page in which |size_in_bytes| will not fit.
- page = GetPageForCategoryType(minimum_category);
- }
- return page;
-}
-
-FreeSpace FreeListMany::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
- FreeSpace node;
- FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- for (int i = type; i < last_category_ && node.is_null(); i++) {
- node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
- node_size);
- }
-
- if (node.is_null()) {
- // Searching each element of the last category.
- node = SearchForNodeInList(last_category_, size_in_bytes, node_size);
- }
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
-}
-
-// ------------------------------------------------
-// FreeListManyCached implementation
-
-FreeListManyCached::FreeListManyCached() { ResetCache(); }
-
-void FreeListManyCached::Reset() {
- ResetCache();
- FreeListMany::Reset();
-}
-
-bool FreeListManyCached::AddCategory(FreeListCategory* category) {
- bool was_added = FreeList::AddCategory(category);
-
- // Updating cache
- if (was_added) {
- UpdateCacheAfterAddition(category->type_);
- }
-
-#ifdef DEBUG
- CheckCacheIntegrity();
-#endif
-
- return was_added;
-}
-
-void FreeListManyCached::RemoveCategory(FreeListCategory* category) {
- FreeList::RemoveCategory(category);
-
- // Updating cache
- int type = category->type_;
- if (categories_[type] == nullptr) {
- UpdateCacheAfterRemoval(type);
- }
-
-#ifdef DEBUG
- CheckCacheIntegrity();
-#endif
-}
-
-size_t FreeListManyCached::Free(Address start, size_t size_in_bytes,
- FreeMode mode) {
- Page* page = Page::FromAddress(start);
- page->DecreaseAllocatedBytes(size_in_bytes);
-
- // Blocks have to be a minimum size to hold free list items.
- if (size_in_bytes < min_block_size_) {
- page->add_wasted_memory(size_in_bytes);
- wasted_bytes_ += size_in_bytes;
- return size_in_bytes;
- }
-
- // Insert other blocks at the head of a free list of the appropriate
- // magnitude.
- FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- page->free_list_category(type)->Free(start, size_in_bytes, mode, this);
-
- // Updating cache
- if (mode == kLinkCategory) {
- UpdateCacheAfterAddition(type);
-
-#ifdef DEBUG
- CheckCacheIntegrity();
-#endif
- }
-
- DCHECK_EQ(page->AvailableInFreeList(),
- page->AvailableInFreeListFromAllocatedBytes());
- return 0;
-}
-
-FreeSpace FreeListManyCached::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- USE(origin);
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
-
- FreeSpace node;
- FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- type = next_nonempty_category[type];
- for (; type < last_category_; type = next_nonempty_category[type + 1]) {
- node = TryFindNodeIn(type, size_in_bytes, node_size);
- if (!node.is_null()) break;
- }
-
- if (node.is_null()) {
- // Searching each element of the last category.
- type = last_category_;
- node = SearchForNodeInList(type, size_in_bytes, node_size);
- }
-
- // Updating cache
- if (!node.is_null() && categories_[type] == nullptr) {
- UpdateCacheAfterRemoval(type);
- }
-
-#ifdef DEBUG
- CheckCacheIntegrity();
-#endif
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
-}
-
-// ------------------------------------------------
-// FreeListManyCachedFastPath implementation
-
-FreeSpace FreeListManyCachedFastPath::Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) {
- USE(origin);
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
- FreeSpace node;
-
- // Fast path part 1: searching the last categories
- FreeListCategoryType first_category =
- SelectFastAllocationFreeListCategoryType(size_in_bytes);
- FreeListCategoryType type = first_category;
- for (type = next_nonempty_category[type]; type <= last_category_;
- type = next_nonempty_category[type + 1]) {
- node = TryFindNodeIn(type, size_in_bytes, node_size);
- if (!node.is_null()) break;
- }
-
- // Fast path part 2: searching the medium categories for tiny objects
- if (node.is_null()) {
- if (size_in_bytes <= kTinyObjectMaxSize) {
- for (type = next_nonempty_category[kFastPathFallBackTiny];
- type < kFastPathFirstCategory;
- type = next_nonempty_category[type + 1]) {
- node = TryFindNodeIn(type, size_in_bytes, node_size);
- if (!node.is_null()) break;
- }
- }
- }
-
- // Searching the last category
- if (node.is_null()) {
- // Searching each element of the last category.
- type = last_category_;
- node = SearchForNodeInList(type, size_in_bytes, node_size);
- }
-
- // Finally, search the most precise category
- if (node.is_null()) {
- type = SelectFreeListCategoryType(size_in_bytes);
- for (type = next_nonempty_category[type]; type < first_category;
- type = next_nonempty_category[type + 1]) {
- node = TryFindNodeIn(type, size_in_bytes, node_size);
- if (!node.is_null()) break;
- }
- }
-
- // Updating cache
- if (!node.is_null() && categories_[type] == nullptr) {
- UpdateCacheAfterRemoval(type);
- }
-
-#ifdef DEBUG
- CheckCacheIntegrity();
-#endif
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
-}
-
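The cached free lists above lean on a next_nonempty_category table so the allocation loops can skip empty buckets in constant time. A sketch of such a cache under simplified assumptions about the category count and the update rules:

// Standalone sketch of a "next non-empty category" cache: next_nonempty[i]
// holds the first category >= i that currently has nodes.
#include <array>

constexpr int kNumCategories = 24;

struct NextNonEmptyCache {
  // One extra sentinel slot so lookups at kNumCategories stay in bounds.
  std::array<int, kNumCategories + 1> next_nonempty;

  void Reset() { next_nonempty.fill(kNumCategories); }

  // A category became non-empty: every earlier slot pointing past it now
  // points to it.
  void OnAddition(int type) {
    for (int i = type; i >= 0 && next_nonempty[i] > type; --i) {
      next_nonempty[i] = type;
    }
  }

  // A category became empty: slots pointing at it fall through to the next
  // non-empty category after it.
  void OnRemoval(int type) {
    const int successor = next_nonempty[type + 1];
    for (int i = type; i >= 0 && next_nonempty[i] == type; --i) {
      next_nonempty[i] = successor;
    }
  }
};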
-// ------------------------------------------------
-// FreeListManyCachedOrigin implementation
-
-FreeSpace FreeListManyCachedOrigin::Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) {
- if (origin == AllocationOrigin::kGC) {
- return FreeListManyCached::Allocate(size_in_bytes, node_size, origin);
- } else {
- return FreeListManyCachedFastPath::Allocate(size_in_bytes, node_size,
- origin);
- }
-}
-
-// ------------------------------------------------
-// FreeListMap implementation
-
-FreeListMap::FreeListMap() {
- // Initializing base (FreeList) fields
- number_of_categories_ = 1;
- last_category_ = kOnlyCategory;
- min_block_size_ = kMinBlockSize;
- categories_ = new FreeListCategory*[number_of_categories_]();
-
- Reset();
-}
-
-size_t FreeListMap::GuaranteedAllocatable(size_t maximum_freed) {
- return maximum_freed;
-}
-
-Page* FreeListMap::GetPageForSize(size_t size_in_bytes) {
- return GetPageForCategoryType(kOnlyCategory);
-}
-
-FreeListMap::~FreeListMap() { delete[] categories_; }
-
-FreeSpace FreeListMap::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
-
-  // The following DCHECK ensures that maps are allocated one by one (i.e.,
-  // without folding). This assumption currently holds. However, if it were to
-  // become untrue in the future, you'll get an error here. To fix it, I would
-  // suggest removing the DCHECK and replacing TryFindNodeIn with
-  // SearchForNodeInList below.
- DCHECK_EQ(size_in_bytes, Map::kSize);
-
- FreeSpace node = TryFindNodeIn(kOnlyCategory, size_in_bytes, node_size);
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK_IMPLIES(node.is_null(), IsEmpty());
- return node;
-}
-
-// ------------------------------------------------
-// Generic FreeList methods (non alloc/free related)
-
-void FreeList::Reset() {
- ForAllFreeListCategories(
- [this](FreeListCategory* category) { category->Reset(this); });
- for (int i = kFirstCategory; i < number_of_categories_; i++) {
- categories_[i] = nullptr;
- }
- wasted_bytes_ = 0;
- available_ = 0;
-}
-
-size_t FreeList::EvictFreeListItems(Page* page) {
- size_t sum = 0;
- page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
- sum += category->available();
- RemoveCategory(category);
- category->Reset(this);
- });
- return sum;
-}
-
-void FreeList::RepairLists(Heap* heap) {
- ForAllFreeListCategories(
- [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
-}
-
-bool FreeList::AddCategory(FreeListCategory* category) {
- FreeListCategoryType type = category->type_;
- DCHECK_LT(type, number_of_categories_);
- FreeListCategory* top = categories_[type];
-
- if (category->is_empty()) return false;
- DCHECK_NE(top, category);
-
- // Common double-linked list insertion.
- if (top != nullptr) {
- top->set_prev(category);
- }
- category->set_next(top);
- categories_[type] = category;
-
- IncreaseAvailableBytes(category->available());
- return true;
-}
-
-void FreeList::RemoveCategory(FreeListCategory* category) {
- FreeListCategoryType type = category->type_;
- DCHECK_LT(type, number_of_categories_);
- FreeListCategory* top = categories_[type];
-
- if (category->is_linked(this)) {
- DecreaseAvailableBytes(category->available());
- }
-
- // Common double-linked list removal.
- if (top == category) {
- categories_[type] = category->next();
- }
- if (category->prev() != nullptr) {
- category->prev()->set_next(category->next());
- }
- if (category->next() != nullptr) {
- category->next()->set_prev(category->prev());
- }
- category->set_next(nullptr);
- category->set_prev(nullptr);
-}
-
-void FreeList::PrintCategories(FreeListCategoryType type) {
- FreeListCategoryIterator it(this, type);
- PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
- static_cast<void*>(categories_[type]), type);
- while (it.HasNext()) {
- FreeListCategory* current = it.Next();
- PrintF("%p -> ", static_cast<void*>(current));
- }
- PrintF("null\n");
-}
int MemoryChunk::FreeListsLength() {
int length = 0;
@@ -3619,250 +475,5 @@ int MemoryChunk::FreeListsLength() {
return length;
}
-size_t FreeListCategory::SumFreeList() {
- size_t sum = 0;
- FreeSpace cur = top();
- while (!cur.is_null()) {
- // We can't use "cur->map()" here because both cur's map and the
- // root can be null during bootstrapping.
- DCHECK(cur.map_slot().contains_value(Page::FromHeapObject(cur)
- ->heap()
- ->isolate()
- ->root(RootIndex::kFreeSpaceMap)
- .ptr()));
- sum += cur.relaxed_read_size();
- cur = cur.next();
- }
- return sum;
-}
-int FreeListCategory::FreeListLength() {
- int length = 0;
- FreeSpace cur = top();
- while (!cur.is_null()) {
- length++;
- cur = cur.next();
- }
- return length;
-}
-
-#ifdef DEBUG
-bool FreeList::IsVeryLong() {
- int len = 0;
- for (int i = kFirstCategory; i < number_of_categories_; i++) {
- FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
- while (it.HasNext()) {
- len += it.Next()->FreeListLength();
- if (len >= FreeListCategory::kVeryLongFreeList) return true;
- }
- }
- return false;
-}
-
-
-// This can take a very long time because it is linear in the number of entries
-// on the free list, so it should not be called when IsVeryLong() reports the
-// list as very long (FreeListLength() >= kVeryLongFreeList).
-size_t FreeList::SumFreeLists() {
- size_t sum = 0;
- ForAllFreeListCategories(
- [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
- return sum;
-}
-#endif
-
-
-// -----------------------------------------------------------------------------
-// OldSpace implementation
-
-void PagedSpace::PrepareForMarkCompact() {
- // We don't have a linear allocation area while sweeping. It will be restored
- // on the first allocation after the sweep.
- FreeLinearAllocationArea();
-
- // Clear the free list before a full GC---it will be rebuilt afterward.
- free_list_->Reset();
-}
-
-size_t PagedSpace::SizeOfObjects() {
- CHECK_GE(limit(), top());
- DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
- return Size() - (limit() - top());
-}
-
-bool PagedSpace::EnsureSweptAndRetryAllocation(int size_in_bytes,
- AllocationOrigin origin) {
- DCHECK(!is_local_space());
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- if (collector->sweeping_in_progress()) {
- // Wait for the sweeper threads here and complete the sweeping phase.
- collector->EnsureSweepingCompleted();
-
- // After waiting for the sweeper threads, there may be new free-list
- // entries.
- return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
- }
- return false;
-}
-
-bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin) {
- VMState<GC> state(heap()->isolate());
- RuntimeCallTimerScope runtime_timer(
- heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
- base::Optional<base::MutexGuard> optional_mutex;
-
- if (FLAG_concurrent_allocation && origin != AllocationOrigin::kGC &&
- identity() == OLD_SPACE) {
- optional_mutex.emplace(&allocation_mutex_);
- }
-
- return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
-}
-
-bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin) {
- return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
-}
-
-bool OffThreadSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin) {
- if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
- return true;
-
- if (Expand()) {
- DCHECK((CountTotalPages() > 1) ||
- (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
- return RefillLinearAllocationAreaFromFreeList(
- static_cast<size_t>(size_in_bytes), origin);
- }
-
- return false;
-}
-
-bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin) {
- // Non-compaction local spaces are not supported.
- DCHECK_IMPLIES(is_local_space(), is_compaction_space());
-
- // Allocation in this space has failed.
- DCHECK_GE(size_in_bytes, 0);
- const int kMaxPagesToSweep = 1;
-
- if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
- return true;
-
- MarkCompactCollector* collector = heap()->mark_compact_collector();
-  // Sweeping may still be in progress.
- if (collector->sweeping_in_progress()) {
- if (FLAG_concurrent_sweeping && !is_compaction_space() &&
- !collector->sweeper()->AreSweeperTasksRunning()) {
- collector->EnsureSweepingCompleted();
- }
-
-    // First try to refill the free list; concurrent sweeper threads
-    // may have freed some objects in the meantime.
- RefillFreeList();
-
- // Retry the free list allocation.
- if (RefillLinearAllocationAreaFromFreeList(
- static_cast<size_t>(size_in_bytes), origin))
- return true;
-
- if (SweepAndRetryAllocation(size_in_bytes, kMaxPagesToSweep, size_in_bytes,
- origin))
- return true;
- }
-
- if (is_compaction_space()) {
-    // The main thread may have acquired all swept pages. Try to steal a page
-    // from it. This can only happen during young generation evacuation.
- PagedSpace* main_space = heap()->paged_space(identity());
- Page* page = main_space->RemovePageSafe(size_in_bytes);
- if (page != nullptr) {
- AddPage(page);
- if (RefillLinearAllocationAreaFromFreeList(
- static_cast<size_t>(size_in_bytes), origin))
- return true;
- }
- }
-
- if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
- DCHECK((CountTotalPages() > 1) ||
- (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
- return RefillLinearAllocationAreaFromFreeList(
- static_cast<size_t>(size_in_bytes), origin);
- }
-
- if (is_compaction_space()) {
- return SweepAndRetryAllocation(0, 0, size_in_bytes, origin);
-
- } else {
-    // If sweeper threads are active, wait for them to finish here and take
-    // over the entries they have released to the free lists. Allocation may
-    // still fail, which would indicate that there is not enough memory for
-    // the given allocation.
- return EnsureSweptAndRetryAllocation(size_in_bytes, origin);
- }
-}
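// Recap of the fallback ladder implemented above (an editorial summary
// derived from the code):
//   1. Retry a plain free-list refill.
//   2. While sweeping is in progress: refill the free list from pages the
//      concurrent sweeper has finished, retry, and otherwise help sweep up
//      to kMaxPagesToSweep pages via SweepAndRetryAllocation.
//   3. Compaction spaces may additionally steal a swept page from the main
//      space with the same identity.
//   4. Expand the space with a new page if the old-generation growth
//      heuristics allow it.
//   5. Finally, compaction spaces sweep without a freed-bytes target, while
//      regular spaces wait for sweeping to complete and retry once more.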
-
-bool PagedSpace::SweepAndRetryAllocation(int required_freed_bytes,
- int max_pages, int size_in_bytes,
- AllocationOrigin origin) {
-  // Clean up invalidated old-to-new references for compaction spaces in the
-  // final atomic pause.
- Sweeper::FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
- is_compaction_space() ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
- : Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
-
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- if (collector->sweeping_in_progress()) {
- int max_freed = collector->sweeper()->ParallelSweepSpace(
- identity(), required_freed_bytes, max_pages,
- invalidated_slots_in_free_space);
- RefillFreeList();
- if (max_freed >= size_in_bytes)
- return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
- }
- return false;
-}
-
-// -----------------------------------------------------------------------------
-// MapSpace implementation
-
-// TODO(dmercadier): use a heap instead of sorting like this.
-// Using a heap would have multiple benefits:
-// - For now, SortFreeList is only called after sweeping, which is somewhat
-//   late. With a heap, sorting could be done online: FreeListCategories would
-//   be inserted into a heap (i.e. kept in sorted order).
-// - SortFreeList is a bit fragile: any change to FreeListMap (or to
-//   MapSpace::free_list_) could break it.
-void MapSpace::SortFreeList() {
- using LiveBytesPagePair = std::pair<size_t, Page*>;
- std::vector<LiveBytesPagePair> pages;
- pages.reserve(CountTotalPages());
-
- for (Page* p : *this) {
- free_list()->RemoveCategory(p->free_list_category(kFirstCategory));
- pages.push_back(std::make_pair(p->allocated_bytes(), p));
- }
-
-  // Sort pages by least allocated bytes first.
- std::sort(pages.begin(), pages.end(),
- [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
- return a.first < b.first;
- });
-
- for (LiveBytesPagePair const& p : pages) {
-    // Since AddCategory inserts at the head, it reverses the order produced
-    // by the sort above: the least-allocated category is added first and
-    // thus ends up last, while the most-allocated one ends up at the head.
- free_list()->AddCategory(p.second->free_list_category(kFirstCategory));
- }
-}
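// A minimal sketch of the TODO above (an editorial illustration, not V8
// code): keeping categories in a min-heap keyed by allocated bytes would let
// the ordering be maintained online instead of being rebuilt after sweeping.
// Sketch-only includes; in a real .cc file these would live at the top.
#include <functional>
#include <queue>
#include <utility>
#include <vector>

struct MapSpaceCategoryHeap {
  // (allocated bytes, category); pair ordering compares the byte count first.
  using Entry = std::pair<size_t, FreeListCategory*>;
  // std::priority_queue is a max-heap by default; std::greater turns it into
  // a min-heap so the least-allocated page's category stays on top.
  std::priority_queue<Entry, std::vector<Entry>, std::greater<Entry>> heap_;

  void Insert(Page* page) {
    heap_.push({page->allocated_bytes(),
                page->free_list_category(kFirstCategory)});
  }
};
// Draining this min-heap and head-inserting each category via AddCategory
// would reproduce the most-allocated-first order built by the loop above.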
-
-#ifdef VERIFY_HEAP
-void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
-#endif
-
} // namespace internal
} // namespace v8