author | Michaël Zasso <targos@protonmail.com> | 2018-01-24 20:16:06 +0100
committer | Myles Borins <mylesborins@google.com> | 2018-01-24 15:02:20 -0800
commit | 4c4af643e5042d615a60c6bbc05aee9d81b903e5 (patch)
tree | 3fb0a97988fe4439ae3ae06f26915d1dcf8cab92 /deps/v8/src/heap/spaces.cc
parent | fa9f31a4fda5a3782c652e56e394465805ebb50f (diff)
download | node-new-4c4af643e5042d615a60c6bbc05aee9d81b903e5.tar.gz
deps: update V8 to 6.4.388.40
PR-URL: https://github.com/nodejs/node/pull/17489
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Diffstat (limited to 'deps/v8/src/heap/spaces.cc')
-rw-r--r-- | deps/v8/src/heap/spaces.cc | 372
1 file changed, 202 insertions, 170 deletions
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index f654c6689e..7657e1e6ec 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -15,6 +15,7 @@
 #include "src/heap/incremental-marking.h"
 #include "src/heap/mark-compact.h"
 #include "src/heap/slot-set.h"
+#include "src/heap/sweeper.h"
 #include "src/msan.h"
 #include "src/objects-inl.h"
 #include "src/snapshot/snapshot.h"
@@ -56,7 +57,7 @@ bool HeapObjectIterator::AdvanceToNextPage() {
   Page* cur_page = *(current_page_++);
   Heap* heap = space_->heap();
-  heap->mark_compact_collector()->sweeper().SweepOrWaitUntilSweepingCompleted(
+  heap->mark_compact_collector()->sweeper()->SweepOrWaitUntilSweepingCompleted(
       cur_page);
   if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
     heap->minor_mark_compact_collector()->MakeIterable(
@@ -71,14 +72,14 @@ bool HeapObjectIterator::AdvanceToNextPage() {
 PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
     : heap_(heap) {
   AllSpaces spaces(heap_);
-  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+  for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
     space->PauseAllocationObservers();
   }
 }

 PauseAllocationObserversScope::~PauseAllocationObserversScope() {
   AllSpaces spaces(heap_);
-  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+  for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
     space->ResumeAllocationObservers();
   }
 }
@@ -119,21 +120,21 @@ bool CodeRange::SetUp(size_t requested) {

   VirtualMemory reservation;
   if (!AlignedAllocVirtualMemory(
-          requested,
-          Max(kCodeRangeAreaAlignment,
-              static_cast<size_t>(base::OS::AllocateAlignment())),
-          v8::internal::GetRandomMmapAddr(), &reservation)) {
+          requested, Max(kCodeRangeAreaAlignment, base::OS::AllocatePageSize()),
+          base::OS::GetRandomMmapAddr(), &reservation)) {
     return false;
   }

   // We are sure that we have mapped a block of requested addresses.
-  DCHECK(reservation.size() == requested);
+  DCHECK_GE(reservation.size(), requested);
   Address base = reinterpret_cast<Address>(reservation.address());

   // On some platforms, specifically Win64, we need to reserve some pages at
   // the beginning of an executable space.
   if (reserved_area > 0) {
-    if (!reservation.Commit(base, reserved_area, true)) return false;
+    if (!reservation.SetPermissions(base, reserved_area,
+                                    base::OS::MemoryPermission::kReadWrite))
+      return false;

     base += reserved_area;
   }
@@ -198,23 +199,22 @@ bool CodeRange::GetNextAllocationBlock(size_t requested) {
 Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                      const size_t commit_size,
                                      size_t* allocated) {
-  // request_size includes guards while committed_size does not. Make sure
-  // callers know about the invariant.
-  CHECK_LE(commit_size,
-           requested_size - 2 * MemoryAllocator::CodePageGuardSize());
+  // requested_size includes the header and two guard regions, while commit_size
+  // only includes the header.
+  DCHECK_LE(commit_size,
+            requested_size - 2 * MemoryAllocator::CodePageGuardSize());
   FreeBlock current;
   if (!ReserveBlock(requested_size, &current)) {
     *allocated = 0;
-    return NULL;
+    return nullptr;
   }
   *allocated = current.size;
-  DCHECK(*allocated <= current.size);
   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
   if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
           &virtual_memory_, current.start, commit_size, *allocated)) {
     *allocated = 0;
     ReleaseBlock(&current);
-    return NULL;
+    return nullptr;
   }
   return current.start;
 }
@@ -227,7 +227,8 @@ bool CodeRange::CommitRawMemory(Address start, size_t length) {

 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
-  return virtual_memory_.Uncommit(start, length);
+  return virtual_memory_.SetPermissions(start, length,
+                                        base::OS::MemoryPermission::kNoAccess);
 }
@@ -235,7 +236,8 @@ void CodeRange::FreeRawMemory(Address address, size_t length) {
   DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
   base::LockGuard<base::Mutex> guard(&code_range_mutex_);
   free_list_.emplace_back(address, length);
-  virtual_memory_.Uncommit(address, length);
+  virtual_memory_.SetPermissions(address, length,
+                                 base::OS::MemoryPermission::kNoAccess);
 }

 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
@@ -304,7 +306,7 @@ void MemoryAllocator::TearDown() {
   capacity_ = 0;

   if (last_chunk_.IsReserved()) {
-    last_chunk_.Release();
+    last_chunk_.Free();
   }

   delete code_range_;
@@ -411,12 +413,13 @@ bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
   // Chunks in old generation are unmapped if they are empty.
   DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
   return !chunk->InNewSpace() || mc == nullptr ||
-         !mc->sweeper().sweeping_in_progress();
+         !mc->sweeper()->sweeping_in_progress();
 }

 bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                    Executability executable) {
-  if (!base::OS::CommitRegion(base, size, executable == EXECUTABLE)) {
+  if (!base::OS::SetPermissions(base, size,
+                                base::OS::MemoryPermission::kReadWrite)) {
     return false;
   }
   UpdateAllocatedSpaceLimits(base, base + size);
@@ -427,27 +430,25 @@ void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
   // Code which is part of the code-range does not have its own VirtualMemory.
-  DCHECK(code_range() == NULL ||
+  DCHECK(code_range() == nullptr ||
         !code_range()->contains(static_cast<Address>(reservation->address())));
   DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
          reservation->size() <= Page::kPageSize);

-  reservation->Release();
+  reservation->Free();
 }

 void MemoryAllocator::FreeMemory(Address base, size_t size,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
-  if (code_range() != NULL &&
+  if (code_range() != nullptr &&
       code_range()->contains(static_cast<Address>(base))) {
     DCHECK(executable == EXECUTABLE);
     code_range()->FreeRawMemory(base, size);
   } else {
     DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
-    bool result = base::OS::ReleaseRegion(base, size);
-    USE(result);
-    DCHECK(result);
+    CHECK(base::OS::Free(base, size));
   }
 }
@@ -458,15 +459,10 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
   if (!AlignedAllocVirtualMemory(size, alignment, hint, &reservation))
     return nullptr;

-  const Address base =
-      ::RoundUp(static_cast<Address>(reservation.address()), alignment);
-  if (base + size != reservation.end()) {
-    const Address unused_start = ::RoundUp(base + size, GetCommitPageSize());
-    reservation.ReleasePartial(unused_start);
-  }
+  Address result = static_cast<Address>(reservation.address());
   size_.Increment(reservation.size());
   controller->TakeControl(&reservation);
-  return base;
+  return result;
 }

 Address MemoryAllocator::AllocateAlignedMemory(
@@ -476,27 +472,28 @@ Address MemoryAllocator::AllocateAlignedMemory(
   VirtualMemory reservation;
   Address base =
       ReserveAlignedMemory(reserve_size, alignment, hint, &reservation);
-  if (base == NULL) return NULL;
+  if (base == nullptr) return nullptr;

   if (executable == EXECUTABLE) {
     if (!CommitExecutableMemory(&reservation, base, commit_size,
                                 reserve_size)) {
-      base = NULL;
+      base = nullptr;
     }
   } else {
-    if (reservation.Commit(base, commit_size, false)) {
+    if (reservation.SetPermissions(base, commit_size,
+                                   base::OS::MemoryPermission::kReadWrite)) {
       UpdateAllocatedSpaceLimits(base, base + commit_size);
     } else {
-      base = NULL;
+      base = nullptr;
     }
   }

-  if (base == NULL) {
-    // Failed to commit the body. Release the mapping and any partially
-    // committed regions inside it.
-    reservation.Release();
+  if (base == nullptr) {
+    // Failed to commit the body. Free the mapping and any partially committed
+    // regions inside it.
+    reservation.Free();
     size_.Decrement(reserve_size);
-    return NULL;
+    return nullptr;
   }

   controller->TakeControl(&reservation);
@@ -528,6 +525,50 @@ void MemoryChunk::InitializationMemoryFence() {
 #endif
 }

+void MemoryChunk::SetReadAndExecutable() {
+  DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+  DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
+  // Decrementing the write_unprotect_counter_ and changing the page
+  // protection mode has to be atomic.
+  base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
+  if (write_unprotect_counter_ == 0) {
+    // This is a corner case that may happen when we have a
+    // CodeSpaceMemoryModificationScope open and this page was newly
+    // added.
+    return;
+  }
+  write_unprotect_counter_--;
+  DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
+  if (write_unprotect_counter_ == 0) {
+    Address protect_start =
+        address() + MemoryAllocator::CodePageAreaStartOffset();
+    size_t page_size = MemoryAllocator::GetCommitPageSize();
+    DCHECK(IsAddressAligned(protect_start, page_size));
+    size_t protect_size = RoundUp(area_size(), page_size);
+    CHECK(base::OS::SetPermissions(protect_start, protect_size,
+                                   base::OS::MemoryPermission::kReadExecute));
+  }
+}
+
+void MemoryChunk::SetReadAndWritable() {
+  DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+  DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
+  // Incrementing the write_unprotect_counter_ and changing the page
+  // protection mode has to be atomic.
+  base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
+  write_unprotect_counter_++;
+  DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
+  if (write_unprotect_counter_ == 1) {
+    Address unprotect_start =
+        address() + MemoryAllocator::CodePageAreaStartOffset();
+    size_t page_size = MemoryAllocator::GetCommitPageSize();
+    DCHECK(IsAddressAligned(unprotect_start, page_size));
+    size_t unprotect_size = RoundUp(area_size(), page_size);
+    CHECK(base::OS::SetPermissions(unprotect_start, unprotect_size,
+                                   base::OS::MemoryPermission::kReadWrite));
+  }
+}
+
 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                      Address area_start, Address area_end,
                                      Executability executable, Space* owner,
@@ -554,7 +595,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
   chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
-  chunk->mutex_ = new base::RecursiveMutex();
+  chunk->page_protection_change_mutex_ = new base::Mutex();
+  chunk->write_unprotect_counter_ = 0;
+  chunk->mutex_ = new base::Mutex();
   chunk->allocated_bytes_ = chunk->area_size();
   chunk->wasted_memory_ = 0;
   chunk->young_generation_bitmap_ = nullptr;
@@ -568,6 +611,17 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,

   if (executable == EXECUTABLE) {
     chunk->SetFlag(IS_EXECUTABLE);
+    if (heap->write_protect_code_memory()) {
+      chunk->write_unprotect_counter_ =
+          heap->code_space_memory_modification_scope_depth();
+    } else {
+      size_t page_size = MemoryAllocator::GetCommitPageSize();
+      DCHECK(IsAddressAligned(area_start, page_size));
+      size_t area_size = RoundUp(area_end - area_start, page_size);
+      CHECK(base::OS::SetPermissions(
+          area_start, area_size,
+          base::OS::MemoryPermission::kReadWriteExecute));
+    }
   }

   if (reservation != nullptr) {
@@ -641,56 +695,6 @@ Page* Page::ConvertNewToOld(Page* old_page) {
   return new_page;
 }

-// Commit MemoryChunk area to the requested size.
-bool MemoryChunk::CommitArea(size_t requested) {
-  size_t guard_size =
-      IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
-  size_t header_size = area_start() - address() - guard_size;
-  size_t commit_size =
-      ::RoundUp(header_size + requested, MemoryAllocator::GetCommitPageSize());
-  size_t committed_size = ::RoundUp(header_size + (area_end() - area_start()),
-                                    MemoryAllocator::GetCommitPageSize());
-
-  if (commit_size > committed_size) {
-    // Commit size should be less or equal than the reserved size.
-    DCHECK(commit_size <= size() - 2 * guard_size);
-    // Append the committed area.
-    Address start = address() + committed_size + guard_size;
-    size_t length = commit_size - committed_size;
-    if (reservation_.IsReserved()) {
-      Executability executable =
-          IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
-      if (!heap()->memory_allocator()->CommitMemory(start, length,
-                                                    executable)) {
-        return false;
-      }
-    } else {
-      CodeRange* code_range = heap_->memory_allocator()->code_range();
-      DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
-      if (!code_range->CommitRawMemory(start, length)) return false;
-    }
-
-    if (Heap::ShouldZapGarbage()) {
-      heap_->memory_allocator()->ZapBlock(start, length);
-    }
-  } else if (commit_size < committed_size) {
-    DCHECK_LT(0, commit_size);
-    // Shrink the committed area.
-    size_t length = committed_size - commit_size;
-    Address start = address() + committed_size + guard_size - length;
-    if (reservation_.IsReserved()) {
-      if (!reservation_.Uncommit(start, length)) return false;
-    } else {
-      CodeRange* code_range = heap_->memory_allocator()->code_range();
-      DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
-      if (!code_range->UncommitRawMemory(start, length)) return false;
-    }
-  }
-
-  area_end_ = area_start_ + requested;
-  return true;
-}
-
 size_t MemoryChunk::CommittedPhysicalMemory() {
   if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
     return size();
@@ -712,8 +716,8 @@ void MemoryChunk::Unlink() {
   MemoryChunk* prev_element = prev_chunk();
   next_element->set_prev_chunk(prev_element);
   prev_element->set_next_chunk(next_element);
-  set_prev_chunk(NULL);
-  set_next_chunk(NULL);
+  set_prev_chunk(nullptr);
+  set_next_chunk(nullptr);
 }

 MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
@@ -761,15 +765,14 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
   //
   if (executable == EXECUTABLE) {
-    chunk_size = ::RoundUp(CodePageAreaStartOffset() + reserve_area_size,
-                           GetCommitPageSize()) +
-                 CodePageGuardSize();
+    chunk_size = ::RoundUp(
+        CodePageAreaStartOffset() + reserve_area_size + CodePageGuardSize(),
+        GetCommitPageSize());

     // Size of header (not executable) plus area (executable).
     size_t commit_size = ::RoundUp(
         CodePageGuardStartOffset() + commit_area_size, GetCommitPageSize());

-    // Allocate executable memory either from code range or from the
-    // OS.
+// Allocate executable memory either from code range or from the OS.
 #ifdef V8_TARGET_ARCH_MIPS64
     // Use code range only for large object space on mips64 to keep address
     // range within 256-MB memory region.
@@ -781,7 +784,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
           code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
       DCHECK(
           IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
-      if (base == NULL) return NULL;
+      if (base == nullptr) return nullptr;
       size_.Increment(chunk_size);
       // Update executable memory size.
       size_executable_.Increment(chunk_size);
@@ -789,7 +792,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
       base = AllocateAlignedMemory(chunk_size, commit_size,
                                    MemoryChunk::kAlignment, executable,
                                    address_hint, &reservation);
-      if (base == NULL) return NULL;
+      if (base == nullptr) return nullptr;
       // Update executable memory size.
       size_executable_.Increment(reservation.size());
     }
@@ -811,7 +814,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                               executable, address_hint, &reservation);

-    if (base == NULL) return NULL;
+    if (base == nullptr) return nullptr;

     if (Heap::ShouldZapGarbage()) {
       ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
@@ -949,16 +952,19 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
   chunk->size_ -= bytes_to_free;
   chunk->area_end_ = new_area_end;
   if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+    // Add guard page at the end.
+    size_t page_size = GetCommitPageSize();
     DCHECK_EQ(0, reinterpret_cast<uintptr_t>(chunk->area_end_) %
-                     static_cast<uintptr_t>(GetCommitPageSize()));
+                     static_cast<uintptr_t>(page_size));
     DCHECK_EQ(chunk->address() + chunk->size(),
               chunk->area_end() + CodePageGuardSize());
-    reservation->Guard(chunk->area_end_);
+    reservation->SetPermissions(chunk->area_end_, page_size,
+                                base::OS::MemoryPermission::kNoAccess);
   }
   // On e.g. Windows, a reservation may be larger than a page and releasing
   // partially starting at |start_free| will also release the potentially
   // unused part behind the current page.
-  const size_t released_bytes = reservation->ReleasePartial(start_free);
+  const size_t released_bytes = reservation->Release(start_free);
   DCHECK_GE(size_.Value(), released_bytes);
   size_.Decrement(released_bytes);
   isolate_->counters()->memory_allocated()->Decrement(
@@ -1105,7 +1111,9 @@ bool MemoryAllocator::CommitBlock(Address start, size_t size,

 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
-  if (!base::OS::UncommitRegion(start, size)) return false;
+  if (!base::OS::SetPermissions(start, size,
+                                base::OS::MemoryPermission::kNoAccess))
+    return false;
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
   return true;
 }
@@ -1132,9 +1140,7 @@ size_t MemoryAllocator::CodePageGuardStartOffset() {
   return ::RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
 }

-size_t MemoryAllocator::CodePageGuardSize() {
-  return static_cast<int>(GetCommitPageSize());
-}
+size_t MemoryAllocator::CodePageGuardSize() { return GetCommitPageSize(); }

 size_t MemoryAllocator::CodePageAreaStartOffset() {
   // We are guarding code pages: the first OS page after the header
@@ -1160,27 +1166,40 @@ intptr_t MemoryAllocator::GetCommitPageSize() {
 bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
                                              size_t commit_size,
                                              size_t reserved_size) {
-  // Commit page header (not executable).
-  Address header = start;
-  size_t header_size = CodePageGuardStartOffset();
-  if (vm->Commit(header, header_size, false)) {
-    // Create guard page after the header.
-    if (vm->Guard(start + CodePageGuardStartOffset())) {
-      // Commit page body (executable).
-      Address body = start + CodePageAreaStartOffset();
-      size_t body_size = commit_size - CodePageGuardStartOffset();
-      if (vm->Commit(body, body_size, true)) {
-        // Create guard page before the end.
-        if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
-          UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
-                                                commit_size -
-                                                CodePageGuardStartOffset());
+  const size_t page_size = GetCommitPageSize();
+  // All addresses and sizes must be aligned to the commit page size.
+  DCHECK(IsAddressAligned(start, page_size));
+  DCHECK_EQ(0, commit_size % page_size);
+  DCHECK_EQ(0, reserved_size % page_size);
+  const size_t guard_size = CodePageGuardSize();
+  const size_t pre_guard_offset = CodePageGuardStartOffset();
+  const size_t code_area_offset = CodePageAreaStartOffset();
+  // reserved_size includes two guard regions, commit_size does not.
+  DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
+  const Address pre_guard_page = start + pre_guard_offset;
+  const Address code_area = start + code_area_offset;
+  const Address post_guard_page = start + reserved_size - guard_size;
+  // Commit the non-executable header, from start to pre-code guard page.
+  if (vm->SetPermissions(start, pre_guard_offset,
+                         base::OS::MemoryPermission::kReadWrite)) {
+    // Create the pre-code guard page, following the header.
+    if (vm->SetPermissions(pre_guard_page, page_size,
+                           base::OS::MemoryPermission::kNoAccess)) {
+      // Commit the executable code body.
+      if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
+                             base::OS::MemoryPermission::kReadWrite)) {
+        // Create the post-code guard page.
+        if (vm->SetPermissions(post_guard_page, page_size,
+                               base::OS::MemoryPermission::kNoAccess)) {
+          UpdateAllocatedSpaceLimits(start, code_area + commit_size);
           return true;
         }
-        vm->Uncommit(body, body_size);
+        vm->SetPermissions(code_area, commit_size,
+                           base::OS::MemoryPermission::kNoAccess);
       }
     }
-    vm->Uncommit(header, header_size);
+    vm->SetPermissions(start, pre_guard_offset,
+                       base::OS::MemoryPermission::kNoAccess);
   }
   return false;
 }
@@ -1202,6 +1221,10 @@ void MemoryChunk::ReleaseAllocatedMemory() {
     delete mutex_;
     mutex_ = nullptr;
   }
+  if (page_protection_change_mutex_ != nullptr) {
+    delete page_protection_change_mutex_;
+    page_protection_change_mutex_ = nullptr;
+  }
   ReleaseSlotSet<OLD_TO_NEW>();
   ReleaseSlotSet<OLD_TO_OLD>();
   ReleaseTypedSlotSet<OLD_TO_NEW>();
@@ -1379,7 +1402,6 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
     : Space(heap, space, executable),
       anchor_(this),
       free_list_(this),
-      locked_page_(nullptr),
       top_on_previous_step_(0) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
@@ -1416,7 +1438,7 @@ void PagedSpace::RefillFreeList() {
   size_t added = 0;
   {
     Page* p = nullptr;
-    while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) {
+    while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
       // Only during compaction pages can actually change ownership. This is
       // safe because there exists no other competing action on the page links
       // during compaction.
@@ -1702,7 +1724,7 @@ void PagedSpace::EmptyAllocationInfo() {
                                  nullptr, 0);
     top_on_previous_step_ = 0;
   }
-  SetTopAndLimit(NULL, NULL);
+  SetTopAndLimit(nullptr, nullptr);
   DCHECK_GE(current_limit, current_top);
   Free(current_top, current_limit - current_top);
 }
@@ -1722,8 +1744,8 @@ void PagedSpace::ReleasePage(Page* page) {
   }

   // If page is still in a list, unlink it from that list.
-  if (page->next_chunk() != NULL) {
-    DCHECK(page->prev_chunk() != NULL);
+  if (page->next_chunk() != nullptr) {
+    DCHECK_NOT_NULL(page->prev_chunk());
     page->Unlink();
   }
   AccountUncommitted(page->size());
@@ -1731,6 +1753,20 @@ void PagedSpace::ReleasePage(Page* page) {
   heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
 }

+void PagedSpace::SetReadAndExecutable() {
+  DCHECK(identity() == CODE_SPACE);
+  for (Page* page : *this) {
+    page->SetReadAndExecutable();
+  }
+}
+
+void PagedSpace::SetReadAndWritable() {
+  DCHECK(identity() == CODE_SPACE);
+  for (Page* page : *this) {
+    page->SetReadAndWritable();
+  }
+}
+
 std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
   return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
 }
@@ -1752,7 +1788,8 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
     HeapObjectIterator it(page);
     Address end_of_previous_object = page->area_start();
     Address top = page->area_end();
-    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    for (HeapObject* object = it.Next(); object != nullptr;
+         object = it.Next()) {
       CHECK(end_of_previous_object <= object->address());

       // The first word should be a map, and we expect all map pointers to
@@ -1791,7 +1828,8 @@ void PagedSpace::VerifyLiveBytes() {
     CHECK(page->SweepingDone());
     HeapObjectIterator it(page);
     int black_size = 0;
-    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    for (HeapObject* object = it.Next(); object != nullptr;
+         object = it.Next()) {
      // All the interior pointers should be contained in the heap.
      if (marking_state->IsBlack(object)) {
        black_size += object->Size();
@@ -1811,7 +1849,8 @@ void PagedSpace::VerifyCountersAfterSweeping() {
     total_capacity += page->area_size();
     HeapObjectIterator it(page);
     size_t real_allocated = 0;
-    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    for (HeapObject* object = it.Next(); object != nullptr;
+         object = it.Next()) {
       if (!object->IsFiller()) {
         real_allocated += object->Size();
       }
@@ -1883,11 +1922,11 @@ bool NewSpace::SetUp(size_t initial_semispace_capacity,
 void NewSpace::TearDown() {
   if (allocated_histogram_) {
     DeleteArray(allocated_histogram_);
-    allocated_histogram_ = NULL;
+    allocated_histogram_ = nullptr;
   }
   if (promoted_histogram_) {
     DeleteArray(promoted_histogram_);
-    promoted_histogram_ = NULL;
+    promoted_histogram_ = nullptr;
   }

   allocation_info_.Reset(nullptr, nullptr);
@@ -2176,7 +2215,7 @@ void NewSpace::ResumeAllocationObservers() {
 // TODO(ofrobots): refactor into SpaceWithLinearArea
 void PagedSpace::ResumeAllocationObservers() {
-  DCHECK(top_on_previous_step_ == 0);
+  DCHECK_NULL(top_on_previous_step_);
   Space::ResumeAllocationObservers();
   StartNextInlineAllocationStep();
 }
@@ -2333,7 +2372,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
   DCHECK_LE(new_capacity, maximum_capacity_);
   DCHECK_GT(new_capacity, current_capacity_);
   const size_t delta = new_capacity - current_capacity_;
-  DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
+  DCHECK(IsAligned(delta, base::OS::AllocatePageSize()));
   const int delta_pages = static_cast<int>(delta / Page::kPageSize);
   Page* last_page = anchor()->prev_page();
   DCHECK_NE(last_page, anchor());
@@ -2377,7 +2416,7 @@ bool SemiSpace::ShrinkTo(size_t new_capacity) {
   DCHECK_LT(new_capacity, current_capacity_);
   if (is_committed()) {
     const size_t delta = current_capacity_ - new_capacity;
-    DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
+    DCHECK(IsAligned(delta, base::OS::AllocatePageSize()));
     int delta_pages = static_cast<int>(delta / Page::kPageSize);
     Page* new_last_page;
     Page* last_page;
@@ -2564,7 +2603,7 @@ static int CollectHistogramInfo(HeapObject* obj) {
   Isolate* isolate = obj->GetIsolate();
   InstanceType type = obj->map()->instance_type();
   DCHECK(0 <= type && type <= LAST_TYPE);
-  DCHECK(isolate->heap_histograms()[type].name() != NULL);
+  DCHECK_NOT_NULL(isolate->heap_histograms()[type].name());
   isolate->heap_histograms()[type].increment_number(1);
   isolate->heap_histograms()[type].increment_bytes(obj->Size());
@@ -2624,7 +2663,7 @@ void NewSpace::ClearHistograms() {
 void NewSpace::CollectStatistics() {
   ClearHistograms();
   SemiSpaceIterator it(this);
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
+  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next())
     RecordAllocation(obj);
 }
@@ -2784,9 +2823,9 @@ void FreeListCategory::Free(FreeSpace* free_space, size_t size_in_bytes,
 void FreeListCategory::RepairFreeList(Heap* heap) {
   FreeSpace* n = top();
-  while (n != NULL) {
+  while (n != nullptr) {
     Map** map_location = reinterpret_cast<Map**>(n->address());
-    if (*map_location == NULL) {
+    if (*map_location == nullptr) {
       *map_location = heap->free_space_map();
     } else {
       DCHECK(*map_location == heap->free_space_map());
@@ -3063,7 +3102,7 @@ void FreeList::PrintCategories(FreeListCategoryType type) {
 size_t FreeListCategory::SumFreeList() {
   size_t sum = 0;
   FreeSpace* cur = top();
-  while (cur != NULL) {
+  while (cur != nullptr) {
     DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
     sum += cur->relaxed_read_size();
     cur = cur->next();
@@ -3074,7 +3113,7 @@ size_t FreeListCategory::SumFreeList() {
 int FreeListCategory::FreeListLength() {
   int length = 0;
   FreeSpace* cur = top();
-  while (cur != NULL) {
+  while (cur != nullptr) {
     length++;
     cur = cur->next();
     if (length == kVeryLongFreeList) return length;
@@ -3125,10 +3164,9 @@ size_t PagedSpace::SizeOfObjects() {
   return Size() - (limit() - top());
 }

-
 // After we have booted, we have created a map which represents free space
 // on the heap.  If there was already a free list then the elements on it
-// were created with the wrong FreeSpaceMap (normally NULL), so we need to
+// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
 // fix them.
 void PagedSpace::RepairFreeListsAfterDeserialization() {
   free_list_.RepairLists(heap());
@@ -3163,8 +3201,9 @@ bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {

 bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
-  if (collector->sweeping_in_progress()) {
-    collector->SweepAndRefill(this);
+  if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
+    collector->sweeper()->ParallelSweepSpace(identity(), 0);
+    RefillFreeList();
     return free_list_.Allocate(size_in_bytes);
   }
   return false;
@@ -3190,7 +3229,7 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
   // Sweeping is still in progress.
   if (collector->sweeping_in_progress()) {
     if (FLAG_concurrent_sweeping && !is_local() &&
-        !collector->sweeper().AreSweeperTasksRunning()) {
+        !collector->sweeper()->AreSweeperTasksRunning()) {
       collector->EnsureSweepingCompleted();
     }
@@ -3201,15 +3240,8 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
     // Retry the free list allocation.
     if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;

-    if (locked_page_ != nullptr) {
-      DCHECK_EQ(locked_page_->owner()->identity(), identity());
-      collector->sweeper().ParallelSweepPage(locked_page_, identity());
-      locked_page_ = nullptr;
-      if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
-    }
-
     // If sweeping is still in progress try to sweep pages.
-    int max_freed = collector->sweeper().ParallelSweepSpace(
+    int max_freed = collector->sweeper()->ParallelSweepSpace(
         identity(), size_in_bytes, kMaxPagesToSweep);
     RefillFreeList();
     if (max_freed >= size_in_bytes) {
@@ -3248,7 +3280,7 @@ void PagedSpace::ReportStatistics() {
   heap()->mark_compact_collector()->EnsureSweepingCompleted();
   ClearHistograms(heap()->isolate());
   HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
+  for (HeapObject* obj = obj_it.Next(); obj != nullptr; obj = obj_it.Next())
     CollectHistogramInfo(obj);
   ReportHistogram(heap()->isolate(), true);
 }
@@ -3293,7 +3325,7 @@ LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {

 HeapObject* LargeObjectIterator::Next() {
-  if (current_ == NULL) return NULL;
+  if (current_ == nullptr) return nullptr;

   HeapObject* object = current_->GetObject();
   current_ = current_->next_page();
@@ -3319,7 +3351,7 @@ bool LargeObjectSpace::SetUp() {
 }

 void LargeObjectSpace::TearDown() {
-  while (first_page_ != NULL) {
+  while (first_page_ != nullptr) {
     LargePage* page = first_page_;
     first_page_ = first_page_->next_page();
     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
@@ -3340,7 +3372,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,

   LargePage* page = heap()->memory_allocator()->AllocateLargePage(
       object_size, this, executable);
-  if (page == NULL) return AllocationResult::Retry(identity());
+  if (page == nullptr) return AllocationResult::Retry(identity());

   DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
   size_ += static_cast<int>(page->size());
@@ -3388,7 +3420,7 @@ size_t LargeObjectSpace::CommittedPhysicalMemory() {
 // GC support
 Object* LargeObjectSpace::FindObject(Address a) {
   LargePage* page = FindPage(a);
-  if (page != NULL) {
+  if (page != nullptr) {
     return page->GetObject();
   }
   return Smi::kZero;  // Signaling not found.
@@ -3417,7 +3449,7 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
   IncrementalMarking::NonAtomicMarkingState* marking_state =
       heap()->incremental_marking()->non_atomic_marking_state();
   LargeObjectIterator it(this);
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
     if (marking_state->IsBlackOrGrey(obj)) {
       Marking::MarkWhite(marking_state->MarkBitFrom(obj));
       MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
@@ -3523,7 +3555,7 @@ std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
 // We do not assume that the large object iterator works, because it depends
 // on the invariants we are checking during verification.
 void LargeObjectSpace::Verify() {
-  for (LargePage* chunk = first_page_; chunk != NULL;
+  for (LargePage* chunk = first_page_; chunk != nullptr;
        chunk = chunk->next_page()) {
     // Each chunk contains an object that starts at the large object page's
     // object area start.
@@ -3588,7 +3620,7 @@ void LargeObjectSpace::Verify() {
 void LargeObjectSpace::Print() {
   OFStream os(stdout);
   LargeObjectIterator it(this);
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
     obj->Print(os);
   }
 }
@@ -3599,7 +3631,7 @@ void LargeObjectSpace::ReportStatistics() {
   int num_objects = 0;
   ClearHistograms(heap()->isolate());
   LargeObjectIterator it(this);
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
     num_objects++;
     CollectHistogramInfo(obj);
   }
@@ -3619,7 +3651,7 @@ void Page::Print() {
   printf(" --------------------------------------\n");
   HeapObjectIterator objects(this);
   unsigned mark_size = 0;
-  for (HeapObject* object = objects.Next(); object != NULL;
+  for (HeapObject* object = objects.Next(); object != nullptr;
        object = objects.Next()) {
     bool is_marked =
         heap()->incremental_marking()->marking_state()->IsBlackOrGrey(object);
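
A recurring pattern in this diff is the replacement of the old split primitives (CommitRegion, UncommitRegion, Guard, ReleaseRegion) with a single base::OS::SetPermissions call parameterized by a MemoryPermission value. The following is a minimal POSIX sketch of that idea, not V8's actual implementation: the enum mirrors the MemoryPermission names used in the diff, but ToProt and this standalone SetPermissions helper are invented here for illustration.

#include <sys/mman.h>
#include <cstddef>

enum class MemoryPermission {
  kNoAccess,          // uncommit / guard
  kReadWrite,         // commit for data or for writing code
  kReadExecute,       // protected code
  kReadWriteExecute   // legacy RWX mode
};

static int ToProt(MemoryPermission p) {
  switch (p) {
    case MemoryPermission::kNoAccess:         return PROT_NONE;
    case MemoryPermission::kReadWrite:        return PROT_READ | PROT_WRITE;
    case MemoryPermission::kReadExecute:      return PROT_READ | PROT_EXEC;
    case MemoryPermission::kReadWriteExecute:
      return PROT_READ | PROT_WRITE | PROT_EXEC;
  }
  return PROT_NONE;
}

// One primitive covers committing (kReadWrite), uncommitting and guarding
// (kNoAccess), and write-protecting code (kReadExecute). `address` and `size`
// must be page-aligned, as mprotect requires.
bool SetPermissions(void* address, size_t size, MemoryPermission access) {
  return mprotect(address, size, ToProt(access)) == 0;
}

Collapsing commit, uncommit, and guard into one permission-change primitive is what lets the hunks above express guard pages as plain kNoAccess regions instead of a dedicated Guard() call.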
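The new MemoryChunk::SetReadAndWritable / SetReadAndExecutable pair in this diff implements write-protected code memory with a nesting counter: permissions only actually flip on the 0 to 1 and 1 to 0 transitions of write_unprotect_counter_, so overlapping modification scopes never re-protect a page another scope is still writing to. Below is a standalone sketch of the same counting scheme, reusing the SetPermissions helper from the previous sketch; the CodePageProtection class and its member names are illustrative, not V8 API.

#include <mutex>
#include <cstddef>
#include <cstdint>

class CodePageProtection {
 public:
  CodePageProtection(void* area_start, size_t area_size)
      : area_start_(area_start), area_size_(area_size) {}

  // Mirrors MemoryChunk::SetReadAndWritable(): first writer drops execute.
  void SetReadAndWritable() {
    // Changing the counter and the protection must be atomic, hence the lock.
    std::lock_guard<std::mutex> guard(mutex_);
    if (++write_unprotect_counter_ == 1) {
      SetPermissions(area_start_, area_size_, MemoryPermission::kReadWrite);
    }
  }

  // Mirrors MemoryChunk::SetReadAndExecutable(): last writer re-protects.
  void SetReadAndExecutable() {
    std::lock_guard<std::mutex> guard(mutex_);
    // Corner case from the diff: a page added while a modification scope is
    // already open may see a balancing call with a zero counter.
    if (write_unprotect_counter_ == 0) return;
    if (--write_unprotect_counter_ == 0) {
      SetPermissions(area_start_, area_size_, MemoryPermission::kReadExecute);
    }
  }

 private:
  std::mutex mutex_;  // plays the role of page_protection_change_mutex_
  uintptr_t write_unprotect_counter_ = 0;
  void* area_start_;
  size_t area_size_;
};

This is why MemoryChunk::Initialize seeds write_unprotect_counter_ from the heap's current modification-scope depth: a page created inside an open scope must start unprotected, and the counter balances out when the scope closes.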
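The rewritten CommitExecutableMemory makes the executable-chunk layout explicit: a non-executable header, a pre-code guard page, the code area, and a post-code guard page at the very end of the reservation. Here is a simplified mmap-based sketch of that permission sequence, assuming one page each for the header and the guards (V8 instead computes these offsets via CodePageGuardStartOffset and CodePageAreaStartOffset); the function name is invented for illustration.

#include <sys/mman.h>
#include <cstddef>

// Layout: [ header RW | guard --- | code area RW->RX | guard --- ]
// `reserved_size` must be page-aligned and at least 4 * page_size.
void* CommitExecutableChunk(size_t reserved_size, size_t page_size) {
  // Reserve the whole range inaccessible, like an untouched VirtualMemory;
  // both guard pages simply stay in this PROT_NONE state.
  void* start = mmap(nullptr, reserved_size, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (start == MAP_FAILED) return nullptr;
  char* base = static_cast<char*>(start);
  size_t code_size = reserved_size - 3 * page_size;  // header + two guards
  // Commit the non-executable header, then the code body. The body starts
  // writable and is later flipped to read+execute by the counter scheme.
  if (mprotect(base, page_size, PROT_READ | PROT_WRITE) == 0 &&
      mprotect(base + 2 * page_size, code_size, PROT_READ | PROT_WRITE) == 0) {
    return start;
  }
  munmap(start, reserved_size);  // Roll back on failure, as the diff does.
  return nullptr;
}

Keeping the guards as never-committed kNoAccess pages means a runaway write off either end of the code area faults immediately instead of corrupting the adjacent chunk.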