author    Michaël Zasso <targos@protonmail.com>  2021-08-29 14:20:49 +0200
committer Michaël Zasso <targos@protonmail.com>  2021-08-30 21:02:51 +0200
commit    50930a0fa08297d0ce7e67fa6594fe47937b99ff (patch)
tree      96bd30c0c63790bc1992a2f241a3df94d563b283 /deps/v8/src/heap/cppgc
parent    b63e449b2eade1111b52f6559669400a4e855903 (diff)
download  node-new-50930a0fa08297d0ce7e67fa6594fe47937b99ff.tar.gz
deps: update V8 to 9.3.345.16
PR-URL: https://github.com/nodejs/node/pull/39469
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/src/heap/cppgc')
-rw-r--r--  deps/v8/src/heap/cppgc/allocation.cc                 |   3
-rw-r--r--  deps/v8/src/heap/cppgc/compactor.cc                  |  13
-rw-r--r--  deps/v8/src/heap/cppgc/concurrent-marker.cc          |  14
-rw-r--r--  deps/v8/src/heap/cppgc/explicit-management.cc        |  16
-rw-r--r--  deps/v8/src/heap/cppgc/free-list.cc                  |   5
-rw-r--r--  deps/v8/src/heap/cppgc/free-list.h                   |   4
-rw-r--r--  deps/v8/src/heap/cppgc/garbage-collector.h           |   2
-rw-r--r--  deps/v8/src/heap/cppgc/heap-base.cc                  |  24
-rw-r--r--  deps/v8/src/heap/cppgc/heap-base.h                   |  15
-rw-r--r--  deps/v8/src/heap/cppgc/heap-page.cc                  |  84
-rw-r--r--  deps/v8/src/heap/cppgc/heap-page.h                   |  30
-rw-r--r--  deps/v8/src/heap/cppgc/heap-space.h                  |  20
-rw-r--r--  deps/v8/src/heap/cppgc/heap-statistics-collector.cc  | 124
-rw-r--r--  deps/v8/src/heap/cppgc/heap-statistics-collector.h   |  19
-rw-r--r--  deps/v8/src/heap/cppgc/heap-visitor.h                |  56
-rw-r--r--  deps/v8/src/heap/cppgc/heap.cc                       |  17
-rw-r--r--  deps/v8/src/heap/cppgc/marker.cc                     |  11
-rw-r--r--  deps/v8/src/heap/cppgc/marker.h                      |  17
-rw-r--r--  deps/v8/src/heap/cppgc/marking-state.h               |  19
-rw-r--r--  deps/v8/src/heap/cppgc/marking-verifier.cc           |  18
-rw-r--r--  deps/v8/src/heap/cppgc/marking-verifier.h            |   2
-rw-r--r--  deps/v8/src/heap/cppgc/marking-worklists.cc          |   1
-rw-r--r--  deps/v8/src/heap/cppgc/marking-worklists.h           |   6
-rw-r--r--  deps/v8/src/heap/cppgc/memory.h                      |  73
-rw-r--r--  deps/v8/src/heap/cppgc/metric-recorder.h             |  32
-rw-r--r--  deps/v8/src/heap/cppgc/object-allocator.cc           |  63
-rw-r--r--  deps/v8/src/heap/cppgc/object-allocator.h            |  20
-rw-r--r--  deps/v8/src/heap/cppgc/object-poisoner.h             |  13
-rw-r--r--  deps/v8/src/heap/cppgc/pointer-policies.cc           |  36
-rw-r--r--  deps/v8/src/heap/cppgc/prefinalizer-handler.cc       |   4
-rw-r--r--  deps/v8/src/heap/cppgc/prefinalizer-handler.h        |   3
-rw-r--r--  deps/v8/src/heap/cppgc/process-heap.cc               |   5
-rw-r--r--  deps/v8/src/heap/cppgc/stats-collector.cc            |  88
-rw-r--r--  deps/v8/src/heap/cppgc/stats-collector.h             |  19
-rw-r--r--  deps/v8/src/heap/cppgc/sweeper.cc                    | 285
-rw-r--r--  deps/v8/src/heap/cppgc/sweeper.h                     |   7
-rw-r--r--  deps/v8/src/heap/cppgc/visitor.cc                    |   2
-rw-r--r--  deps/v8/src/heap/cppgc/write-barrier.cc              |  25
38 files changed, 824 insertions(+), 371 deletions(-)
diff --git a/deps/v8/src/heap/cppgc/allocation.cc b/deps/v8/src/heap/cppgc/allocation.cc
index 149212736b..22f4703982 100644
--- a/deps/v8/src/heap/cppgc/allocation.cc
+++ b/deps/v8/src/heap/cppgc/allocation.cc
@@ -4,8 +4,9 @@
#include "include/cppgc/allocation.h"
-#include "src/base/logging.h"
+#include "include/cppgc/internal/api-constants.h"
#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/object-allocator.h"
namespace cppgc {
diff --git a/deps/v8/src/heap/cppgc/compactor.cc b/deps/v8/src/heap/cppgc/compactor.cc
index 5f68705014..91f30445a3 100644
--- a/deps/v8/src/heap/cppgc/compactor.cc
+++ b/deps/v8/src/heap/cppgc/compactor.cc
@@ -103,7 +103,7 @@ void MovableReferences::AddOrFilter(MovableReference* slot) {
// The following cases are not compacted and do not require recording:
// - Compactable object on large pages.
// - Compactable object on non-compactable spaces.
- if (value_page->is_large() || !value_page->space()->is_compactable()) return;
+ if (value_page->is_large() || !value_page->space().is_compactable()) return;
// Slots must reside in and values must point to live objects at this
// point. |value| usually points to a separate object but can also point
@@ -124,7 +124,7 @@ void MovableReferences::AddOrFilter(MovableReference* slot) {
movable_references_.emplace(value, slot);
// Check whether the slot itself resides on a page that is compacted.
- if (V8_LIKELY(!slot_page->space()->is_compactable())) return;
+ if (V8_LIKELY(!slot_page->space().is_compactable())) return;
CHECK_EQ(interior_movable_references_.end(),
interior_movable_references_.find(slot));
@@ -227,7 +227,7 @@ class CompactionState final {
: space_(space), movable_references_(movable_references) {}
void AddPage(NormalPage* page) {
- DCHECK_EQ(space_, page->space());
+ DCHECK_EQ(space_, &page->space());
// If not the first page, add |page| onto the available pages chain.
if (!current_page_)
current_page_ = page;
@@ -296,7 +296,7 @@ class CompactionState final {
private:
void ReturnCurrentPageToSpace() {
- DCHECK_EQ(space_, current_page_->space());
+ DCHECK_EQ(space_, &current_page_->space());
space_->AddPage(current_page_);
if (used_bytes_in_current_page_ != current_page_->PayloadSize()) {
// Put the remainder of the page onto the free list.
@@ -362,6 +362,9 @@ void CompactPage(NormalPage* page, CompactionState& compaction_state) {
#if !defined(CPPGC_YOUNG_GENERATION)
header->Unmark();
#endif
+ // Potentially unpoison the live object as well, since it is the source of
+ // the copy.
+ ASAN_UNPOISON_MEMORY_REGION(header->ObjectStart(), header->ObjectSize());
compaction_state.RelocateObject(page, header_address, size);
header_address += size;
}
@@ -374,7 +377,7 @@ void CompactSpace(NormalPageSpace* space,
using Pages = NormalPageSpace::Pages;
#ifdef V8_USE_ADDRESS_SANITIZER
- UnmarkedObjectsPoisoner().Traverse(space);
+ UnmarkedObjectsPoisoner().Traverse(*space);
#endif // V8_USE_ADDRESS_SANITIZER
DCHECK(space->is_compactable());
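The space() calls above reflect a pattern that recurs through this whole commit: getters such as BasePage::space() and BasePage::heap() that can never return null are converted from pointers to references, and former pointer comparisons become address comparisons, as in DCHECK_EQ(space_, &page->space()). A minimal standalone sketch of the pattern; Owner and Widget are hypothetical stand-ins, not cppgc types:

// Sketch of the pointer-to-reference getter migration used in this commit.
class Owner {};

class Widget {
 public:
  explicit Widget(Owner& owner) : owner_(owner) {}

  // Before: `Owner* owner() const;` forced callers to handle an impossible
  // null. After: the reference return encodes non-nullability in the type.
  Owner& owner() const { return owner_; }

 private:
  Owner& owner_;  // Bound at construction and never reseated.
};

// Former pointer comparisons become address comparisons, mirroring
// DCHECK_EQ(space_, &page->space()) above.
bool SameOwner(const Widget& a, const Widget& b) {
  return &a.owner() == &b.owner();
}

The reference member also documents that the association is fixed at construction, which is why set_space() is dropped from BasePage further down in heap-page.h.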
diff --git a/deps/v8/src/heap/cppgc/concurrent-marker.cc b/deps/v8/src/heap/cppgc/concurrent-marker.cc
index 6763515f28..04d2c65aaa 100644
--- a/deps/v8/src/heap/cppgc/concurrent-marker.cc
+++ b/deps/v8/src/heap/cppgc/concurrent-marker.cc
@@ -149,6 +149,20 @@ void ConcurrentMarkingTask::ProcessWorklists(
return;
}
+ if (!DrainWorklistWithYielding(
+ job_delegate, concurrent_marking_state,
+ concurrent_marker_.incremental_marking_schedule(),
+ concurrent_marking_state.retrace_marked_objects_worklist(),
+ [&concurrent_marking_visitor](HeapObjectHeader* header) {
+ BasePage::FromPayload(header)->SynchronizedLoad();
+ // Retracing does not increment marked bytes as the object has
+ // already been processed before.
+ DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
+ concurrent_marking_visitor, *header);
+ })) {
+ return;
+ }
+
{
StatsCollector::DisabledConcurrentScope stats_scope(
concurrent_marker_.heap().stats_collector(),
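The new worklist drained here holds already-marked objects that must be traced again (see the Steele-style barrier added in marker.h below); retracing deliberately skips marked-byte accounting, since those bytes were counted on the first trace. A simplified, self-contained sketch of such a drain-with-yielding loop, with stand-in types rather than V8's heap::base::Worklist and v8::JobDelegate:

#include <deque>
#include <functional>

struct JobDelegate {
  bool ShouldYield() const { return false; }  // Stub: never asks to yield.
};

template <typename T>
bool DrainWithYielding(JobDelegate& delegate, std::deque<T>& worklist,
                       const std::function<void(T)>& process) {
  while (!worklist.empty()) {
    if (delegate.ShouldYield()) return false;  // Bail out; caller reposts.
    T item = worklist.front();
    worklist.pop_front();
    // For the retrace worklist: trace the object again, but add nothing to
    // the marked-bytes counter, since the first trace already did.
    process(item);
  }
  return true;  // Fully drained.
}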
diff --git a/deps/v8/src/heap/cppgc/explicit-management.cc b/deps/v8/src/heap/cppgc/explicit-management.cc
index 6e327339aa..ccc5840af4 100644
--- a/deps/v8/src/heap/cppgc/explicit-management.cc
+++ b/deps/v8/src/heap/cppgc/explicit-management.cc
@@ -38,14 +38,14 @@ void FreeUnreferencedObject(HeapHandle& heap_handle, void* object) {
// BasePage is okay for regular and large objects.
BasePage* base_page = BasePage::FromPayload(object);
if (base_page->is_large()) { // Large object.
- base_page->space()->RemovePage(base_page);
- base_page->heap()->stats_collector()->NotifyExplicitFree(
+ base_page->space().RemovePage(base_page);
+ base_page->heap().stats_collector()->NotifyExplicitFree(
LargePage::From(base_page)->PayloadSize());
LargePage::Destroy(LargePage::From(base_page));
} else { // Regular object.
const size_t header_size = header.AllocatedSize();
auto* normal_page = NormalPage::From(base_page);
- auto& normal_space = *static_cast<NormalPageSpace*>(base_page->space());
+ auto& normal_space = *static_cast<NormalPageSpace*>(&base_page->space());
auto& lab = normal_space.linear_allocation_buffer();
ConstAddress payload_end = header.ObjectEnd();
SetMemoryInaccessible(&header, header_size);
@@ -53,7 +53,7 @@ void FreeUnreferencedObject(HeapHandle& heap_handle, void* object) {
lab.Set(reinterpret_cast<Address>(&header), lab.size() + header_size);
normal_page->object_start_bitmap().ClearBit(lab.start());
} else { // Returning to free list.
- base_page->heap()->stats_collector()->NotifyExplicitFree(header_size);
+ base_page->heap().stats_collector()->NotifyExplicitFree(header_size);
normal_space.free_list().Add({&header, header_size});
// No need to update the bitmap as the same bit is reused for the free
// list entry.
@@ -69,7 +69,7 @@ bool Grow(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
DCHECK_GE(size_delta, kAllocationGranularity);
DCHECK(!base_page.is_large());
- auto& normal_space = *static_cast<NormalPageSpace*>(base_page.space());
+ auto& normal_space = *static_cast<NormalPageSpace*>(&base_page.space());
auto& lab = normal_space.linear_allocation_buffer();
if (lab.start() == header.ObjectEnd() && lab.size() >= size_delta) {
// LABs are considered used memory which means that no allocated size
@@ -88,7 +88,7 @@ bool Shrink(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
DCHECK_GE(size_delta, kAllocationGranularity);
DCHECK(!base_page.is_large());
- auto& normal_space = *static_cast<NormalPageSpace*>(base_page.space());
+ auto& normal_space = *static_cast<NormalPageSpace*>(&base_page.space());
auto& lab = normal_space.linear_allocation_buffer();
Address free_start = header.ObjectEnd() - size_delta;
if (lab.start() == header.ObjectEnd()) {
@@ -104,7 +104,7 @@ bool Shrink(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
// the smallest size class.
if (size_delta >= ObjectAllocator::kSmallestSpaceSize) {
SetMemoryInaccessible(free_start, size_delta);
- base_page.heap()->stats_collector()->NotifyExplicitFree(size_delta);
+ base_page.heap().stats_collector()->NotifyExplicitFree(size_delta);
normal_space.free_list().Add({free_start, size_delta});
NormalPage::From(&base_page)->object_start_bitmap().SetBit(free_start);
header.SetAllocatedSize(new_size);
@@ -121,7 +121,7 @@ bool Resize(void* object, size_t new_object_size) {
// BasePage is okay for regular and large objects.
BasePage* base_page = BasePage::FromPayload(object);
- if (InGC(*base_page->heap())) {
+ if (InGC(base_page->heap())) {
return false;
}
diff --git a/deps/v8/src/heap/cppgc/free-list.cc b/deps/v8/src/heap/cppgc/free-list.cc
index 600e15312c..da3aa3410e 100644
--- a/deps/v8/src/heap/cppgc/free-list.cc
+++ b/deps/v8/src/heap/cppgc/free-list.cc
@@ -60,7 +60,7 @@ FreeList& FreeList::operator=(FreeList&& other) V8_NOEXCEPT {
return *this;
}
-void FreeList::Add(FreeList::Block block) {
+Address FreeList::Add(FreeList::Block block) {
const size_t size = block.size;
DCHECK_GT(kPageSize, size);
DCHECK_LE(sizeof(HeapObjectHeader), size);
@@ -73,7 +73,7 @@ void FreeList::Add(FreeList::Block block) {
// zeroing it out.
ASAN_UNPOISON_MEMORY_REGION(block.address, sizeof(HeapObjectHeader));
new (block.address) HeapObjectHeader(size, kFreeListGCInfoIndex);
- return;
+ return reinterpret_cast<Address>(block.address) + block.size;
}
// Make sure the freelist header is writable. SET_MEMORY_ACCESSIBLE is not
@@ -86,6 +86,7 @@ void FreeList::Add(FreeList::Block block) {
if (!entry->Next()) {
free_list_tails_[index] = entry;
}
+ return reinterpret_cast<Address>(block.address) + sizeof(Entry);
}
void FreeList::Append(FreeList&& other) {
diff --git a/deps/v8/src/heap/cppgc/free-list.h b/deps/v8/src/heap/cppgc/free-list.h
index 184030a9e8..0a7dae44fd 100644
--- a/deps/v8/src/heap/cppgc/free-list.h
+++ b/deps/v8/src/heap/cppgc/free-list.h
@@ -34,7 +34,9 @@ class V8_EXPORT_PRIVATE FreeList {
Block Allocate(size_t);
// Adds block to the freelist. The minimal block size is two words.
- void Add(Block);
+ // Returns the start of the free list payload that will not be accessed by
+ // the free list itself.
+ Address Add(Block);
// Append other freelist into this.
void Append(FreeList&&);
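Add() returning the first address past the free-list metadata supports the new FreeMemoryHandling::kDiscardWherePossible mode introduced below: the sweeper may discard the remainder of a free block because the free list guarantees never to touch it. A self-contained sketch of how a caller might use the return value; the stub FreeList, DiscardRange(), and the metadata size are illustrative, not cppgc APIs:

#include <cstddef>
#include <cstdint>

using Address = uint8_t*;

// Stub mirroring the new Add() contract; the real class lives in
// src/heap/cppgc/free-list.h.
struct FreeList {
  struct Block {
    void* address;
    size_t size;
  };
  Address Add(Block block) {
    // The real code writes a HeapObjectHeader or free-list Entry at
    // block.address and returns the first byte it will never touch again.
    const size_t metadata_size = 2 * sizeof(void*);  // Illustrative only.
    return static_cast<Address>(block.address) + metadata_size;
  }
};

// Hypothetical stand-in for OS-level discarding (e.g. madvise).
void DiscardRange(Address begin, Address end) {
  (void)begin;
  (void)end;
}

void ReturnBlock(FreeList& free_list, Address block, size_t size) {
  Address unused_begin = free_list.Add({block, size});
  // [unused_begin, block + size) holds no free-list metadata, so it is
  // safe to hand back to the OS.
  DiscardRange(unused_begin, block + size);
}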
diff --git a/deps/v8/src/heap/cppgc/garbage-collector.h b/deps/v8/src/heap/cppgc/garbage-collector.h
index 7abf17df5f..b8e52452ee 100644
--- a/deps/v8/src/heap/cppgc/garbage-collector.h
+++ b/deps/v8/src/heap/cppgc/garbage-collector.h
@@ -20,6 +20,7 @@ class GarbageCollector {
using StackState = cppgc::Heap::StackState;
using MarkingType = Marker::MarkingConfig::MarkingType;
using SweepingType = Sweeper::SweepingConfig::SweepingType;
+ using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
using IsForcedGC = Marker::MarkingConfig::IsForcedGC;
static constexpr Config ConservativeAtomicConfig() {
@@ -58,6 +59,7 @@ class GarbageCollector {
StackState stack_state = StackState::kMayContainHeapPointers;
MarkingType marking_type = MarkingType::kAtomic;
SweepingType sweeping_type = SweepingType::kAtomic;
+ FreeMemoryHandling free_memory_handling = FreeMemoryHandling::kDoNotDiscard;
IsForcedGC is_forced_gc = IsForcedGC::kNotForced;
};
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index c7664f09c6..c89c2842f9 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -29,18 +29,18 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
friend class HeapVisitor<ObjectSizeCounter>;
public:
- size_t GetSize(RawHeap* heap) {
+ size_t GetSize(RawHeap& heap) {
Traverse(heap);
return accumulated_size_;
}
private:
- static size_t ObjectSize(const HeapObjectHeader* header) {
- return ObjectView(*header).Size();
+ static size_t ObjectSize(const HeapObjectHeader& header) {
+ return ObjectView(header).Size();
}
- bool VisitHeapObjectHeader(HeapObjectHeader* header) {
- if (header->IsFree()) return true;
+ bool VisitHeapObjectHeader(HeapObjectHeader& header) {
+ if (header.IsFree()) return true;
accumulated_size_ += ObjectSize(header);
return true;
}
@@ -53,8 +53,7 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
HeapBase::HeapBase(
std::shared_ptr<cppgc::Platform> platform,
const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
- StackSupport stack_support,
- std::unique_ptr<MetricRecorder> histogram_recorder)
+ StackSupport stack_support)
: raw_heap_(this, custom_spaces),
platform_(std::move(platform)),
#if defined(LEAK_SANITIZER)
@@ -67,8 +66,7 @@ HeapBase::HeapBase(
#else // !CPPGC_CAGED_HEAP
page_backend_(std::make_unique<PageBackend>(page_allocator())),
#endif // !CPPGC_CAGED_HEAP
- stats_collector_(std::make_unique<StatsCollector>(
- std::move(histogram_recorder), platform_.get())),
+ stats_collector_(std::make_unique<StatsCollector>(platform_.get())),
stack_(std::make_unique<heap::base::Stack>(
v8::base::Stack::GetStackStart())),
prefinalizer_handler_(std::make_unique<PreFinalizerHandler>(*this)),
@@ -92,7 +90,7 @@ PageAllocator* HeapBase::page_allocator() const {
}
size_t HeapBase::ObjectPayloadSize() const {
- return ObjectSizeCounter().GetSize(const_cast<RawHeap*>(&raw_heap()));
+ return ObjectSizeCounter().GetSize(const_cast<RawHeap&>(raw_heap()));
}
void HeapBase::AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded() {
@@ -127,8 +125,8 @@ void HeapBase::Terminate() {
stats_collector()->NotifyMarkingStarted(
GarbageCollector::Config::CollectionType::kMajor,
GarbageCollector::Config::IsForcedGC::kForced);
- stats_collector()->NotifyMarkingCompleted(0);
object_allocator().ResetLinearAllocationBuffers();
+ stats_collector()->NotifyMarkingCompleted(0);
ExecutePreFinalizers();
sweeper().Start(
{Sweeper::SweepingConfig::SweepingType::kAtomic,
@@ -149,14 +147,16 @@ HeapStatistics HeapBase::CollectStatistics(
HeapStatistics::DetailLevel detail_level) {
if (detail_level == HeapStatistics::DetailLevel::kBrief) {
return {stats_collector_->allocated_memory_size(),
+ stats_collector_->resident_memory_size(),
stats_collector_->allocated_object_size(),
HeapStatistics::DetailLevel::kBrief,
+ {},
{}};
}
sweeper_.FinishIfRunning();
object_allocator_.ResetLinearAllocationBuffers();
- return HeapStatisticsCollector().CollectStatistics(this);
+ return HeapStatisticsCollector().CollectDetailedStatistics(this);
}
} // namespace internal
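The brief statistics path now reports resident_memory_size() alongside allocated memory, matching the committed/resident/used split used throughout the detailed statistics: committed covers all reserved page memory, resident subtracts discarded memory, and used covers live objects. A small sketch of the invariant for a normal page, assuming resident = committed - discarded as in VisitNormalPage() below; the page size constant is illustrative:

#include <cassert>
#include <cstddef>

constexpr size_t kPageSize = 1 << 17;  // Illustrative value.

struct PageStats {
  size_t committed_size_bytes = 0;
  size_t resident_size_bytes = 0;
  size_t used_size_bytes = 0;
};

PageStats ForNormalPage(size_t discarded_bytes, size_t live_bytes) {
  PageStats stats;
  stats.committed_size_bytes = kPageSize;
  // Discarded memory is still committed but no longer resident.
  stats.resident_size_bytes = kPageSize - discarded_bytes;
  stats.used_size_bytes = live_bytes;
  assert(stats.resident_size_bytes <= stats.committed_size_bytes);
  assert(stats.used_size_bytes <= stats.resident_size_bytes);
  return stats;
}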
diff --git a/deps/v8/src/heap/cppgc/heap-base.h b/deps/v8/src/heap/cppgc/heap-base.h
index 8136541718..91f99b39cc 100644
--- a/deps/v8/src/heap/cppgc/heap-base.h
+++ b/deps/v8/src/heap/cppgc/heap-base.h
@@ -83,8 +83,7 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
HeapBase(std::shared_ptr<cppgc::Platform> platform,
const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
- StackSupport stack_support,
- std::unique_ptr<MetricRecorder> histogram_recorder);
+ StackSupport stack_support);
virtual ~HeapBase();
HeapBase(const HeapBase&) = delete;
@@ -114,6 +113,9 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
PreFinalizerHandler* prefinalizer_handler() {
return prefinalizer_handler_.get();
}
+ const PreFinalizerHandler* prefinalizer_handler() const {
+ return prefinalizer_handler_.get();
+ }
MarkerBase* marker() const { return marker_.get(); }
@@ -193,6 +195,10 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
virtual void FinalizeIncrementalGarbageCollectionForTesting(
EmbedderStackState) = 0;
+ void SetMetricRecorder(std::unique_ptr<MetricRecorder> histogram_recorder) {
+ stats_collector_->SetMetricRecorder(std::move(histogram_recorder));
+ }
+
protected:
// Used by the incremental scheduler to finalize a GC if supported.
virtual void FinalizeIncrementalGarbageCollectionIfNeeded(
@@ -213,13 +219,14 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
std::unique_ptr<v8::base::LsanPageAllocator> lsan_page_allocator_;
#endif // LEAK_SANITIZER
- HeapRegistry::Subscription heap_registry_subscription_{*this};
-
#if defined(CPPGC_CAGED_HEAP)
CagedHeap caged_heap_;
#endif // CPPGC_CAGED_HEAP
std::unique_ptr<PageBackend> page_backend_;
+ // HeapRegistry requires access to page_backend_.
+ HeapRegistry::Subscription heap_registry_subscription_{*this};
+
std::unique_ptr<StatsCollector> stats_collector_;
std::unique_ptr<heap::base::Stack> stack_;
std::unique_ptr<PreFinalizerHandler> prefinalizer_handler_;
diff --git a/deps/v8/src/heap/cppgc/heap-page.cc b/deps/v8/src/heap/cppgc/heap-page.cc
index f65b3fed9b..9d6fdaaf41 100644
--- a/deps/v8/src/heap/cppgc/heap-page.cc
+++ b/deps/v8/src/heap/cppgc/heap-page.cc
@@ -12,6 +12,7 @@
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/page-memory.h"
#include "src/heap/cppgc/raw-heap.h"
@@ -44,6 +45,13 @@ const BasePage* BasePage::FromInnerAddress(const HeapBase* heap,
// static
void BasePage::Destroy(BasePage* page) {
+ if (page->discarded_memory()) {
+ page->space()
+ .raw_heap()
+ ->heap()
+ ->stats_collector()
+ ->DecrementDiscardedMemory(page->discarded_memory());
+ }
if (page->is_large()) {
LargePage::Destroy(LargePage::From(page));
} else {
@@ -92,7 +100,7 @@ const HeapObjectHeader* BasePage::TryObjectHeaderFromInnerAddress(
return nullptr;
// Check that the space has no linear allocation buffer.
DCHECK(!NormalPageSpace::From(normal_page->space())
- ->linear_allocation_buffer()
+ .linear_allocation_buffer()
.size());
}
@@ -104,38 +112,54 @@ const HeapObjectHeader* BasePage::TryObjectHeaderFromInnerAddress(
return header;
}
-BasePage::BasePage(HeapBase* heap, BaseSpace* space, PageType type)
+BasePage::BasePage(HeapBase& heap, BaseSpace& space, PageType type)
: heap_(heap), space_(space), type_(type) {
DCHECK_EQ(0u, (reinterpret_cast<uintptr_t>(this) - kGuardPageSize) &
kPageOffsetMask);
- DCHECK_EQ(&heap_->raw_heap(), space_->raw_heap());
+ DCHECK_EQ(&heap_.raw_heap(), space_.raw_heap());
}
// static
-NormalPage* NormalPage::Create(PageBackend* page_backend,
- NormalPageSpace* space) {
- DCHECK_NOT_NULL(page_backend);
- DCHECK_NOT_NULL(space);
- void* memory = page_backend->AllocateNormalPageMemory(space->index());
- auto* normal_page = new (memory) NormalPage(space->raw_heap()->heap(), space);
+NormalPage* NormalPage::Create(PageBackend& page_backend,
+ NormalPageSpace& space) {
+ void* memory = page_backend.AllocateNormalPageMemory(space.index());
+ auto* normal_page = new (memory) NormalPage(*space.raw_heap()->heap(), space);
normal_page->SynchronizedStore();
- normal_page->heap()->stats_collector()->NotifyAllocatedMemory(kPageSize);
+ normal_page->heap().stats_collector()->NotifyAllocatedMemory(kPageSize);
+ // Memory is zero initialized as
+ // a) memory retrieved from the OS is zeroed;
+ // b) memory retrieved from the page pool was swept and is thus zeroed except
+ // for the first header, which will serve as a header again anyway.
+ //
+ // The following is a subset of SetMemoryInaccessible() to establish the
+ // invariant that memory is in the same state as it would be after sweeping.
+ // This allows newly allocated pages to go into the LAB and back into the
+ // free list.
+ Address begin = normal_page->PayloadStart() + sizeof(HeapObjectHeader);
+ const size_t size = normal_page->PayloadSize() - sizeof(HeapObjectHeader);
+#if defined(V8_USE_MEMORY_SANITIZER)
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(begin, size);
+#elif defined(V8_USE_ADDRESS_SANITIZER)
+ ASAN_POISON_MEMORY_REGION(begin, size);
+#elif DEBUG
+ cppgc::internal::ZapMemory(begin, size);
+#endif // Release builds.
+ CheckMemoryIsInaccessible(begin, size);
return normal_page;
}
// static
void NormalPage::Destroy(NormalPage* page) {
DCHECK(page);
- BaseSpace* space = page->space();
- DCHECK_EQ(space->end(), std::find(space->begin(), space->end(), page));
+ const BaseSpace& space = page->space();
+ DCHECK_EQ(space.end(), std::find(space.begin(), space.end(), page));
page->~NormalPage();
- PageBackend* backend = page->heap()->page_backend();
- page->heap()->stats_collector()->NotifyFreedMemory(kPageSize);
- backend->FreeNormalPageMemory(space->index(),
- reinterpret_cast<Address>(page));
+ PageBackend* backend = page->heap().page_backend();
+ page->heap().stats_collector()->NotifyFreedMemory(kPageSize);
+ backend->FreeNormalPageMemory(space.index(), reinterpret_cast<Address>(page));
}
-NormalPage::NormalPage(HeapBase* heap, BaseSpace* space)
+NormalPage::NormalPage(HeapBase& heap, BaseSpace& space)
: BasePage(heap, space, PageType::kNormal),
object_start_bitmap_(PayloadStart()) {
DCHECK_LT(kLargeObjectSizeThreshold,
@@ -145,13 +169,13 @@ NormalPage::NormalPage(HeapBase* heap, BaseSpace* space)
NormalPage::~NormalPage() = default;
NormalPage::iterator NormalPage::begin() {
- const auto& lab = NormalPageSpace::From(space())->linear_allocation_buffer();
+ const auto& lab = NormalPageSpace::From(space()).linear_allocation_buffer();
return iterator(reinterpret_cast<HeapObjectHeader*>(PayloadStart()),
lab.start(), lab.size());
}
NormalPage::const_iterator NormalPage::begin() const {
- const auto& lab = NormalPageSpace::From(space())->linear_allocation_buffer();
+ const auto& lab = NormalPageSpace::From(space()).linear_allocation_buffer();
return const_iterator(
reinterpret_cast<const HeapObjectHeader*>(PayloadStart()), lab.start(),
lab.size());
@@ -179,7 +203,7 @@ size_t NormalPage::PayloadSize() {
return kPageSize - 2 * kGuardPageSize - header_size;
}
-LargePage::LargePage(HeapBase* heap, BaseSpace* space, size_t size)
+LargePage::LargePage(HeapBase& heap, BaseSpace& space, size_t size)
: BasePage(heap, space, PageType::kLarge), payload_size_(size) {}
LargePage::~LargePage() = default;
@@ -192,19 +216,17 @@ size_t LargePage::AllocationSize(size_t payload_size) {
}
// static
-LargePage* LargePage::Create(PageBackend* page_backend, LargePageSpace* space,
+LargePage* LargePage::Create(PageBackend& page_backend, LargePageSpace& space,
size_t size) {
- DCHECK_NOT_NULL(page_backend);
- DCHECK_NOT_NULL(space);
DCHECK_LE(kLargeObjectSizeThreshold, size);
const size_t allocation_size = AllocationSize(size);
- auto* heap = space->raw_heap()->heap();
- void* memory = page_backend->AllocateLargePageMemory(allocation_size);
- LargePage* page = new (memory) LargePage(heap, space, size);
+ auto* heap = space.raw_heap()->heap();
+ void* memory = page_backend.AllocateLargePageMemory(allocation_size);
+ LargePage* page = new (memory) LargePage(*heap, space, size);
page->SynchronizedStore();
- page->heap()->stats_collector()->NotifyAllocatedMemory(allocation_size);
+ page->heap().stats_collector()->NotifyAllocatedMemory(allocation_size);
return page;
}
@@ -212,12 +234,12 @@ LargePage* LargePage::Create(PageBackend* page_backend, LargePageSpace* space,
void LargePage::Destroy(LargePage* page) {
DCHECK(page);
#if DEBUG
- BaseSpace* space = page->space();
- DCHECK_EQ(space->end(), std::find(space->begin(), space->end(), page));
+ const BaseSpace& space = page->space();
+ DCHECK_EQ(space.end(), std::find(space.begin(), space.end(), page));
#endif
page->~LargePage();
- PageBackend* backend = page->heap()->page_backend();
- page->heap()->stats_collector()->NotifyFreedMemory(
+ PageBackend* backend = page->heap().page_backend();
+ page->heap().stats_collector()->NotifyFreedMemory(
AllocationSize(page->PayloadSize()));
backend->FreeLargePageMemory(reinterpret_cast<Address>(page));
}
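NormalPage::Create() now establishes, right at allocation, the same "inaccessible memory" invariant that sweeping leaves behind, so a fresh page can feed the linear allocation buffer and free list without extra zeroing. A simplified standalone sketch of the creation pattern; Backend and Page are stand-ins for PageBackend and NormalPage, not the real classes:

#include <cstddef>
#include <cstring>
#include <new>

struct Backend {
  void* Allocate(size_t size) {
    void* memory = ::operator new(size);
    std::memset(memory, 0, size);  // The real backend hands out zeroed pages.
    return memory;
  }
};

struct Page {
  explicit Page(Backend& backend) : backend_(backend) {}
  Backend& backend_;
};

Page* CreatePage(Backend& backend, size_t page_size) {
  void* memory = backend.Allocate(page_size);
  // Placement-new: the page header is constructed at the start of the
  // region; the remaining payload must look exactly as it would after
  // sweeping (poisoned, zapped, or zeroed depending on the build).
  return new (memory) Page(backend);
}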
diff --git a/deps/v8/src/heap/cppgc/heap-page.h b/deps/v8/src/heap/cppgc/heap-page.h
index 39d5e644ee..5e3cf8f1d5 100644
--- a/deps/v8/src/heap/cppgc/heap-page.h
+++ b/deps/v8/src/heap/cppgc/heap-page.h
@@ -33,11 +33,9 @@ class V8_EXPORT_PRIVATE BasePage {
BasePage(const BasePage&) = delete;
BasePage& operator=(const BasePage&) = delete;
- HeapBase* heap() const { return heap_; }
+ HeapBase& heap() const { return heap_; }
- BaseSpace* space() { return space_; }
- const BaseSpace* space() const { return space_; }
- void set_space(BaseSpace* space) { space_ = space; }
+ BaseSpace& space() const { return space_; }
bool is_large() const { return type_ == PageType::kLarge; }
@@ -80,14 +78,22 @@ class V8_EXPORT_PRIVATE BasePage {
#endif
}
+ void IncrementDiscardedMemory(size_t value) {
+ DCHECK_GE(discarded_memory_ + value, discarded_memory_);
+ discarded_memory_ += value;
+ }
+ void ResetDiscardedMemory() { discarded_memory_ = 0; }
+ size_t discarded_memory() const { return discarded_memory_; }
+
protected:
enum class PageType : uint8_t { kNormal, kLarge };
- BasePage(HeapBase*, BaseSpace*, PageType);
+ BasePage(HeapBase&, BaseSpace&, PageType);
private:
- HeapBase* heap_;
- BaseSpace* space_;
+ HeapBase& heap_;
+ BaseSpace& space_;
PageType type_;
+ size_t discarded_memory_ = 0;
};
class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
@@ -138,7 +144,7 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
using const_iterator = IteratorImpl<const HeapObjectHeader>;
// Allocates a new page in the detached state.
- static NormalPage* Create(PageBackend*, NormalPageSpace*);
+ static NormalPage* Create(PageBackend&, NormalPageSpace&);
// Destroys and frees the page. The page must be detached from the
// corresponding space (i.e. be swept when called).
static void Destroy(NormalPage*);
@@ -187,7 +193,7 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
}
private:
- NormalPage(HeapBase* heap, BaseSpace* space);
+ NormalPage(HeapBase& heap, BaseSpace& space);
~NormalPage();
size_t allocated_bytes_at_last_gc_ = 0;
@@ -199,7 +205,7 @@ class V8_EXPORT_PRIVATE LargePage final : public BasePage {
// Returns the allocation size required for a payload of size |size|.
static size_t AllocationSize(size_t size);
// Allocates a new page in the detached state.
- static LargePage* Create(PageBackend*, LargePageSpace*, size_t);
+ static LargePage* Create(PageBackend&, LargePageSpace&, size_t);
// Destroys and frees the page. The page must be detached from the
// corresponding space (i.e. be swept when called).
static void Destroy(LargePage*);
@@ -233,7 +239,7 @@ class V8_EXPORT_PRIVATE LargePage final : public BasePage {
}
private:
- LargePage(HeapBase* heap, BaseSpace* space, size_t);
+ LargePage(HeapBase& heap, BaseSpace& space, size_t);
~LargePage();
size_t payload_size_;
@@ -286,7 +292,7 @@ const HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(
SynchronizedLoad();
const HeapObjectHeader* header =
ObjectHeaderFromInnerAddressImpl<mode>(this, address);
- DCHECK_NE(kFreeListGCInfoIndex, header->GetGCInfoIndex());
+ DCHECK_NE(kFreeListGCInfoIndex, header->GetGCInfoIndex<mode>());
return *header;
}
diff --git a/deps/v8/src/heap/cppgc/heap-space.h b/deps/v8/src/heap/cppgc/heap-space.h
index ac6dbba65c..0c640e653f 100644
--- a/deps/v8/src/heap/cppgc/heap-space.h
+++ b/deps/v8/src/heap/cppgc/heap-space.h
@@ -88,12 +88,12 @@ class V8_EXPORT_PRIVATE NormalPageSpace final : public BaseSpace {
size_t size_ = 0;
};
- static NormalPageSpace* From(BaseSpace* space) {
- DCHECK(!space->is_large());
- return static_cast<NormalPageSpace*>(space);
+ static NormalPageSpace& From(BaseSpace& space) {
+ DCHECK(!space.is_large());
+ return static_cast<NormalPageSpace&>(space);
}
- static const NormalPageSpace* From(const BaseSpace* space) {
- return From(const_cast<BaseSpace*>(space));
+ static const NormalPageSpace& From(const BaseSpace& space) {
+ return From(const_cast<BaseSpace&>(space));
}
NormalPageSpace(RawHeap* heap, size_t index, bool is_compactable);
@@ -113,12 +113,12 @@ class V8_EXPORT_PRIVATE NormalPageSpace final : public BaseSpace {
class V8_EXPORT_PRIVATE LargePageSpace final : public BaseSpace {
public:
- static LargePageSpace* From(BaseSpace* space) {
- DCHECK(space->is_large());
- return static_cast<LargePageSpace*>(space);
+ static LargePageSpace& From(BaseSpace& space) {
+ DCHECK(space.is_large());
+ return static_cast<LargePageSpace&>(space);
}
- static const LargePageSpace* From(const BaseSpace* space) {
- return From(const_cast<BaseSpace*>(space));
+ static const LargePageSpace& From(const BaseSpace& space) {
+ return From(const_cast<BaseSpace&>(space));
}
LargePageSpace(RawHeap* heap, size_t index);
diff --git a/deps/v8/src/heap/cppgc/heap-statistics-collector.cc b/deps/v8/src/heap/cppgc/heap-statistics-collector.cc
index ef283e856a..5833211fcb 100644
--- a/deps/v8/src/heap/cppgc/heap-statistics-collector.cc
+++ b/deps/v8/src/heap/cppgc/heap-statistics-collector.cc
@@ -5,9 +5,12 @@
#include "src/heap/cppgc/heap-statistics-collector.h"
#include <string>
+#include <unordered_map>
+#include "include/cppgc/heap-statistics.h"
#include "include/cppgc/name-provider.h"
#include "src/heap/cppgc/free-list.h"
+#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/raw-heap.h"
@@ -35,23 +38,22 @@ HeapStatistics::SpaceStatistics* InitializeSpace(HeapStatistics* stats,
stats->space_stats.emplace_back();
HeapStatistics::SpaceStatistics* space_stats = &stats->space_stats.back();
space_stats->name = std::move(name);
-
- if (!NameProvider::HideInternalNames()) {
- const size_t num_types = GlobalGCInfoTable::Get().NumberOfGCInfos();
- space_stats->object_stats.num_types = num_types;
- space_stats->object_stats.type_name.resize(num_types);
- space_stats->object_stats.type_count.resize(num_types);
- space_stats->object_stats.type_bytes.resize(num_types);
- }
-
return space_stats;
}
+HeapStatistics::PageStatistics* InitializePage(
+ HeapStatistics::SpaceStatistics* stats) {
+ stats->page_stats.emplace_back();
+ HeapStatistics::PageStatistics* page_stats = &stats->page_stats.back();
+ return page_stats;
+}
+
void FinalizePage(HeapStatistics::SpaceStatistics* space_stats,
HeapStatistics::PageStatistics** page_stats) {
if (*page_stats) {
DCHECK_NOT_NULL(space_stats);
- space_stats->physical_size_bytes += (*page_stats)->physical_size_bytes;
+ space_stats->committed_size_bytes += (*page_stats)->committed_size_bytes;
+ space_stats->resident_size_bytes += (*page_stats)->resident_size_bytes;
space_stats->used_size_bytes += (*page_stats)->used_size_bytes;
}
*page_stats = nullptr;
@@ -63,55 +65,73 @@ void FinalizeSpace(HeapStatistics* stats,
FinalizePage(*space_stats, page_stats);
if (*space_stats) {
DCHECK_NOT_NULL(stats);
- stats->physical_size_bytes += (*space_stats)->physical_size_bytes;
+ stats->committed_size_bytes += (*space_stats)->committed_size_bytes;
+ stats->resident_size_bytes += (*space_stats)->resident_size_bytes;
stats->used_size_bytes += (*space_stats)->used_size_bytes;
}
*space_stats = nullptr;
}
-void RecordObjectType(HeapStatistics::SpaceStatistics* space_stats,
- HeapObjectHeader* header, size_t object_size) {
+void RecordObjectType(
+ std::unordered_map<const char*, size_t>& type_map,
+ std::vector<HeapStatistics::ObjectStatsEntry>& object_statistics,
+ HeapObjectHeader* header, size_t object_size) {
if (!NameProvider::HideInternalNames()) {
- // Detailed names available.
- GCInfoIndex gc_info_index = header->GetGCInfoIndex();
- space_stats->object_stats.type_count[gc_info_index]++;
- space_stats->object_stats.type_bytes[gc_info_index] += object_size;
- if (space_stats->object_stats.type_name[gc_info_index].empty()) {
- space_stats->object_stats.type_name[gc_info_index] =
- header->GetName().value;
+ // Tries to insert a new entry into the type map with a running counter. If
+ // the entry is already present, insert() just returns the existing one.
+ const auto it = type_map.insert({header->GetName().value, type_map.size()});
+ const size_t type_index = it.first->second;
+ if (object_statistics.size() <= type_index) {
+ object_statistics.resize(type_index + 1);
}
+ object_statistics[type_index].allocated_bytes += object_size;
+ object_statistics[type_index].object_count++;
}
}
} // namespace
-HeapStatistics HeapStatisticsCollector::CollectStatistics(HeapBase* heap) {
+HeapStatistics HeapStatisticsCollector::CollectDetailedStatistics(
+ HeapBase* heap) {
HeapStatistics stats;
stats.detail_level = HeapStatistics::DetailLevel::kDetailed;
current_stats_ = &stats;
- Traverse(&heap->raw_heap());
+ if (!NameProvider::HideInternalNames()) {
+ // Add a dummy type so that a type index of zero has a valid mapping but
+ // shows an invalid type.
+ type_name_to_index_map_.insert({"Invalid type", 0});
+ }
+
+ Traverse(heap->raw_heap());
FinalizeSpace(current_stats_, &current_space_stats_, &current_page_stats_);
+ if (!NameProvider::HideInternalNames()) {
+ stats.type_names.resize(type_name_to_index_map_.size());
+ for (auto& it : type_name_to_index_map_) {
+ stats.type_names[it.second] = it.first;
+ }
+ }
+
DCHECK_EQ(heap->stats_collector()->allocated_memory_size(),
- stats.physical_size_bytes);
+ stats.resident_size_bytes);
return stats;
}
-bool HeapStatisticsCollector::VisitNormalPageSpace(NormalPageSpace* space) {
- DCHECK_EQ(0u, space->linear_allocation_buffer().size());
+bool HeapStatisticsCollector::VisitNormalPageSpace(NormalPageSpace& space) {
+ DCHECK_EQ(0u, space.linear_allocation_buffer().size());
FinalizeSpace(current_stats_, &current_space_stats_, &current_page_stats_);
current_space_stats_ =
- InitializeSpace(current_stats_, GetNormalPageSpaceName(space->index()));
+ InitializeSpace(current_stats_, GetNormalPageSpaceName(space.index()));
- space->free_list().CollectStatistics(current_space_stats_->free_list_stats);
+ space.free_list().CollectStatistics(current_space_stats_->free_list_stats);
return false;
}
-bool HeapStatisticsCollector::VisitLargePageSpace(LargePageSpace* space) {
+bool HeapStatisticsCollector::VisitLargePageSpace(LargePageSpace& space) {
FinalizeSpace(current_stats_, &current_space_stats_, &current_page_stats_);
current_space_stats_ = InitializeSpace(current_stats_, "LargePageSpace");
@@ -119,38 +139,46 @@ bool HeapStatisticsCollector::VisitLargePageSpace(LargePageSpace* space) {
return false;
}
-bool HeapStatisticsCollector::VisitNormalPage(NormalPage* page) {
+bool HeapStatisticsCollector::VisitNormalPage(NormalPage& page) {
DCHECK_NOT_NULL(current_space_stats_);
FinalizePage(current_space_stats_, &current_page_stats_);
- current_space_stats_->page_stats.emplace_back(
- HeapStatistics::PageStatistics{kPageSize, 0});
- current_page_stats_ = &current_space_stats_->page_stats.back();
+
+ current_page_stats_ = InitializePage(current_space_stats_);
+ current_page_stats_->committed_size_bytes = kPageSize;
+ current_page_stats_->resident_size_bytes =
+ kPageSize - page.discarded_memory();
return false;
}
-bool HeapStatisticsCollector::VisitLargePage(LargePage* page) {
+bool HeapStatisticsCollector::VisitLargePage(LargePage& page) {
DCHECK_NOT_NULL(current_space_stats_);
FinalizePage(current_space_stats_, &current_page_stats_);
- HeapObjectHeader* object_header = page->ObjectHeader();
- size_t object_size = page->PayloadSize();
- RecordObjectType(current_space_stats_, object_header, object_size);
- size_t allocated_size = LargePage::AllocationSize(object_size);
- current_space_stats_->physical_size_bytes += allocated_size;
- current_space_stats_->used_size_bytes += object_size;
- current_space_stats_->page_stats.emplace_back(
- HeapStatistics::PageStatistics{allocated_size, object_size});
- return true;
+ const size_t object_size = page.PayloadSize();
+ const size_t allocated_size = LargePage::AllocationSize(object_size);
+ current_page_stats_ = InitializePage(current_space_stats_);
+ current_page_stats_->committed_size_bytes = allocated_size;
+ current_page_stats_->resident_size_bytes = allocated_size;
+ return false;
}
-bool HeapStatisticsCollector::VisitHeapObjectHeader(HeapObjectHeader* header) {
- DCHECK(!header->IsLargeObject());
+bool HeapStatisticsCollector::VisitHeapObjectHeader(HeapObjectHeader& header) {
+ if (header.IsFree()) return true;
+
DCHECK_NOT_NULL(current_space_stats_);
DCHECK_NOT_NULL(current_page_stats_);
- if (header->IsFree()) return true;
- size_t object_size = header->AllocatedSize();
- RecordObjectType(current_space_stats_, header, object_size);
- current_page_stats_->used_size_bytes += object_size;
+ // For the purpose of heap statistics, the header counts towards the allocated
+ // object size.
+ const size_t allocated_object_size =
+ header.IsLargeObject()
+ ? LargePage::From(
+ BasePage::FromPayload(const_cast<HeapObjectHeader*>(&header)))
+ ->PayloadSize()
+ : header.AllocatedSize();
+ RecordObjectType(type_name_to_index_map_,
+ current_page_stats_->object_statistics, &header,
+ allocated_object_size);
+ current_page_stats_->used_size_bytes += allocated_object_size;
return true;
}
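RecordObjectType() replaces the fixed per-GCInfo vectors with a dense index keyed on the type-name pointer. The trick is that unordered_map::insert evaluates type_map.size() before inserting and leaves existing entries untouched, so each distinct name receives the next free index exactly once. A self-contained sketch of the same idiom:

#include <cstddef>
#include <iostream>
#include <unordered_map>

size_t IndexForName(std::unordered_map<const char*, size_t>& type_map,
                    const char* name) {
  // The candidate value type_map.size() is computed before insertion; if
  // the key already exists, insert() is a no-op and returns the old entry.
  const auto it = type_map.insert({name, type_map.size()});
  return it.first->second;
}

int main() {
  std::unordered_map<const char*, size_t> type_map;
  static const char* kFoo = "Foo";
  static const char* kBar = "Bar";
  std::cout << IndexForName(type_map, kFoo) << "\n";  // 0
  std::cout << IndexForName(type_map, kBar) << "\n";  // 1
  std::cout << IndexForName(type_map, kFoo) << "\n";  // 0 again
  return 0;
}

As the header comment below notes, keying on const char* assumes NameProvider returns stable name addresses; differing pointers for the same name merely make the output less compact.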
diff --git a/deps/v8/src/heap/cppgc/heap-statistics-collector.h b/deps/v8/src/heap/cppgc/heap-statistics-collector.h
index 52c92198a8..c0b1fe7c63 100644
--- a/deps/v8/src/heap/cppgc/heap-statistics-collector.h
+++ b/deps/v8/src/heap/cppgc/heap-statistics-collector.h
@@ -5,6 +5,8 @@
#ifndef V8_HEAP_CPPGC_HEAP_STATISTICS_COLLECTOR_H_
#define V8_HEAP_CPPGC_HEAP_STATISTICS_COLLECTOR_H_
+#include <unordered_map>
+
#include "include/cppgc/heap-statistics.h"
#include "src/heap/cppgc/heap-visitor.h"
@@ -15,18 +17,23 @@ class HeapStatisticsCollector : private HeapVisitor<HeapStatisticsCollector> {
friend class HeapVisitor<HeapStatisticsCollector>;
public:
- HeapStatistics CollectStatistics(HeapBase*);
+ HeapStatistics CollectDetailedStatistics(HeapBase*);
private:
- bool VisitNormalPageSpace(NormalPageSpace*);
- bool VisitLargePageSpace(LargePageSpace*);
- bool VisitNormalPage(NormalPage*);
- bool VisitLargePage(LargePage*);
- bool VisitHeapObjectHeader(HeapObjectHeader*);
+ bool VisitNormalPageSpace(NormalPageSpace&);
+ bool VisitLargePageSpace(LargePageSpace&);
+ bool VisitNormalPage(NormalPage&);
+ bool VisitLargePage(LargePage&);
+ bool VisitHeapObjectHeader(HeapObjectHeader&);
HeapStatistics* current_stats_;
HeapStatistics::SpaceStatistics* current_space_stats_ = nullptr;
HeapStatistics::PageStatistics* current_page_stats_ = nullptr;
+ // Index from type name to final index in `HeapStatistics::type_names`.
+ // Canonicalizing based on `const char*` assuming stable addresses. If the
+ // implementation of `NameProvider` decides to return different type name
+ // c-strings, the final outcome is less compact.
+ std::unordered_map<const char*, size_t> type_name_to_index_map_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/heap-visitor.h b/deps/v8/src/heap/cppgc/heap-visitor.h
index 7fcbc1b980..cc1c83b50e 100644
--- a/deps/v8/src/heap/cppgc/heap-visitor.h
+++ b/deps/v8/src/heap/cppgc/heap-visitor.h
@@ -19,34 +19,34 @@ namespace internal {
template <typename Derived>
class HeapVisitor {
public:
- void Traverse(RawHeap* heap) {
+ void Traverse(RawHeap& heap) {
if (VisitHeapImpl(heap)) return;
- for (auto& space : *heap) {
- Traverse(space.get());
+ for (auto& space : heap) {
+ Traverse(*space.get());
}
}
- void Traverse(BaseSpace* space) {
+ void Traverse(BaseSpace& space) {
const bool is_stopped =
- space->is_large()
+ space.is_large()
? VisitLargePageSpaceImpl(LargePageSpace::From(space))
: VisitNormalPageSpaceImpl(NormalPageSpace::From(space));
if (is_stopped) return;
- for (auto* page : *space) {
- Traverse(page);
+ for (auto* page : space) {
+ Traverse(*page);
}
}
- void Traverse(BasePage* page) {
- if (page->is_large()) {
- auto* large_page = LargePage::From(page);
- if (VisitLargePageImpl(large_page)) return;
- VisitHeapObjectHeaderImpl(large_page->ObjectHeader());
+ void Traverse(BasePage& page) {
+ if (page.is_large()) {
+ auto* large_page = LargePage::From(&page);
+ if (VisitLargePageImpl(*large_page)) return;
+ VisitHeapObjectHeaderImpl(*large_page->ObjectHeader());
} else {
- auto* normal_page = NormalPage::From(page);
- if (VisitNormalPageImpl(normal_page)) return;
+ auto* normal_page = NormalPage::From(&page);
+ if (VisitNormalPageImpl(*normal_page)) return;
for (auto& header : *normal_page) {
- VisitHeapObjectHeaderImpl(&header);
+ VisitHeapObjectHeaderImpl(header);
}
}
}
@@ -54,31 +54,31 @@ class HeapVisitor {
protected:
// Visitor functions return true if no deeper processing is required.
// Users are supposed to override functions that need special treatment.
- bool VisitHeap(RawHeap*) { return false; }
- bool VisitNormalPageSpace(NormalPageSpace*) { return false; }
- bool VisitLargePageSpace(LargePageSpace*) { return false; }
- bool VisitNormalPage(NormalPage*) { return false; }
- bool VisitLargePage(LargePage*) { return false; }
- bool VisitHeapObjectHeader(HeapObjectHeader*) { return false; }
+ bool VisitHeap(RawHeap&) { return false; }
+ bool VisitNormalPageSpace(NormalPageSpace&) { return false; }
+ bool VisitLargePageSpace(LargePageSpace&) { return false; }
+ bool VisitNormalPage(NormalPage&) { return false; }
+ bool VisitLargePage(LargePage&) { return false; }
+ bool VisitHeapObjectHeader(HeapObjectHeader&) { return false; }
private:
Derived& ToDerived() { return static_cast<Derived&>(*this); }
- bool VisitHeapImpl(RawHeap* heap) { return ToDerived().VisitHeap(heap); }
- bool VisitNormalPageSpaceImpl(NormalPageSpace* space) {
+ bool VisitHeapImpl(RawHeap& heap) { return ToDerived().VisitHeap(heap); }
+ bool VisitNormalPageSpaceImpl(NormalPageSpace& space) {
return ToDerived().VisitNormalPageSpace(space);
}
- bool VisitLargePageSpaceImpl(LargePageSpace* space) {
+ bool VisitLargePageSpaceImpl(LargePageSpace& space) {
return ToDerived().VisitLargePageSpace(space);
}
- bool VisitNormalPageImpl(NormalPage* page) {
+ bool VisitNormalPageImpl(NormalPage& page) {
return ToDerived().VisitNormalPage(page);
}
- bool VisitLargePageImpl(LargePage* page) {
+ bool VisitLargePageImpl(LargePage& page) {
return ToDerived().VisitLargePage(page);
}
- bool VisitHeapObjectHeaderImpl(HeapObjectHeader* hoh) {
- return ToDerived().VisitHeapObjectHeader(hoh);
+ bool VisitHeapObjectHeaderImpl(HeapObjectHeader& header) {
+ return ToDerived().VisitHeapObjectHeader(header);
}
};
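HeapVisitor dispatches statically: ToDerived() casts to the CRTP parameter, and a derived class shadows only the Visit* hooks it cares about, returning true to cut off deeper traversal. A minimal self-contained sketch of the same pattern over a flat container, with illustrative types:

#include <iostream>
#include <vector>

template <typename Derived>
class Visitor {
 public:
  void Traverse(std::vector<int>& items) {
    for (int& item : items) {
      if (ToDerived().VisitItem(item)) continue;  // true: stop descending.
      // A real HeapVisitor would recurse into sub-structure here.
    }
  }

 protected:
  bool VisitItem(int) { return false; }  // Default: no special treatment.

 private:
  Derived& ToDerived() { return static_cast<Derived&>(*this); }
};

class Summer : public Visitor<Summer> {
  friend class Visitor<Summer>;

 public:
  int sum() const { return sum_; }

 private:
  bool VisitItem(int value) {  // Shadows the base hook; bound statically.
    sum_ += value;
    return true;
  }
  int sum_ = 0;
};

int main() {
  std::vector<int> items = {1, 2, 3};
  Summer summer;
  summer.Traverse(items);
  std::cout << summer.sum() << "\n";  // 6
  return 0;
}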
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index dc127f8e51..58252a20ab 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -14,6 +14,7 @@
#include "src/heap/cppgc/marking-verifier.h"
#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/stats-collector.h"
+#include "src/heap/cppgc/sweeper.h"
namespace cppgc {
@@ -45,6 +46,8 @@ void Heap::ForceGarbageCollectionSlow(const char* source, const char* reason,
internal::Heap::From(this)->CollectGarbage(
{internal::GarbageCollector::Config::CollectionType::kMajor, stack_state,
MarkingType::kAtomic, SweepingType::kAtomic,
+ internal::GarbageCollector::Config::FreeMemoryHandling::
+ kDiscardWherePossible,
internal::GarbageCollector::Config::IsForcedGC::kForced});
}
@@ -62,11 +65,11 @@ class Unmarker final : private HeapVisitor<Unmarker> {
friend class HeapVisitor<Unmarker>;
public:
- explicit Unmarker(RawHeap* heap) { Traverse(heap); }
+ explicit Unmarker(RawHeap& heap) { Traverse(heap); }
private:
- bool VisitHeapObjectHeader(HeapObjectHeader* header) {
- if (header->IsMarked()) header->Unmark();
+ bool VisitHeapObjectHeader(HeapObjectHeader& header) {
+ if (header.IsMarked()) header.Unmark();
return true;
}
};
@@ -87,8 +90,7 @@ void CheckConfig(Heap::Config config, Heap::MarkingType marking_support,
Heap::Heap(std::shared_ptr<cppgc::Platform> platform,
cppgc::Heap::HeapOptions options)
- : HeapBase(platform, options.custom_spaces, options.stack_support,
- nullptr /* metric_recorder */),
+ : HeapBase(platform, options.custom_spaces, options.stack_support),
gc_invoker_(this, platform_.get(), options.stack_support),
growing_(&gc_invoker_, stats_collector_.get(),
options.resource_constraints, options.marking_support,
@@ -158,7 +160,7 @@ void Heap::StartGarbageCollection(Config config) {
#if defined(CPPGC_YOUNG_GENERATION)
if (config.collection_type == Config::CollectionType::kMajor)
- Unmarker unmarker(&raw_heap());
+ Unmarker unmarker(raw_heap());
#endif
const Marker::MarkingConfig marking_config{
@@ -196,7 +198,8 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
subtle::NoGarbageCollectionScope no_gc(*this);
const Sweeper::SweepingConfig sweeping_config{
config_.sweeping_type,
- Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep};
+ Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep,
+ config_.free_memory_handling};
sweeper_.Start(sweeping_config);
in_atomic_pause_ = false;
sweeper_.NotifyDoneIfNeeded();
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index d26fd580df..549a9fe1da 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -496,6 +496,17 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
})) {
return false;
}
+ if (!DrainWorklistWithBytesAndTimeDeadline(
+ mutator_marking_state_, marked_bytes_deadline, time_deadline,
+ mutator_marking_state_.retrace_marked_objects_worklist(),
+ [this](HeapObjectHeader* header) {
+ // Retracing does not increment marked bytes as the object has
+ // already been processed before.
+ DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
+ *header);
+ })) {
+ return false;
+ }
}
{
diff --git a/deps/v8/src/heap/cppgc/marker.h b/deps/v8/src/heap/cppgc/marker.h
index 50288bd0cb..1b41d0b6e8 100644
--- a/deps/v8/src/heap/cppgc/marker.h
+++ b/deps/v8/src/heap/cppgc/marker.h
@@ -58,6 +58,11 @@ class V8_EXPORT_PRIVATE MarkerBase {
IsForcedGC is_forced_gc = IsForcedGC::kNotForced;
};
+ enum class WriteBarrierType {
+ kDijkstra,
+ kSteele,
+ };
+
virtual ~MarkerBase();
MarkerBase(const MarkerBase&) = delete;
@@ -95,6 +100,8 @@ class V8_EXPORT_PRIVATE MarkerBase {
void ProcessWeakness();
inline void WriteBarrierForInConstructionObject(HeapObjectHeader&);
+
+ template <WriteBarrierType type>
inline void WriteBarrierForObject(HeapObjectHeader&);
HeapBase& heap() { return heap_; }
@@ -220,8 +227,16 @@ void MarkerBase::WriteBarrierForInConstructionObject(HeapObjectHeader& header) {
.Push<AccessMode::kAtomic>(&header);
}
+template <MarkerBase::WriteBarrierType type>
void MarkerBase::WriteBarrierForObject(HeapObjectHeader& header) {
- mutator_marking_state_.write_barrier_worklist().Push(&header);
+ switch (type) {
+ case MarkerBase::WriteBarrierType::kDijkstra:
+ mutator_marking_state_.write_barrier_worklist().Push(&header);
+ break;
+ case MarkerBase::WriteBarrierType::kSteele:
+ mutator_marking_state_.retrace_marked_objects_worklist().Push(&header);
+ break;
+ }
}
} // namespace internal
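The template parameter statically selects the worklist: Dijkstra-style barriers shade the store target so it gets marked and traced with byte accounting, while the new Steele-style barriers re-queue an already-marked object for retracing without recounting its bytes. A conceptual, self-contained sketch; Object and the deque worklists are simplified stand-ins for HeapObjectHeader and the marking worklists:

#include <deque>

struct Object {
  bool marked = false;
};

std::deque<Object*> write_barrier_worklist;  // Dijkstra: mark and trace once.
std::deque<Object*> retrace_worklist;        // Steele: trace again.

// Dijkstra-style: on `parent.field = child`, shade the target so it is
// marked and traced; its bytes are accounted when it is processed.
void DijkstraBarrier(Object& target) {
  if (!target.marked) {
    target.marked = true;
    write_barrier_worklist.push_back(&target);
  }
}

// Steele-style: an already-marked object changed (e.g. a weak container was
// modified), so it must be traced again, without adding its size to the
// marked-bytes counter a second time.
void SteeleBarrier(Object& object) {
  if (object.marked) retrace_worklist.push_back(&object);
}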
diff --git a/deps/v8/src/heap/cppgc/marking-state.h b/deps/v8/src/heap/cppgc/marking-state.h
index c462746333..b014bd6134 100644
--- a/deps/v8/src/heap/cppgc/marking-state.h
+++ b/deps/v8/src/heap/cppgc/marking-state.h
@@ -99,6 +99,10 @@ class MarkingStateBase {
MarkingWorklists::WeakContainersWorklist& weak_containers_worklist() {
return weak_containers_worklist_;
}
+ MarkingWorklists::RetraceMarkedObjectsWorklist::Local&
+ retrace_marked_objects_worklist() {
+ return retrace_marked_objects_worklist_;
+ }
CompactionWorklists::MovableReferencesWorklist::Local*
movable_slots_worklist() {
@@ -138,6 +142,8 @@ class MarkingStateBase {
MarkingWorklists::EphemeronPairsWorklist::Local
ephemeron_pairs_for_processing_worklist_;
MarkingWorklists::WeakContainersWorklist& weak_containers_worklist_;
+ MarkingWorklists::RetraceMarkedObjectsWorklist::Local
+ retrace_marked_objects_worklist_;
// Existence of the worklist (|movable_slot_worklist_| != nullptr) denotes
// that compaction is currently enabled and slots must be recorded.
std::unique_ptr<CompactionWorklists::MovableReferencesWorklist::Local>
@@ -149,8 +155,7 @@ class MarkingStateBase {
MarkingStateBase::MarkingStateBase(HeapBase& heap,
MarkingWorklists& marking_worklists,
CompactionWorklists* compaction_worklists)
- :
- heap_(heap),
+ : heap_(heap),
marking_worklist_(marking_worklists.marking_worklist()),
not_fully_constructed_worklist_(
*marking_worklists.not_fully_constructed_worklist()),
@@ -164,7 +169,9 @@ MarkingStateBase::MarkingStateBase(HeapBase& heap,
marking_worklists.discovered_ephemeron_pairs_worklist()),
ephemeron_pairs_for_processing_worklist_(
marking_worklists.ephemeron_pairs_for_processing_worklist()),
- weak_containers_worklist_(*marking_worklists.weak_containers_worklist()) {
+ weak_containers_worklist_(*marking_worklists.weak_containers_worklist()),
+ retrace_marked_objects_worklist_(
+ marking_worklists.retrace_marked_objects_worklist()) {
if (compaction_worklists) {
movable_slots_worklist_ =
std::make_unique<CompactionWorklists::MovableReferencesWorklist::Local>(
@@ -192,7 +199,7 @@ void MarkingStateBase::MarkAndPush(HeapObjectHeader& header,
bool MarkingStateBase::MarkNoPush(HeapObjectHeader& header) {
// A GC should only mark the objects that belong in its heap.
- DCHECK_EQ(&heap_, BasePage::FromPayload(&header)->heap());
+ DCHECK_EQ(&heap_, &BasePage::FromPayload(&header)->heap());
// Never mark free space objects. This would e.g. hint to marking a promptly
// freed backing store.
DCHECK(!header.IsFree<AccessMode::kAtomic>());
@@ -354,9 +361,7 @@ void MutatorMarkingState::ReTraceMarkedWeakContainer(cppgc::Visitor& visitor,
HeapObjectHeader& header) {
DCHECK(weak_containers_worklist_.Contains(&header));
recently_retraced_weak_containers_.Insert(&header);
- // Don't push to the marking worklist to avoid double accounting of marked
- // bytes as the container is already accounted for.
- header.Trace(&visitor);
+ retrace_marked_objects_worklist().Push(&header);
}
void MutatorMarkingState::DynamicallyMarkAddress(ConstAddress address) {
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.cc b/deps/v8/src/heap/cppgc/marking-verifier.cc
index 2bbf8878e4..4d2ebcff1d 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.cc
+++ b/deps/v8/src/heap/cppgc/marking-verifier.cc
@@ -24,7 +24,7 @@ MarkingVerifierBase::MarkingVerifierBase(
void MarkingVerifierBase::Run(Heap::Config::StackState stack_state,
uintptr_t stack_end,
size_t expected_marked_bytes) {
- Traverse(&heap_.raw_heap());
+ Traverse(heap_.raw_heap());
if (stack_state == Heap::Config::StackState::kMayContainHeapPointers) {
in_construction_objects_ = &in_construction_objects_stack_;
heap_.stack()->IteratePointersUnsafe(this, stack_end);
@@ -87,22 +87,22 @@ void MarkingVerifierBase::VisitPointer(const void* address) {
TraceConservativelyIfNeeded(address);
}
-bool MarkingVerifierBase::VisitHeapObjectHeader(HeapObjectHeader* header) {
+bool MarkingVerifierBase::VisitHeapObjectHeader(HeapObjectHeader& header) {
// Verify only non-free marked objects.
- if (!header->IsMarked()) return true;
+ if (!header.IsMarked()) return true;
- DCHECK(!header->IsFree());
+ DCHECK(!header.IsFree());
- verification_state_.SetCurrentParent(header);
+ verification_state_.SetCurrentParent(&header);
- if (!header->IsInConstruction()) {
- header->Trace(visitor_.get());
+ if (!header.IsInConstruction()) {
+ header.Trace(visitor_.get());
} else {
// Dispatches to conservative tracing implementation.
- TraceConservativelyIfNeeded(*header);
+ TraceConservativelyIfNeeded(header);
}
- found_marked_bytes_ += ObjectView(*header).Size() + sizeof(HeapObjectHeader);
+ found_marked_bytes_ += ObjectView(header).Size() + sizeof(HeapObjectHeader);
verification_state_.SetCurrentParent(nullptr);
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.h b/deps/v8/src/heap/cppgc/marking-verifier.h
index 95475f5191..72d49daa76 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.h
+++ b/deps/v8/src/heap/cppgc/marking-verifier.h
@@ -51,7 +51,7 @@ class V8_EXPORT_PRIVATE MarkingVerifierBase
TraceConservativelyCallback) final;
void VisitPointer(const void*) final;
- bool VisitHeapObjectHeader(HeapObjectHeader*);
+ bool VisitHeapObjectHeader(HeapObjectHeader&);
VerificationState& verification_state_;
std::unique_ptr<cppgc::Visitor> visitor_;
diff --git a/deps/v8/src/heap/cppgc/marking-worklists.cc b/deps/v8/src/heap/cppgc/marking-worklists.cc
index 993b5e069d..548fce156e 100644
--- a/deps/v8/src/heap/cppgc/marking-worklists.cc
+++ b/deps/v8/src/heap/cppgc/marking-worklists.cc
@@ -19,6 +19,7 @@ void MarkingWorklists::ClearForTesting() {
concurrent_marking_bailout_worklist_.Clear();
discovered_ephemeron_pairs_worklist_.Clear();
ephemeron_pairs_for_processing_worklist_.Clear();
+ retrace_marked_objects_worklist_.Clear();
}
MarkingWorklists::ExternalMarkingWorklist::~ExternalMarkingWorklist() {
diff --git a/deps/v8/src/heap/cppgc/marking-worklists.h b/deps/v8/src/heap/cppgc/marking-worklists.h
index 4ad136b353..96055f0902 100644
--- a/deps/v8/src/heap/cppgc/marking-worklists.h
+++ b/deps/v8/src/heap/cppgc/marking-worklists.h
@@ -84,6 +84,8 @@ class MarkingWorklists {
using EphemeronPairsWorklist =
heap::base::Worklist<EphemeronPairItem, 64 /* local entries */>;
using WeakContainersWorklist = ExternalMarkingWorklist;
+ using RetraceMarkedObjectsWorklist =
+ heap::base::Worklist<HeapObjectHeader*, 16 /* local entries */>;
MarkingWorklist* marking_worklist() { return &marking_worklist_; }
NotFullyConstructedWorklist* not_fully_constructed_worklist() {
@@ -111,6 +113,9 @@ class MarkingWorklists {
WeakContainersWorklist* weak_containers_worklist() {
return &weak_containers_worklist_;
}
+ RetraceMarkedObjectsWorklist* retrace_marked_objects_worklist() {
+ return &retrace_marked_objects_worklist_;
+ }
void ClearForTesting();
@@ -125,6 +130,7 @@ class MarkingWorklists {
EphemeronPairsWorklist discovered_ephemeron_pairs_worklist_;
EphemeronPairsWorklist ephemeron_pairs_for_processing_worklist_;
WeakContainersWorklist weak_containers_worklist_;
+ RetraceMarkedObjectsWorklist retrace_marked_objects_worklist_;
};
template <>
diff --git a/deps/v8/src/heap/cppgc/memory.h b/deps/v8/src/heap/cppgc/memory.h
index d31af33ee3..adc2ce9bb3 100644
--- a/deps/v8/src/heap/cppgc/memory.h
+++ b/deps/v8/src/heap/cppgc/memory.h
@@ -12,6 +12,7 @@
#include "src/base/macros.h"
#include "src/base/sanitizer/asan.h"
#include "src/base/sanitizer/msan.h"
+#include "src/heap/cppgc/globals.h"
namespace cppgc {
namespace internal {
@@ -19,13 +20,26 @@ namespace internal {
V8_NOINLINE DISABLE_ASAN void NoSanitizeMemset(void* address, char c,
size_t bytes);
-inline void ZapMemory(void* address, size_t size) {
+static constexpr uint8_t kZappedValue = 0xdc;
+
+V8_INLINE void ZapMemory(void* address, size_t size) {
// The lowest bit of the zapped value should be 0 so that zapped objects are
// never viewed as fully constructed objects.
- static constexpr uint8_t kZappedValue = 0xdc;
memset(address, kZappedValue, size);
}
+V8_INLINE void CheckMemoryIsZapped(const void* address, size_t size) {
+ for (size_t i = 0; i < size; i++) {
+ CHECK_EQ(kZappedValue, reinterpret_cast<ConstAddress>(address)[i]);
+ }
+}
+
+V8_INLINE void CheckMemoryIsZero(const void* address, size_t size) {
+ for (size_t i = 0; i < size; i++) {
+ CHECK_EQ(0, reinterpret_cast<ConstAddress>(address)[i]);
+ }
+}
+
// Together `SetMemoryAccessible()` and `SetMemoryInaccessible()` form the
// memory access model for allocation and free.
V8_INLINE void SetMemoryAccessible(void* address, size_t size) {
@@ -70,6 +84,61 @@ V8_INLINE void SetMemoryInaccessible(void* address, size_t size) {
#endif // Release builds.
}
+constexpr bool CheckMemoryIsInaccessibleIsNoop() {
+#if defined(V8_USE_MEMORY_SANITIZER)
+
+ return true;
+
+#elif defined(V8_USE_ADDRESS_SANITIZER)
+
+ return false;
+
+#elif DEBUG
+
+ return false;
+
+#else // Release builds.
+
+ return true;
+
+#endif // Release builds.
+}
+
+V8_INLINE void CheckMemoryIsInaccessible(const void* address, size_t size) {
+#if defined(V8_USE_MEMORY_SANITIZER)
+
+ static_assert(CheckMemoryIsInaccessibleIsNoop(),
+ "CheckMemoryIsInaccessibleIsNoop() needs to reflect "
+ "CheckMemoryIsInaccessible().");
+ // Unable to check that memory is marked as uninitialized by MSAN.
+
+#elif defined(V8_USE_ADDRESS_SANITIZER)
+
+ static_assert(!CheckMemoryIsInaccessibleIsNoop(),
+ "CheckMemoryIsInaccessibleIsNoop() needs to reflect "
+ "CheckMemoryIsInaccessible().");
+ ASAN_CHECK_MEMORY_REGION_IS_POISONED(address, size);
+ ASAN_UNPOISON_MEMORY_REGION(address, size);
+ CheckMemoryIsZero(address, size);
+ ASAN_POISON_MEMORY_REGION(address, size);
+
+#elif DEBUG
+
+ static_assert(!CheckMemoryIsInaccessibleIsNoop(),
+ "CheckMemoryIsInaccessibleIsNoop() needs to reflect "
+ "CheckMemoryIsInaccessible().");
+ CheckMemoryIsZapped(address, size);
+
+#else // Release builds.
+
+ static_assert(CheckMemoryIsInaccessibleIsNoop(),
+ "CheckMemoryIsInaccessibleIsNoop() needs to reflect "
+ "CheckMemoryIsInaccessible().");
+ // No check in release builds.
+
+#endif // Release builds.
+}
+
} // namespace internal
} // namespace cppgc
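
Note: the memory.h changes above hoist the zap value so that debug builds can later verify freed memory was not touched: SetMemoryInaccessible() zaps (or zeroes/poisons) the region, CheckMemoryIsInaccessible() re-checks it, and CheckMemoryIsInaccessibleIsNoop() tells callers such as the sweeper whether that check actually reads the memory. A standalone sketch of the plain-DEBUG variant (no sanitizers; CHECK replaced by assert):

    // memory_check_sketch.cc -- standalone sketch of the zap/verify protocol
    // from memory.h; names follow the diff, assert() stands in for CHECK_EQ.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    static constexpr uint8_t kZappedValue = 0xdc;  // 0b11011100: lowest bit 0.

    void ZapMemory(void* address, size_t size) {
      memset(address, kZappedValue, size);
    }

    void CheckMemoryIsZapped(const void* address, size_t size) {
      const uint8_t* bytes = static_cast<const uint8_t*>(address);
      for (size_t i = 0; i < size; i++) assert(bytes[i] == kZappedValue);
    }

    int main() {
      alignas(8) uint8_t block[32];
      ZapMemory(block, sizeof(block));  // SetMemoryInaccessible() in DEBUG.
      CheckMemoryIsZapped(block, sizeof(block));  // CheckMemoryIsInaccessible().
      // Any stray write into the freed block would now trip the assert:
      // block[4] = 0x01; CheckMemoryIsZapped(block, sizeof(block));  // fails
    }
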
diff --git a/deps/v8/src/heap/cppgc/metric-recorder.h b/deps/v8/src/heap/cppgc/metric-recorder.h
index 6118627d01..53c5f3a40b 100644
--- a/deps/v8/src/heap/cppgc/metric-recorder.h
+++ b/deps/v8/src/heap/cppgc/metric-recorder.h
@@ -20,19 +20,19 @@ class StatsCollector;
*/
class MetricRecorder {
public:
- struct CppGCFullCycle {
+ struct FullCycle {
struct IncrementalPhases {
- int64_t mark_duration_us;
- int64_t sweep_duration_us;
+ int64_t mark_duration_us = -1;
+ int64_t sweep_duration_us = -1;
};
struct Phases : public IncrementalPhases {
- int64_t weak_duration_us;
- int64_t compact_duration_us;
+ int64_t weak_duration_us = -1;
+ int64_t compact_duration_us = -1;
};
struct Sizes {
- int64_t before_bytes;
- int64_t after_bytes;
- int64_t freed_bytes;
+ int64_t before_bytes = -1;
+ int64_t after_bytes = -1;
+ int64_t freed_bytes = -1;
};
Phases total;
@@ -46,21 +46,19 @@ class MetricRecorder {
double main_thread_efficiency_in_bytes_per_us;
};
- struct CppGCMainThreadIncrementalMark {
- int64_t duration_us;
+ struct MainThreadIncrementalMark {
+ int64_t duration_us = -1;
};
- struct CppGCMainThreadIncrementalSweep {
- int64_t duration_us;
+ struct MainThreadIncrementalSweep {
+ int64_t duration_us = -1;
};
virtual ~MetricRecorder() = default;
- virtual void AddMainThreadEvent(const CppGCFullCycle& event) {}
- virtual void AddMainThreadEvent(const CppGCMainThreadIncrementalMark& event) {
- }
- virtual void AddMainThreadEvent(
- const CppGCMainThreadIncrementalSweep& event) {}
+ virtual void AddMainThreadEvent(const FullCycle& event) {}
+ virtual void AddMainThreadEvent(const MainThreadIncrementalMark& event) {}
+ virtual void AddMainThreadEvent(const MainThreadIncrementalSweep& event) {}
};
} // namespace internal
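
Note: dropping the CppGC prefix and defaulting every field to -1 gives recorder implementations an explicit "phase did not run" sentinel instead of uninitialized values. A self-contained mock of how an embedder-side recorder might use the sentinel (the struct mirrors the diff; LoggingRecorder is hypothetical):

    // metric_recorder_sketch.cc -- hypothetical embedder-side recorder; the
    // struct mirrors MetricRecorder::MainThreadIncrementalMark from the diff.
    #include <cstdint>
    #include <cstdio>

    struct MainThreadIncrementalMark {
      int64_t duration_us = -1;  // -1 == "not recorded", the new default.
    };

    class LoggingRecorder {
     public:
      void AddMainThreadEvent(const MainThreadIncrementalMark& event) {
        if (event.duration_us < 0) return;  // Sentinel: phase never ran.
        std::printf("incremental mark: %lld us\n",
                    static_cast<long long>(event.duration_us));
      }
    };

    int main() {
      LoggingRecorder recorder;
      recorder.AddMainThreadEvent({});      // Default-constructed: skipped.
      recorder.AddMainThreadEvent({1200});  // Reported.
    }
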
diff --git a/deps/v8/src/heap/cppgc/object-allocator.cc b/deps/v8/src/heap/cppgc/object-allocator.cc
index 1197356c29..191e73e6d8 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.cc
+++ b/deps/v8/src/heap/cppgc/object-allocator.cc
@@ -13,6 +13,7 @@
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/page-memory.h"
#include "src/heap/cppgc/stats-collector.h"
@@ -53,30 +54,28 @@ void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
#endif
}
-void AddToFreeList(NormalPageSpace* space, Address start, size_t size) {
- auto& free_list = space->free_list();
- free_list.Add({start, size});
+void AddToFreeList(NormalPageSpace& space, Address start, size_t size) {
+ // No need for SetMemoryInaccessible() as LAB memory is retrieved as free
+ // inaccessible memory.
+ space.free_list().Add({start, size});
NormalPage::From(BasePage::FromPayload(start))
->object_start_bitmap()
.SetBit(start);
}
-void ReplaceLinearAllocationBuffer(NormalPageSpace* space,
- StatsCollector* stats_collector,
+void ReplaceLinearAllocationBuffer(NormalPageSpace& space,
+ StatsCollector& stats_collector,
Address new_buffer, size_t new_size) {
- DCHECK_NOT_NULL(space);
- DCHECK_NOT_NULL(stats_collector);
-
- auto& lab = space->linear_allocation_buffer();
+ auto& lab = space.linear_allocation_buffer();
if (lab.size()) {
AddToFreeList(space, lab.start(), lab.size());
- stats_collector->NotifyExplicitFree(lab.size());
+ stats_collector.NotifyExplicitFree(lab.size());
}
lab.Set(new_buffer, new_size);
if (new_size) {
DCHECK_NOT_NULL(new_buffer);
- stats_collector->NotifyAllocation(new_size);
+ stats_collector.NotifyAllocation(new_size);
auto* page = NormalPage::From(BasePage::FromPayload(new_buffer));
page->object_start_bitmap().ClearBit(new_buffer);
MarkRangeAsYoung(page, new_buffer, new_buffer + new_size);
@@ -86,7 +85,7 @@ void ReplaceLinearAllocationBuffer(NormalPageSpace* space,
void* AllocateLargeObject(PageBackend* page_backend, LargePageSpace* space,
StatsCollector* stats_collector, size_t size,
GCInfoIndex gcinfo) {
- LargePage* page = LargePage::Create(page_backend, space, size);
+ LargePage* page = LargePage::Create(*page_backend, *space, size);
space->AddPage(page);
auto* header = new (page->ObjectHeader())
@@ -108,7 +107,7 @@ ObjectAllocator::ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
page_backend_(page_backend),
stats_collector_(stats_collector) {}
-void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace* space, size_t size,
+void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace& space, size_t size,
GCInfoIndex gcinfo) {
void* memory = OutOfLineAllocateImpl(space, size, gcinfo);
stats_collector_->NotifySafePointForConservativeCollection();
@@ -116,7 +115,7 @@ void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace* space, size_t size,
return memory;
}
-void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace* space,
+void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
size_t size, GCInfoIndex gcinfo) {
DCHECK_EQ(0, size & kAllocationMask);
DCHECK_LE(kFreeListEntrySize, size);
@@ -125,8 +124,8 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace* space,
// 1. If this allocation is big enough, allocate a large object.
if (size >= kLargeObjectSizeThreshold) {
- auto* large_space = LargePageSpace::From(
- raw_heap_->Space(RawHeap::RegularSpaceType::kLarge));
+ auto* large_space = &LargePageSpace::From(
+ *raw_heap_->Space(RawHeap::RegularSpaceType::kLarge));
return AllocateLargeObject(page_backend_, large_space, stats_collector_,
size, gcinfo);
}
@@ -142,7 +141,7 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace* space,
// TODO(chromium:1056170): Investigate whether this should be a loop which
// would result in more aggressive re-use of memory at the expense of
// potentially larger allocation time.
- if (sweeper.SweepForAllocationIfRunning(space, size)) {
+ if (sweeper.SweepForAllocationIfRunning(&space, size)) {
// Sweeper found a block of at least `size` bytes. Allocation from the free
// list may still fail as actual buckets are not exhaustively searched for
// a suitable block. Instead, buckets are tested from larger sizes that are
@@ -160,11 +159,11 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace* space,
// TODO(chromium:1056170): Make use of the synchronously freed memory.
// 5. Add a new page to this heap.
- auto* new_page = NormalPage::Create(page_backend_, space);
- space->AddPage(new_page);
+ auto* new_page = NormalPage::Create(*page_backend_, space);
+ space.AddPage(new_page);
// 6. Set linear allocation buffer to new page.
- ReplaceLinearAllocationBuffer(space, stats_collector_,
+ ReplaceLinearAllocationBuffer(space, *stats_collector_,
new_page->PayloadStart(),
new_page->PayloadSize());
@@ -175,13 +174,21 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace* space,
return result;
}
-void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace* space, size_t size,
+void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace& space, size_t size,
GCInfoIndex gcinfo) {
- const FreeList::Block entry = space->free_list().Allocate(size);
+ const FreeList::Block entry = space.free_list().Allocate(size);
if (!entry.address) return nullptr;
- ReplaceLinearAllocationBuffer(
- space, stats_collector_, static_cast<Address>(entry.address), entry.size);
+ // Assume discarded memory on that page is now zero.
+ auto& page = *NormalPage::From(BasePage::FromPayload(entry.address));
+ if (page.discarded_memory()) {
+ stats_collector_->DecrementDiscardedMemory(page.discarded_memory());
+ page.ResetDiscardedMemory();
+ }
+
+ ReplaceLinearAllocationBuffer(space, *stats_collector_,
+ static_cast<Address>(entry.address),
+ entry.size);
return AllocateObjectOnSpace(space, size, gcinfo);
}
@@ -191,10 +198,10 @@ void ObjectAllocator::ResetLinearAllocationBuffers() {
public:
explicit Resetter(StatsCollector* stats) : stats_collector_(stats) {}
- bool VisitLargePageSpace(LargePageSpace*) { return true; }
+ bool VisitLargePageSpace(LargePageSpace&) { return true; }
- bool VisitNormalPageSpace(NormalPageSpace* space) {
- ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
+ bool VisitNormalPageSpace(NormalPageSpace& space) {
+ ReplaceLinearAllocationBuffer(space, *stats_collector_, nullptr, 0);
return true;
}
@@ -202,7 +209,7 @@ void ObjectAllocator::ResetLinearAllocationBuffers() {
StatsCollector* stats_collector_;
} visitor(stats_collector_);
- visitor.Traverse(raw_heap_);
+ visitor.Traverse(*raw_heap_);
}
void ObjectAllocator::Terminate() {
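
Note: the pointer-to-reference refactor above threads NormalPageSpace& through the entire allocation path; the fast path itself remains a bump-pointer allocation from the linear allocation buffer (LAB), with OutOfLineAllocate() as the slow path. A simplified sketch of that fast path (header size and granularity are placeholders, not cppgc's actual constants):

    // lab_sketch.cc -- simplified bump-pointer allocation over a linear
    // allocation buffer; kAllocationGranularity and the header are stand-ins.
    #include <cstddef>

    constexpr size_t kAllocationGranularity = 8;

    struct LinearAllocationBuffer {
      char* start = nullptr;
      size_t size = 0;
      void* Allocate(size_t bytes) {  // Caller guarantees bytes <= size.
        char* result = start;
        start += bytes;
        size -= bytes;
        return result;
      }
    };

    inline size_t RoundUp(size_t value, size_t granularity) {
      return (value + granularity - 1) & ~(granularity - 1);
    }

    void* AllocateObject(LinearAllocationBuffer& lab, size_t size) {
      const size_t allocation_size =
          RoundUp(size + /*sizeof(HeapObjectHeader)=*/8, kAllocationGranularity);
      if (lab.size < allocation_size) return nullptr;  // OutOfLineAllocate().
      return lab.Allocate(allocation_size);  // Bump-pointer fast path.
    }

    int main() {
      alignas(16) static char backing[256];
      LinearAllocationBuffer lab{backing, sizeof(backing)};
      void* a = AllocateObject(lab, 24);  // 24 + 8 -> 32 bytes consumed.
      void* b = AllocateObject(lab, 1);   // 1 + 8 -> rounded up to 16 bytes.
      (void)a; (void)b;
    }
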
diff --git a/deps/v8/src/heap/cppgc/object-allocator.h b/deps/v8/src/heap/cppgc/object-allocator.h
index dd99d83ba5..dd0035cfe9 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.h
+++ b/deps/v8/src/heap/cppgc/object-allocator.h
@@ -18,6 +18,10 @@
namespace cppgc {
+namespace internal {
+class ObjectAllocator;
+} // namespace internal
+
class V8_EXPORT AllocationHandle {
private:
AllocationHandle() = default;
@@ -53,11 +57,11 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
inline static RawHeap::RegularSpaceType GetInitialSpaceIndexForSize(
size_t size);
- inline void* AllocateObjectOnSpace(NormalPageSpace* space, size_t size,
+ inline void* AllocateObjectOnSpace(NormalPageSpace& space, size_t size,
GCInfoIndex gcinfo);
- void* OutOfLineAllocate(NormalPageSpace*, size_t, GCInfoIndex);
- void* OutOfLineAllocateImpl(NormalPageSpace*, size_t, GCInfoIndex);
- void* AllocateFromFreeList(NormalPageSpace*, size_t, GCInfoIndex);
+ void* OutOfLineAllocate(NormalPageSpace&, size_t, GCInfoIndex);
+ void* OutOfLineAllocateImpl(NormalPageSpace&, size_t, GCInfoIndex);
+ void* AllocateFromFreeList(NormalPageSpace&, size_t, GCInfoIndex);
RawHeap* raw_heap_;
PageBackend* page_backend_;
@@ -70,7 +74,7 @@ void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
const RawHeap::RegularSpaceType type =
GetInitialSpaceIndexForSize(allocation_size);
- return AllocateObjectOnSpace(NormalPageSpace::From(raw_heap_->Space(type)),
+ return AllocateObjectOnSpace(NormalPageSpace::From(*raw_heap_->Space(type)),
allocation_size, gcinfo);
}
@@ -80,7 +84,7 @@ void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo,
const size_t allocation_size =
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
return AllocateObjectOnSpace(
- NormalPageSpace::From(raw_heap_->CustomSpace(space_index)),
+ NormalPageSpace::From(*raw_heap_->CustomSpace(space_index)),
allocation_size, gcinfo);
}
@@ -97,12 +101,12 @@ RawHeap::RegularSpaceType ObjectAllocator::GetInitialSpaceIndexForSize(
return RawHeap::RegularSpaceType::kNormal4;
}
-void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace* space,
+void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace& space,
size_t size, GCInfoIndex gcinfo) {
DCHECK_LT(0u, gcinfo);
NormalPageSpace::LinearAllocationBuffer& current_lab =
- space->linear_allocation_buffer();
+ space.linear_allocation_buffer();
if (current_lab.size() < size) {
return OutOfLineAllocate(space, size, gcinfo);
}
diff --git a/deps/v8/src/heap/cppgc/object-poisoner.h b/deps/v8/src/heap/cppgc/object-poisoner.h
index 632dea9b9d..3b738eaeb6 100644
--- a/deps/v8/src/heap/cppgc/object-poisoner.h
+++ b/deps/v8/src/heap/cppgc/object-poisoner.h
@@ -9,6 +9,7 @@
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-visitor.h"
+#include "src/heap/cppgc/object-view.h"
namespace cppgc {
namespace internal {
@@ -20,14 +21,10 @@ class UnmarkedObjectsPoisoner : public HeapVisitor<UnmarkedObjectsPoisoner> {
friend class HeapVisitor<UnmarkedObjectsPoisoner>;
private:
- bool VisitHeapObjectHeader(HeapObjectHeader* header) {
- if (header->IsFree() || header->IsMarked()) return true;
-
- const size_t size =
- header->IsLargeObject()
- ? LargePage::From(BasePage::FromPayload(header))->ObjectSize()
- : header->ObjectSize();
- ASAN_POISON_MEMORY_REGION(header->ObjectStart(), size);
+ bool VisitHeapObjectHeader(HeapObjectHeader& header) {
+ if (header.IsFree() || header.IsMarked()) return true;
+
+ ASAN_POISON_MEMORY_REGION(header.ObjectStart(), ObjectView(header).Size());
return true;
}
};
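
Note: UnmarkedObjectsPoisoner now takes the header by reference and sizes the region via ObjectView, but the underlying mechanism is unchanged: ASan region poisoning. A minimal sketch against the public sanitizer interface, only active when built with -fsanitize=address:

    // asan_poison_sketch.cc -- compile with: clang++ -fsanitize=address
    // Demonstrates manual poisoning as used by UnmarkedObjectsPoisoner.
    #include <cstdio>
    #if defined(__has_feature)
    #if __has_feature(address_sanitizer)
    #include <sanitizer/asan_interface.h>
    #define HAVE_ASAN 1
    #endif
    #endif

    int main() {
      static char object[64];
    #ifdef HAVE_ASAN
      __asan_poison_memory_region(object, sizeof(object));
      // Any read/write of `object` now aborts with a use-after-poison report.
      __asan_unpoison_memory_region(object, sizeof(object));
    #endif
      object[0] = 1;  // Fine again after unpoisoning.
      std::printf("%d\n", object[0]);
    }
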
diff --git a/deps/v8/src/heap/cppgc/pointer-policies.cc b/deps/v8/src/heap/cppgc/pointer-policies.cc
index 23ad552c7a..3c7cb61761 100644
--- a/deps/v8/src/heap/cppgc/pointer-policies.cc
+++ b/deps/v8/src/heap/cppgc/pointer-policies.cc
@@ -13,6 +13,7 @@
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/process-heap.h"
namespace cppgc {
@@ -41,7 +42,7 @@ void EnabledCheckingPolicy::CheckPointerImpl(const void* ptr,
// References cannot change their heap association which means that state is
// immutable once it is set.
if (!heap_) {
- heap_ = base_page->heap();
+ heap_ = &base_page->heap();
if (!heap_->page_backend()->Lookup(reinterpret_cast<Address>(this))) {
// If `this` is not contained within the heap of `ptr`, we must deal with
// an on-stack or off-heap reference. For both cases there should be no
@@ -51,7 +52,7 @@ void EnabledCheckingPolicy::CheckPointerImpl(const void* ptr,
}
// Member references should never mix heaps.
- DCHECK_EQ(heap_, base_page->heap());
+ DCHECK_EQ(heap_, &base_page->heap());
// Header checks.
const HeapObjectHeader* header = nullptr;
@@ -67,31 +68,44 @@ void EnabledCheckingPolicy::CheckPointerImpl(const void* ptr,
DCHECK(!header->IsFree());
}
- // TODO(v8:11749): Check mark bits when during pre-finalizer phase.
+#ifdef CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS
+ if (heap_->prefinalizer_handler()->IsInvokingPreFinalizers()) {
+ // During prefinalizer invocation, check that |ptr| refers to a live object
+ // and that it is assigned to a live slot.
+ DCHECK(header->IsMarked());
+ // Slot can be in a large object.
+ const auto* slot_page = BasePage::FromInnerAddress(heap_, this);
+ // Off-heap slots (from other heaps or on-stack) are considered live.
+ bool slot_is_live =
+ !slot_page || slot_page->ObjectHeaderFromInnerAddress(this).IsMarked();
+ DCHECK(slot_is_live);
+ USE(slot_is_live);
+ }
+#endif // CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS
}
PersistentRegion& StrongPersistentPolicy::GetPersistentRegion(
const void* object) {
- auto* heap = BasePage::FromPayload(object)->heap();
- return heap->GetStrongPersistentRegion();
+ return BasePage::FromPayload(object)->heap().GetStrongPersistentRegion();
}
PersistentRegion& WeakPersistentPolicy::GetPersistentRegion(
const void* object) {
- auto* heap = BasePage::FromPayload(object)->heap();
- return heap->GetWeakPersistentRegion();
+ return BasePage::FromPayload(object)->heap().GetWeakPersistentRegion();
}
CrossThreadPersistentRegion&
StrongCrossThreadPersistentPolicy::GetPersistentRegion(const void* object) {
- auto* heap = BasePage::FromPayload(object)->heap();
- return heap->GetStrongCrossThreadPersistentRegion();
+ return BasePage::FromPayload(object)
+ ->heap()
+ .GetStrongCrossThreadPersistentRegion();
}
CrossThreadPersistentRegion&
WeakCrossThreadPersistentPolicy::GetPersistentRegion(const void* object) {
- auto* heap = BasePage::FromPayload(object)->heap();
- return heap->GetWeakCrossThreadPersistentRegion();
+ return BasePage::FromPayload(object)
+ ->heap()
+ .GetWeakCrossThreadPersistentRegion();
}
} // namespace internal
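
Note: the new CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS path asserts that a prefinalizer only stores pointers to marked (live) objects into live slots. From the embedder side this constrains what a prefinalizer may legally do; a hedged sketch against cppgc's public API (headers and macro assumed from include/cppgc/prefinalizer.h):

    // prefinalizer_sketch.cc -- sketch against cppgc's public API; exact
    // headers and macro spelling assumed from include/cppgc/prefinalizer.h.
    #include "cppgc/garbage-collected.h"
    #include "cppgc/member.h"
    #include "cppgc/prefinalizer.h"
    #include "cppgc/visitor.h"

    class Peer final : public cppgc::GarbageCollected<Peer> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };

    class Holder final : public cppgc::GarbageCollected<Holder> {
      CPPGC_USING_PRE_FINALIZER(Holder, Dispose);

     public:
      void Trace(cppgc::Visitor* visitor) const { visitor->Trace(peer_); }

      void Dispose() {
        // OK: clearing a reference.
        peer_ = nullptr;
        // Not OK under the new check: assigning a pointer to an object that
        // was not marked in this cycle would hit the DCHECK in
        // EnabledCheckingPolicy::CheckPointerImpl().
      }

     private:
      cppgc::Member<Peer> peer_;
    };
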
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
index 993ba54854..c05f06f6b0 100644
--- a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
@@ -21,7 +21,7 @@ void PreFinalizerRegistrationDispatcher::RegisterPrefinalizer(
PreFinalizer pre_finalizer) {
BasePage::FromPayload(pre_finalizer.object)
->heap()
- ->prefinalizer_handler()
+ .prefinalizer_handler()
->RegisterPrefinalizer(pre_finalizer);
}
@@ -53,6 +53,7 @@ void PreFinalizerHandler::InvokePreFinalizers() {
DCHECK(CurrentThreadIsCreationThread());
LivenessBroker liveness_broker = LivenessBrokerFactory::Create();
+ is_invoking_ = true;
ordered_pre_finalizers_.erase(
ordered_pre_finalizers_.begin(),
std::remove_if(ordered_pre_finalizers_.rbegin(),
@@ -61,6 +62,7 @@ void PreFinalizerHandler::InvokePreFinalizers() {
return (pf.callback)(liveness_broker, pf.object);
})
.base());
+ is_invoking_ = false;
ordered_pre_finalizers_.shrink_to_fit();
}
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.h b/deps/v8/src/heap/cppgc/prefinalizer-handler.h
index c18e487fb4..e91931bf6f 100644
--- a/deps/v8/src/heap/cppgc/prefinalizer-handler.h
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.h
@@ -25,6 +25,8 @@ class PreFinalizerHandler final {
void InvokePreFinalizers();
+ bool IsInvokingPreFinalizers() const { return is_invoking_; }
+
private:
// Checks that the current thread is the thread that created the heap.
bool CurrentThreadIsCreationThread();
@@ -36,6 +38,7 @@ class PreFinalizerHandler final {
std::vector<PreFinalizer> ordered_pre_finalizers_;
HeapBase& heap_;
+ bool is_invoking_ = false;
#ifdef DEBUG
int creation_thread_id_;
#endif
diff --git a/deps/v8/src/heap/cppgc/process-heap.cc b/deps/v8/src/heap/cppgc/process-heap.cc
index 6f8bb05c6c..f7ce15e5ff 100644
--- a/deps/v8/src/heap/cppgc/process-heap.cc
+++ b/deps/v8/src/heap/cppgc/process-heap.cc
@@ -8,6 +8,7 @@
#include <vector>
#include "src/base/lazy-instance.h"
+#include "src/base/logging.h"
#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/page-memory.h"
@@ -42,6 +43,10 @@ void HeapRegistry::RegisterHeap(HeapBase& heap) {
void HeapRegistry::UnregisterHeap(HeapBase& heap) {
v8::base::MutexGuard guard(g_heap_registry_mutex.Pointer());
+ // HeapRegistry requires access to PageBackend which means it must still
+ // be present by the time a heap is removed from the registry.
+ DCHECK_NOT_NULL(heap.page_backend());
+
auto& storage = GetHeapRegistryStorage();
const auto pos = std::find(storage.begin(), storage.end(), &heap);
DCHECK_NE(storage.end(), pos);
diff --git a/deps/v8/src/heap/cppgc/stats-collector.cc b/deps/v8/src/heap/cppgc/stats-collector.cc
index 135ccc12a7..54b68f4c28 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.cc
+++ b/deps/v8/src/heap/cppgc/stats-collector.cc
@@ -5,6 +5,7 @@
#include "src/heap/cppgc/stats-collector.h"
#include <algorithm>
+#include <atomic>
#include <cmath>
#include "src/base/atomicops.h"
@@ -18,9 +19,7 @@ namespace internal {
// static
constexpr size_t StatsCollector::kAllocationThresholdBytes;
-StatsCollector::StatsCollector(
- std::unique_ptr<MetricRecorder> histogram_recorder, Platform* platform)
- : metric_recorder_(std::move(histogram_recorder)), platform_(platform) {
+StatsCollector::StatsCollector(Platform* platform) : platform_(platform) {
USE(platform_);
}
@@ -42,11 +41,19 @@ void StatsCollector::NotifyAllocation(size_t bytes) {
// The current GC may not have been started. This is ok as recording considers
// the whole time range between garbage collections.
allocated_bytes_since_safepoint_ += bytes;
+#ifdef CPPGC_VERIFY_LIVE_BYTES
+ DCHECK_GE(live_bytes_ + bytes, live_bytes_);
+ live_bytes_ += bytes;
+#endif // CPPGC_VERIFY_LIVE_BYTES
}
void StatsCollector::NotifyExplicitFree(size_t bytes) {
// See IncreaseAllocatedObjectSize for lifetime of the counter.
explicitly_freed_bytes_since_safepoint_ += bytes;
+#ifdef CPPGC_VERIFY_LIVE_BYTES
+ DCHECK_GE(live_bytes_, bytes);
+ live_bytes_ -= bytes;
+#endif // CPPGC_VERIFY_LIVE_BYTES
}
void StatsCollector::NotifySafePointForConservativeCollection() {
@@ -66,6 +73,9 @@ void StatsCollector::AllocatedObjectSizeSafepointImpl() {
static_cast<int64_t>(allocated_bytes_since_safepoint_) -
static_cast<int64_t>(explicitly_freed_bytes_since_safepoint_);
+ // Save the epoch to avoid clearing counters when a GC happened, see below.
+ const auto saved_epoch = current_.epoch;
+
// These observer methods may start or finalize GC. In case they trigger a
// final GC pause, the delta counters are reset there and the following
// observer calls are called with '0' updates.
@@ -80,8 +90,15 @@ void StatsCollector::AllocatedObjectSizeSafepointImpl() {
observer->AllocatedObjectSizeIncreased(static_cast<size_t>(delta));
}
});
- allocated_bytes_since_safepoint_ = 0;
- explicitly_freed_bytes_since_safepoint_ = 0;
+ // Only clear the counters when no garbage collection happened. In case of a
+ // garbage collection in the callbacks, the counters have been cleared by
+ // `NotifyMarkingCompleted()`. In addition, atomic sweeping may have already
+ // allocated new memory which would be dropped from accounting in case
+ // of clearing here.
+ if (saved_epoch == current_.epoch) {
+ allocated_bytes_since_safepoint_ = 0;
+ explicitly_freed_bytes_since_safepoint_ = 0;
+ }
}
StatsCollector::Event::Event() {
@@ -107,6 +124,9 @@ void StatsCollector::NotifyMarkingCompleted(size_t marked_bytes) {
explicitly_freed_bytes_since_safepoint_;
allocated_bytes_since_safepoint_ = 0;
explicitly_freed_bytes_since_safepoint_ = 0;
+#ifdef CPPGC_VERIFY_LIVE_BYTES
+ live_bytes_ = marked_bytes;
+#endif // CPPGC_VERIFY_LIVE_BYTES
DCHECK_LE(memory_freed_bytes_since_end_of_marking_, memory_allocated_bytes_);
memory_allocated_bytes_ -= memory_freed_bytes_since_end_of_marking_;
@@ -133,12 +153,12 @@ double StatsCollector::GetRecentAllocationSpeedInBytesPerMs() const {
namespace {
-int64_t SumPhases(const MetricRecorder::CppGCFullCycle::Phases& phases) {
+int64_t SumPhases(const MetricRecorder::FullCycle::Phases& phases) {
return phases.mark_duration_us + phases.weak_duration_us +
phases.compact_duration_us + phases.sweep_duration_us;
}
-MetricRecorder::CppGCFullCycle GetFullCycleEventForMetricRecorder(
+MetricRecorder::FullCycle GetFullCycleEventForMetricRecorder(
int64_t atomic_mark_us, int64_t atomic_weak_us, int64_t atomic_compact_us,
int64_t atomic_sweep_us, int64_t incremental_mark_us,
int64_t incremental_sweep_us, int64_t concurrent_mark_us,
@@ -146,7 +166,7 @@ MetricRecorder::CppGCFullCycle GetFullCycleEventForMetricRecorder(
int64_t objects_after_bytes, int64_t objects_freed_bytes,
int64_t memory_before_bytes, int64_t memory_after_bytes,
int64_t memory_freed_bytes) {
- MetricRecorder::CppGCFullCycle event;
+ MetricRecorder::FullCycle event;
// MainThread.Incremental:
event.main_thread_incremental.mark_duration_us = incremental_mark_us;
event.main_thread_incremental.sweep_duration_us = incremental_sweep_us;
@@ -202,7 +222,7 @@ void StatsCollector::NotifySweepingCompleted() {
previous_ = std::move(current_);
current_ = Event();
if (metric_recorder_) {
- MetricRecorder::CppGCFullCycle event = GetFullCycleEventForMetricRecorder(
+ MetricRecorder::FullCycle event = GetFullCycleEventForMetricRecorder(
previous_.scope_data[kAtomicMark].InMicroseconds(),
previous_.scope_data[kAtomicWeak].InMicroseconds(),
previous_.scope_data[kAtomicCompact].InMicroseconds(),
@@ -264,30 +284,72 @@ v8::base::TimeDelta StatsCollector::marking_time() const {
void StatsCollector::NotifyAllocatedMemory(int64_t size) {
memory_allocated_bytes_ += size;
+#ifdef DEBUG
+ const auto saved_epoch = current_.epoch;
+#endif // DEBUG
ForAllAllocationObservers([size](AllocationObserver* observer) {
observer->AllocatedSizeIncreased(static_cast<size_t>(size));
});
+#ifdef DEBUG
+ // AllocatedSizeIncreased() must not trigger GC.
+ DCHECK_EQ(saved_epoch, current_.epoch);
+#endif // DEBUG
}
void StatsCollector::NotifyFreedMemory(int64_t size) {
memory_freed_bytes_since_end_of_marking_ += size;
+#ifdef DEBUG
+ const auto saved_epoch = current_.epoch;
+#endif // DEBUG
ForAllAllocationObservers([size](AllocationObserver* observer) {
observer->AllocatedSizeDecreased(static_cast<size_t>(size));
});
+#ifdef DEBUG
+ // AllocatedSizeDecreased() must not trigger GC.
+ DCHECK_EQ(saved_epoch, current_.epoch);
+#endif // DEBUG
+}
+
+void StatsCollector::IncrementDiscardedMemory(size_t value) {
+ const size_t old =
+ discarded_bytes_.fetch_add(value, std::memory_order_relaxed);
+ DCHECK_GE(old + value, old);
+ USE(old);
+}
+
+void StatsCollector::DecrementDiscardedMemory(size_t value) {
+ const size_t old =
+ discarded_bytes_.fetch_sub(value, std::memory_order_relaxed);
+ DCHECK_GE(old, old - value);
+ USE(old);
+}
+
+void StatsCollector::ResetDiscardedMemory() {
+ discarded_bytes_.store(0, std::memory_order_relaxed);
+}
+
+size_t StatsCollector::discarded_memory_size() const {
+ return discarded_bytes_.load(std::memory_order_relaxed);
+}
+
+size_t StatsCollector::resident_memory_size() const {
+ const auto allocated = allocated_memory_size();
+ const auto discarded = discarded_memory_size();
+ DCHECK_IMPLIES(allocated == 0, discarded == 0);
+ DCHECK_IMPLIES(allocated > 0, allocated > discarded);
+ return allocated - discarded;
}
void StatsCollector::RecordHistogramSample(ScopeId scope_id_,
v8::base::TimeDelta time) {
switch (scope_id_) {
case kIncrementalMark: {
- MetricRecorder::CppGCMainThreadIncrementalMark event{
- time.InMicroseconds()};
+ MetricRecorder::MainThreadIncrementalMark event{time.InMicroseconds()};
metric_recorder_->AddMainThreadEvent(event);
break;
}
case kIncrementalSweep: {
- MetricRecorder::CppGCMainThreadIncrementalSweep event{
- time.InMicroseconds()};
+ MetricRecorder::MainThreadIncrementalSweep event{time.InMicroseconds()};
metric_recorder_->AddMainThreadEvent(event);
break;
}
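
Note: the epoch snapshot added to AllocatedObjectSizeSafepointImpl() is a reusable pattern: observers invoked from a safepoint may themselves trigger a GC that resets the delta counters, so the counters are only cleared when the GC epoch is unchanged afterwards. A distilled sketch with hypothetical names:

    // epoch_guard_sketch.cc -- hypothetical distillation of the epoch check
    // in AllocatedObjectSizeSafepointImpl(); Collector stands in for
    // StatsCollector.
    #include <cstdint>
    #include <functional>

    class Collector {
     public:
      void NotifySafepoint(const std::function<void()>& observers) {
        const uint64_t saved_epoch = epoch_;
        observers();  // May re-enter and trigger a GC, which bumps epoch_
                      // and resets pending_bytes_ itself.
        if (saved_epoch == epoch_) pending_bytes_ = 0;  // Clear only if no GC.
      }
      void NotifyGCFinished() {
        ++epoch_;
        pending_bytes_ = 0;
      }
      void Add(int64_t bytes) { pending_bytes_ += bytes; }

     private:
      uint64_t epoch_ = 0;
      int64_t pending_bytes_ = 0;
    };
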
diff --git a/deps/v8/src/heap/cppgc/stats-collector.h b/deps/v8/src/heap/cppgc/stats-collector.h
index 4709f22703..d63d297c77 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.h
+++ b/deps/v8/src/heap/cppgc/stats-collector.h
@@ -8,6 +8,7 @@
#include <stddef.h>
#include <stdint.h>
+#include <atomic>
#include <vector>
#include "include/cppgc/platform.h"
@@ -249,7 +250,7 @@ class V8_EXPORT_PRIVATE StatsCollector final {
// reasonably interesting sizes.
static constexpr size_t kAllocationThresholdBytes = 1024;
- StatsCollector(std::unique_ptr<MetricRecorder>, Platform*);
+ explicit StatsCollector(Platform*);
StatsCollector(const StatsCollector&) = delete;
StatsCollector& operator=(const StatsCollector&) = delete;
@@ -293,11 +294,18 @@ class V8_EXPORT_PRIVATE StatsCollector final {
void NotifyAllocatedMemory(int64_t);
void NotifyFreedMemory(int64_t);
- void SetMetricRecorderForTesting(
- std::unique_ptr<MetricRecorder> histogram_recorder) {
+ void IncrementDiscardedMemory(size_t);
+ void DecrementDiscardedMemory(size_t);
+ void ResetDiscardedMemory();
+ size_t discarded_memory_size() const;
+ size_t resident_memory_size() const;
+
+ void SetMetricRecorder(std::unique_ptr<MetricRecorder> histogram_recorder) {
metric_recorder_ = std::move(histogram_recorder);
}
+ MetricRecorder* GetMetricRecorder() const { return metric_recorder_.get(); }
+
private:
enum class GarbageCollectionState : uint8_t {
kNotRunning,
@@ -326,9 +334,13 @@ class V8_EXPORT_PRIVATE StatsCollector final {
// arithmetic for simplicity.
int64_t allocated_bytes_since_safepoint_ = 0;
int64_t explicitly_freed_bytes_since_safepoint_ = 0;
+#ifdef CPPGC_VERIFY_LIVE_BYTES
+ size_t live_bytes_ = 0;
+#endif // CPPGC_VERIFY_LIVE_BYTES
int64_t memory_allocated_bytes_ = 0;
int64_t memory_freed_bytes_since_end_of_marking_ = 0;
+ std::atomic<size_t> discarded_bytes_{0};
// vector to allow fast iteration of observers. Register/Unregisters only
// happens on startup/teardown.
@@ -344,6 +356,7 @@ class V8_EXPORT_PRIVATE StatsCollector final {
std::unique_ptr<MetricRecorder> metric_recorder_;
+ // |platform_| is used by the TRACE_EVENT_* macros.
Platform* platform_;
};
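
Note: with the new discarded_bytes_ counter, resident memory is derived as allocated minus discarded; both sides use relaxed atomics because they are advisory statistics, not synchronization. A small worked example of the arithmetic in resident_memory_size() (all byte counts invented):

    // resident_memory_sketch.cc -- mirrors the arithmetic in
    // StatsCollector::resident_memory_size(); the numbers are made up.
    #include <atomic>
    #include <cassert>
    #include <cstddef>

    std::atomic<size_t> allocated_bytes{0};
    std::atomic<size_t> discarded_bytes{0};

    size_t resident_memory_size() {
      const size_t allocated = allocated_bytes.load(std::memory_order_relaxed);
      const size_t discarded = discarded_bytes.load(std::memory_order_relaxed);
      assert(allocated >= discarded);
      return allocated - discarded;
    }

    int main() {
      allocated_bytes.store(512 * 1024);    // 512 KiB of page backing.
      discarded_bytes.fetch_add(            // Sweeper discarded 128 KiB of
          128 * 1024, std::memory_order_relaxed);  // free blocks.
      assert(resident_memory_size() == 384 * 1024);  // OS-resident remainder.
    }
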
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index 3e740f7924..4aa884fcfd 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -37,25 +37,25 @@ class ObjectStartBitmapVerifier
friend class HeapVisitor<ObjectStartBitmapVerifier>;
public:
- void Verify(RawHeap* heap) { Traverse(heap); }
+ void Verify(RawHeap& heap) { Traverse(heap); }
private:
- bool VisitNormalPage(NormalPage* page) {
+ bool VisitNormalPage(NormalPage& page) {
// Remember bitmap and reset previous pointer.
- bitmap_ = &page->object_start_bitmap();
+ bitmap_ = &page.object_start_bitmap();
prev_ = nullptr;
return false;
}
- bool VisitHeapObjectHeader(HeapObjectHeader* header) {
- if (header->IsLargeObject()) return true;
+ bool VisitHeapObjectHeader(HeapObjectHeader& header) {
+ if (header.IsLargeObject()) return true;
- auto* raw_header = reinterpret_cast<ConstAddress>(header);
+ auto* raw_header = reinterpret_cast<ConstAddress>(&header);
CHECK(bitmap_->CheckBit(raw_header));
if (prev_) {
CHECK_EQ(prev_, bitmap_->FindHeader(raw_header - 1));
}
- prev_ = header;
+ prev_ = &header;
return true;
}
@@ -63,6 +63,70 @@ class ObjectStartBitmapVerifier
HeapObjectHeader* prev_ = nullptr;
};
+class FreeHandlerBase {
+ public:
+ virtual ~FreeHandlerBase() = default;
+ virtual void FreeFreeList(
+ std::vector<FreeList::Block>& unfinalized_free_list) = 0;
+};
+
+class DiscardingFreeHandler : public FreeHandlerBase {
+ public:
+ DiscardingFreeHandler(PageAllocator& page_allocator, FreeList& free_list,
+ BasePage& page)
+ : page_allocator_(page_allocator), free_list_(free_list), page_(page) {}
+
+ void Free(FreeList::Block block) {
+ const uintptr_t aligned_begin_unused =
+ RoundUp(reinterpret_cast<uintptr_t>(free_list_.Add(block)),
+ page_allocator_.CommitPageSize());
+ const uintptr_t aligned_end_unused =
+ RoundDown(reinterpret_cast<uintptr_t>(block.address) + block.size,
+ page_allocator_.CommitPageSize());
+ if (aligned_begin_unused < aligned_end_unused) {
+ const size_t discarded_size = aligned_end_unused - aligned_begin_unused;
+ page_allocator_.DiscardSystemPages(
+ reinterpret_cast<void*>(aligned_begin_unused),
+ aligned_end_unused - aligned_begin_unused);
+ page_.IncrementDiscardedMemory(discarded_size);
+ page_.space()
+ .raw_heap()
+ ->heap()
+ ->stats_collector()
+ ->IncrementDiscardedMemory(discarded_size);
+ }
+ }
+
+ void FreeFreeList(std::vector<FreeList::Block>& unfinalized_free_list) final {
+ for (auto entry : unfinalized_free_list) {
+ Free(std::move(entry));
+ }
+ }
+
+ private:
+ PageAllocator& page_allocator_;
+ FreeList& free_list_;
+ BasePage& page_;
+};
+
+class RegularFreeHandler : public FreeHandlerBase {
+ public:
+ RegularFreeHandler(PageAllocator& page_allocator, FreeList& free_list,
+ BasePage& page)
+ : free_list_(free_list) {}
+
+ void Free(FreeList::Block block) { free_list_.Add(std::move(block)); }
+
+ void FreeFreeList(std::vector<FreeList::Block>& unfinalized_free_list) final {
+ for (auto entry : unfinalized_free_list) {
+ Free(std::move(entry));
+ }
+ }
+
+ private:
+ FreeList& free_list_;
+};
+
template <typename T>
class ThreadSafeStack {
public:
@@ -121,15 +185,22 @@ void StickyUnmark(HeapObjectHeader* header) {
#endif
}
-// Builder that finalizes objects and adds freelist entries right away.
-class InlinedFinalizationBuilder final {
+class InlinedFinalizationBuilderBase {
public:
struct ResultType {
bool is_empty = false;
size_t largest_new_free_list_entry = 0;
};
+};
- explicit InlinedFinalizationBuilder(BasePage* page) : page_(page) {}
+// Builder that finalizes objects and adds freelist entries right away.
+template <typename FreeHandler>
+class InlinedFinalizationBuilder final : public InlinedFinalizationBuilderBase,
+ public FreeHandler {
+ public:
+ InlinedFinalizationBuilder(BasePage& page, PageAllocator& page_allocator)
+ : FreeHandler(page_allocator,
+ NormalPageSpace::From(page.space()).free_list(), page) {}
void AddFinalizer(HeapObjectHeader* header, size_t size) {
header->Finalize();
@@ -137,24 +208,24 @@ class InlinedFinalizationBuilder final {
}
void AddFreeListEntry(Address start, size_t size) {
- auto* space = NormalPageSpace::From(page_->space());
- space->free_list().Add({start, size});
+ FreeHandler::Free({start, size});
}
ResultType GetResult(bool is_empty, size_t largest_new_free_list_entry) {
return {is_empty, largest_new_free_list_entry};
}
-
- private:
- BasePage* page_;
};
// Builder that produces results for deferred processing.
-class DeferredFinalizationBuilder final {
+template <typename FreeHandler>
+class DeferredFinalizationBuilder final : public FreeHandler {
public:
using ResultType = SpaceState::SweptPageState;
- explicit DeferredFinalizationBuilder(BasePage* page) { result_.page = page; }
+ DeferredFinalizationBuilder(BasePage& page, PageAllocator& page_allocator)
+ : FreeHandler(page_allocator, result_.cached_free_list, page) {
+ result_.page = &page;
+ }
void AddFinalizer(HeapObjectHeader* header, size_t size) {
if (header->IsFinalizable()) {
@@ -169,7 +240,7 @@ class DeferredFinalizationBuilder final {
if (found_finalizer_) {
result_.unfinalized_free_list.push_back({start, size});
} else {
- result_.cached_free_list.Add({start, size});
+ FreeHandler::Free({start, size});
}
found_finalizer_ = false;
}
@@ -186,9 +257,10 @@ class DeferredFinalizationBuilder final {
};
template <typename FinalizationBuilder>
-typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
+typename FinalizationBuilder::ResultType SweepNormalPage(
+ NormalPage* page, PageAllocator& page_allocator) {
constexpr auto kAtomicAccess = AccessMode::kAtomic;
- FinalizationBuilder builder(page);
+ FinalizationBuilder builder(*page, page_allocator);
PlatformAwareObjectStartBitmap& bitmap = page->object_start_bitmap();
bitmap.Clear();
@@ -204,6 +276,9 @@ typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
// Check if this is a free list entry.
if (header->IsFree<kAtomicAccess>()) {
SetMemoryInaccessible(header, std::min(kFreeListEntrySize, size));
+ // This prevents memory from being discarded in configurations where
+ // `CheckMemoryIsInaccessibleIsNoop()` is false.
+ CheckMemoryIsInaccessible(header, size);
begin += size;
continue;
}
@@ -248,8 +323,12 @@ typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
// - returns (unmaps) empty pages;
// - merges freelists to the space's freelist.
class SweepFinalizer final {
+ using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
+
public:
- explicit SweepFinalizer(cppgc::Platform* platform) : platform_(platform) {}
+ SweepFinalizer(cppgc::Platform* platform,
+ FreeMemoryHandling free_memory_handling)
+ : platform_(platform), free_memory_handling_(free_memory_handling) {}
void FinalizeHeap(SpaceStates* space_states) {
for (SpaceState& space_state : *space_states) {
@@ -304,20 +383,23 @@ class SweepFinalizer final {
DCHECK(!page->is_large());
// Merge freelists without finalizers.
- FreeList& space_freelist =
- NormalPageSpace::From(page->space())->free_list();
+ FreeList& space_freelist = NormalPageSpace::From(page->space()).free_list();
space_freelist.Append(std::move(page_state->cached_free_list));
// Merge freelist with finalizers.
- for (auto entry : page_state->unfinalized_free_list) {
- space_freelist.Add(std::move(entry));
- }
+ std::unique_ptr<FreeHandlerBase> handler =
+ (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
+ ? std::unique_ptr<FreeHandlerBase>(new DiscardingFreeHandler(
+ *platform_->GetPageAllocator(), space_freelist, *page))
+ : std::unique_ptr<FreeHandlerBase>(new RegularFreeHandler(
+ *platform_->GetPageAllocator(), space_freelist, *page));
+ handler->FreeFreeList(page_state->unfinalized_free_list);
largest_new_free_list_entry_ = std::max(
page_state->largest_new_free_list_entry, largest_new_free_list_entry_);
// Add the page to the space.
- page->space()->AddPage(page);
+ page->space().AddPage(page);
}
size_t largest_new_free_list_entry() const {
@@ -327,24 +409,30 @@ class SweepFinalizer final {
private:
cppgc::Platform* platform_;
size_t largest_new_free_list_entry_ = 0;
+ const FreeMemoryHandling free_memory_handling_;
};
class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
friend class HeapVisitor<MutatorThreadSweeper>;
+ using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
+
public:
- explicit MutatorThreadSweeper(SpaceStates* states, cppgc::Platform* platform)
- : states_(states), platform_(platform) {}
+ MutatorThreadSweeper(SpaceStates* states, cppgc::Platform* platform,
+ FreeMemoryHandling free_memory_handling)
+ : states_(states),
+ platform_(platform),
+ free_memory_handling_(free_memory_handling) {}
void Sweep() {
for (SpaceState& state : *states_) {
while (auto page = state.unswept_pages.Pop()) {
- SweepPage(*page);
+ SweepPage(**page);
}
}
}
- void SweepPage(BasePage* page) { Traverse(page); }
+ void SweepPage(BasePage& page) { Traverse(page); }
bool SweepWithDeadline(double deadline_in_seconds) {
DCHECK(platform_);
@@ -358,7 +446,7 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
if (remaining_budget <= 0.) return false;
// First, prioritize finalization of pages that were swept concurrently.
- SweepFinalizer finalizer(platform_);
+ SweepFinalizer finalizer(platform_, free_memory_handling_);
if (!finalizer.FinalizeSpaceWithDeadline(&state, deadline_in_seconds)) {
return false;
}
@@ -380,7 +468,7 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
static constexpr size_t kDeadlineCheckInterval = 8;
size_t page_count = 1;
while (auto page = state->unswept_pages.Pop()) {
- Traverse(*page);
+ Traverse(**page);
if (page_count % kDeadlineCheckInterval == 0 &&
deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
return false;
@@ -391,27 +479,35 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
return true;
}
- bool VisitNormalPage(NormalPage* page) {
- const InlinedFinalizationBuilder::ResultType result =
- SweepNormalPage<InlinedFinalizationBuilder>(page);
+ bool VisitNormalPage(NormalPage& page) {
+ if (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible) {
+ page.ResetDiscardedMemory();
+ }
+ const auto result =
+ (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
+ ? SweepNormalPage<
+ InlinedFinalizationBuilder<DiscardingFreeHandler>>(
+ &page, *platform_->GetPageAllocator())
+ : SweepNormalPage<InlinedFinalizationBuilder<RegularFreeHandler>>(
+ &page, *platform_->GetPageAllocator());
if (result.is_empty) {
- NormalPage::Destroy(page);
+ NormalPage::Destroy(&page);
} else {
- page->space()->AddPage(page);
+ page.space().AddPage(&page);
largest_new_free_list_entry_ = std::max(
result.largest_new_free_list_entry, largest_new_free_list_entry_);
}
return true;
}
- bool VisitLargePage(LargePage* page) {
- HeapObjectHeader* header = page->ObjectHeader();
+ bool VisitLargePage(LargePage& page) {
+ HeapObjectHeader* header = page.ObjectHeader();
if (header->IsMarked()) {
StickyUnmark(header);
- page->space()->AddPage(page);
+ page.space().AddPage(&page);
} else {
header->Finalize();
- LargePage::Destroy(page);
+ LargePage::Destroy(&page);
}
return true;
}
@@ -419,15 +515,22 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
SpaceStates* states_;
cppgc::Platform* platform_;
size_t largest_new_free_list_entry_ = 0;
+ const FreeMemoryHandling free_memory_handling_;
};
class ConcurrentSweepTask final : public cppgc::JobTask,
private HeapVisitor<ConcurrentSweepTask> {
friend class HeapVisitor<ConcurrentSweepTask>;
+ using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
+
public:
- explicit ConcurrentSweepTask(HeapBase& heap, SpaceStates* states)
- : heap_(heap), states_(states) {}
+ ConcurrentSweepTask(HeapBase& heap, SpaceStates* states, Platform* platform,
+ FreeMemoryHandling free_memory_handling)
+ : heap_(heap),
+ states_(states),
+ platform_(platform),
+ free_memory_handling_(free_memory_handling) {}
void Run(cppgc::JobDelegate* delegate) final {
StatsCollector::EnabledConcurrentScope stats_scope(
@@ -435,7 +538,7 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
for (SpaceState& state : *states_) {
while (auto page = state.unswept_pages.Pop()) {
- Traverse(*page);
+ Traverse(**page);
if (delegate->ShouldYield()) return;
}
}
@@ -447,38 +550,48 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
}
private:
- bool VisitNormalPage(NormalPage* page) {
+ bool VisitNormalPage(NormalPage& page) {
+ if (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible) {
+ page.ResetDiscardedMemory();
+ }
SpaceState::SweptPageState sweep_result =
- SweepNormalPage<DeferredFinalizationBuilder>(page);
- const size_t space_index = page->space()->index();
+ (free_memory_handling_ == FreeMemoryHandling::kDiscardWherePossible)
+ ? SweepNormalPage<
+ DeferredFinalizationBuilder<DiscardingFreeHandler>>(
+ &page, *platform_->GetPageAllocator())
+ : SweepNormalPage<DeferredFinalizationBuilder<RegularFreeHandler>>(
+ &page, *platform_->GetPageAllocator());
+ const size_t space_index = page.space().index();
DCHECK_GT(states_->size(), space_index);
SpaceState& space_state = (*states_)[space_index];
space_state.swept_unfinalized_pages.Push(std::move(sweep_result));
return true;
}
- bool VisitLargePage(LargePage* page) {
- HeapObjectHeader* header = page->ObjectHeader();
+ bool VisitLargePage(LargePage& page) {
+ HeapObjectHeader* header = page.ObjectHeader();
if (header->IsMarked()) {
StickyUnmark(header);
- page->space()->AddPage(page);
+ page.space().AddPage(&page);
return true;
}
if (!header->IsFinalizable()) {
- LargePage::Destroy(page);
+ LargePage::Destroy(&page);
return true;
}
- const size_t space_index = page->space()->index();
+ const size_t space_index = page.space().index();
DCHECK_GT(states_->size(), space_index);
SpaceState& state = (*states_)[space_index];
state.swept_unfinalized_pages.Push(
- {page, {page->ObjectHeader()}, {}, {}, true});
+ {&page, {page.ObjectHeader()}, {}, {}, true});
return true;
}
HeapBase& heap_;
SpaceStates* states_;
+ Platform* platform_;
std::atomic_bool is_completed_{false};
+ const FreeMemoryHandling free_memory_handling_;
};
// This visitor:
@@ -495,12 +608,12 @@ class PrepareForSweepVisitor final
: states_(states),
compactable_space_handling_(compactable_space_handling) {}
- bool VisitNormalPageSpace(NormalPageSpace* space) {
+ bool VisitNormalPageSpace(NormalPageSpace& space) {
if ((compactable_space_handling_ == CompactableSpaceHandling::kIgnore) &&
- space->is_compactable())
+ space.is_compactable())
return true;
- DCHECK(!space->linear_allocation_buffer().size());
- space->free_list().Clear();
+ DCHECK(!space.linear_allocation_buffer().size());
+ space.free_list().Clear();
#ifdef V8_USE_ADDRESS_SANITIZER
UnmarkedObjectsPoisoner().Traverse(space);
#endif // V8_USE_ADDRESS_SANITIZER
@@ -508,7 +621,7 @@ class PrepareForSweepVisitor final
return true;
}
- bool VisitLargePageSpace(LargePageSpace* space) {
+ bool VisitLargePageSpace(LargePageSpace& space) {
#ifdef V8_USE_ADDRESS_SANITIZER
UnmarkedObjectsPoisoner().Traverse(space);
#endif // V8_USE_ADDRESS_SANITIZER
@@ -517,10 +630,10 @@ class PrepareForSweepVisitor final
}
private:
- void ExtractPages(BaseSpace* space) {
- BaseSpace::Pages space_pages = space->RemoveAllPages();
- (*states_)[space->index()].unswept_pages.Insert(space_pages.begin(),
- space_pages.end());
+ void ExtractPages(BaseSpace& space) {
+ BaseSpace::Pages space_pages = space.RemoveAllPages();
+ (*states_)[space.index()].unswept_pages.Insert(space_pages.begin(),
+ space_pages.end());
}
SpaceStates* states_;
@@ -530,6 +643,8 @@ class PrepareForSweepVisitor final
} // namespace
class Sweeper::SweeperImpl final {
+ using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
+
public:
SweeperImpl(RawHeap& heap, StatsCollector* stats_collector)
: heap_(heap),
@@ -543,12 +658,26 @@ class Sweeper::SweeperImpl final {
StatsCollector::kAtomicSweep);
is_in_progress_ = true;
platform_ = platform;
+ config_ = config;
#if DEBUG
// Verify bitmap for all spaces regardless of |compactable_space_handling|.
- ObjectStartBitmapVerifier().Verify(&heap_);
+ ObjectStartBitmapVerifier().Verify(heap_);
#endif
+
+ // If inaccessible memory is touched to check whether it is set up
+ // correctly, it cannot be discarded.
+ if (!CanDiscardMemory()) {
+ config_.free_memory_handling = FreeMemoryHandling::kDoNotDiscard;
+ }
+
+ if (config_.free_memory_handling ==
+ FreeMemoryHandling::kDiscardWherePossible) {
+ // The discarded counter will be recomputed.
+ heap_.heap()->stats_collector()->ResetDiscardedMemory();
+ }
+
PrepareForSweepVisitor(&space_states_, config.compactable_space_handling)
- .Traverse(&heap_);
+ .Traverse(heap_);
if (config.sweeping_type == SweepingConfig::SweepingType::kAtomic) {
Finish();
@@ -578,7 +707,7 @@ class Sweeper::SweeperImpl final {
{
// First, process unfinalized pages as finalizing a page is faster than
// sweeping.
- SweepFinalizer finalizer(platform_);
+ SweepFinalizer finalizer(platform_, config_.free_memory_handling);
while (auto page = space_state.swept_unfinalized_pages.Pop()) {
finalizer.FinalizePage(&*page);
if (size <= finalizer.largest_new_free_list_entry()) return true;
@@ -587,9 +716,10 @@ class Sweeper::SweeperImpl final {
{
// Then, if no matching slot is found in the unfinalized pages, search the
// unswept page. This also helps out the concurrent sweeper.
- MutatorThreadSweeper sweeper(&space_states_, platform_);
+ MutatorThreadSweeper sweeper(&space_states_, platform_,
+ config_.free_memory_handling);
while (auto page = space_state.unswept_pages.Pop()) {
- sweeper.SweepPage(*page);
+ sweeper.SweepPage(**page);
if (size <= sweeper.largest_new_free_list_entry()) return true;
}
}
@@ -625,11 +755,12 @@ class Sweeper::SweeperImpl final {
MutatorThreadSweepingScope sweeping_in_progresss(*this);
// First, call finalizers on the mutator thread.
- SweepFinalizer finalizer(platform_);
+ SweepFinalizer finalizer(platform_, config_.free_memory_handling);
finalizer.FinalizeHeap(&space_states_);
// Then, help out the concurrent thread.
- MutatorThreadSweeper sweeper(&space_states_, platform_);
+ MutatorThreadSweeper sweeper(&space_states_, platform_,
+ config_.free_memory_handling);
sweeper.Sweep();
FinalizeSweep();
@@ -676,7 +807,8 @@ class Sweeper::SweeperImpl final {
StatsCollector::EnabledScope stats_scope(
stats_collector_, StatsCollector::kIncrementalSweep);
- MutatorThreadSweeper sweeper(&space_states_, platform_);
+ MutatorThreadSweeper sweeper(&space_states_, platform_,
+ config_.free_memory_handling);
{
StatsCollector::EnabledScope stats_scope(
stats_collector_, internal_scope_id, "deltaInSeconds",
@@ -755,9 +887,11 @@ class Sweeper::SweeperImpl final {
void ScheduleConcurrentSweeping() {
DCHECK(platform_);
- concurrent_sweeper_handle_ = platform_->PostJob(
- cppgc::TaskPriority::kUserVisible,
- std::make_unique<ConcurrentSweepTask>(*heap_.heap(), &space_states_));
+ concurrent_sweeper_handle_ =
+ platform_->PostJob(cppgc::TaskPriority::kUserVisible,
+ std::make_unique<ConcurrentSweepTask>(
+ *heap_.heap(), &space_states_, platform_,
+ config_.free_memory_handling));
}
void CancelSweepers() {
@@ -769,7 +903,7 @@ class Sweeper::SweeperImpl final {
void SynchronizeAndFinalizeConcurrentSweeping() {
CancelSweepers();
- SweepFinalizer finalizer(platform_);
+ SweepFinalizer finalizer(platform_, config_.free_memory_handling);
finalizer.FinalizeHeap(&space_states_);
}
@@ -777,6 +911,7 @@ class Sweeper::SweeperImpl final {
StatsCollector* const stats_collector_;
SpaceStates space_states_;
cppgc::Platform* platform_;
+ SweepingConfig config_;
IncrementalSweepTask::Handle incremental_sweeper_handle_;
std::unique_ptr<cppgc::JobHandle> concurrent_sweeper_handle_;
// Indicates whether the sweeping phase is in progress.
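
Note: the core of the new DiscardingFreeHandler is alignment arithmetic: round the usable begin of a free block up and its end down to the commit page size, and discard only if at least one whole page remains in between. A worked sketch of that computation (4096 stands in for CommitPageSize()):

    // discard_range_sketch.cc -- the alignment math from
    // DiscardingFreeHandler::Free(); 4096 stands in for CommitPageSize().
    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kCommitPageSize = 4096;

    uintptr_t RoundUp(uintptr_t v, uintptr_t align) {
      return (v + align - 1) & ~(align - 1);
    }
    uintptr_t RoundDown(uintptr_t v, uintptr_t align) {
      return v & ~(align - 1);
    }

    int main() {
      // A free block spanning [0x10100, 0x13300) past the free-list header.
      const uintptr_t begin = 0x10100, end = 0x13300;
      const uintptr_t aligned_begin = RoundUp(begin, kCommitPageSize);  // 0x11000
      const uintptr_t aligned_end = RoundDown(end, kCommitPageSize);    // 0x13000
      if (aligned_begin < aligned_end) {
        // DiscardSystemPages(aligned_begin, aligned_end - aligned_begin);
        assert(aligned_end - aligned_begin == 2 * kCommitPageSize);  // 8 KiB.
      }
      // A block smaller than one page never satisfies
      // aligned_begin < aligned_end and stays resident.
    }
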
diff --git a/deps/v8/src/heap/cppgc/sweeper.h b/deps/v8/src/heap/cppgc/sweeper.h
index a13962aa91..2254453d7a 100644
--- a/deps/v8/src/heap/cppgc/sweeper.h
+++ b/deps/v8/src/heap/cppgc/sweeper.h
@@ -10,6 +10,7 @@
#include "include/cppgc/heap.h"
#include "src/base/macros.h"
#include "src/base/platform/time.h"
+#include "src/heap/cppgc/memory.h"
namespace cppgc {
@@ -26,12 +27,18 @@ class V8_EXPORT_PRIVATE Sweeper final {
struct SweepingConfig {
using SweepingType = cppgc::Heap::SweepingType;
enum class CompactableSpaceHandling { kSweep, kIgnore };
+ enum class FreeMemoryHandling { kDoNotDiscard, kDiscardWherePossible };
SweepingType sweeping_type = SweepingType::kIncrementalAndConcurrent;
CompactableSpaceHandling compactable_space_handling =
CompactableSpaceHandling::kSweep;
+ FreeMemoryHandling free_memory_handling = FreeMemoryHandling::kDoNotDiscard;
};
+ static constexpr bool CanDiscardMemory() {
+ return CheckMemoryIsInaccessibleIsNoop();
+ }
+
explicit Sweeper(HeapBase&);
~Sweeper();
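
Note: discarding is requested per sweep via the new FreeMemoryHandling knob and, as SweeperImpl::Start() shows above, silently downgraded to kDoNotDiscard in builds where CanDiscardMemory() is false because inaccessible memory is read back for checks. An illustrative standalone reproduction of that downgrade (struct shapes copied from this diff, not included from V8):

    // sweeping_config_sketch.cc -- illustrative only; SweepingConfig is the
    // internal struct shown in the diff above, reproduced here standalone.
    struct SweepingConfig {
      // Subset of cppgc::Heap::SweepingType for the sketch.
      enum class SweepingType { kAtomic, kIncrementalAndConcurrent };
      enum class CompactableSpaceHandling { kSweep, kIgnore };
      enum class FreeMemoryHandling { kDoNotDiscard, kDiscardWherePossible };
      SweepingType sweeping_type = SweepingType::kIncrementalAndConcurrent;
      CompactableSpaceHandling compactable_space_handling =
          CompactableSpaceHandling::kSweep;
      FreeMemoryHandling free_memory_handling =
          FreeMemoryHandling::kDoNotDiscard;
    };

    constexpr bool CanDiscardMemory() {
      // True only in builds where CheckMemoryIsInaccessible() is a no-op
      // (release without ASan); placeholder for
      // CheckMemoryIsInaccessibleIsNoop() from memory.h in this diff.
      return true;
    }

    int main() {
      SweepingConfig config;
      config.free_memory_handling =
          SweepingConfig::FreeMemoryHandling::kDiscardWherePossible;
      if (!CanDiscardMemory()) {
        // Mirrors SweeperImpl::Start(): downgrade when memory is touched.
        config.free_memory_handling =
            SweepingConfig::FreeMemoryHandling::kDoNotDiscard;
      }
    }
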
diff --git a/deps/v8/src/heap/cppgc/visitor.cc b/deps/v8/src/heap/cppgc/visitor.cc
index 9d90e4ab3e..e871159b7b 100644
--- a/deps/v8/src/heap/cppgc/visitor.cc
+++ b/deps/v8/src/heap/cppgc/visitor.cc
@@ -55,7 +55,7 @@ void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
if (!page) return;
- DCHECK_EQ(&heap_, page->heap());
+ DCHECK_EQ(&heap_, &page->heap());
auto* header = page->TryObjectHeaderFromInnerAddress(
const_cast<Address>(reinterpret_cast<ConstAddress>(address)));
diff --git a/deps/v8/src/heap/cppgc/write-barrier.cc b/deps/v8/src/heap/cppgc/write-barrier.cc
index 75ff5ef626..6980e4c893 100644
--- a/deps/v8/src/heap/cppgc/write-barrier.cc
+++ b/deps/v8/src/heap/cppgc/write-barrier.cc
@@ -25,6 +25,7 @@ AtomicEntryFlag WriteBarrier::incremental_or_concurrent_marking_flag_;
namespace {
+template <MarkerBase::WriteBarrierType type>
void ProcessMarkValue(HeapObjectHeader& header, MarkerBase* marker,
const void* value) {
#if defined(CPPGC_CAGED_HEAP)
@@ -46,7 +47,7 @@ void ProcessMarkValue(HeapObjectHeader& header, MarkerBase* marker,
return;
}
- marker->WriteBarrierForObject(header);
+ marker->WriteBarrierForObject<type>(header);
}
} // namespace
@@ -62,18 +63,19 @@ void WriteBarrier::DijkstraMarkingBarrierSlowWithSentinelCheck(
// static
void WriteBarrier::DijkstraMarkingBarrierSlow(const void* value) {
const BasePage* page = BasePage::FromPayload(value);
- const auto* heap = page->heap();
+ const auto& heap = page->heap();
// GetWriteBarrierType() checks marking state.
- DCHECK(heap->marker());
+ DCHECK(heap.marker());
// No write barriers should be executed from atomic pause marking.
- DCHECK(!heap->in_atomic_pause());
+ DCHECK(!heap.in_atomic_pause());
auto& header =
const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
if (!header.TryMarkAtomic()) return;
- ProcessMarkValue(header, heap->marker(), value);
+ ProcessMarkValue<MarkerBase::WriteBarrierType::kDijkstra>(
+ header, heap.marker(), value);
}
// static
@@ -106,18 +108,19 @@ void WriteBarrier::SteeleMarkingBarrierSlowWithSentinelCheck(
// static
void WriteBarrier::SteeleMarkingBarrierSlow(const void* value) {
const BasePage* page = BasePage::FromPayload(value);
- const auto* heap = page->heap();
+ const auto& heap = page->heap();
// GetWriteBarrierType() checks marking state.
- DCHECK(heap->marker());
+ DCHECK(heap.marker());
// No write barriers should be executed from atomic pause marking.
- DCHECK(!heap->in_atomic_pause());
+ DCHECK(!heap.in_atomic_pause());
auto& header =
const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
if (!header.IsMarked<AccessMode::kAtomic>()) return;
- ProcessMarkValue(header, heap->marker(), value);
+ ProcessMarkValue<MarkerBase::WriteBarrierType::kSteele>(header, heap.marker(),
+ value);
}
#if defined(CPPGC_YOUNG_GENERATION)
@@ -151,8 +154,8 @@ bool WriteBarrierTypeForNonCagedHeapPolicy::IsMarking(const void* object,
// Large objects cannot have mixins, so we are guaranteed to always have
// a pointer on the same page.
const auto* page = BasePage::FromPayload(object);
- *handle = page->heap();
- const MarkerBase* marker = page->heap()->marker();
+ *handle = &page->heap();
+ const MarkerBase* marker = page->heap().marker();
return marker && marker->IsMarking();
}
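
Note: templating ProcessMarkValue() on the barrier type lets the marker tell the two flavors apart: the Dijkstra barrier fires for a previously-unmarked target and marks it, while the Steele barrier fires for an already-marked object and schedules it for re-tracing (which is what the retrace_marked_objects_worklist added earlier in this diff feeds). A conceptual tri-color sketch of the distinction, not cppgc's implementation:

    // write_barrier_sketch.cc -- conceptual tri-color illustration of the
    // two barrier flavors; not the cppgc implementation.
    #include <vector>

    enum class Color { kWhite, kGrey, kBlack };

    struct Object {
      Color color = Color::kWhite;
    };

    std::vector<Object*> worklist;          // Grey objects awaiting tracing.
    std::vector<Object*> retrace_worklist;  // Marked objects needing a rescan.

    // Dijkstra-style: shade the *target* so it cannot stay white.
    void DijkstraBarrier(Object* value) {
      if (value->color != Color::kWhite) return;  // Already marked: done.
      value->color = Color::kGrey;
      worklist.push_back(value);
    }

    // Steele-style: an already-marked object changed; schedule a re-trace so
    // pointers written after it was scanned are still discovered.
    void SteeleBarrier(Object* object) {
      if (object->color == Color::kWhite) return;  // Unmarked: regular path.
      retrace_worklist.push_back(object);
    }
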