path: root/deps/v8/src/heap/cppgc
author     Michaël Zasso <targos@protonmail.com>       2022-11-18 09:50:46 +0000
committer  Node.js GitHub Bot <github-bot@iojs.org>    2022-11-19 09:11:32 +0000
commit     edd537ca2f38b94738fe25c2dc9b8c21bc7847f2 (patch)
tree       dad755f6f70ae5d70ab7bc251193ceeff04f20a5 /deps/v8/src/heap/cppgc
parent     bcc704f6e527a2b072bf1477e72ae49a5a96c51a (diff)
deps: update V8 to 10.8.168.20
PR-URL: https://github.com/nodejs/node/pull/45230
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Yagiz Nizipli <yagiz@nizipli.com>
Diffstat (limited to 'deps/v8/src/heap/cppgc')
-rw-r--r--  deps/v8/src/heap/cppgc/compactor.cc  20
-rw-r--r--  deps/v8/src/heap/cppgc/compactor.h  14
-rw-r--r--  deps/v8/src/heap/cppgc/explicit-management.cc  29
-rw-r--r--  deps/v8/src/heap/cppgc/garbage-collector.h  60
-rw-r--r--  deps/v8/src/heap/cppgc/gc-invoker.cc  27
-rw-r--r--  deps/v8/src/heap/cppgc/gc-invoker.h  4
-rw-r--r--  deps/v8/src/heap/cppgc/globals.h  6
-rw-r--r--  deps/v8/src/heap/cppgc/heap-base.cc  12
-rw-r--r--  deps/v8/src/heap/cppgc/heap-config.h  103
-rw-r--r--  deps/v8/src/heap/cppgc/heap-growing.cc  8
-rw-r--r--  deps/v8/src/heap/cppgc/heap-page.cc  34
-rw-r--r--  deps/v8/src/heap/cppgc/heap-page.h  27
-rw-r--r--  deps/v8/src/heap/cppgc/heap.cc  62
-rw-r--r--  deps/v8/src/heap/cppgc/heap.h  14
-rw-r--r--  deps/v8/src/heap/cppgc/marker.cc  57
-rw-r--r--  deps/v8/src/heap/cppgc/marker.h  31
-rw-r--r--  deps/v8/src/heap/cppgc/marking-verifier.cc  10
-rw-r--r--  deps/v8/src/heap/cppgc/marking-verifier.h  10
-rw-r--r--  deps/v8/src/heap/cppgc/member-storage.cc  26
-rw-r--r--  deps/v8/src/heap/cppgc/object-allocator.cc  82
-rw-r--r--  deps/v8/src/heap/cppgc/object-allocator.h  1
-rw-r--r--  deps/v8/src/heap/cppgc/remembered-set.cc  274
-rw-r--r--  deps/v8/src/heap/cppgc/remembered-set.h  18
-rw-r--r--  deps/v8/src/heap/cppgc/stats-collector.cc  5
-rw-r--r--  deps/v8/src/heap/cppgc/stats-collector.h  11
-rw-r--r--  deps/v8/src/heap/cppgc/sweeper.cc  247
-rw-r--r--  deps/v8/src/heap/cppgc/sweeper.h  48
-rw-r--r--  deps/v8/src/heap/cppgc/write-barrier.cc  18
28 files changed, 815 insertions, 443 deletions
diff --git a/deps/v8/src/heap/cppgc/compactor.cc b/deps/v8/src/heap/cppgc/compactor.cc
index e3792a32f8..68ee147dda 100644
--- a/deps/v8/src/heap/cppgc/compactor.cc
+++ b/deps/v8/src/heap/cppgc/compactor.cc
@@ -452,13 +452,11 @@ Compactor::Compactor(RawHeap& heap) : heap_(heap) {
}
}
-bool Compactor::ShouldCompact(
- GarbageCollector::Config::MarkingType marking_type,
- GarbageCollector::Config::StackState stack_state) const {
+bool Compactor::ShouldCompact(GCConfig::MarkingType marking_type,
+ StackState stack_state) const {
if (compactable_spaces_.empty() ||
- (marking_type == GarbageCollector::Config::MarkingType::kAtomic &&
- stack_state ==
- GarbageCollector::Config::StackState::kMayContainHeapPointers)) {
+ (marking_type == GCConfig::MarkingType::kAtomic &&
+ stack_state == StackState::kMayContainHeapPointers)) {
// The following check ensures that tests that want to test compaction are
// not interrupted by garbage collections that cannot use compaction.
DCHECK(!enable_for_next_gc_for_testing_);
@@ -474,9 +472,8 @@ bool Compactor::ShouldCompact(
return free_list_size > kFreeListSizeThreshold;
}
-void Compactor::InitializeIfShouldCompact(
- GarbageCollector::Config::MarkingType marking_type,
- GarbageCollector::Config::StackState stack_state) {
+void Compactor::InitializeIfShouldCompact(GCConfig::MarkingType marking_type,
+ StackState stack_state) {
DCHECK(!is_enabled_);
if (!ShouldCompact(marking_type, stack_state)) return;
@@ -487,9 +484,8 @@ void Compactor::InitializeIfShouldCompact(
is_cancelled_ = false;
}
-void Compactor::CancelIfShouldNotCompact(
- GarbageCollector::Config::MarkingType marking_type,
- GarbageCollector::Config::StackState stack_state) {
+void Compactor::CancelIfShouldNotCompact(GCConfig::MarkingType marking_type,
+ StackState stack_state) {
if (!is_enabled_ || ShouldCompact(marking_type, stack_state)) return;
is_cancelled_ = true;
diff --git a/deps/v8/src/heap/cppgc/compactor.h b/deps/v8/src/heap/cppgc/compactor.h
index d79e6a7a65..9638996a42 100644
--- a/deps/v8/src/heap/cppgc/compactor.h
+++ b/deps/v8/src/heap/cppgc/compactor.h
@@ -12,9 +12,10 @@
namespace cppgc {
namespace internal {
+class NormalPageSpace;
+
class V8_EXPORT_PRIVATE Compactor final {
- using CompactableSpaceHandling =
- Sweeper::SweepingConfig::CompactableSpaceHandling;
+ using CompactableSpaceHandling = SweepingConfig::CompactableSpaceHandling;
public:
explicit Compactor(RawHeap&);
@@ -23,10 +24,8 @@ class V8_EXPORT_PRIVATE Compactor final {
Compactor(const Compactor&) = delete;
Compactor& operator=(const Compactor&) = delete;
- void InitializeIfShouldCompact(GarbageCollector::Config::MarkingType,
- GarbageCollector::Config::StackState);
- void CancelIfShouldNotCompact(GarbageCollector::Config::MarkingType,
- GarbageCollector::Config::StackState);
+ void InitializeIfShouldCompact(GCConfig::MarkingType, StackState);
+ void CancelIfShouldNotCompact(GCConfig::MarkingType, StackState);
// Returns whether spaces need to be processed by the Sweeper after
// compaction.
CompactableSpaceHandling CompactSpacesIfEnabled();
@@ -39,8 +38,7 @@ class V8_EXPORT_PRIVATE Compactor final {
bool IsEnabledForTesting() const { return is_enabled_; }
private:
- bool ShouldCompact(GarbageCollector::Config::MarkingType,
- GarbageCollector::Config::StackState) const;
+ bool ShouldCompact(GCConfig::MarkingType, StackState) const;
RawHeap& heap_;
// Compactor does not own the compactable spaces. The heap owns all spaces.
diff --git a/deps/v8/src/heap/cppgc/explicit-management.cc b/deps/v8/src/heap/cppgc/explicit-management.cc
index 3a18bd3369..560b18dc58 100644
--- a/deps/v8/src/heap/cppgc/explicit-management.cc
+++ b/deps/v8/src/heap/cppgc/explicit-management.cc
@@ -11,6 +11,7 @@
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/memory.h"
+#include "src/heap/cppgc/object-view.h"
namespace cppgc {
namespace internal {
@@ -36,21 +37,30 @@ void ExplicitManagementImpl::FreeUnreferencedObject(HeapHandle& heap_handle,
auto& header = HeapObjectHeader::FromObject(object);
header.Finalize();
- size_t object_size = 0;
- USE(object_size);
-
// `object` is guaranteed to be of type GarbageCollected, so getting the
// BasePage is okay for regular and large objects.
BasePage* base_page = BasePage::FromPayload(object);
+
+#if defined(CPPGC_YOUNG_GENERATION)
+ const size_t object_size = ObjectView<>(header).Size();
+
+ if (auto& heap_base = HeapBase::From(heap_handle);
+ heap_base.generational_gc_supported()) {
+ heap_base.remembered_set().InvalidateRememberedSlotsInRange(
+ object, reinterpret_cast<uint8_t*>(object) + object_size);
+ // If this object was registered as remembered, remove it. Do that before
+ // the page gets destroyed.
+ heap_base.remembered_set().InvalidateRememberedSourceObject(header);
+ }
+#endif // defined(CPPGC_YOUNG_GENERATION)
+
if (base_page->is_large()) { // Large object.
- object_size = LargePage::From(base_page)->ObjectSize();
base_page->space().RemovePage(base_page);
base_page->heap().stats_collector()->NotifyExplicitFree(
LargePage::From(base_page)->PayloadSize());
LargePage::Destroy(LargePage::From(base_page));
} else { // Regular object.
const size_t header_size = header.AllocatedSize();
- object_size = header.ObjectSize();
auto* normal_page = NormalPage::From(base_page);
auto& normal_space = *static_cast<NormalPageSpace*>(&base_page->space());
auto& lab = normal_space.linear_allocation_buffer();
@@ -66,15 +76,6 @@ void ExplicitManagementImpl::FreeUnreferencedObject(HeapHandle& heap_handle,
// list entry.
}
}
-#if defined(CPPGC_YOUNG_GENERATION)
- auto& heap_base = HeapBase::From(heap_handle);
- if (heap_base.generational_gc_supported()) {
- heap_base.remembered_set().InvalidateRememberedSlotsInRange(
- object, reinterpret_cast<uint8_t*>(object) + object_size);
- // If this object was registered as remembered, remove it.
- heap_base.remembered_set().InvalidateRememberedSourceObject(header);
- }
-#endif // defined(CPPGC_YOUNG_GENERATION)
}
namespace {
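For context, the code above is reached through cppgc's public explicit-management API. Below is a minimal usage sketch, not part of the commit: the Node type, DropEagerly helper, and the V8-tree-style include paths are illustrative assumptions; cppgc::subtle::FreeUnreferencedObject is the real entry point.

    #include "include/cppgc/explicit-management.h"
    #include "include/cppgc/garbage-collected.h"
    #include "include/cppgc/visitor.h"

    class Node final : public cppgc::GarbageCollected<Node> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };

    void DropEagerly(cppgc::HeapHandle& heap_handle, Node& node) {
      // Frees the object immediately instead of waiting for the next GC. With
      // CPPGC_YOUNG_GENERATION enabled, the implementation above now removes
      // remembered-set entries pointing into the freed range before the
      // backing page can be destroyed.
      cppgc::subtle::FreeUnreferencedObject(heap_handle, node);
    }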
diff --git a/deps/v8/src/heap/cppgc/garbage-collector.h b/deps/v8/src/heap/cppgc/garbage-collector.h
index a49a7a1bad..8a08f56b6b 100644
--- a/deps/v8/src/heap/cppgc/garbage-collector.h
+++ b/deps/v8/src/heap/cppgc/garbage-collector.h
@@ -6,8 +6,7 @@
#define V8_HEAP_CPPGC_GARBAGE_COLLECTOR_H_
#include "include/cppgc/common.h"
-#include "src/heap/cppgc/marker.h"
-#include "src/heap/cppgc/sweeper.h"
+#include "src/heap/cppgc/heap-config.h"
namespace cppgc {
namespace internal {
@@ -16,62 +15,9 @@ namespace internal {
// needed to mock/fake GC for testing.
class GarbageCollector {
public:
- struct Config {
- using CollectionType = Marker::MarkingConfig::CollectionType;
- using StackState = cppgc::Heap::StackState;
- using MarkingType = Marker::MarkingConfig::MarkingType;
- using SweepingType = Sweeper::SweepingConfig::SweepingType;
- using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
- using IsForcedGC = Marker::MarkingConfig::IsForcedGC;
-
- static constexpr Config ConservativeAtomicConfig() {
- return {CollectionType::kMajor, StackState::kMayContainHeapPointers,
- MarkingType::kAtomic, SweepingType::kAtomic};
- }
-
- static constexpr Config PreciseAtomicConfig() {
- return {CollectionType::kMajor, StackState::kNoHeapPointers,
- MarkingType::kAtomic, SweepingType::kAtomic};
- }
-
- static constexpr Config ConservativeIncrementalConfig() {
- return {CollectionType::kMajor, StackState::kMayContainHeapPointers,
- MarkingType::kIncremental, SweepingType::kAtomic};
- }
-
- static constexpr Config PreciseIncrementalConfig() {
- return {CollectionType::kMajor, StackState::kNoHeapPointers,
- MarkingType::kIncremental, SweepingType::kAtomic};
- }
-
- static constexpr Config
- PreciseIncrementalMarkingConcurrentSweepingConfig() {
- return {CollectionType::kMajor, StackState::kNoHeapPointers,
- MarkingType::kIncremental,
- SweepingType::kIncrementalAndConcurrent};
- }
-
- static constexpr Config MinorPreciseAtomicConfig() {
- return {CollectionType::kMinor, StackState::kNoHeapPointers,
- MarkingType::kAtomic, SweepingType::kAtomic};
- }
-
- static constexpr Config MinorConservativeAtomicConfig() {
- return {CollectionType::kMinor, StackState::kMayContainHeapPointers,
- MarkingType::kAtomic, SweepingType::kAtomic};
- }
-
- CollectionType collection_type = CollectionType::kMajor;
- StackState stack_state = StackState::kMayContainHeapPointers;
- MarkingType marking_type = MarkingType::kAtomic;
- SweepingType sweeping_type = SweepingType::kAtomic;
- FreeMemoryHandling free_memory_handling = FreeMemoryHandling::kDoNotDiscard;
- IsForcedGC is_forced_gc = IsForcedGC::kNotForced;
- };
-
// Executes a garbage collection specified in config.
- virtual void CollectGarbage(Config) = 0;
- virtual void StartIncrementalGarbageCollection(Config) = 0;
+ virtual void CollectGarbage(GCConfig) = 0;
+ virtual void StartIncrementalGarbageCollection(GCConfig) = 0;
// The current epoch that the GC maintains. The epoch is increased on every
// GC invocation.
diff --git a/deps/v8/src/heap/cppgc/gc-invoker.cc b/deps/v8/src/heap/cppgc/gc-invoker.cc
index 1bddad7a7e..8561437552 100644
--- a/deps/v8/src/heap/cppgc/gc-invoker.cc
+++ b/deps/v8/src/heap/cppgc/gc-invoker.cc
@@ -8,7 +8,6 @@
#include "include/cppgc/common.h"
#include "include/cppgc/platform.h"
-#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/task-handle.h"
namespace cppgc {
@@ -22,8 +21,8 @@ class GCInvoker::GCInvokerImpl final : public GarbageCollector {
GCInvokerImpl(const GCInvokerImpl&) = delete;
GCInvokerImpl& operator=(const GCInvokerImpl&) = delete;
- void CollectGarbage(GarbageCollector::Config) final;
- void StartIncrementalGarbageCollection(GarbageCollector::Config) final;
+ void CollectGarbage(GCConfig) final;
+ void StartIncrementalGarbageCollection(GCConfig) final;
size_t epoch() const final { return collector_->epoch(); }
const EmbedderStackState* override_stack_state() const final {
return collector_->override_stack_state();
@@ -35,7 +34,7 @@ class GCInvoker::GCInvokerImpl final : public GarbageCollector {
using Handle = SingleThreadedHandle;
static Handle Post(GarbageCollector* collector, cppgc::TaskRunner* runner,
- GarbageCollector::Config config) {
+ GCConfig config) {
auto task =
std::make_unique<GCInvoker::GCInvokerImpl::GCTask>(collector, config);
auto handle = task->GetHandle();
@@ -43,8 +42,7 @@ class GCInvoker::GCInvokerImpl final : public GarbageCollector {
return handle;
}
- explicit GCTask(GarbageCollector* collector,
- GarbageCollector::Config config)
+ explicit GCTask(GarbageCollector* collector, GCConfig config)
: collector_(collector),
config_(config),
handle_(Handle::NonEmptyTag{}),
@@ -63,7 +61,7 @@ class GCInvoker::GCInvokerImpl final : public GarbageCollector {
Handle GetHandle() { return handle_; }
GarbageCollector* collector_;
- GarbageCollector::Config config_;
+ GCConfig config_;
Handle handle_;
size_t saved_epoch_;
};
@@ -87,10 +85,9 @@ GCInvoker::GCInvokerImpl::~GCInvokerImpl() {
}
}
-void GCInvoker::GCInvokerImpl::CollectGarbage(GarbageCollector::Config config) {
+void GCInvoker::GCInvokerImpl::CollectGarbage(GCConfig config) {
DCHECK_EQ(config.marking_type, cppgc::Heap::MarkingType::kAtomic);
- if ((config.stack_state ==
- GarbageCollector::Config::StackState::kNoHeapPointers) ||
+ if ((config.stack_state == StackState::kNoHeapPointers) ||
(stack_support_ ==
cppgc::Heap::StackSupport::kSupportsConservativeStackScan)) {
collector_->CollectGarbage(config);
@@ -98,8 +95,7 @@ void GCInvoker::GCInvokerImpl::CollectGarbage(GarbageCollector::Config config) {
platform_->GetForegroundTaskRunner()->NonNestableTasksEnabled()) {
if (!gc_task_handle_) {
// Force a precise GC since it will run in a non-nestable task.
- config.stack_state =
- GarbageCollector::Config::StackState::kNoHeapPointers;
+ config.stack_state = StackState::kNoHeapPointers;
DCHECK_NE(cppgc::Heap::StackSupport::kSupportsConservativeStackScan,
stack_support_);
gc_task_handle_ = GCTask::Post(
@@ -109,7 +105,7 @@ void GCInvoker::GCInvokerImpl::CollectGarbage(GarbageCollector::Config config) {
}
void GCInvoker::GCInvokerImpl::StartIncrementalGarbageCollection(
- GarbageCollector::Config config) {
+ GCConfig config) {
DCHECK_NE(config.marking_type, cppgc::Heap::MarkingType::kAtomic);
if ((stack_support_ !=
cppgc::Heap::StackSupport::kSupportsConservativeStackScan) &&
@@ -134,12 +130,11 @@ GCInvoker::GCInvoker(GarbageCollector* collector, cppgc::Platform* platform,
GCInvoker::~GCInvoker() = default;
-void GCInvoker::CollectGarbage(GarbageCollector::Config config) {
+void GCInvoker::CollectGarbage(GCConfig config) {
impl_->CollectGarbage(config);
}
-void GCInvoker::StartIncrementalGarbageCollection(
- GarbageCollector::Config config) {
+void GCInvoker::StartIncrementalGarbageCollection(GCConfig config) {
impl_->StartIncrementalGarbageCollection(config);
}
diff --git a/deps/v8/src/heap/cppgc/gc-invoker.h b/deps/v8/src/heap/cppgc/gc-invoker.h
index ceebca139c..c3c379721b 100644
--- a/deps/v8/src/heap/cppgc/gc-invoker.h
+++ b/deps/v8/src/heap/cppgc/gc-invoker.h
@@ -34,8 +34,8 @@ class V8_EXPORT_PRIVATE GCInvoker final : public GarbageCollector {
GCInvoker(const GCInvoker&) = delete;
GCInvoker& operator=(const GCInvoker&) = delete;
- void CollectGarbage(GarbageCollector::Config) final;
- void StartIncrementalGarbageCollection(GarbageCollector::Config) final;
+ void CollectGarbage(GCConfig) final;
+ void StartIncrementalGarbageCollection(GCConfig) final;
size_t epoch() const final;
const EmbedderStackState* override_stack_state() const final;
diff --git a/deps/v8/src/heap/cppgc/globals.h b/deps/v8/src/heap/cppgc/globals.h
index 19d5cca59c..84fb389a7e 100644
--- a/deps/v8/src/heap/cppgc/globals.h
+++ b/deps/v8/src/heap/cppgc/globals.h
@@ -80,6 +80,12 @@ constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
#endif // !defined(CPPGC_2GB_CAGE)
constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
+#if defined(CPPGC_POINTER_COMPRESSION)
+constexpr size_t kSlotSize = sizeof(uint32_t);
+#else // !defined(CPPGC_POINTER_COMPRESSION)
+constexpr size_t kSlotSize = sizeof(uintptr_t);
+#endif // !defined(CPPGC_POINTER_COMPRESSION)
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index d057d820c8..3b17bb8aa6 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -250,18 +250,16 @@ void HeapBase::Terminate() {
#endif // defined(CPPGC_YOUNG_GENERATION)
in_atomic_pause_ = true;
- stats_collector()->NotifyMarkingStarted(
- GarbageCollector::Config::CollectionType::kMajor,
- GarbageCollector::Config::MarkingType::kAtomic,
- GarbageCollector::Config::IsForcedGC::kForced);
+ stats_collector()->NotifyMarkingStarted(CollectionType::kMajor,
+ GCConfig::MarkingType::kAtomic,
+ GCConfig::IsForcedGC::kForced);
object_allocator().ResetLinearAllocationBuffers();
stats_collector()->NotifyMarkingCompleted(0);
ExecutePreFinalizers();
// TODO(chromium:1029379): Prefinalizers may black-allocate objects (under a
// compile-time option). Run sweeping with forced finalization here.
- sweeper().Start(
- {Sweeper::SweepingConfig::SweepingType::kAtomic,
- Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep});
+ sweeper().Start({SweepingConfig::SweepingType::kAtomic,
+ SweepingConfig::CompactableSpaceHandling::kSweep});
in_atomic_pause_ = false;
sweeper().NotifyDoneIfNeeded();
diff --git a/deps/v8/src/heap/cppgc/heap-config.h b/deps/v8/src/heap/cppgc/heap-config.h
new file mode 100644
index 0000000000..a89581387b
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-config.h
@@ -0,0 +1,103 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_CONFIG_H_
+#define V8_HEAP_CPPGC_HEAP_CONFIG_H_
+
+#include "include/cppgc/heap.h"
+
+namespace cppgc::internal {
+
+using StackState = cppgc::Heap::StackState;
+
+enum class CollectionType : uint8_t {
+ kMinor,
+ kMajor,
+};
+
+struct MarkingConfig {
+ using MarkingType = cppgc::Heap::MarkingType;
+ enum class IsForcedGC : uint8_t {
+ kNotForced,
+ kForced,
+ };
+
+ static constexpr MarkingConfig Default() { return {}; }
+
+ const CollectionType collection_type = CollectionType::kMajor;
+ StackState stack_state = StackState::kMayContainHeapPointers;
+ MarkingType marking_type = MarkingType::kIncremental;
+ IsForcedGC is_forced_gc = IsForcedGC::kNotForced;
+};
+
+struct SweepingConfig {
+ using SweepingType = cppgc::Heap::SweepingType;
+ enum class CompactableSpaceHandling { kSweep, kIgnore };
+ enum class FreeMemoryHandling { kDoNotDiscard, kDiscardWherePossible };
+
+ SweepingType sweeping_type = SweepingType::kIncrementalAndConcurrent;
+ CompactableSpaceHandling compactable_space_handling =
+ CompactableSpaceHandling::kSweep;
+ FreeMemoryHandling free_memory_handling = FreeMemoryHandling::kDoNotDiscard;
+};
+
+struct GCConfig {
+ using MarkingType = MarkingConfig::MarkingType;
+ using SweepingType = SweepingConfig::SweepingType;
+ using FreeMemoryHandling = SweepingConfig::FreeMemoryHandling;
+ using IsForcedGC = MarkingConfig::IsForcedGC;
+
+ static constexpr GCConfig ConservativeAtomicConfig() {
+ return {CollectionType::kMajor, StackState::kMayContainHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ static constexpr GCConfig PreciseAtomicConfig() {
+ return {CollectionType::kMajor, StackState::kNoHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ static constexpr GCConfig ConservativeIncrementalConfig() {
+ return {CollectionType::kMajor, StackState::kMayContainHeapPointers,
+ MarkingType::kIncremental, SweepingType::kAtomic};
+ }
+
+ static constexpr GCConfig PreciseIncrementalConfig() {
+ return {CollectionType::kMajor, StackState::kNoHeapPointers,
+ MarkingType::kIncremental, SweepingType::kAtomic};
+ }
+
+ static constexpr GCConfig
+ PreciseIncrementalMarkingConcurrentSweepingConfig() {
+ return {CollectionType::kMajor, StackState::kNoHeapPointers,
+ MarkingType::kIncremental, SweepingType::kIncrementalAndConcurrent};
+ }
+
+ static constexpr GCConfig PreciseConcurrentConfig() {
+ return {CollectionType::kMajor, StackState::kNoHeapPointers,
+ MarkingType::kIncrementalAndConcurrent,
+ SweepingType::kIncrementalAndConcurrent};
+ }
+
+ static constexpr GCConfig MinorPreciseAtomicConfig() {
+ return {CollectionType::kMinor, StackState::kNoHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ static constexpr GCConfig MinorConservativeAtomicConfig() {
+ return {CollectionType::kMinor, StackState::kMayContainHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ CollectionType collection_type = CollectionType::kMajor;
+ StackState stack_state = StackState::kMayContainHeapPointers;
+ MarkingType marking_type = MarkingType::kAtomic;
+ SweepingType sweeping_type = SweepingType::kAtomic;
+ FreeMemoryHandling free_memory_handling = FreeMemoryHandling::kDoNotDiscard;
+ IsForcedGC is_forced_gc = IsForcedGC::kNotForced;
+};
+
+} // namespace cppgc::internal
+
+#endif // V8_HEAP_CPPGC_HEAP_CONFIG_H_
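The new heap-config.h centralizes the marking, sweeping, and combined GC configuration structs that were previously nested inside Marker, Sweeper, and GarbageCollector::Config. A minimal sketch of how a caller picks a preset and overrides individual fields; the ForceDiscardingGC wrapper is hypothetical, while GarbageCollector, GCConfig, and its members come from this commit.

    #include "src/heap/cppgc/garbage-collector.h"
    #include "src/heap/cppgc/heap-config.h"

    namespace cppgc::internal {

    void ForceDiscardingGC(GarbageCollector& collector) {
      // Start from a preset and adjust individual knobs.
      GCConfig config = GCConfig::PreciseAtomicConfig();
      config.free_memory_handling =
          GCConfig::FreeMemoryHandling::kDiscardWherePossible;
      config.is_forced_gc = GCConfig::IsForcedGC::kForced;
      collector.CollectGarbage(config);
    }

    }  // namespace cppgc::internal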
diff --git a/deps/v8/src/heap/cppgc/heap-growing.cc b/deps/v8/src/heap/cppgc/heap-growing.cc
index 1055626a0a..0af0119863 100644
--- a/deps/v8/src/heap/cppgc/heap-growing.cc
+++ b/deps/v8/src/heap/cppgc/heap-growing.cc
@@ -93,14 +93,12 @@ void HeapGrowing::HeapGrowingImpl::AllocatedObjectSizeIncreased(size_t) {
size_t allocated_object_size = stats_collector_->allocated_object_size();
if (allocated_object_size > limit_for_atomic_gc_) {
collector_->CollectGarbage(
- {GarbageCollector::Config::CollectionType::kMajor,
- GarbageCollector::Config::StackState::kMayContainHeapPointers,
- GarbageCollector::Config::MarkingType::kAtomic, sweeping_support_});
+ {CollectionType::kMajor, StackState::kMayContainHeapPointers,
+ GCConfig::MarkingType::kAtomic, sweeping_support_});
} else if (allocated_object_size > limit_for_incremental_gc_) {
if (marking_support_ == cppgc::Heap::MarkingType::kAtomic) return;
collector_->StartIncrementalGarbageCollection(
- {GarbageCollector::Config::CollectionType::kMajor,
- GarbageCollector::Config::StackState::kMayContainHeapPointers,
+ {CollectionType::kMajor, StackState::kMayContainHeapPointers,
marking_support_, sweeping_support_});
}
}
diff --git a/deps/v8/src/heap/cppgc/heap-page.cc b/deps/v8/src/heap/cppgc/heap-page.cc
index 07baf2e79d..7e85eeca47 100644
--- a/deps/v8/src/heap/cppgc/heap-page.cc
+++ b/deps/v8/src/heap/cppgc/heap-page.cc
@@ -18,6 +18,7 @@
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/page-memory.h"
#include "src/heap/cppgc/raw-heap.h"
+#include "src/heap/cppgc/remembered-set.h"
#include "src/heap/cppgc/stats-collector.h"
namespace cppgc {
@@ -85,6 +86,13 @@ ConstAddress BasePage::PayloadEnd() const {
return const_cast<BasePage*>(this)->PayloadEnd();
}
+size_t BasePage::AllocatedSize() const {
+ return is_large() ? LargePage::PageHeaderSize() +
+ LargePage::From(this)->PayloadSize()
+ : NormalPage::From(this)->PayloadSize() +
+ RoundUp(sizeof(NormalPage), kAllocationGranularity);
+}
+
size_t BasePage::AllocatedBytesAtLastGC() const {
return is_large() ? LargePage::From(this)->AllocatedBytesAtLastGC()
: NormalPage::From(this)->AllocatedBytesAtLastGC();
@@ -120,8 +128,32 @@ const HeapObjectHeader* BasePage::TryObjectHeaderFromInnerAddress(
return header;
}
+#if defined(CPPGC_YOUNG_GENERATION)
+void BasePage::AllocateSlotSet() {
+ DCHECK_NULL(slot_set_);
+ slot_set_ = decltype(slot_set_)(
+ static_cast<SlotSet*>(
+ SlotSet::Allocate(SlotSet::BucketsForSize(AllocatedSize()))),
+ SlotSetDeleter{AllocatedSize()});
+}
+
+void BasePage::SlotSetDeleter::operator()(SlotSet* slot_set) const {
+ DCHECK_NOT_NULL(slot_set);
+ SlotSet::Delete(slot_set, SlotSet::BucketsForSize(page_size_));
+}
+
+void BasePage::ResetSlotSet() { slot_set_.reset(); }
+#endif // defined(CPPGC_YOUNG_GENERATION)
+
BasePage::BasePage(HeapBase& heap, BaseSpace& space, PageType type)
- : BasePageHandle(heap), space_(space), type_(type) {
+ : BasePageHandle(heap),
+ space_(space),
+ type_(type)
+#if defined(CPPGC_YOUNG_GENERATION)
+ ,
+ slot_set_(nullptr, SlotSetDeleter{})
+#endif // defined(CPPGC_YOUNG_GENERATION)
+{
DCHECK_EQ(0u, (reinterpret_cast<uintptr_t>(this) - kGuardPageSize) &
kPageOffsetMask);
DCHECK_EQ(&heap.raw_heap(), space_.raw_heap());
diff --git a/deps/v8/src/heap/cppgc/heap-page.h b/deps/v8/src/heap/cppgc/heap-page.h
index f20f159e73..a60bb1448d 100644
--- a/deps/v8/src/heap/cppgc/heap-page.h
+++ b/deps/v8/src/heap/cppgc/heap-page.h
@@ -8,6 +8,7 @@
#include "include/cppgc/internal/base-page-handle.h"
#include "src/base/iterator.h"
#include "src/base/macros.h"
+#include "src/heap/base/basic-slot-set.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/object-start-bitmap.h"
@@ -20,6 +21,7 @@ class NormalPageSpace;
class LargePageSpace;
class HeapBase;
class PageBackend;
+class SlotSet;
class V8_EXPORT_PRIVATE BasePage : public BasePageHandle {
public:
@@ -45,6 +47,9 @@ class V8_EXPORT_PRIVATE BasePage : public BasePageHandle {
Address PayloadEnd();
ConstAddress PayloadEnd() const;
+ // Size of the payload with the page header.
+ size_t AllocatedSize() const;
+
// Returns the size of live objects on the page at the last GC.
// The counter is updated after sweeping.
size_t AllocatedBytesAtLastGC() const;
@@ -92,14 +97,29 @@ class V8_EXPORT_PRIVATE BasePage : public BasePageHandle {
contains_young_objects_ = value;
}
+#if defined(CPPGC_YOUNG_GENERATION)
+ V8_INLINE SlotSet* slot_set() const { return slot_set_.get(); }
+ V8_INLINE SlotSet& GetOrAllocateSlotSet();
+ void ResetSlotSet();
+#endif // defined(CPPGC_YOUNG_GENERATION)
+
protected:
enum class PageType : uint8_t { kNormal, kLarge };
BasePage(HeapBase&, BaseSpace&, PageType);
private:
+ struct SlotSetDeleter {
+ void operator()(SlotSet*) const;
+ size_t page_size_ = 0;
+ };
+ void AllocateSlotSet();
+
BaseSpace& space_;
PageType type_;
bool contains_young_objects_ = false;
+#if defined(CPPGC_YOUNG_GENERATION)
+ std::unique_ptr<SlotSet, SlotSetDeleter> slot_set_;
+#endif // defined(CPPGC_YOUNG_GENERATION)
size_t discarded_memory_ = 0;
};
@@ -311,6 +331,13 @@ const HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(
return *header;
}
+#if defined(CPPGC_YOUNG_GENERATION)
+SlotSet& BasePage::GetOrAllocateSlotSet() {
+ if (!slot_set_) AllocateSlotSet();
+ return *slot_set_;
+}
+#endif // defined(CPPGC_YOUNG_GENERATION)
+
} // namespace internal
} // namespace cppgc
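The slot set above is held through a std::unique_ptr with a stateful deleter because SlotSet::Delete needs the bucket count, derived from the page size, at destruction time. A self-contained sketch of that pattern follows; the types are generic placeholders, not the cppgc ones.

    #include <cstddef>
    #include <memory>

    // A deleter that carries the context needed at destruction time, mirroring
    // BasePage::SlotSetDeleter and its page_size_ member.
    struct SizedDeleter {
      void operator()(std::byte* block) const { ::operator delete(block, size); }
      std::size_t size = 0;
    };

    using Block = std::unique_ptr<std::byte, SizedDeleter>;

    Block AllocateBlock(std::size_t size) {
      return Block(static_cast<std::byte*>(::operator new(size)),
                   SizedDeleter{size});
    }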
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index 9cd52b8dd0..7bc55b51de 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -45,11 +45,10 @@ std::unique_ptr<Heap> Heap::Create(std::shared_ptr<cppgc::Platform> platform,
void Heap::ForceGarbageCollectionSlow(const char* source, const char* reason,
Heap::StackState stack_state) {
internal::Heap::From(this)->CollectGarbage(
- {internal::GarbageCollector::Config::CollectionType::kMajor, stack_state,
- MarkingType::kAtomic, SweepingType::kAtomic,
- internal::GarbageCollector::Config::FreeMemoryHandling::
- kDiscardWherePossible,
- internal::GarbageCollector::Config::IsForcedGC::kForced});
+ {internal::CollectionType::kMajor, stack_state, MarkingType::kAtomic,
+ SweepingType::kAtomic,
+ internal::GCConfig::FreeMemoryHandling::kDiscardWherePossible,
+ internal::GCConfig::IsForcedGC::kForced});
}
AllocationHandle& Heap::GetAllocationHandle() {
@@ -62,12 +61,11 @@ namespace internal {
namespace {
-void CheckConfig(Heap::Config config, HeapBase::MarkingType marking_support,
+void CheckConfig(GCConfig config, HeapBase::MarkingType marking_support,
HeapBase::SweepingType sweeping_support) {
- CHECK_WITH_MSG(
- (config.collection_type != Heap::Config::CollectionType::kMinor) ||
- (config.stack_state == Heap::Config::StackState::kNoHeapPointers),
- "Minor GCs with stack is currently not supported");
+ CHECK_WITH_MSG((config.collection_type != CollectionType::kMinor) ||
+ (config.stack_state == StackState::kNoHeapPointers),
+ "Minor GCs with stack is currently not supported");
CHECK_LE(static_cast<int>(config.marking_type),
static_cast<int>(marking_support));
CHECK_LE(static_cast<int>(config.sweeping_type),
@@ -94,17 +92,16 @@ Heap::~Heap() {
// Gracefully finish already running GC if any, but don't finalize live
// objects.
FinalizeIncrementalGarbageCollectionIfRunning(
- {Config::CollectionType::kMajor,
- Config::StackState::kMayContainHeapPointers,
- Config::MarkingType::kAtomic, Config::SweepingType::kAtomic});
+ {CollectionType::kMajor, StackState::kMayContainHeapPointers,
+ GCConfig::MarkingType::kAtomic, GCConfig::SweepingType::kAtomic});
{
subtle::NoGarbageCollectionScope no_gc(*this);
sweeper_.FinishIfRunning();
}
}
-void Heap::CollectGarbage(Config config) {
- DCHECK_EQ(Config::MarkingType::kAtomic, config.marking_type);
+void Heap::CollectGarbage(GCConfig config) {
+ DCHECK_EQ(GCConfig::MarkingType::kAtomic, config.marking_type);
CheckConfig(config, marking_support_, sweeping_support_);
if (in_no_gc_scope()) return;
@@ -118,9 +115,9 @@ void Heap::CollectGarbage(Config config) {
FinalizeGarbageCollection(config.stack_state);
}
-void Heap::StartIncrementalGarbageCollection(Config config) {
- DCHECK_NE(Config::MarkingType::kAtomic, config.marking_type);
- DCHECK_NE(marking_support_, Config::MarkingType::kAtomic);
+void Heap::StartIncrementalGarbageCollection(GCConfig config) {
+ DCHECK_NE(GCConfig::MarkingType::kAtomic, config.marking_type);
+ DCHECK_NE(marking_support_, GCConfig::MarkingType::kAtomic);
CheckConfig(config, marking_support_, sweeping_support_);
if (IsMarking() || in_no_gc_scope()) return;
@@ -130,19 +127,19 @@ void Heap::StartIncrementalGarbageCollection(Config config) {
StartGarbageCollection(config);
}
-void Heap::FinalizeIncrementalGarbageCollectionIfRunning(Config config) {
+void Heap::FinalizeIncrementalGarbageCollectionIfRunning(GCConfig config) {
CheckConfig(config, marking_support_, sweeping_support_);
if (!IsMarking()) return;
DCHECK(!in_no_gc_scope());
- DCHECK_NE(Config::MarkingType::kAtomic, config_.marking_type);
+ DCHECK_NE(GCConfig::MarkingType::kAtomic, config_.marking_type);
config_ = config;
FinalizeGarbageCollection(config.stack_state);
}
-void Heap::StartGarbageCollection(Config config) {
+void Heap::StartGarbageCollection(GCConfig config) {
DCHECK(!IsMarking());
DCHECK(!in_no_gc_scope());
@@ -152,18 +149,17 @@ void Heap::StartGarbageCollection(Config config) {
epoch_++;
#if defined(CPPGC_YOUNG_GENERATION)
- if (config.collection_type == Config::CollectionType::kMajor)
+ if (config.collection_type == CollectionType::kMajor)
SequentialUnmarker unmarker(raw_heap());
#endif // defined(CPPGC_YOUNG_GENERATION)
- const Marker::MarkingConfig marking_config{
- config.collection_type, config.stack_state, config.marking_type,
- config.is_forced_gc};
+ const MarkingConfig marking_config{config.collection_type, config.stack_state,
+ config.marking_type, config.is_forced_gc};
marker_ = std::make_unique<Marker>(AsBase(), platform_.get(), marking_config);
marker_->StartMarking();
}
-void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
+void Heap::FinalizeGarbageCollection(StackState stack_state) {
DCHECK(IsMarking());
DCHECK(!in_no_gc_scope());
CHECK(!in_disallow_gc_scope());
@@ -203,9 +199,8 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
#endif // defined(CPPGC_YOUNG_GENERATION)
subtle::NoGarbageCollectionScope no_gc(*this);
- const Sweeper::SweepingConfig sweeping_config{
- config_.sweeping_type,
- Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep,
+ const SweepingConfig sweeping_config{
+ config_.sweeping_type, SweepingConfig::CompactableSpaceHandling::kSweep,
config_.free_memory_handling};
sweeper_.Start(sweeping_config);
in_atomic_pause_ = false;
@@ -221,7 +216,7 @@ void Heap::EnableGenerationalGC() {
void Heap::DisableHeapGrowingForTesting() { growing_.DisableForTesting(); }
void Heap::FinalizeIncrementalGarbageCollectionIfNeeded(
- Config::StackState stack_state) {
+ StackState stack_state) {
StatsCollector::EnabledScope stats_scope(
stats_collector(), StatsCollector::kMarkIncrementalFinalize);
FinalizeGarbageCollection(stack_state);
@@ -230,10 +225,9 @@ void Heap::FinalizeIncrementalGarbageCollectionIfNeeded(
void Heap::StartIncrementalGarbageCollectionForTesting() {
DCHECK(!IsMarking());
DCHECK(!in_no_gc_scope());
- StartGarbageCollection({Config::CollectionType::kMajor,
- Config::StackState::kNoHeapPointers,
- Config::MarkingType::kIncrementalAndConcurrent,
- Config::SweepingType::kIncrementalAndConcurrent});
+ StartGarbageCollection({CollectionType::kMajor, StackState::kNoHeapPointers,
+ GCConfig::MarkingType::kIncrementalAndConcurrent,
+ GCConfig::SweepingType::kIncrementalAndConcurrent});
}
void Heap::FinalizeIncrementalGarbageCollectionForTesting(
diff --git a/deps/v8/src/heap/cppgc/heap.h b/deps/v8/src/heap/cppgc/heap.h
index cc027974f8..3a9e09fa5f 100644
--- a/deps/v8/src/heap/cppgc/heap.h
+++ b/deps/v8/src/heap/cppgc/heap.h
@@ -32,9 +32,9 @@ class V8_EXPORT_PRIVATE Heap final : public HeapBase,
HeapBase& AsBase() { return *this; }
const HeapBase& AsBase() const { return *this; }
- void CollectGarbage(Config) final;
- void StartIncrementalGarbageCollection(Config) final;
- void FinalizeIncrementalGarbageCollectionIfRunning(Config);
+ void CollectGarbage(GCConfig) final;
+ void StartIncrementalGarbageCollection(GCConfig) final;
+ void FinalizeIncrementalGarbageCollectionIfRunning(GCConfig);
size_t epoch() const final { return epoch_; }
const EmbedderStackState* override_stack_state() const final {
@@ -46,15 +46,15 @@ class V8_EXPORT_PRIVATE Heap final : public HeapBase,
void DisableHeapGrowingForTesting();
private:
- void StartGarbageCollection(Config);
- void FinalizeGarbageCollection(Config::StackState);
+ void StartGarbageCollection(GCConfig);
+ void FinalizeGarbageCollection(StackState);
- void FinalizeIncrementalGarbageCollectionIfNeeded(Config::StackState) final;
+ void FinalizeIncrementalGarbageCollectionIfNeeded(StackState) final;
void StartIncrementalGarbageCollectionForTesting() final;
void FinalizeIncrementalGarbageCollectionForTesting(EmbedderStackState) final;
- Config config_;
+ GCConfig config_;
GCInvoker gc_invoker_;
HeapGrowing growing_;
bool generational_gc_enabled_ = false;
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index 056f18912e..11197dafb8 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -32,11 +32,10 @@ namespace internal {
namespace {
-bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
- HeapBase& heap) {
- if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
+bool EnterIncrementalMarkingIfNeeded(MarkingConfig config, HeapBase& heap) {
+ if (config.marking_type == MarkingConfig::MarkingType::kIncremental ||
config.marking_type ==
- Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
WriteBarrier::FlagUpdater::Enter();
heap.set_incremental_marking_in_progress(true);
return true;
@@ -44,11 +43,10 @@ bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
return false;
}
-bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
- HeapBase& heap) {
- if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
+bool ExitIncrementalMarkingIfNeeded(MarkingConfig config, HeapBase& heap) {
+ if (config.marking_type == MarkingConfig::MarkingType::kIncremental ||
config.marking_type ==
- Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
WriteBarrier::FlagUpdater::Exit();
heap.set_incremental_marking_in_progress(false);
return true;
@@ -87,7 +85,7 @@ class MarkerBase::IncrementalMarkingTask final : public cppgc::Task {
public:
using Handle = SingleThreadedHandle;
- IncrementalMarkingTask(MarkerBase*, MarkingConfig::StackState);
+ IncrementalMarkingTask(MarkerBase*, StackState);
static Handle Post(cppgc::TaskRunner*, MarkerBase*);
@@ -95,13 +93,13 @@ class MarkerBase::IncrementalMarkingTask final : public cppgc::Task {
void Run() final;
MarkerBase* const marker_;
- MarkingConfig::StackState stack_state_;
+ StackState stack_state_;
// TODO(chromium:1056170): Change to CancelableTask.
Handle handle_;
};
MarkerBase::IncrementalMarkingTask::IncrementalMarkingTask(
- MarkerBase* marker, MarkingConfig::StackState stack_state)
+ MarkerBase* marker, StackState stack_state)
: marker_(marker),
stack_state_(stack_state),
handle_(Handle::NonEmptyTag{}) {}
@@ -117,10 +115,9 @@ MarkerBase::IncrementalMarkingTask::Post(cppgc::TaskRunner* runner,
DCHECK_IMPLIES(marker->heap().stack_support() !=
HeapBase::StackSupport::kSupportsConservativeStackScan,
runner->NonNestableTasksEnabled());
- MarkingConfig::StackState stack_state_for_task =
- runner->NonNestableTasksEnabled()
- ? MarkingConfig::StackState::kNoHeapPointers
- : MarkingConfig::StackState::kMayContainHeapPointers;
+ const auto stack_state_for_task = runner->NonNestableTasksEnabled()
+ ? StackState::kNoHeapPointers
+ : StackState::kMayContainHeapPointers;
auto task =
std::make_unique<IncrementalMarkingTask>(marker, stack_state_for_task);
auto handle = task->handle_;
@@ -152,9 +149,8 @@ MarkerBase::MarkerBase(HeapBase& heap, cppgc::Platform* platform,
foreground_task_runner_(platform_->GetForegroundTaskRunner()),
mutator_marking_state_(heap, marking_worklists_,
heap.compactor().compaction_worklists()) {
- DCHECK_IMPLIES(
- config_.collection_type == MarkingConfig::CollectionType::kMinor,
- heap_.generational_gc_supported());
+ DCHECK_IMPLIES(config_.collection_type == CollectionType::kMinor,
+ heap_.generational_gc_supported());
}
MarkerBase::~MarkerBase() {
@@ -163,7 +159,7 @@ MarkerBase::~MarkerBase() {
// and should thus already be marked.
if (!marking_worklists_.not_fully_constructed_worklist()->IsEmpty()) {
#if DEBUG
- DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state);
+ DCHECK_NE(StackState::kNoHeapPointers, config_.stack_state);
std::unordered_set<HeapObjectHeader*> objects =
mutator_marking_state_.not_fully_constructed_worklist().Extract();
for (HeapObjectHeader* object : objects) DCHECK(object->IsMarked());
@@ -229,7 +225,7 @@ void MarkerBase::StartMarking() {
// Performing incremental or concurrent marking.
schedule_.NotifyIncrementalMarkingStart();
// Scanning the stack is expensive so we only do it at the atomic pause.
- VisitRoots(MarkingConfig::StackState::kNoHeapPointers);
+ VisitRoots(StackState::kNoHeapPointers);
ScheduleIncrementalMarkingTask();
if (config_.marking_type ==
MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
@@ -244,14 +240,14 @@ void MarkerBase::StartMarking() {
}
void MarkerBase::HandleNotFullyConstructedObjects() {
- if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
+ if (config_.stack_state == StackState::kNoHeapPointers) {
mutator_marking_state_.FlushNotFullyConstructedObjects();
} else {
MarkNotFullyConstructedObjects();
}
}
-void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
+void MarkerBase::EnterAtomicPause(StackState stack_state) {
StatsCollector::EnabledScope top_stats_scope(heap().stats_collector(),
StatsCollector::kAtomicMark);
StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
@@ -310,7 +306,7 @@ void MarkerBase::LeaveAtomicPause() {
heap().SetStackStateOfPrevGC(config_.stack_state);
}
-void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
+void MarkerBase::FinishMarking(StackState stack_state) {
DCHECK(is_marking_);
EnterAtomicPause(stack_state);
{
@@ -383,7 +379,7 @@ void MarkerBase::ProcessWeakness() {
#if defined(CPPGC_YOUNG_GENERATION)
if (heap().generational_gc_supported()) {
auto& remembered_set = heap().remembered_set();
- if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
+ if (config_.collection_type == CollectionType::kMinor) {
// Custom callbacks assume that untraced pointers point to not yet freed
// objects. They must make sure that upon callback completion no
// UntracedMember points to a freed object. This may not hold true if a
@@ -425,7 +421,7 @@ void MarkerBase::ProcessWeakness() {
DCHECK(marking_worklists_.marking_worklist()->IsEmpty());
}
-void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
+void MarkerBase::VisitRoots(StackState stack_state) {
StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
StatsCollector::kMarkVisitRoots);
@@ -442,13 +438,13 @@ void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
}
}
- if (stack_state != MarkingConfig::StackState::kNoHeapPointers) {
+ if (stack_state != StackState::kNoHeapPointers) {
StatsCollector::DisabledScope stack_stats_scope(
heap().stats_collector(), StatsCollector::kMarkVisitStack);
heap().stack()->IteratePointers(&stack_visitor());
}
#if defined(CPPGC_YOUNG_GENERATION)
- if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
+ if (config_.collection_type == CollectionType::kMinor) {
StatsCollector::EnabledScope stats_scope(
heap().stats_collector(), StatsCollector::kMarkVisitRememberedSets);
heap().remembered_set().Visit(visitor(), mutator_marking_state_);
@@ -482,13 +478,12 @@ void MarkerBase::ScheduleIncrementalMarkingTask() {
IncrementalMarkingTask::Post(foreground_task_runner_.get(), this);
}
-bool MarkerBase::IncrementalMarkingStepForTesting(
- MarkingConfig::StackState stack_state) {
+bool MarkerBase::IncrementalMarkingStepForTesting(StackState stack_state) {
return IncrementalMarkingStep(stack_state);
}
-bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) {
- if (stack_state == MarkingConfig::StackState::kNoHeapPointers) {
+bool MarkerBase::IncrementalMarkingStep(StackState stack_state) {
+ if (stack_state == StackState::kNoHeapPointers) {
mutator_marking_state_.FlushNotFullyConstructedObjects();
}
config_.stack_state = stack_state;
diff --git a/deps/v8/src/heap/cppgc/marker.h b/deps/v8/src/heap/cppgc/marker.h
index 9c471250ad..7586a43957 100644
--- a/deps/v8/src/heap/cppgc/marker.h
+++ b/deps/v8/src/heap/cppgc/marker.h
@@ -15,6 +15,7 @@
#include "src/heap/base/worklist.h"
#include "src/heap/cppgc/concurrent-marker.h"
#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-config.h"
#include "src/heap/cppgc/incremental-marking-schedule.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
@@ -39,26 +40,6 @@ class V8_EXPORT_PRIVATE MarkerBase {
public:
class IncrementalMarkingTask;
- struct MarkingConfig {
- enum class CollectionType : uint8_t {
- kMinor,
- kMajor,
- };
- using StackState = cppgc::Heap::StackState;
- using MarkingType = cppgc::Heap::MarkingType;
- enum class IsForcedGC : uint8_t {
- kNotForced,
- kForced,
- };
-
- static constexpr MarkingConfig Default() { return {}; }
-
- const CollectionType collection_type = CollectionType::kMajor;
- StackState stack_state = StackState::kMayContainHeapPointers;
- MarkingType marking_type = MarkingType::kIncremental;
- IsForcedGC is_forced_gc = IsForcedGC::kNotForced;
- };
-
enum class WriteBarrierType {
kDijkstra,
kSteele,
@@ -89,7 +70,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
// - stops incremental/concurrent marking;
// - flushes back any in-construction worklists if needed;
// - Updates the MarkingConfig if the stack state has changed;
- void EnterAtomicPause(MarkingConfig::StackState);
+ void EnterAtomicPause(StackState);
// Makes marking progress. A `marked_bytes_limit` of 0 means that the limit
// is determined by the internal marking scheduler.
@@ -113,7 +94,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
// - AdvanceMarkingWithLimits()
// - ProcessWeakness()
// - LeaveAtomicPause()
- void FinishMarking(MarkingConfig::StackState);
+ void FinishMarking(StackState);
void ProcessWeakness();
@@ -134,7 +115,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
void SetMainThreadMarkingDisabledForTesting(bool);
void WaitForConcurrentMarkingForTesting();
void ClearAllWorklistsForTesting();
- bool IncrementalMarkingStepForTesting(MarkingConfig::StackState);
+ bool IncrementalMarkingStepForTesting(StackState);
MarkingWorklists& MarkingWorklistsForTesting() { return marking_worklists_; }
MutatorMarkingState& MutatorMarkingStateForTesting() {
@@ -157,7 +138,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
bool ProcessWorklistsWithDeadline(size_t, v8::base::TimeTicks);
- void VisitRoots(MarkingConfig::StackState);
+ void VisitRoots(StackState);
bool VisitCrossThreadPersistentsIfNeeded();
@@ -165,7 +146,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
void ScheduleIncrementalMarkingTask();
- bool IncrementalMarkingStep(MarkingConfig::StackState);
+ bool IncrementalMarkingStep(StackState);
void AdvanceMarkingOnAllocation();
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.cc b/deps/v8/src/heap/cppgc/marking-verifier.cc
index a64a6d5f25..666e715cd7 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.cc
+++ b/deps/v8/src/heap/cppgc/marking-verifier.cc
@@ -36,7 +36,7 @@ void VerificationState::VerifyMarked(const void* base_object_payload) const {
}
MarkingVerifierBase::MarkingVerifierBase(
- HeapBase& heap, Heap::Config::CollectionType collection_type,
+ HeapBase& heap, CollectionType collection_type,
VerificationState& verification_state,
std::unique_ptr<cppgc::Visitor> visitor)
: ConservativeTracingVisitor(heap, *heap.page_backend(), *visitor.get()),
@@ -45,7 +45,7 @@ MarkingVerifierBase::MarkingVerifierBase(
collection_type_(collection_type) {}
void MarkingVerifierBase::Run(
- Heap::Config::StackState stack_state, uintptr_t stack_end,
+ StackState stack_state, uintptr_t stack_end,
v8::base::Optional<size_t> expected_marked_bytes) {
Traverse(heap_.raw_heap());
// Avoid verifying the stack when running with TSAN as the TSAN runtime changes
@@ -61,7 +61,7 @@ void MarkingVerifierBase::Run(
// TODO(chromium:1325007): Investigate if Oilpan verification can be moved
// before V8 compaction or compaction never runs with stack.
#if !defined(THREAD_SANITIZER) && !defined(CPPGC_POINTER_COMPRESSION)
- if (stack_state == Heap::Config::StackState::kMayContainHeapPointers) {
+ if (stack_state == StackState::kMayContainHeapPointers) {
in_construction_objects_ = &in_construction_objects_stack_;
heap_.stack()->IteratePointersUnsafe(this, stack_end);
// The objects found through the unsafe iteration are only a subset of the
@@ -114,7 +114,7 @@ bool MarkingVerifierBase::VisitHeapObjectHeader(HeapObjectHeader& header) {
DCHECK(!header.IsFree());
#if defined(CPPGC_YOUNG_GENERATION)
- if (collection_type_ == Heap::Config::CollectionType::kMinor) {
+ if (collection_type_ == CollectionType::kMinor) {
auto& caged_heap = CagedHeap::Instance();
const auto age = CagedHeapLocalData::Get().age_table.GetAge(
caged_heap.OffsetFromAddress(header.ObjectStart()));
@@ -185,7 +185,7 @@ class VerificationVisitor final : public cppgc::Visitor {
} // namespace
MarkingVerifier::MarkingVerifier(HeapBase& heap_base,
- Heap::Config::CollectionType collection_type)
+ CollectionType collection_type)
: MarkingVerifierBase(heap_base, collection_type, state_,
std::make_unique<VerificationVisitor>(state_)) {}
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.h b/deps/v8/src/heap/cppgc/marking-verifier.h
index cb2eb4c80c..c966aea51f 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.h
+++ b/deps/v8/src/heap/cppgc/marking-verifier.h
@@ -41,11 +41,11 @@ class V8_EXPORT_PRIVATE MarkingVerifierBase
MarkingVerifierBase(const MarkingVerifierBase&) = delete;
MarkingVerifierBase& operator=(const MarkingVerifierBase&) = delete;
- void Run(Heap::Config::StackState, uintptr_t, v8::base::Optional<size_t>);
+ void Run(StackState, uintptr_t, v8::base::Optional<size_t>);
protected:
- MarkingVerifierBase(HeapBase&, Heap::Config::CollectionType,
- VerificationState&, std::unique_ptr<cppgc::Visitor>);
+ MarkingVerifierBase(HeapBase&, CollectionType, VerificationState&,
+ std::unique_ptr<cppgc::Visitor>);
private:
void VisitInConstructionConservatively(HeapObjectHeader&,
@@ -63,12 +63,12 @@ class V8_EXPORT_PRIVATE MarkingVerifierBase
&in_construction_objects_heap_;
size_t verifier_found_marked_bytes_ = 0;
bool verifier_found_marked_bytes_are_exact_ = true;
- Heap::Config::CollectionType collection_type_;
+ CollectionType collection_type_;
};
class V8_EXPORT_PRIVATE MarkingVerifier final : public MarkingVerifierBase {
public:
- MarkingVerifier(HeapBase&, Heap::Config::CollectionType);
+ MarkingVerifier(HeapBase&, CollectionType);
~MarkingVerifier() final = default;
private:
diff --git a/deps/v8/src/heap/cppgc/member-storage.cc b/deps/v8/src/heap/cppgc/member-storage.cc
index a0e4562472..c457c60ba4 100644
--- a/deps/v8/src/heap/cppgc/member-storage.cc
+++ b/deps/v8/src/heap/cppgc/member-storage.cc
@@ -4,6 +4,11 @@
#include "include/cppgc/internal/member-storage.h"
+#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/member.h"
+#include "src/base/compiler-specific.h"
+#include "src/base/macros.h"
+
namespace cppgc {
namespace internal {
@@ -11,5 +16,26 @@ namespace internal {
uintptr_t CageBaseGlobal::g_base_ = CageBaseGlobal::kLowerHalfWordMask;
#endif // defined(CPPGC_POINTER_COMPRESSION)
+// Debugging helpers.
+
+#if defined(CPPGC_POINTER_COMPRESSION)
+extern "C" V8_DONT_STRIP_SYMBOL V8_EXPORT_PRIVATE void*
+_cppgc_internal_Decompress_Compressed_Pointer(uint32_t cmprsd) {
+ return MemberStorage::Decompress(cmprsd);
+}
+#endif // defined(CPPGC_POINTER_COMPRESSION)
+
+class MemberDebugHelper final {
+ public:
+ static void* PrintUncompressed(MemberBase* m) {
+ return const_cast<void*>(m->GetRaw());
+ }
+};
+
+extern "C" V8_DONT_STRIP_SYMBOL V8_EXPORT_PRIVATE void*
+_cppgc_internal_Print_Member(MemberBase* m) {
+ return MemberDebugHelper::PrintUncompressed(m);
+}
+
} // namespace internal
} // namespace cppgc
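The new extern "C" symbols are debugger entry points: C linkage keeps the names unmangled so they can be invoked by name from gdb/lldb or from ad-hoc test code. A hedged sketch follows; the RawPointerOf wrapper and the gdb expression are illustrative, only the symbol and its signature come from the commit.

    #include "include/cppgc/member.h"

    // Unmangled, so e.g. `call _cppgc_internal_Print_Member(&obj->next_)` works
    // directly from gdb; `obj->next_` is a made-up Member field.
    extern "C" void* _cppgc_internal_Print_Member(cppgc::internal::MemberBase*);

    void* RawPointerOf(cppgc::internal::MemberBase& member) {
      return _cppgc_internal_Print_Member(&member);
    }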
diff --git a/deps/v8/src/heap/cppgc/object-allocator.cc b/deps/v8/src/heap/cppgc/object-allocator.cc
index 38a3ccd8e9..b88ba5c200 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.cc
+++ b/deps/v8/src/heap/cppgc/object-allocator.cc
@@ -148,9 +148,9 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
void* result = TryAllocateLargeObject(page_backend_, large_space,
stats_collector_, size, gcinfo);
if (!result) {
- auto config = GarbageCollector::Config::ConservativeAtomicConfig();
+ auto config = GCConfig::ConservativeAtomicConfig();
config.free_memory_handling =
- GarbageCollector::Config::FreeMemoryHandling::kDiscardWherePossible;
+ GCConfig::FreeMemoryHandling::kDiscardWherePossible;
garbage_collector_.CollectGarbage(config);
result = TryAllocateLargeObject(page_backend_, large_space,
stats_collector_, size, gcinfo);
@@ -170,9 +170,9 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
}
if (!TryRefillLinearAllocationBuffer(space, request_size)) {
- auto config = GarbageCollector::Config::ConservativeAtomicConfig();
+ auto config = GCConfig::ConservativeAtomicConfig();
config.free_memory_handling =
- GarbageCollector::Config::FreeMemoryHandling::kDiscardWherePossible;
+ GCConfig::FreeMemoryHandling::kDiscardWherePossible;
garbage_collector_.CollectGarbage(config);
if (!TryRefillLinearAllocationBuffer(space, request_size)) {
oom_handler_("Oilpan: Normal allocation.");
@@ -187,42 +187,64 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
return result;
}
+bool ObjectAllocator::TryExpandAndRefillLinearAllocationBuffer(
+ NormalPageSpace& space) {
+ auto* const new_page = NormalPage::TryCreate(page_backend_, space);
+ if (!new_page) return false;
+
+ space.AddPage(new_page);
+ // Set linear allocation buffer to new page.
+ ReplaceLinearAllocationBuffer(space, stats_collector_,
+ new_page->PayloadStart(),
+ new_page->PayloadSize());
+ return true;
+}
+
bool ObjectAllocator::TryRefillLinearAllocationBuffer(NormalPageSpace& space,
size_t size) {
// Try to allocate from the freelist.
if (TryRefillLinearAllocationBufferFromFreeList(space, size)) return true;
- // Lazily sweep pages of this heap until we find a freed area for this
- // allocation or we finish sweeping all pages of this heap.
Sweeper& sweeper = raw_heap_.heap()->sweeper();
- // TODO(chromium:1056170): Investigate whether this should be a loop which
- // would result in more aggressive re-use of memory at the expense of
- // potentially larger allocation time.
- if (sweeper.SweepForAllocationIfRunning(&space, size)) {
- // Sweeper found a block of at least `size` bytes. Allocation from the
- // free list may still fail as actual buckets are not exhaustively
- // searched for a suitable block. Instead, buckets are tested from larger
- // sizes that are guaranteed to fit the block to smaller bucket sizes that
- // may only potentially fit the block. For the bucket that may exactly fit
- // the allocation of `size` bytes (no overallocation), only the first
- // entry is checked.
- if (TryRefillLinearAllocationBufferFromFreeList(space, size)) return true;
+ // Lazily sweep pages of this heap. This is not exhaustive to limit jank on
+ // allocation. Allocation from the free list may still fail as actual buckets
+ // are not exhaustively searched for a suitable block. Instead, buckets are
+ // tested from larger sizes that are guaranteed to fit the block to smaller
+ // bucket sizes that may only potentially fit the block. For the bucket that
+ // may exactly fit the allocation of `size` bytes (no overallocation), only
+ // the first entry is checked.
+ if (sweeper.SweepForAllocationIfRunning(
+ &space, size, v8::base::TimeDelta::FromMicroseconds(500)) &&
+ TryRefillLinearAllocationBufferFromFreeList(space, size)) {
+ return true;
}
- sweeper.FinishIfRunning();
- // TODO(chromium:1056170): Make use of the synchronously freed memory.
-
- auto* new_page = NormalPage::TryCreate(page_backend_, space);
- if (!new_page) {
- return false;
+ // Sweeping was off or did not yield in any memory within limited
+ // contributing. We expand at this point as that's cheaper than possibly
+ // continuing sweeping the whole heap.
+ if (TryExpandAndRefillLinearAllocationBuffer(space)) return true;
+
+ // Expansion failed. Before finishing all sweeping, finish sweeping of a given
+ // space which is cheaper.
+ if (sweeper.SweepForAllocationIfRunning(&space, size,
+ v8::base::TimeDelta::Max()) &&
+ TryRefillLinearAllocationBufferFromFreeList(space, size)) {
+ return true;
}
- space.AddPage(new_page);
- // Set linear allocation buffer to new page.
- ReplaceLinearAllocationBuffer(space, stats_collector_,
- new_page->PayloadStart(),
- new_page->PayloadSize());
- return true;
+ // Heap expansion and sweeping of a space failed. At this point the caller
+ // could run OOM or do a full GC which needs to finish sweeping if it's
+ // running. Hence, we may as well finish sweeping here. Note that this is
+ // possibly very expensive but not more expensive than running a full GC as
+ // the alternative is OOM.
+ if (sweeper.FinishIfRunning()) {
+ // Sweeping may have added memory to the free list.
+ if (TryRefillLinearAllocationBufferFromFreeList(space, size)) return true;
+
+ // Sweeping may have freed pages completely.
+ if (TryExpandAndRefillLinearAllocationBuffer(space)) return true;
+ }
+ return false;
}
bool ObjectAllocator::TryRefillLinearAllocationBufferFromFreeList(
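Because the new control flow of TryRefillLinearAllocationBuffer is interleaved with removed lines above, here is a condensed sketch of the resulting fallback order, from cheapest to most expensive step. All calls appear in the hunk; only the summarizing comments are added.

    bool ObjectAllocator::TryRefillLinearAllocationBuffer(NormalPageSpace& space,
                                                          size_t size) {
      // 1. Reuse memory that has already been swept onto the free list.
      if (TryRefillLinearAllocationBufferFromFreeList(space, size)) return true;
      Sweeper& sweeper = raw_heap_.heap()->sweeper();
      // 2. Sweep lazily, but only within a 500us budget to limit jank.
      if (sweeper.SweepForAllocationIfRunning(
              &space, size, v8::base::TimeDelta::FromMicroseconds(500)) &&
          TryRefillLinearAllocationBufferFromFreeList(space, size))
        return true;
      // 3. Growing the heap is cheaper than sweeping it exhaustively.
      if (TryExpandAndRefillLinearAllocationBuffer(space)) return true;
      // 4. Expansion failed: sweep just this space to completion.
      if (sweeper.SweepForAllocationIfRunning(&space, size,
                                              v8::base::TimeDelta::Max()) &&
          TryRefillLinearAllocationBufferFromFreeList(space, size))
        return true;
      // 5. Last resort short of OOM or a full GC: finish sweeping everything,
      //    which may add free-list memory or free whole pages for expansion.
      if (sweeper.FinishIfRunning()) {
        if (TryRefillLinearAllocationBufferFromFreeList(space, size)) return true;
        if (TryExpandAndRefillLinearAllocationBuffer(space)) return true;
      }
      // The caller reacts by forcing a conservative atomic GC that discards
      // free memory where possible, and reports OOM if that also fails.
      return false;
    }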
diff --git a/deps/v8/src/heap/cppgc/object-allocator.h b/deps/v8/src/heap/cppgc/object-allocator.h
index ea01f671f7..77f26ce3b5 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.h
+++ b/deps/v8/src/heap/cppgc/object-allocator.h
@@ -70,6 +70,7 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
bool TryRefillLinearAllocationBuffer(NormalPageSpace&, size_t);
bool TryRefillLinearAllocationBufferFromFreeList(NormalPageSpace&, size_t);
+ bool TryExpandAndRefillLinearAllocationBuffer(NormalPageSpace&);
RawHeap& raw_heap_;
PageBackend& page_backend_;
diff --git a/deps/v8/src/heap/cppgc/remembered-set.cc b/deps/v8/src/heap/cppgc/remembered-set.cc
index 485fb4057f..60e8f978ef 100644
--- a/deps/v8/src/heap/cppgc/remembered-set.cc
+++ b/deps/v8/src/heap/cppgc/remembered-set.cc
@@ -2,15 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if defined(CPPGC_YOUNG_GENERATION)
+
#include "src/heap/cppgc/remembered-set.h"
#include <algorithm>
#include "include/cppgc/member.h"
#include "include/cppgc/visitor.h"
+#include "src/heap/base/basic-slot-set.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/marking-state.h"
namespace cppgc {
@@ -20,23 +24,54 @@ namespace {
enum class SlotType { kCompressed, kUncompressed };
-template <SlotType slot_type>
-void InvalidateRememberedSlots(std::set<void*>& slots, void* begin, void* end) {
+void EraseFromSet(std::set<void*>& set, void* begin, void* end) {
// TODO(1029379): The 2 binary walks can be optimized with a custom algorithm.
- auto from = slots.lower_bound(begin), to = slots.lower_bound(end);
- slots.erase(from, to);
+ auto from = set.lower_bound(begin), to = set.lower_bound(end);
+ set.erase(from, to);
+}
+
+// TODO(1029379): Make the implementation functions private functions of
+// OldToNewRememberedSet to avoid parameter passing.
+void InvalidateCompressedRememberedSlots(
+ const HeapBase& heap, void* begin, void* end,
+ std::set<void*>& remembered_slots_for_verification) {
+ DCHECK_LT(begin, end);
+
+ BasePage* page = BasePage::FromInnerAddress(&heap, begin);
+ DCHECK_NOT_NULL(page);
+ // The input range must reside within the same page.
+ DCHECK_EQ(page, BasePage::FromInnerAddress(
+ &heap, reinterpret_cast<void*>(
+ reinterpret_cast<uintptr_t>(end) - 1)));
+
+ auto* slot_set = page->slot_set();
+ if (!slot_set) return;
+
+ const size_t buckets_size = SlotSet::BucketsForSize(page->AllocatedSize());
+
+ const uintptr_t page_start = reinterpret_cast<uintptr_t>(page);
+ const uintptr_t ubegin = reinterpret_cast<uintptr_t>(begin);
+ const uintptr_t uend = reinterpret_cast<uintptr_t>(end);
+
+ slot_set->RemoveRange(ubegin - page_start, uend - page_start, buckets_size,
+ SlotSet::EmptyBucketMode::FREE_EMPTY_BUCKETS);
+#if DEBUG
+ EraseFromSet(remembered_slots_for_verification, begin, end);
+#endif // DEBUG
+}
+
+void InvalidateUncompressedRememberedSlots(
+ std::set<void*>& slots, void* begin, void* end,
+ std::set<void*>& remembered_slots_for_verification) {
+ EraseFromSet(slots, begin, end);
+#if DEBUG
+ EraseFromSet(remembered_slots_for_verification, begin, end);
+#endif // DEBUG
#if defined(ENABLE_SLOW_DCHECKS)
// Check that no remembered slots are referring to the freed area.
DCHECK(std::none_of(slots.begin(), slots.end(), [begin, end](void* slot) {
void* value = nullptr;
-#if defined(CPPGC_POINTER_COMPRESSION)
- if constexpr (slot_type == SlotType::kCompressed)
- value = CompressedPointer::Decompress(*reinterpret_cast<uint32_t*>(slot));
- else
- value = *reinterpret_cast<void**>(slot);
-#else // !defined(CPPGC_POINTER_COMPRESSION)
value = *reinterpret_cast<void**>(slot);
-#endif // !defined(CPPGC_POINTER_COMPRESSION)
return begin <= value && value < end;
}));
#endif // defined(ENABLE_SLOW_DCHECKS)
@@ -44,45 +79,155 @@ void InvalidateRememberedSlots(std::set<void*>& slots, void* begin, void* end) {
// Visit remembered set that was recorded in the generational barrier.
template <SlotType slot_type>
-void VisitRememberedSlots(const std::set<void*>& slots, const HeapBase& heap,
- MutatorMarkingState& mutator_marking_state) {
- for (void* slot : slots) {
- // Slot must always point to a valid, not freed object.
- auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
- ->ObjectHeaderFromInnerAddress(slot);
- // The age checking in the generational barrier is imprecise, since a card
- // may have mixed young/old objects. Check here precisely if the object is
- // old.
- if (slot_header.IsYoung()) continue;
- // The design of young generation requires collections to be executed at the
- // top level (with the guarantee that no objects are currently being in
- // construction). This can be ensured by running young GCs from safe points
- // or by reintroducing nested allocation scopes that avoid finalization.
- DCHECK(!slot_header.template IsInConstruction<AccessMode::kNonAtomic>());
+void VisitSlot(const HeapBase& heap, const BasePage& page, Address slot,
+ MutatorMarkingState& marking_state,
+ const std::set<void*>& slots_for_verification) {
+#if defined(DEBUG)
+ DCHECK_EQ(BasePage::FromInnerAddress(&heap, slot), &page);
+ DCHECK_NE(slots_for_verification.end(), slots_for_verification.find(slot));
+#endif // defined(DEBUG)
+
+ // Slot must always point to a valid, not freed object.
+ auto& slot_header = page.ObjectHeaderFromInnerAddress(slot);
+ // The age checking in the generational barrier is imprecise, since a card
+ // may have mixed young/old objects. Check here precisely if the object is
+ // old.
+ if (slot_header.IsYoung()) return;
+ // The design of the young generation requires collections to be executed
+ // at the top level (with the guarantee that no objects are currently in
+ // construction). This can be ensured by running young GCs from safe points
+ // or by reintroducing nested allocation scopes that avoid finalization.
+ DCHECK(!slot_header.template IsInConstruction<AccessMode::kNonAtomic>());
#if defined(CPPGC_POINTER_COMPRESSION)
- void* value = nullptr;
- if constexpr (slot_type == SlotType::kCompressed) {
- value = CompressedPointer::Decompress(*reinterpret_cast<uint32_t*>(slot));
- } else {
- value = *reinterpret_cast<void**>(slot);
- }
+ void* value = nullptr;
+ if constexpr (slot_type == SlotType::kCompressed) {
+ value = CompressedPointer::Decompress(*reinterpret_cast<uint32_t*>(slot));
+ } else {
+ value = *reinterpret_cast<void**>(slot);
+ }
#else // !defined(CPPGC_POINTER_COMPRESSION)
- void* value = *reinterpret_cast<void**>(slot);
+ void* value = *reinterpret_cast<void**>(slot);
#endif // !defined(CPPGC_POINTER_COMPRESSION)
- // Slot could be updated to nullptr or kSentinelPointer by the mutator.
- if (value == kSentinelPointer || value == nullptr) continue;
+ // Slot could be updated to nullptr or kSentinelPointer by the mutator.
+ if (value == kSentinelPointer || value == nullptr) return;
-#if DEBUG
- // Check that the slot can not point to a freed object.
- HeapObjectHeader& header =
- BasePage::FromPayload(value)->ObjectHeaderFromInnerAddress(value);
- DCHECK(!header.IsFree());
-#endif
+#if defined(DEBUG)
+ // Check that the slot can not point to a freed object.
+ HeapObjectHeader& header =
+ BasePage::FromPayload(value)->ObjectHeaderFromInnerAddress(value);
+ DCHECK(!header.IsFree());
+#endif // defined(DEBUG)
+
+ marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
+}
+
+class CompressedSlotVisitor : HeapVisitor<CompressedSlotVisitor> {
+ friend class HeapVisitor<CompressedSlotVisitor>;
+
+ public:
+ CompressedSlotVisitor(HeapBase& heap, MutatorMarkingState& marking_state,
+ const std::set<void*>& slots_for_verification)
+ : heap_(heap),
+ marking_state_(marking_state),
+ remembered_slots_for_verification_(slots_for_verification) {}
- mutator_marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
+ size_t Run() {
+ Traverse(heap_.raw_heap());
+ return objects_visited_;
}
+
+ private:
+ heap::base::SlotCallbackResult VisitCompressedSlot(Address slot) {
+ DCHECK(current_page_);
+ VisitSlot<SlotType::kCompressed>(heap_, *current_page_, slot,
+ marking_state_,
+ remembered_slots_for_verification_);
+ ++objects_visited_;
+ return heap::base::KEEP_SLOT;
+ }
+
+ void VisitSlotSet(SlotSet* slot_set) {
+ DCHECK(current_page_);
+
+ if (!slot_set) return;
+
+ const uintptr_t page_start = reinterpret_cast<uintptr_t>(current_page_);
+ const size_t buckets_size =
+ SlotSet::BucketsForSize(current_page_->AllocatedSize());
+
+ slot_set->Iterate(
+ page_start, 0, buckets_size,
+ [this](SlotSet::Address slot) {
+ return VisitCompressedSlot(reinterpret_cast<Address>(slot));
+ },
+ SlotSet::EmptyBucketMode::FREE_EMPTY_BUCKETS);
+ }
+
+ bool VisitNormalPage(NormalPage& page) {
+ current_page_ = &page;
+ VisitSlotSet(page.slot_set());
+ return true;
+ }
+
+ bool VisitLargePage(LargePage& page) {
+ current_page_ = &page;
+ VisitSlotSet(page.slot_set());
+ return true;
+ }
+
+ HeapBase& heap_;
+ MutatorMarkingState& marking_state_;
+ BasePage* current_page_ = nullptr;
+
+ const std::set<void*>& remembered_slots_for_verification_;
+ size_t objects_visited_ = 0u;
+};
+
+class SlotRemover : HeapVisitor<SlotRemover> {
+ friend class HeapVisitor<SlotRemover>;
+
+ public:
+ explicit SlotRemover(HeapBase& heap) : heap_(heap) {}
+
+ void Run() { Traverse(heap_.raw_heap()); }
+
+ private:
+ bool VisitNormalPage(NormalPage& page) {
+ page.ResetSlotSet();
+ return true;
+ }
+
+ bool VisitLargePage(LargePage& page) {
+ page.ResetSlotSet();
+ return true;
+ }
+
+ HeapBase& heap_;
+};
+
+// Visit remembered set that was recorded in the generational barrier.
+void VisitRememberedSlots(
+ HeapBase& heap, MutatorMarkingState& mutator_marking_state,
+ const std::set<void*>& remembered_uncompressed_slots,
+ const std::set<void*>& remembered_slots_for_verification) {
+ size_t objects_visited = 0;
+ {
+ CompressedSlotVisitor slot_visitor(heap, mutator_marking_state,
+ remembered_slots_for_verification);
+ objects_visited += slot_visitor.Run();
+ }
+ for (void* uncompressed_slot : remembered_uncompressed_slots) {
+ auto* page = BasePage::FromInnerAddress(&heap, uncompressed_slot);
+ DCHECK(page);
+ VisitSlot<SlotType::kUncompressed>(
+ heap, *page, static_cast<Address>(uncompressed_slot),
+ mutator_marking_state, remembered_slots_for_verification);
+ ++objects_visited;
+ }
+ DCHECK_EQ(remembered_slots_for_verification.size(), objects_visited);
+ USE(objects_visited);
}
// Visits source objects that were recorded in the generational barrier for
@@ -114,12 +259,29 @@ void VisitRememberedSourceObjects(
void OldToNewRememberedSet::AddSlot(void* slot) {
DCHECK(heap_.generational_gc_supported());
- remembered_slots_.insert(slot);
+
+ BasePage* source_page = BasePage::FromInnerAddress(&heap_, slot);
+ DCHECK(source_page);
+
+ auto& slot_set = source_page->GetOrAllocateSlotSet();
+
+ const uintptr_t slot_offset = reinterpret_cast<uintptr_t>(slot) -
+ reinterpret_cast<uintptr_t>(source_page);
+
+ slot_set.Insert<SlotSet::AccessMode::NON_ATOMIC>(
+ static_cast<size_t>(slot_offset));
+
+#if defined(DEBUG)
+ remembered_slots_for_verification_.insert(slot);
+#endif // defined(DEBUG)
}
void OldToNewRememberedSet::AddUncompressedSlot(void* uncompressed_slot) {
DCHECK(heap_.generational_gc_supported());
remembered_uncompressed_slots_.insert(uncompressed_slot);
+#if defined(DEBUG)
+ remembered_slots_for_verification_.insert(uncompressed_slot);
+#endif // defined(DEBUG)
}
void OldToNewRememberedSet::AddSourceObject(HeapObjectHeader& hoh) {
@@ -138,10 +300,11 @@ void OldToNewRememberedSet::AddWeakCallback(WeakCallbackItem item) {
void OldToNewRememberedSet::InvalidateRememberedSlotsInRange(void* begin,
void* end) {
DCHECK(heap_.generational_gc_supported());
- InvalidateRememberedSlots<SlotType::kCompressed>(remembered_slots_, begin,
- end);
- InvalidateRememberedSlots<SlotType::kUncompressed>(
- remembered_uncompressed_slots_, begin, end);
+ InvalidateCompressedRememberedSlots(heap_, begin, end,
+ remembered_slots_for_verification_);
+ InvalidateUncompressedRememberedSlots(remembered_uncompressed_slots_, begin,
+ end,
+ remembered_slots_for_verification_);
}
void OldToNewRememberedSet::InvalidateRememberedSourceObject(
@@ -153,10 +316,8 @@ void OldToNewRememberedSet::InvalidateRememberedSourceObject(
void OldToNewRememberedSet::Visit(Visitor& visitor,
MutatorMarkingState& marking_state) {
DCHECK(heap_.generational_gc_supported());
- VisitRememberedSlots<SlotType::kCompressed>(remembered_slots_, heap_,
- marking_state);
- VisitRememberedSlots<SlotType::kUncompressed>(remembered_uncompressed_slots_,
- heap_, marking_state);
+ VisitRememberedSlots(heap_, marking_state, remembered_uncompressed_slots_,
+ remembered_slots_for_verification_);
VisitRememberedSourceObjects(remembered_source_objects_, visitor);
}
@@ -174,16 +335,23 @@ void OldToNewRememberedSet::ReleaseCustomCallbacks() {
void OldToNewRememberedSet::Reset() {
DCHECK(heap_.generational_gc_supported());
- remembered_slots_.clear();
+ SlotRemover slot_remover(heap_);
+ slot_remover.Run();
remembered_uncompressed_slots_.clear();
remembered_source_objects_.clear();
+#if DEBUG
+ remembered_slots_for_verification_.clear();
+#endif // DEBUG
}
bool OldToNewRememberedSet::IsEmpty() const {
- return remembered_slots_.empty() && remembered_uncompressed_slots_.empty() &&
+ // TODO(1029379): Add visitor to check if empty.
+ return remembered_uncompressed_slots_.empty() &&
remembered_source_objects_.empty() &&
remembered_weak_callbacks_.empty();
}
} // namespace internal
} // namespace cppgc
+
+#endif // defined(CPPGC_YOUNG_GENERATION)
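Note: the reworked remembered set above stores compressed slots as page-relative offsets in a per-page SlotSet instead of a single global std::set. The sketch below models only the offset arithmetic used by AddSlot, InvalidateCompressedRememberedSlots, and the slot visitor; a plain std::set stands in for the BasicSlotSet bitmap, so it is a behavioural approximation, not the real data structure.

// remembered_set_sketch.cc -- simplified model, not the BasicSlotSet bitmap.
#include <cstdint>
#include <cstdio>
#include <set>

struct PageModel {
  std::uintptr_t start;                // page base address
  std::set<std::size_t> slot_offsets;  // stand-in for the per-page SlotSet

  // Mirrors AddSlot: record the slot as an offset from the page start.
  void AddSlot(void* slot) {
    slot_offsets.insert(reinterpret_cast<std::uintptr_t>(slot) - start);
  }
  // Mirrors SlotSet::RemoveRange over [begin, end).
  void InvalidateRange(void* begin, void* end) {
    const auto from = slot_offsets.lower_bound(
        reinterpret_cast<std::uintptr_t>(begin) - start);
    const auto to = slot_offsets.lower_bound(
        reinterpret_cast<std::uintptr_t>(end) - start);
    slot_offsets.erase(from, to);
  }
  // Mirrors SlotSet::Iterate: reconstructs absolute slot addresses.
  template <typename Callback>
  void VisitSlots(Callback callback) const {
    for (std::size_t offset : slot_offsets)
      callback(reinterpret_cast<void*>(start + offset));
  }
};

int main() {
  alignas(8) static char fake_page[128];
  PageModel page{reinterpret_cast<std::uintptr_t>(fake_page), {}};
  page.AddSlot(fake_page + 16);
  page.AddSlot(fake_page + 48);
  page.InvalidateRange(fake_page + 40, fake_page + 64);  // drops offset 48
  page.VisitSlots([](void* slot) { std::printf("slot %p\n", slot); });
}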
diff --git a/deps/v8/src/heap/cppgc/remembered-set.h b/deps/v8/src/heap/cppgc/remembered-set.h
index 24e460d438..086ba62289 100644
--- a/deps/v8/src/heap/cppgc/remembered-set.h
+++ b/deps/v8/src/heap/cppgc/remembered-set.h
@@ -5,9 +5,12 @@
#ifndef V8_HEAP_CPPGC_REMEMBERED_SET_H_
#define V8_HEAP_CPPGC_REMEMBERED_SET_H_
+#if defined(CPPGC_YOUNG_GENERATION)
+
#include <set>
#include "src/base/macros.h"
+#include "src/heap/base/basic-slot-set.h"
#include "src/heap/cppgc/marking-worklists.h"
namespace cppgc {
@@ -21,11 +24,14 @@ class HeapBase;
class HeapObjectHeader;
class MutatorMarkingState;
+class SlotSet : public ::heap::base::BasicSlotSet<kSlotSize> {};
+
+// OldToNewRememberedSet represents a per-heap set of old-to-new references.
class V8_EXPORT_PRIVATE OldToNewRememberedSet final {
public:
using WeakCallbackItem = MarkingWorklists::WeakCallbackItem;
- explicit OldToNewRememberedSet(const HeapBase& heap)
+ explicit OldToNewRememberedSet(HeapBase& heap)
: heap_(heap), remembered_weak_callbacks_(compare_parameter) {}
OldToNewRememberedSet(const OldToNewRememberedSet&) = delete;
@@ -58,15 +64,19 @@ class V8_EXPORT_PRIVATE OldToNewRememberedSet final {
}
} compare_parameter{};
- const HeapBase& heap_;
- std::set<void*> remembered_slots_;
- std::set<void*> remembered_uncompressed_slots_;
+ HeapBase& heap_;
std::set<HeapObjectHeader*> remembered_source_objects_;
std::set<WeakCallbackItem, decltype(compare_parameter)>
remembered_weak_callbacks_;
+ // Compressed slots are stored in slot-sets (per-page two-level bitmaps),
+ // whereas uncompressed ones are stored in a std::set.
+ std::set<void*> remembered_uncompressed_slots_;
+ std::set<void*> remembered_slots_for_verification_;
};
} // namespace internal
} // namespace cppgc
+#endif // defined(CPPGC_YOUNG_GENERATION)
+
#endif // V8_HEAP_CPPGC_REMEMBERED_SET_H_
diff --git a/deps/v8/src/heap/cppgc/stats-collector.cc b/deps/v8/src/heap/cppgc/stats-collector.cc
index ca01122208..f65309b6f4 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.cc
+++ b/deps/v8/src/heap/cppgc/stats-collector.cc
@@ -171,8 +171,7 @@ int64_t SumPhases(const MetricRecorder::GCCycle::Phases& phases) {
}
MetricRecorder::GCCycle GetCycleEventForMetricRecorder(
- StatsCollector::CollectionType type,
- StatsCollector::MarkingType marking_type,
+ CollectionType type, StatsCollector::MarkingType marking_type,
StatsCollector::SweepingType sweeping_type, int64_t atomic_mark_us,
int64_t atomic_weak_us, int64_t atomic_compact_us, int64_t atomic_sweep_us,
int64_t incremental_mark_us, int64_t incremental_sweep_us,
@@ -181,7 +180,7 @@ MetricRecorder::GCCycle GetCycleEventForMetricRecorder(
int64_t objects_freed_bytes, int64_t memory_before_bytes,
int64_t memory_after_bytes, int64_t memory_freed_bytes) {
MetricRecorder::GCCycle event;
- event.type = (type == StatsCollector::CollectionType::kMajor)
+ event.type = (type == CollectionType::kMajor)
? MetricRecorder::GCCycle::Type::kMajor
: MetricRecorder::GCCycle::Type::kMinor;
// MainThread.Incremental:
diff --git a/deps/v8/src/heap/cppgc/stats-collector.h b/deps/v8/src/heap/cppgc/stats-collector.h
index c78db86acf..ff040a3dcc 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.h
+++ b/deps/v8/src/heap/cppgc/stats-collector.h
@@ -33,6 +33,7 @@ namespace internal {
V(IncrementalSweep)
#define CPPGC_FOR_ALL_SCOPES(V) \
+ V(Unmark) \
V(MarkIncrementalStart) \
V(MarkIncrementalFinalize) \
V(MarkAtomicPrologue) \
@@ -52,9 +53,10 @@ namespace internal {
V(MarkVisitCrossThreadPersistents) \
V(MarkVisitStack) \
V(MarkVisitRememberedSets) \
+ V(SweepFinishIfOutOfWork) \
V(SweepInvokePreFinalizers) \
- V(SweepIdleStep) \
V(SweepInTask) \
+ V(SweepInTaskForStatistics) \
V(SweepOnAllocation) \
V(SweepFinalize)
@@ -67,12 +69,11 @@ namespace internal {
// Sink for various time and memory statistics.
class V8_EXPORT_PRIVATE StatsCollector final {
- using IsForcedGC = GarbageCollector::Config::IsForcedGC;
+ using IsForcedGC = GCConfig::IsForcedGC;
public:
- using CollectionType = GarbageCollector::Config::CollectionType;
- using MarkingType = GarbageCollector::Config::MarkingType;
- using SweepingType = GarbageCollector::Config::SweepingType;
+ using MarkingType = GCConfig::MarkingType;
+ using SweepingType = GCConfig::SweepingType;
#if defined(CPPGC_DECLARE_ENUM)
static_assert(false, "CPPGC_DECLARE_ENUM macro is already defined");
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index 23e684ed4d..3cb96f8baa 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -11,6 +11,7 @@
#include "include/cppgc/platform.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
+#include "src/base/platform/time.h"
#include "src/heap/cppgc/free-list.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-base.h"
@@ -25,13 +26,41 @@
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/task-handle.h"
-namespace cppgc {
-namespace internal {
+namespace cppgc::internal {
namespace {
+class DeadlineChecker final {
+ public:
+ explicit DeadlineChecker(v8::base::TimeTicks end) : end_(end) {}
+
+ bool Check() {
+ return (++count_ % kInterval == 0) && (end_ < v8::base::TimeTicks::Now());
+ }
+
+ private:
+ static constexpr size_t kInterval = 4;
+
+ const v8::base::TimeTicks end_;
+ size_t count_ = 0;
+};
+
using v8::base::Optional;
+enum class MutatorThreadSweepingMode {
+ kOnlyFinalizers,
+ kAll,
+};
+
+constexpr const char* ToString(MutatorThreadSweepingMode sweeping_mode) {
+ switch (sweeping_mode) {
+ case MutatorThreadSweepingMode::kAll:
+ return "all";
+ case MutatorThreadSweepingMode::kOnlyFinalizers:
+ return "only-finalizers";
+ }
+}
+
enum class StickyBits : uint8_t {
kDisabled,
kEnabled,
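Note: DeadlineChecker above amortizes the cost of reading the clock by only consulting it on every kInterval-th call, which makes it cheap enough to invoke once per swept or finalized page. The sketch below reproduces the idea with std::chrono in place of v8::base::TimeTicks.

// deadline_checker_sketch.cc -- same idea as DeadlineChecker above, using
// std::chrono rather than v8::base::TimeTicks.
#include <chrono>
#include <cstddef>
#include <cstdio>

class DeadlineChecker {
 public:
  explicit DeadlineChecker(std::chrono::steady_clock::time_point end)
      : end_(end) {}

  // Only every kInterval-th call actually reads the clock.
  bool Check() {
    return (++count_ % kInterval == 0) &&
           (end_ < std::chrono::steady_clock::now());
  }

 private:
  static constexpr std::size_t kInterval = 4;
  const std::chrono::steady_clock::time_point end_;
  std::size_t count_ = 0;
};

int main() {
  DeadlineChecker deadline(std::chrono::steady_clock::now() +
                           std::chrono::milliseconds(5));
  std::size_t iterations = 0;
  while (!deadline.Check()) ++iterations;  // stand-in for per-page work
  std::printf("ran %zu iterations before the deadline\n", iterations);
}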
@@ -220,6 +249,9 @@ class InlinedFinalizationBuilderBase {
bool is_empty = false;
size_t largest_new_free_list_entry = 0;
};
+
+ protected:
+ ResultType result_;
};
// Builder that finalizes objects and adds freelist entries right away.
@@ -238,10 +270,13 @@ class InlinedFinalizationBuilder final : public InlinedFinalizationBuilderBase,
void AddFreeListEntry(Address start, size_t size) {
FreeHandler::Free({start, size});
+ result_.largest_new_free_list_entry =
+ std::max(result_.largest_new_free_list_entry, size);
}
- ResultType GetResult(bool is_empty, size_t largest_new_free_list_entry) {
- return {is_empty, largest_new_free_list_entry};
+ ResultType&& GetResult(bool is_empty) {
+ result_.is_empty = is_empty;
+ return std::move(result_);
}
};
@@ -282,12 +317,13 @@ class DeferredFinalizationBuilder final : public FreeHandler {
} else {
FreeHandler::Free({start, size});
}
+ result_.largest_new_free_list_entry =
+ std::max(result_.largest_new_free_list_entry, size);
found_finalizer_ = false;
}
- ResultType&& GetResult(bool is_empty, size_t largest_new_free_list_entry) {
+ ResultType&& GetResult(bool is_empty) {
result_.is_empty = is_empty;
- result_.largest_new_free_list_entry = largest_new_free_list_entry;
return std::move(result_);
}
@@ -305,7 +341,6 @@ typename FinalizationBuilder::ResultType SweepNormalPage(
PlatformAwareObjectStartBitmap& bitmap = page->object_start_bitmap();
- size_t largest_new_free_list_entry = 0;
size_t live_bytes = 0;
Address start_of_gap = page->PayloadStart();
@@ -346,12 +381,10 @@ typename FinalizationBuilder::ResultType SweepNormalPage(
// The object is alive.
const Address header_address = reinterpret_cast<Address>(header);
if (start_of_gap != header_address) {
- size_t new_free_list_entry_size =
+ const size_t new_free_list_entry_size =
static_cast<size_t>(header_address - start_of_gap);
builder.AddFreeListEntry(start_of_gap, new_free_list_entry_size);
DCHECK(bitmap.CheckBit<AccessMode::kAtomic>(start_of_gap));
- largest_new_free_list_entry =
- std::max(largest_new_free_list_entry, new_free_list_entry_size);
}
StickyUnmark(header, sticky_bits);
begin += size;
@@ -368,7 +401,7 @@ typename FinalizationBuilder::ResultType SweepNormalPage(
page->SetAllocatedBytesAtLastGC(live_bytes);
const bool is_empty = (start_of_gap == page->PayloadStart());
- return builder.GetResult(is_empty, largest_new_free_list_entry);
+ return builder.GetResult(is_empty);
}
// SweepFinalizer is responsible for heap/space/page finalization. Finalization
@@ -377,7 +410,7 @@ typename FinalizationBuilder::ResultType SweepNormalPage(
// - returns (unmaps) empty pages;
// - merges freelists to the space's freelist.
class SweepFinalizer final {
- using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
+ using FreeMemoryHandling = SweepingConfig::FreeMemoryHandling;
public:
SweepFinalizer(cppgc::Platform* platform,
@@ -397,20 +430,13 @@ class SweepFinalizer final {
}
bool FinalizeSpaceWithDeadline(SpaceState* space_state,
- double deadline_in_seconds) {
+ v8::base::TimeTicks deadline) {
DCHECK(platform_);
- static constexpr size_t kDeadlineCheckInterval = 8;
- size_t page_count = 1;
-
+ DeadlineChecker deadline_check(deadline);
while (auto page_state = space_state->swept_unfinalized_pages.Pop()) {
FinalizePage(&*page_state);
- if (page_count % kDeadlineCheckInterval == 0 &&
- deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
- return false;
- }
-
- page_count++;
+ if (deadline_check.Check()) return false;
}
return true;
@@ -488,7 +514,7 @@ class SweepFinalizer final {
class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
friend class HeapVisitor<MutatorThreadSweeper>;
- using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
+ using FreeMemoryHandling = SweepingConfig::FreeMemoryHandling;
public:
MutatorThreadSweeper(HeapBase* heap, SpaceStates* states,
@@ -511,25 +537,23 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
void SweepPage(BasePage& page) { Traverse(page); }
- bool SweepWithDeadline(double deadline_in_seconds) {
+ bool SweepWithDeadline(v8::base::TimeDelta max_duration,
+ MutatorThreadSweepingMode sweeping_mode) {
DCHECK(platform_);
- static constexpr double kSlackInSeconds = 0.001;
for (SpaceState& state : *states_) {
- // FinalizeSpaceWithDeadline() and SweepSpaceWithDeadline() won't check
- // the deadline until it sweeps 10 pages. So we give a small slack for
- // safety.
- const double remaining_budget = deadline_in_seconds - kSlackInSeconds -
- platform_->MonotonicallyIncreasingTime();
- if (remaining_budget <= 0.) return false;
+ const auto deadline = v8::base::TimeTicks::Now() + max_duration;
// First, prioritize finalization of pages that were swept concurrently.
SweepFinalizer finalizer(platform_, free_memory_handling_);
- if (!finalizer.FinalizeSpaceWithDeadline(&state, deadline_in_seconds)) {
+ if (!finalizer.FinalizeSpaceWithDeadline(&state, deadline)) {
return false;
}
+ if (sweeping_mode == MutatorThreadSweepingMode::kOnlyFinalizers)
+ return false;
+
// Help out the concurrent sweeper.
- if (!SweepSpaceWithDeadline(&state, deadline_in_seconds)) {
+ if (!SweepSpaceWithDeadline(&state, deadline)) {
return false;
}
}
@@ -541,16 +565,11 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
}
private:
- bool SweepSpaceWithDeadline(SpaceState* state, double deadline_in_seconds) {
- static constexpr size_t kDeadlineCheckInterval = 8;
- size_t page_count = 1;
+ bool SweepSpaceWithDeadline(SpaceState* state, v8::base::TimeTicks deadline) {
+ DeadlineChecker deadline_check(deadline);
while (auto page = state->unswept_pages.Pop()) {
Traverse(**page);
- if (page_count % kDeadlineCheckInterval == 0 &&
- deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
- return false;
- }
- page_count++;
+ if (deadline_check.Check()) return false;
}
return true;
@@ -603,7 +622,7 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
private HeapVisitor<ConcurrentSweepTask> {
friend class HeapVisitor<ConcurrentSweepTask>;
- using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
+ using FreeMemoryHandling = SweepingConfig::FreeMemoryHandling;
public:
ConcurrentSweepTask(HeapBase& heap, SpaceStates* states, Platform* platform,
@@ -693,8 +712,7 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
class PrepareForSweepVisitor final
: protected HeapVisitor<PrepareForSweepVisitor> {
friend class HeapVisitor<PrepareForSweepVisitor>;
- using CompactableSpaceHandling =
- Sweeper::SweepingConfig::CompactableSpaceHandling;
+ using CompactableSpaceHandling = SweepingConfig::CompactableSpaceHandling;
public:
PrepareForSweepVisitor(SpaceStates* states,
@@ -746,7 +764,7 @@ class PrepareForSweepVisitor final
} // namespace
class Sweeper::SweeperImpl final {
- using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
+ using FreeMemoryHandling = SweepingConfig::FreeMemoryHandling;
public:
SweeperImpl(RawHeap& heap, StatsCollector* stats_collector)
@@ -787,7 +805,8 @@ class Sweeper::SweeperImpl final {
}
}
- bool SweepForAllocationIfRunning(NormalPageSpace* space, size_t size) {
+ bool SweepForAllocationIfRunning(NormalPageSpace* space, size_t size,
+ v8::base::TimeDelta max_duration) {
if (!is_in_progress_) return false;
// Bail out for recursive sweeping calls. This can happen when finalizers
@@ -808,14 +827,19 @@ class Sweeper::SweeperImpl final {
StatsCollector::EnabledScope inner_scope(
stats_collector_, StatsCollector::kSweepOnAllocation);
MutatorThreadSweepingScope sweeping_in_progress(*this);
-
+ DeadlineChecker deadline_check(v8::base::TimeTicks::Now() + max_duration);
{
// First, process unfinalized pages as finalizing a page is faster than
// sweeping.
SweepFinalizer finalizer(platform_, config_.free_memory_handling);
while (auto page = space_state.swept_unfinalized_pages.Pop()) {
finalizer.FinalizePage(&*page);
- if (size <= finalizer.largest_new_free_list_entry()) return true;
+ if (size <= finalizer.largest_new_free_list_entry()) {
+ return true;
+ }
+ if (deadline_check.Check()) {
+ return false;
+ }
}
}
{
@@ -825,19 +849,24 @@ class Sweeper::SweeperImpl final {
config_.free_memory_handling);
while (auto page = space_state.unswept_pages.Pop()) {
sweeper.SweepPage(**page);
- if (size <= sweeper.largest_new_free_list_entry()) return true;
+ if (size <= sweeper.largest_new_free_list_entry()) {
+ return true;
+ }
+ if (deadline_check.Check()) {
+ return false;
+ }
}
}
return false;
}
- void FinishIfRunning() {
- if (!is_in_progress_) return;
+ bool FinishIfRunning() {
+ if (!is_in_progress_) return false;
// Bail out for recursive sweeping calls. This can happen when finalizers
// allocate new memory.
- if (is_sweeping_on_mutator_thread_) return;
+ if (is_sweeping_on_mutator_thread_) return false;
{
StatsCollector::EnabledScope stats_scope(
@@ -852,12 +881,22 @@ class Sweeper::SweeperImpl final {
Finish();
}
NotifyDone();
+ return true;
+ }
+
+ bool IsConcurrentSweepingDone() const {
+ return !concurrent_sweeper_handle_ ||
+ (concurrent_sweeper_handle_->IsValid() &&
+ !concurrent_sweeper_handle_->IsActive());
}
void FinishIfOutOfWork() {
if (is_in_progress_ && !is_sweeping_on_mutator_thread_ &&
concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid() &&
!concurrent_sweeper_handle_->IsActive()) {
+ StatsCollector::EnabledScope stats_scope(
+ stats_collector_, StatsCollector::kSweepFinishIfOutOfWork);
+ MutatorThreadSweepingScope sweeping_in_progress(*this);
// At this point we know that the concurrent sweeping task has run
// out-of-work: all pages are swept. The main thread still needs to finish
// sweeping though.
@@ -865,8 +904,18 @@ class Sweeper::SweeperImpl final {
[](const SpaceState& state) {
return state.unswept_pages.IsEmpty();
}));
- FinishIfRunning();
+
+ // There may be unfinalized pages left. Since it's hard to estimate
+ // the actual amount of sweeping necessary, we sweep with a small
+ // deadline to see if sweeping can be fully finished.
+ MutatorThreadSweeper sweeper(heap_.heap(), &space_states_, platform_,
+ config_.free_memory_handling);
+ if (sweeper.SweepWithDeadline(v8::base::TimeDelta::FromMilliseconds(2),
+ MutatorThreadSweepingMode::kAll)) {
+ FinalizeSweep();
+ }
}
+ NotifyDoneIfNeeded();
}
void Finish() {
@@ -920,8 +969,9 @@ class Sweeper::SweeperImpl final {
bool IsSweepingInProgress() const { return is_in_progress_; }
- bool PerformSweepOnMutatorThread(double deadline_in_seconds,
- StatsCollector::ScopeId internal_scope_id) {
+ bool PerformSweepOnMutatorThread(v8::base::TimeDelta max_duration,
+ StatsCollector::ScopeId internal_scope_id,
+ MutatorThreadSweepingMode sweeping_mode) {
if (!is_in_progress_) return true;
MutatorThreadSweepingScope sweeping_in_progress(*this);
@@ -935,10 +985,10 @@ class Sweeper::SweeperImpl final {
config_.free_memory_handling);
{
StatsCollector::EnabledScope inner_stats_scope(
- stats_collector_, internal_scope_id, "deltaInSeconds",
- deadline_in_seconds - platform_->MonotonicallyIncreasingTime());
-
- sweep_complete = sweeper.SweepWithDeadline(deadline_in_seconds);
+ stats_collector_, internal_scope_id, "max_duration_ms",
+ max_duration.InMillisecondsF(), "sweeping_mode",
+ ToString(sweeping_mode));
+ sweep_complete = sweeper.SweepWithDeadline(max_duration, sweeping_mode);
}
if (sweep_complete) {
FinalizeSweep();
@@ -948,6 +998,23 @@ class Sweeper::SweeperImpl final {
return sweep_complete;
}
+ void AddMutatorThreadSweepingObserver(
+ Sweeper::SweepingOnMutatorThreadObserver* observer) {
+ DCHECK_EQ(mutator_thread_sweeping_observers_.end(),
+ std::find(mutator_thread_sweeping_observers_.begin(),
+ mutator_thread_sweeping_observers_.end(), observer));
+ mutator_thread_sweeping_observers_.push_back(observer);
+ }
+
+ void RemoveMutatorThreadSweepingObserver(
+ Sweeper::SweepingOnMutatorThreadObserver* observer) {
+ const auto it =
+ std::find(mutator_thread_sweeping_observers_.begin(),
+ mutator_thread_sweeping_observers_.end(), observer);
+ DCHECK_NE(mutator_thread_sweeping_observers_.end(), it);
+ mutator_thread_sweeping_observers_.erase(it);
+ }
+
private:
class MutatorThreadSweepingScope final {
public:
@@ -955,9 +1022,15 @@ class Sweeper::SweeperImpl final {
: sweeper_(sweeper) {
DCHECK(!sweeper_.is_sweeping_on_mutator_thread_);
sweeper_.is_sweeping_on_mutator_thread_ = true;
+ for (auto* observer : sweeper_.mutator_thread_sweeping_observers_) {
+ observer->Start();
+ }
}
~MutatorThreadSweepingScope() {
sweeper_.is_sweeping_on_mutator_thread_ = false;
+ for (auto* observer : sweeper_.mutator_thread_sweeping_observers_) {
+ observer->End();
+ }
}
MutatorThreadSweepingScope(const MutatorThreadSweepingScope&) = delete;
@@ -968,33 +1041,37 @@ class Sweeper::SweeperImpl final {
SweeperImpl& sweeper_;
};
- class IncrementalSweepTask : public cppgc::IdleTask {
+ class IncrementalSweepTask final : public cppgc::Task {
public:
using Handle = SingleThreadedHandle;
- explicit IncrementalSweepTask(SweeperImpl* sweeper)
+ explicit IncrementalSweepTask(SweeperImpl& sweeper)
: sweeper_(sweeper), handle_(Handle::NonEmptyTag{}) {}
- static Handle Post(SweeperImpl* sweeper, cppgc::TaskRunner* runner) {
+ static Handle Post(SweeperImpl& sweeper, cppgc::TaskRunner* runner) {
auto task = std::make_unique<IncrementalSweepTask>(sweeper);
auto handle = task->GetHandle();
- runner->PostIdleTask(std::move(task));
+ runner->PostTask(std::move(task));
return handle;
}
private:
- void Run(double deadline_in_seconds) override {
+ void Run() override {
if (handle_.IsCanceled()) return;
- if (!sweeper_->PerformSweepOnMutatorThread(
- deadline_in_seconds, StatsCollector::kSweepIdleStep)) {
- sweeper_->ScheduleIncrementalSweeping();
+ if (!sweeper_.PerformSweepOnMutatorThread(
+ v8::base::TimeDelta::FromMilliseconds(5),
+ StatsCollector::kSweepInTask,
+ sweeper_.IsConcurrentSweepingDone()
+ ? MutatorThreadSweepingMode::kAll
+ : MutatorThreadSweepingMode::kOnlyFinalizers)) {
+ sweeper_.ScheduleIncrementalSweeping();
}
}
Handle GetHandle() const { return handle_; }
- SweeperImpl* sweeper_;
+ SweeperImpl& sweeper_;
// TODO(chromium:1056170): Change to CancelableTask.
Handle handle_;
};
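Note: the reworked IncrementalSweepTask is a regular (non-idle) task with a fixed 5 ms budget that re-posts itself until sweeping completes, and it only helps with actual page sweeping once the concurrent sweeper has run out of work. The standalone sketch below shows a self-rescheduling loop with the same shape; std::function and a plain queue stand in for cppgc::Task and the foreground task runner, and the "work" is faked.

// incremental_sweep_task_sketch.cc -- illustrative only.
#include <chrono>
#include <cstdio>
#include <functional>
#include <queue>

std::queue<std::function<void()>> g_task_runner;  // stand-in runner

// Stand-ins for SweeperImpl state and work.
bool g_concurrent_sweeping_done = false;
int g_pages_left = 3;

bool PerformSweepOnMutatorThread(std::chrono::milliseconds budget,
                                 bool sweep_all) {
  std::printf("sweep step: budget=%lldms mode=%s\n",
              static_cast<long long>(budget.count()),
              sweep_all ? "all" : "only-finalizers");
  if (sweep_all && g_pages_left > 0) --g_pages_left;
  g_concurrent_sweeping_done = true;  // pretend the concurrent job finished
  return g_pages_left == 0;           // true once sweeping is complete
}

void ScheduleIncrementalSweeping() {
  g_task_runner.push([] {
    const bool done = PerformSweepOnMutatorThread(
        std::chrono::milliseconds(5),
        /*sweep_all=*/g_concurrent_sweeping_done);
    if (!done) ScheduleIncrementalSweeping();  // re-post until finished
  });
}

int main() {
  ScheduleIncrementalSweeping();
  while (!g_task_runner.empty()) {  // drain the fake runner
    auto task = std::move(g_task_runner.front());
    g_task_runner.pop();
    task();
  }
}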
@@ -1002,10 +1079,10 @@ class Sweeper::SweeperImpl final {
void ScheduleIncrementalSweeping() {
DCHECK(platform_);
auto runner = platform_->GetForegroundTaskRunner();
- if (!runner || !runner->IdleTasksEnabled()) return;
+ if (!runner) return;
incremental_sweeper_handle_ =
- IncrementalSweepTask::Post(this, runner.get());
+ IncrementalSweepTask::Post(*this, runner.get());
}
void ScheduleConcurrentSweeping() {
@@ -1042,6 +1119,8 @@ class Sweeper::SweeperImpl final {
SweepingConfig config_;
IncrementalSweepTask::Handle incremental_sweeper_handle_;
std::unique_ptr<cppgc::JobHandle> concurrent_sweeper_handle_;
+ std::vector<Sweeper::SweepingOnMutatorThreadObserver*>
+ mutator_thread_sweeping_observers_;
// Indicates whether the sweeping phase is in progress.
bool is_in_progress_ = false;
bool notify_done_pending_ = false;
@@ -1060,14 +1139,16 @@ Sweeper::~Sweeper() = default;
void Sweeper::Start(SweepingConfig config) {
impl_->Start(config, heap_.platform());
}
-void Sweeper::FinishIfRunning() { impl_->FinishIfRunning(); }
+
+bool Sweeper::FinishIfRunning() { return impl_->FinishIfRunning(); }
void Sweeper::FinishIfOutOfWork() { impl_->FinishIfOutOfWork(); }
void Sweeper::WaitForConcurrentSweepingForTesting() {
impl_->WaitForConcurrentSweepingForTesting();
}
void Sweeper::NotifyDoneIfNeeded() { impl_->NotifyDoneIfNeeded(); }
-bool Sweeper::SweepForAllocationIfRunning(NormalPageSpace* space, size_t size) {
- return impl_->SweepForAllocationIfRunning(space, size);
+bool Sweeper::SweepForAllocationIfRunning(NormalPageSpace* space, size_t size,
+ v8::base::TimeDelta max_duration) {
+ return impl_->SweepForAllocationIfRunning(space, size, max_duration);
}
bool Sweeper::IsSweepingOnMutatorThread() const {
return impl_->IsSweepingOnMutatorThread();
@@ -1077,10 +1158,20 @@ bool Sweeper::IsSweepingInProgress() const {
return impl_->IsSweepingInProgress();
}
-bool Sweeper::PerformSweepOnMutatorThread(double deadline_in_seconds) {
- return impl_->PerformSweepOnMutatorThread(deadline_in_seconds,
- StatsCollector::kSweepInTask);
+bool Sweeper::PerformSweepOnMutatorThread(v8::base::TimeDelta max_duration,
+ StatsCollector::ScopeId scope_id) {
+ return impl_->PerformSweepOnMutatorThread(max_duration, scope_id,
+ MutatorThreadSweepingMode::kAll);
+}
+
+Sweeper::SweepingOnMutatorThreadObserver::SweepingOnMutatorThreadObserver(
+ Sweeper& sweeper)
+ : sweeper_(sweeper) {
+ sweeper_.impl_->AddMutatorThreadSweepingObserver(this);
+}
+
+Sweeper::SweepingOnMutatorThreadObserver::~SweepingOnMutatorThreadObserver() {
+ sweeper_.impl_->RemoveMutatorThreadSweepingObserver(this);
}
-} // namespace internal
-} // namespace cppgc
+} // namespace cppgc::internal
diff --git a/deps/v8/src/heap/cppgc/sweeper.h b/deps/v8/src/heap/cppgc/sweeper.h
index 845dfbbfc1..95b61729b8 100644
--- a/deps/v8/src/heap/cppgc/sweeper.h
+++ b/deps/v8/src/heap/cppgc/sweeper.h
@@ -7,16 +7,13 @@
#include <memory>
-#include "include/cppgc/heap.h"
#include "src/base/macros.h"
#include "src/base/platform/time.h"
+#include "src/heap/cppgc/heap-config.h"
#include "src/heap/cppgc/memory.h"
+#include "src/heap/cppgc/stats-collector.h"
-namespace cppgc {
-
-class Platform;
-
-namespace internal {
+namespace cppgc::internal {
class HeapBase;
class ConcurrentSweeperTest;
@@ -24,15 +21,16 @@ class NormalPageSpace;
class V8_EXPORT_PRIVATE Sweeper final {
public:
- struct SweepingConfig {
- using SweepingType = cppgc::Heap::SweepingType;
- enum class CompactableSpaceHandling { kSweep, kIgnore };
- enum class FreeMemoryHandling { kDoNotDiscard, kDiscardWherePossible };
-
- SweepingType sweeping_type = SweepingType::kIncrementalAndConcurrent;
- CompactableSpaceHandling compactable_space_handling =
- CompactableSpaceHandling::kSweep;
- FreeMemoryHandling free_memory_handling = FreeMemoryHandling::kDoNotDiscard;
+ class V8_EXPORT_PRIVATE SweepingOnMutatorThreadObserver {
+ public:
+ explicit SweepingOnMutatorThreadObserver(Sweeper&);
+ virtual ~SweepingOnMutatorThreadObserver();
+
+ virtual void Start() = 0;
+ virtual void End() = 0;
+
+ private:
+ Sweeper& sweeper_;
};
static constexpr bool CanDiscardMemory() {
@@ -47,19 +45,24 @@ class V8_EXPORT_PRIVATE Sweeper final {
// Sweeper::Start assumes the heap holds no linear allocation buffers.
void Start(SweepingConfig);
- void FinishIfRunning();
+ // Returns true when sweeping was finished, and false if it was not running
+ // or could not be finished because this was a recursive sweeping call.
+ bool FinishIfRunning();
void FinishIfOutOfWork();
void NotifyDoneIfNeeded();
- // SweepForAllocationIfRunning sweeps the given |space| until a slot that can
- // fit an allocation of size |size| is found. Returns true if a slot was
- // found.
- bool SweepForAllocationIfRunning(NormalPageSpace* space, size_t size);
+ // SweepForAllocationIfRunning sweeps the given `space` until a slot that can
+ // fit an allocation of `min_wanted_size` bytes is found. Returns true if a
+ // slot was found. Aborts after `max_duration`.
+ bool SweepForAllocationIfRunning(NormalPageSpace* space,
+ size_t min_wanted_size,
+ v8::base::TimeDelta max_duration);
bool IsSweepingOnMutatorThread() const;
bool IsSweepingInProgress() const;
// Assist with sweeping. Returns true if sweeping is done.
- bool PerformSweepOnMutatorThread(double deadline_in_seconds);
+ bool PerformSweepOnMutatorThread(v8::base::TimeDelta max_duration,
+ StatsCollector::ScopeId);
private:
void WaitForConcurrentSweepingForTesting();
@@ -72,7 +75,6 @@ class V8_EXPORT_PRIVATE Sweeper final {
friend class ConcurrentSweeperTest;
};
-} // namespace internal
-} // namespace cppgc
+} // namespace cppgc::internal
#endif // V8_HEAP_CPPGC_SWEEPER_H_
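Note: SweepingOnMutatorThreadObserver, declared in the header above, is registered on construction and its Start()/End() hooks bracket every mutator-thread sweeping scope. The standalone sketch below models that protocol with a fake sweeper and a hypothetical observer that just accumulates sweeping time; only the Start()/End() shape comes from the header, everything else is a stand-in.

// observer_sketch.cc -- standalone model of the Start()/End() protocol.
#include <chrono>
#include <cstdio>
#include <vector>

class SweepingObserver {
 public:
  virtual ~SweepingObserver() = default;
  virtual void Start() = 0;
  virtual void End() = 0;
};

class FakeSweeper {
 public:
  void AddObserver(SweepingObserver* o) { observers_.push_back(o); }
  // Every mutator-thread sweeping scope notifies all observers, mirroring
  // MutatorThreadSweepingScope in sweeper.cc.
  void SweepOnMutatorThread() {
    for (auto* o : observers_) o->Start();
    // ... sweeping work would happen here ...
    for (auto* o : observers_) o->End();
  }

 private:
  std::vector<SweepingObserver*> observers_;
};

class SweepingTimeObserver final : public SweepingObserver {
 public:
  void Start() override { start_ = std::chrono::steady_clock::now(); }
  void End() override { total_ += std::chrono::steady_clock::now() - start_; }
  double total_ms() const {
    return std::chrono::duration<double, std::milli>(total_).count();
  }

 private:
  std::chrono::steady_clock::time_point start_;
  std::chrono::steady_clock::duration total_{};
};

int main() {
  FakeSweeper sweeper;
  SweepingTimeObserver observer;
  sweeper.AddObserver(&observer);
  sweeper.SweepOnMutatorThread();
  std::printf("mutator sweeping took %.3f ms\n", observer.total_ms());
}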
diff --git a/deps/v8/src/heap/cppgc/write-barrier.cc b/deps/v8/src/heap/cppgc/write-barrier.cc
index 098f950d2a..5cbec656a9 100644
--- a/deps/v8/src/heap/cppgc/write-barrier.cc
+++ b/deps/v8/src/heap/cppgc/write-barrier.cc
@@ -187,24 +187,6 @@ void WriteBarrier::CheckParams(Type expected_type, const Params& params) {
}
#endif // V8_ENABLE_CHECKS
-// static
-bool WriteBarrierTypeForNonCagedHeapPolicy::IsMarking(const void* object,
- HeapHandle** handle) {
- // Large objects cannot have mixins, so we are guaranteed to always have
- // a pointer on the same page.
- const auto* page = BasePage::FromPayload(object);
- *handle = &page->heap();
- const MarkerBase* marker = page->heap().marker();
- return marker && marker->IsMarking();
-}
-
-// static
-bool WriteBarrierTypeForNonCagedHeapPolicy::IsMarking(HeapHandle& heap_handle) {
- const auto& heap_base = internal::HeapBase::From(heap_handle);
- const MarkerBase* marker = heap_base.marker();
- return marker && marker->IsMarking();
-}
-
#if defined(CPPGC_YOUNG_GENERATION)
// static