author    Michaël Zasso <targos@protonmail.com>  2020-11-13 12:51:53 +0100
committer Michaël Zasso <targos@protonmail.com>  2020-11-15 16:46:54 +0100
commit    48db20f6f53060e38b2272566b014741eb4f519f (patch)
tree      e2f9b4c7f69d2e4597b73b4c3c09f4371d5cc963 /deps/v8/src/heap/cppgc
parent    79916428a48df937aa5b2b69c061d2d42181a76b (diff)
download  node-new-48db20f6f53060e38b2272566b014741eb4f519f.tar.gz
deps: update V8 to 8.7.220
PR-URL: https://github.com/nodejs/node/pull/35700
Reviewed-By: Rich Trott <rtrott@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Joyee Cheung <joyeec9h3@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Shelley Vohr <codebytere@gmail.com>
Diffstat (limited to 'deps/v8/src/heap/cppgc')
-rw-r--r--  deps/v8/src/heap/cppgc/default-job.h | 186
-rw-r--r--  deps/v8/src/heap/cppgc/default-platform.cc | 78
-rw-r--r--  deps/v8/src/heap/cppgc/garbage-collector.h | 13
-rw-r--r--  deps/v8/src/heap/cppgc/gc-invoker.cc | 28
-rw-r--r--  deps/v8/src/heap/cppgc/gc-invoker.h | 1
-rw-r--r--  deps/v8/src/heap/cppgc/heap-base.cc | 9
-rw-r--r--  deps/v8/src/heap/cppgc/heap-base.h | 17
-rw-r--r--  deps/v8/src/heap/cppgc/heap-growing.cc | 68
-rw-r--r--  deps/v8/src/heap/cppgc/heap-growing.h | 5
-rw-r--r--  deps/v8/src/heap/cppgc/heap.cc | 71
-rw-r--r--  deps/v8/src/heap/cppgc/heap.h | 16
-rw-r--r--  deps/v8/src/heap/cppgc/incremental-marking-schedule.cc | 74
-rw-r--r--  deps/v8/src/heap/cppgc/incremental-marking-schedule.h | 53
-rw-r--r--  deps/v8/src/heap/cppgc/marker.cc | 278
-rw-r--r--  deps/v8/src/heap/cppgc/marker.h | 115
-rw-r--r--  deps/v8/src/heap/cppgc/marking-state.cc | 20
-rw-r--r--  deps/v8/src/heap/cppgc/marking-state.h | 59
-rw-r--r--  deps/v8/src/heap/cppgc/marking-verifier.cc | 17
-rw-r--r--  deps/v8/src/heap/cppgc/marking-verifier.h | 7
-rw-r--r--  deps/v8/src/heap/cppgc/marking-visitor.h | 2
-rw-r--r--  deps/v8/src/heap/cppgc/marking-worklists.cc | 10
-rw-r--r--  deps/v8/src/heap/cppgc/marking-worklists.h | 17
-rw-r--r--  deps/v8/src/heap/cppgc/object-allocator.cc | 3
-rw-r--r--  deps/v8/src/heap/cppgc/prefinalizer-handler.cc | 2
-rw-r--r--  deps/v8/src/heap/cppgc/stats-collector.cc | 14
-rw-r--r--  deps/v8/src/heap/cppgc/stats-collector.h | 4
-rw-r--r--  deps/v8/src/heap/cppgc/sweeper.cc | 26
-rw-r--r--  deps/v8/src/heap/cppgc/sweeper.h | 2
-rw-r--r--  deps/v8/src/heap/cppgc/worklist.h | 473
29 files changed, 990 insertions, 678 deletions
diff --git a/deps/v8/src/heap/cppgc/default-job.h b/deps/v8/src/heap/cppgc/default-job.h
new file mode 100644
index 0000000000..9ef6f3fb58
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/default-job.h
@@ -0,0 +1,186 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_DEFAULT_JOB_H_
+#define V8_HEAP_CPPGC_DEFAULT_JOB_H_
+
+#include <atomic>
+#include <map>
+#include <memory>
+#include <unordered_set>
+#include <vector>
+
+#include "include/cppgc/platform.h"
+#include "src/base/logging.h"
+#include "src/base/platform/mutex.h"
+
+namespace cppgc {
+namespace internal {
+
+template <typename Job>
+class DefaultJobFactory {
+ public:
+ static std::shared_ptr<Job> Create(std::unique_ptr<cppgc::JobTask> job_task) {
+ std::shared_ptr<Job> job =
+ std::make_shared<Job>(typename Job::Key(), std::move(job_task));
+ job->NotifyConcurrencyIncrease();
+ return job;
+ }
+};
+
+template <typename Thread>
+class DefaultJobImpl {
+ public:
+ class JobDelegate;
+ class JobHandle;
+
+ class Key {
+ private:
+ Key() {}
+
+ template <typename Job>
+ friend class DefaultJobFactory;
+ };
+
+ DefaultJobImpl(Key, std::unique_ptr<cppgc::JobTask> job_task)
+ : job_task_(std::move(job_task)) {}
+
+ ~DefaultJobImpl() {
+ Cancel();
+ DCHECK_EQ(0, active_threads_.load(std::memory_order_relaxed));
+ }
+
+ void NotifyConcurrencyIncrease();
+
+ void Join() {
+ for (std::shared_ptr<Thread>& thread : job_threads_) thread->Join();
+ job_threads_.clear();
+ can_run_.store(false, std::memory_order_relaxed);
+ }
+
+ void Cancel() {
+ can_run_.store(false, std::memory_order_relaxed);
+ Join();
+ }
+
+ bool IsCompleted() const { return !IsRunning(); }
+ bool IsRunning() const {
+ uint8_t active_threads = active_threads_.load(std::memory_order_relaxed);
+ return (active_threads + job_task_->GetMaxConcurrency(active_threads)) > 0;
+ }
+
+ bool CanRun() const { return can_run_.load(std::memory_order_relaxed); }
+
+ void RunJobTask() {
+ DCHECK_NOT_NULL(job_task_);
+ NotifyJobThreadStart();
+ JobDelegate delegate(this);
+ job_task_->Run(&delegate);
+ NotifyJobThreadEnd();
+ }
+
+ protected:
+ virtual std::shared_ptr<Thread> CreateThread(DefaultJobImpl*) = 0;
+
+ void NotifyJobThreadStart() {
+ active_threads_.fetch_add(1, std::memory_order_relaxed);
+ }
+ void NotifyJobThreadEnd() {
+ active_threads_.fetch_sub(1, std::memory_order_relaxed);
+ }
+
+ void GuaranteeAvailableIds(uint8_t max_threads) {
+ if (max_threads <= highest_thread_count_) return;
+ v8::base::MutexGuard guard(&ids_lock_);
+ while (highest_thread_count_ < max_threads) {
+ available_ids_.push_back(++highest_thread_count_);
+ }
+ }
+
+ std::unique_ptr<cppgc::JobTask> job_task_;
+ std::vector<std::shared_ptr<Thread>> job_threads_;
+ std::atomic_bool can_run_{true};
+ std::atomic<uint8_t> active_threads_{0};
+
+ // Task id management.
+ v8::base::Mutex ids_lock_;
+ std::vector<uint8_t> available_ids_;
+ uint8_t highest_thread_count_ = -1;
+};
+
+template <typename Thread>
+class DefaultJobImpl<Thread>::JobDelegate final : public cppgc::JobDelegate {
+ public:
+ explicit JobDelegate(DefaultJobImpl* job) : job_(job) {}
+ ~JobDelegate() { ReleaseTaskId(); }
+ bool ShouldYield() override { return !job_->CanRun(); }
+ void NotifyConcurrencyIncrease() override {
+ job_->NotifyConcurrencyIncrease();
+ }
+ uint8_t GetTaskId() override {
+ AcquireTaskId();
+ return job_thread_id_;
+ }
+
+ private:
+ void AcquireTaskId() {
+ if (job_thread_id_ != kInvalidTaskId) return;
+ v8::base::MutexGuard guard(&job_->ids_lock_);
+ job_thread_id_ = job_->available_ids_.back();
+ DCHECK_NE(kInvalidTaskId, job_thread_id_);
+ job_->available_ids_.pop_back();
+ }
+ void ReleaseTaskId() {
+ if (job_thread_id_ == kInvalidTaskId) return;
+ v8::base::MutexGuard guard(&job_->ids_lock_);
+ job_->available_ids_.push_back(job_thread_id_);
+ }
+
+ DefaultJobImpl* const job_;
+ static constexpr uint8_t kInvalidTaskId = std::numeric_limits<uint8_t>::max();
+ uint8_t job_thread_id_ = kInvalidTaskId;
+};
+
+template <typename Thread>
+void DefaultJobImpl<Thread>::NotifyConcurrencyIncrease() {
+ DCHECK(CanRun());
+ static const size_t kMaxThreads = Thread::GetMaxSupportedConcurrency();
+ uint8_t current_active_threads =
+ active_threads_.load(std::memory_order_relaxed);
+ size_t max_threads = std::min(
+ kMaxThreads, job_task_->GetMaxConcurrency(current_active_threads));
+ if (current_active_threads >= max_threads) return;
+ DCHECK_LT(max_threads, std::numeric_limits<uint8_t>::max());
+ GuaranteeAvailableIds(max_threads);
+ for (uint8_t new_threads = max_threads - current_active_threads;
+ new_threads > 0; --new_threads) {
+ std::shared_ptr<Thread> thread = CreateThread(this);
+ job_threads_.push_back(thread);
+ }
+}
+
+template <typename Thread>
+class DefaultJobImpl<Thread>::JobHandle final : public cppgc::JobHandle {
+ public:
+ explicit JobHandle(std::shared_ptr<DefaultJobImpl> job)
+ : job_(std::move(job)) {
+ DCHECK_NOT_NULL(job_);
+ }
+
+ void NotifyConcurrencyIncrease() override {
+ job_->NotifyConcurrencyIncrease();
+ }
+ void Join() override { job_->Join(); }
+ void Cancel() override { job_->Cancel(); }
+ bool IsCompleted() override { return job_->IsCompleted(); }
+ bool IsRunning() override { return job_->IsRunning(); }
+
+ private:
+ std::shared_ptr<DefaultJobImpl> job_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_DEFAULT_JOB_H_
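The task-id management above (GuaranteeAvailableIds, AcquireTaskId, ReleaseTaskId) is essentially a mutex-protected pool of small integer ids that worker threads borrow for the duration of a job run. A minimal standalone sketch of that pattern follows; TaskIdPool and its members are hypothetical names, and std::mutex stands in for v8::base::Mutex.

// Illustrative sketch only; TaskIdPool is a hypothetical name.
#include <cstdint>
#include <limits>
#include <mutex>
#include <vector>

class TaskIdPool {
 public:
  static constexpr uint8_t kInvalidTaskId = std::numeric_limits<uint8_t>::max();

  // Grow the pool so that ids 1..max_threads exist, like GuaranteeAvailableIds().
  void GuaranteeAvailableIds(uint8_t max_threads) {
    std::lock_guard<std::mutex> guard(lock_);
    while (highest_id_ < max_threads) available_.push_back(++highest_id_);
  }

  // Borrow an id for a worker thread, like JobDelegate::AcquireTaskId().
  uint8_t Acquire() {
    std::lock_guard<std::mutex> guard(lock_);
    if (available_.empty()) return kInvalidTaskId;
    uint8_t id = available_.back();
    available_.pop_back();
    return id;
  }

  // Return the id when the delegate goes away, like JobDelegate::ReleaseTaskId().
  void Release(uint8_t id) {
    if (id == kInvalidTaskId) return;
    std::lock_guard<std::mutex> guard(lock_);
    available_.push_back(id);
  }

 private:
  std::mutex lock_;
  std::vector<uint8_t> available_;
  uint8_t highest_id_ = 0;
};

Ids are handed out and recycled LIFO, so concurrently running delegates always hold distinct small ids that can index per-task state.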
diff --git a/deps/v8/src/heap/cppgc/default-platform.cc b/deps/v8/src/heap/cppgc/default-platform.cc
index 792bf38189..0ac5440f7e 100644
--- a/deps/v8/src/heap/cppgc/default-platform.cc
+++ b/deps/v8/src/heap/cppgc/default-platform.cc
@@ -7,47 +7,63 @@
#include <chrono> // NOLINT(build/c++11)
#include <thread> // NOLINT(build/c++11)
+#include "src/base/logging.h"
#include "src/base/page-allocator.h"
+#include "src/base/sys-info.h"
+#include "src/heap/cppgc/default-job.h"
namespace cppgc {
-namespace {
+namespace internal {
-// Simple implementation of JobTask based on std::thread.
-class DefaultJobHandle : public JobHandle {
+// Default implementation of Jobs based on std::thread.
+namespace {
+class DefaultJobThread final : private std::thread {
public:
- explicit DefaultJobHandle(std::shared_ptr<std::thread> thread)
- : thread_(std::move(thread)) {}
+ template <typename Function>
+ explicit DefaultJobThread(Function function)
+ : std::thread(std::move(function)) {}
+ ~DefaultJobThread() { DCHECK(!joinable()); }
- void NotifyConcurrencyIncrease() override {}
- void Join() override {
- if (thread_->joinable()) thread_->join();
+ void Join() { join(); }
+
+ static size_t GetMaxSupportedConcurrency() {
+ return v8::base::SysInfo::NumberOfProcessors() - 1;
}
- void Cancel() override { Join(); }
- bool IsRunning() override { return thread_->joinable(); }
+};
+} // namespace
- private:
- std::shared_ptr<std::thread> thread_;
+class DefaultJob final : public DefaultJobImpl<DefaultJobThread> {
+ public:
+ DefaultJob(Key key, std::unique_ptr<cppgc::JobTask> job_task)
+ : DefaultJobImpl(key, std::move(job_task)) {}
+
+ std::shared_ptr<DefaultJobThread> CreateThread(DefaultJobImpl* job) final {
+ return std::make_shared<DefaultJobThread>([job = this] {
+ DCHECK_NOT_NULL(job);
+ job->RunJobTask();
+ });
+ }
};
-} // namespace
+} // namespace internal
void DefaultTaskRunner::PostTask(std::unique_ptr<cppgc::Task> task) {
tasks_.push_back(std::move(task));
}
-void DefaultTaskRunner::PostNonNestableTask(std::unique_ptr<cppgc::Task> task) {
- PostTask(std::move(task));
-}
-
void DefaultTaskRunner::PostDelayedTask(std::unique_ptr<cppgc::Task> task,
double) {
PostTask(std::move(task));
}
-void DefaultTaskRunner::PostNonNestableDelayedTask(
- std::unique_ptr<cppgc::Task> task, double) {
- PostTask(std::move(task));
+void DefaultTaskRunner::PostNonNestableTask(std::unique_ptr<cppgc::Task>) {
+ UNREACHABLE();
+}
+
+void DefaultTaskRunner::PostNonNestableDelayedTask(std::unique_ptr<cppgc::Task>,
+ double) {
+ UNREACHABLE();
}
void DefaultTaskRunner::PostIdleTask(std::unique_ptr<cppgc::IdleTask> task) {
@@ -106,17 +122,11 @@ std::shared_ptr<cppgc::TaskRunner> DefaultPlatform::GetForegroundTaskRunner() {
std::unique_ptr<cppgc::JobHandle> DefaultPlatform::PostJob(
cppgc::TaskPriority priority, std::unique_ptr<cppgc::JobTask> job_task) {
- auto thread = std::make_shared<std::thread>([task = std::move(job_task)] {
- class SimpleDelegate final : public cppgc::JobDelegate {
- public:
- bool ShouldYield() override { return false; }
- void NotifyConcurrencyIncrease() override {}
- } delegate;
-
- if (task) task->Run(&delegate);
- });
- job_threads_.push_back(thread);
- return std::make_unique<DefaultJobHandle>(std::move(thread));
+ std::shared_ptr<internal::DefaultJob> job =
+ internal::DefaultJobFactory<internal::DefaultJob>::Create(
+ std::move(job_task));
+ jobs_.push_back(job);
+ return std::make_unique<internal::DefaultJob::JobHandle>(std::move(job));
}
void DefaultPlatform::WaitAllForegroundTasks() {
@@ -124,10 +134,10 @@ void DefaultPlatform::WaitAllForegroundTasks() {
}
void DefaultPlatform::WaitAllBackgroundTasks() {
- for (auto& thread : job_threads_) {
- thread->join();
+ for (auto& job : jobs_) {
+ job->Join();
}
- job_threads_.clear();
+ jobs_.clear();
}
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/garbage-collector.h b/deps/v8/src/heap/cppgc/garbage-collector.h
index 6c906fd501..1fc7ed925d 100644
--- a/deps/v8/src/heap/cppgc/garbage-collector.h
+++ b/deps/v8/src/heap/cppgc/garbage-collector.h
@@ -31,6 +31,16 @@ class GarbageCollector {
MarkingType::kAtomic, SweepingType::kAtomic};
}
+ static constexpr Config ConservativeIncrementalConfig() {
+ return {CollectionType::kMajor, StackState::kMayContainHeapPointers,
+ MarkingType::kIncremental, SweepingType::kAtomic};
+ }
+
+ static constexpr Config PreciseIncrementalConfig() {
+ return {CollectionType::kMajor, StackState::kNoHeapPointers,
+ MarkingType::kIncremental, SweepingType::kAtomic};
+ }
+
static constexpr Config MinorPreciseAtomicConfig() {
return {CollectionType::kMinor, StackState::kNoHeapPointers,
MarkingType::kAtomic, SweepingType::kAtomic};
@@ -43,7 +53,8 @@ class GarbageCollector {
};
// Executes a garbage collection specified in config.
- virtual void CollectGarbage(Config config) = 0;
+ virtual void CollectGarbage(Config) = 0;
+ virtual void StartIncrementalGarbageCollection(Config) = 0;
// The current epoch that the GC maintains. The epoch is increased on every
// GC invocation.
diff --git a/deps/v8/src/heap/cppgc/gc-invoker.cc b/deps/v8/src/heap/cppgc/gc-invoker.cc
index a1212d8052..31ca9780bf 100644
--- a/deps/v8/src/heap/cppgc/gc-invoker.cc
+++ b/deps/v8/src/heap/cppgc/gc-invoker.cc
@@ -22,6 +22,7 @@ class GCInvoker::GCInvokerImpl final : public GarbageCollector {
GCInvokerImpl& operator=(const GCInvokerImpl&) = delete;
void CollectGarbage(GarbageCollector::Config) final;
+ void StartIncrementalGarbageCollection(GarbageCollector::Config) final;
size_t epoch() const final { return collector_->epoch(); }
private:
@@ -37,7 +38,9 @@ class GCInvoker::GCInvokerImpl final : public GarbageCollector {
}
explicit GCTask(GarbageCollector* collector)
- : collector_(collector), saved_epoch_(collector->epoch()) {}
+ : collector_(collector),
+ handle_(Handle::NonEmptyTag{}),
+ saved_epoch_(collector->epoch()) {}
private:
void Run() final {
@@ -88,6 +91,24 @@ void GCInvoker::GCInvokerImpl::CollectGarbage(GarbageCollector::Config config) {
}
}
+void GCInvoker::GCInvokerImpl::StartIncrementalGarbageCollection(
+ GarbageCollector::Config config) {
+ if ((stack_support_ !=
+ cppgc::Heap::StackSupport::kSupportsConservativeStackScan) &&
+ (!platform_->GetForegroundTaskRunner() ||
+ !platform_->GetForegroundTaskRunner()->NonNestableTasksEnabled())) {
+ // In this configuration the GC finalization can only be triggered through
+ // ForceGarbageCollectionSlow. If incremental GC is started, there is no
+ // way to know how long it will remain enabled (and the write barrier with
+ // it). For that reason, we do not support running incremental GCs in this
+ // configuration.
+ return;
+ }
+ // No need to postpone starting incremental GC since the stack is not scanned
+ // until GC finalization.
+ collector_->StartIncrementalGarbageCollection(config);
+}
+
GCInvoker::GCInvoker(GarbageCollector* collector, cppgc::Platform* platform,
cppgc::Heap::StackSupport stack_support)
: impl_(std::make_unique<GCInvoker::GCInvokerImpl>(collector, platform,
@@ -99,6 +120,11 @@ void GCInvoker::CollectGarbage(GarbageCollector::Config config) {
impl_->CollectGarbage(config);
}
+void GCInvoker::StartIncrementalGarbageCollection(
+ GarbageCollector::Config config) {
+ impl_->StartIncrementalGarbageCollection(config);
+}
+
size_t GCInvoker::epoch() const { return impl_->epoch(); }
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/gc-invoker.h b/deps/v8/src/heap/cppgc/gc-invoker.h
index a9e3369b3e..fa5e7e5435 100644
--- a/deps/v8/src/heap/cppgc/gc-invoker.h
+++ b/deps/v8/src/heap/cppgc/gc-invoker.h
@@ -34,6 +34,7 @@ class V8_EXPORT_PRIVATE GCInvoker final : public GarbageCollector {
GCInvoker& operator=(const GCInvoker&) = delete;
void CollectGarbage(GarbageCollector::Config) final;
+ void StartIncrementalGarbageCollection(GarbageCollector::Config) final;
size_t epoch() const final;
private:
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index c3ea0d3d16..5a92c4f159 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -54,7 +54,7 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
} // namespace
HeapBase::HeapBase(std::shared_ptr<cppgc::Platform> platform,
- size_t custom_spaces)
+ size_t custom_spaces, StackSupport stack_support)
: raw_heap_(this, custom_spaces),
platform_(std::move(platform)),
#if defined(CPPGC_CAGED_HEAP)
@@ -70,7 +70,8 @@ HeapBase::HeapBase(std::shared_ptr<cppgc::Platform> platform,
prefinalizer_handler_(std::make_unique<PreFinalizerHandler>()),
object_allocator_(&raw_heap_, page_backend_.get(),
stats_collector_.get()),
- sweeper_(&raw_heap_, platform_.get(), stats_collector_.get()) {
+ sweeper_(&raw_heap_, platform_.get(), stats_collector_.get()),
+ stack_support_(stack_support) {
}
HeapBase::~HeapBase() = default;
@@ -89,5 +90,9 @@ void HeapBase::VerifyMarking(cppgc::Heap::StackState stack_state) {
MarkingVerifier verifier(*this, stack_state);
}
+void HeapBase::AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded() {
+ if (marker_) marker_->AdvanceMarkingOnAllocation();
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/heap-base.h b/deps/v8/src/heap/cppgc/heap-base.h
index df5646a202..efc4dbd40d 100644
--- a/deps/v8/src/heap/cppgc/heap-base.h
+++ b/deps/v8/src/heap/cppgc/heap-base.h
@@ -12,6 +12,7 @@
#include "include/cppgc/internal/persistent-node.h"
#include "include/cppgc/macros.h"
#include "src/base/macros.h"
+#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/object-allocator.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/sweeper.h"
@@ -36,7 +37,6 @@ namespace testing {
class TestWithHeap;
} // namespace testing
-class MarkerBase;
class PageBackend;
class PreFinalizerHandler;
class StatsCollector;
@@ -44,6 +44,8 @@ class StatsCollector;
// Base class for heap implementations.
class V8_EXPORT_PRIVATE HeapBase {
public:
+ using StackSupport = cppgc::Heap::StackSupport;
+
// NoGCScope allows going over limits and avoids triggering garbage
// collection triggered through allocations or even explicitly.
class V8_EXPORT_PRIVATE NoGCScope final {
@@ -60,7 +62,8 @@ class V8_EXPORT_PRIVATE HeapBase {
HeapBase& heap_;
};
- HeapBase(std::shared_ptr<cppgc::Platform> platform, size_t custom_spaces);
+ HeapBase(std::shared_ptr<cppgc::Platform> platform, size_t custom_spaces,
+ StackSupport stack_support);
virtual ~HeapBase();
HeapBase(const HeapBase&) = delete;
@@ -116,9 +119,16 @@ class V8_EXPORT_PRIVATE HeapBase {
size_t ObjectPayloadSize() const;
+ StackSupport stack_support() const { return stack_support_; }
+
+ void AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
+
protected:
void VerifyMarking(cppgc::Heap::StackState);
+ virtual void FinalizeIncrementalGarbageCollectionIfNeeded(
+ cppgc::Heap::StackState) = 0;
+
bool in_no_gc_scope() const { return no_gc_scope_ > 0; }
RawHeap raw_heap_;
@@ -145,6 +155,9 @@ class V8_EXPORT_PRIVATE HeapBase {
size_t no_gc_scope_ = 0;
+ const StackSupport stack_support_;
+
+ friend class MarkerBase::IncrementalMarkingTask;
friend class testing::TestWithHeap;
};
diff --git a/deps/v8/src/heap/cppgc/heap-growing.cc b/deps/v8/src/heap/cppgc/heap-growing.cc
index 751d32b0e6..45c606505f 100644
--- a/deps/v8/src/heap/cppgc/heap-growing.cc
+++ b/deps/v8/src/heap/cppgc/heap-growing.cc
@@ -4,18 +4,29 @@
#include "src/heap/cppgc/heap-growing.h"
+#include <cmath>
#include <memory>
#include "include/cppgc/platform.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/incremental-marking-schedule.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/task-handle.h"
namespace cppgc {
namespace internal {
+namespace {
+// Maximum ratio between limit for incremental GC and limit for atomic GC
+// (to guarantee that the limits are not too close to each other).
+constexpr double kMaximumLimitRatioForIncrementalGC = 0.9;
+// Minimum ratio between limit for incremental GC and limit for atomic GC
+// (to guarantee that limit is not too close to current allocated size).
+constexpr double kMinimumLimitRatioForIncrementalGC = 0.5;
+} // namespace
+
class HeapGrowing::HeapGrowingImpl final
: public StatsCollector::AllocationObserver {
public:
@@ -31,7 +42,10 @@ class HeapGrowing::HeapGrowingImpl final
void AllocatedObjectSizeDecreased(size_t) final {}
void ResetAllocatedObjectSize(size_t) final;
- size_t limit() const { return limit_; }
+ size_t limit_for_atomic_gc() const { return limit_for_atomic_gc_; }
+ size_t limit_for_incremental_gc() const { return limit_for_incremental_gc_; }
+
+ void DisableForTesting();
private:
void ConfigureLimit(size_t allocated_object_size);
@@ -40,9 +54,12 @@ class HeapGrowing::HeapGrowingImpl final
StatsCollector* stats_collector_;
// Allow 1 MB heap by default.
size_t initial_heap_size_ = 1 * kMB;
- size_t limit_ = 0; // See ConfigureLimit().
+ size_t limit_for_atomic_gc_ = 0; // See ConfigureLimit().
+ size_t limit_for_incremental_gc_ = 0; // See ConfigureLimit().
SingleThreadedHandle gc_task_handle_;
+
+ bool disabled_for_testing_ = false;
};
HeapGrowing::HeapGrowingImpl::HeapGrowingImpl(
@@ -64,9 +81,14 @@ HeapGrowing::HeapGrowingImpl::~HeapGrowingImpl() {
}
void HeapGrowing::HeapGrowingImpl::AllocatedObjectSizeIncreased(size_t) {
- if (stats_collector_->allocated_object_size() > limit_) {
+ if (disabled_for_testing_) return;
+ size_t allocated_object_size = stats_collector_->allocated_object_size();
+ if (allocated_object_size > limit_for_atomic_gc_) {
collector_->CollectGarbage(
GarbageCollector::Config::ConservativeAtomicConfig());
+ } else if (allocated_object_size > limit_for_incremental_gc_) {
+ collector_->StartIncrementalGarbageCollection(
+ GarbageCollector::Config::ConservativeIncrementalConfig());
}
}
@@ -78,8 +100,35 @@ void HeapGrowing::HeapGrowingImpl::ResetAllocatedObjectSize(
void HeapGrowing::HeapGrowingImpl::ConfigureLimit(
size_t allocated_object_size) {
const size_t size = std::max(allocated_object_size, initial_heap_size_);
- limit_ = std::max(static_cast<size_t>(size * kGrowingFactor),
- size + kMinLimitIncrease);
+ limit_for_atomic_gc_ = std::max(static_cast<size_t>(size * kGrowingFactor),
+ size + kMinLimitIncrease);
+ // Estimate when to start incremental GC based on current allocation speed.
+ // Ideally we start incremental GC such that it is ready to finalize no
+ // later than when we reach |limit_for_atomic_gc_|. However, we need to cap
+ // |limit_for_incremental_gc_| within a range to prevent:
+ // 1) |limit_for_incremental_gc_| being too close to |limit_for_atomic_gc_|
+ // such that incremental gc gets nothing done before reaching
+ // |limit_for_atomic_gc_| (in case where the allocation rate is very low).
+ // 2) |limit_for_incremental_gc_| being too close to |size| such that GC is
+ // essentially always running and write barriers are always active (in
+ // case allocation rate is very high).
+ size_t estimated_bytes_allocated_during_incremental_gc =
+ std::ceil(IncrementalMarkingSchedule::kEstimatedMarkingTimeMs *
+ stats_collector_->GetRecentAllocationSpeedInBytesPerMs());
+ size_t limit_incremental_gc_based_on_allocation_rate =
+ limit_for_atomic_gc_ - estimated_bytes_allocated_during_incremental_gc;
+ size_t maximum_limit_incremental_gc =
+ size + (limit_for_atomic_gc_ - size) * kMaximumLimitRatioForIncrementalGC;
+ size_t minimum_limit_incremental_gc =
+ size + (limit_for_atomic_gc_ - size) * kMinimumLimitRatioForIncrementalGC;
+ limit_for_incremental_gc_ =
+ std::max(minimum_limit_incremental_gc,
+ std::min(maximum_limit_incremental_gc,
+ limit_incremental_gc_based_on_allocation_rate));
+}
+
+void HeapGrowing::HeapGrowingImpl::DisableForTesting() {
+ disabled_for_testing_ = true;
}
HeapGrowing::HeapGrowing(GarbageCollector* collector,
@@ -90,7 +139,14 @@ HeapGrowing::HeapGrowing(GarbageCollector* collector,
HeapGrowing::~HeapGrowing() = default;
-size_t HeapGrowing::limit() const { return impl_->limit(); }
+size_t HeapGrowing::limit_for_atomic_gc() const {
+ return impl_->limit_for_atomic_gc();
+}
+size_t HeapGrowing::limit_for_incremental_gc() const {
+ return impl_->limit_for_incremental_gc();
+}
+
+void HeapGrowing::DisableForTesting() { impl_->DisableForTesting(); }
// static
constexpr double HeapGrowing::kGrowingFactor;
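The comment block in ConfigureLimit() above describes a clamp: start incremental GC early enough that marking (estimated at kEstimatedMarkingTimeMs) can finish before the atomic limit is reached, but keep the trigger within 50-90% of the headroom between the current size and the atomic limit. A rough standalone sketch of that computation, using a hypothetical LimitForIncrementalGC helper and the constants from the diff:

// Illustrative sketch only; LimitForIncrementalGC is a hypothetical helper.
#include <algorithm>
#include <cmath>
#include <cstddef>

constexpr double kMaximumLimitRatioForIncrementalGC = 0.9;
constexpr double kMinimumLimitRatioForIncrementalGC = 0.5;
constexpr double kEstimatedMarkingTimeMs = 500.0;

size_t LimitForIncrementalGC(size_t size, size_t limit_for_atomic_gc,
                             double allocation_speed_bytes_per_ms) {
  // Bytes expected to be allocated while incremental marking is running
  // (assumed here to stay below the atomic limit).
  size_t estimated_bytes_during_gc = static_cast<size_t>(
      std::ceil(kEstimatedMarkingTimeMs * allocation_speed_bytes_per_ms));
  size_t based_on_allocation_rate =
      limit_for_atomic_gc - estimated_bytes_during_gc;
  // Never closer to the atomic limit than 90% of the headroom (slow mutators)
  // and never below 50% of the headroom (fast mutators).
  size_t maximum_limit = size + static_cast<size_t>(
      (limit_for_atomic_gc - size) * kMaximumLimitRatioForIncrementalGC);
  size_t minimum_limit = size + static_cast<size_t>(
      (limit_for_atomic_gc - size) * kMinimumLimitRatioForIncrementalGC);
  return std::max(minimum_limit,
                  std::min(maximum_limit, based_on_allocation_rate));
}

For example, with 1 MB currently allocated, an atomic limit of 1.5 MB and an allocation speed of 100 bytes/ms, the rate-based estimate lands just above the 90% cap, so the incremental trigger is clamped to roughly 1.45 MB.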
diff --git a/deps/v8/src/heap/cppgc/heap-growing.h b/deps/v8/src/heap/cppgc/heap-growing.h
index 772fc2db55..4ecedb3e16 100644
--- a/deps/v8/src/heap/cppgc/heap-growing.h
+++ b/deps/v8/src/heap/cppgc/heap-growing.h
@@ -40,7 +40,10 @@ class V8_EXPORT_PRIVATE HeapGrowing final {
HeapGrowing(const HeapGrowing&) = delete;
HeapGrowing& operator=(const HeapGrowing&) = delete;
- size_t limit() const;
+ size_t limit_for_atomic_gc() const;
+ size_t limit_for_incremental_gc() const;
+
+ void DisableForTesting();
private:
class HeapGrowingImpl;
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index ad28ead5d2..0db04fb537 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -40,8 +40,9 @@ std::unique_ptr<Heap> Heap::Create(std::shared_ptr<cppgc::Platform> platform,
void Heap::ForceGarbageCollectionSlow(const char* source, const char* reason,
Heap::StackState stack_state) {
internal::Heap::From(this)->CollectGarbage(
- {internal::GarbageCollector::Config::CollectionType::kMajor,
- stack_state});
+ {internal::GarbageCollector::Config::CollectionType::kMajor, stack_state,
+ internal::GarbageCollector::Config::MarkingType::kAtomic,
+ internal::GarbageCollector::Config::SweepingType::kAtomic});
}
AllocationHandle& Heap::GetAllocationHandle() {
@@ -76,7 +77,7 @@ void CheckConfig(Heap::Config config) {
Heap::Heap(std::shared_ptr<cppgc::Platform> platform,
cppgc::Heap::HeapOptions options)
- : HeapBase(platform, options.custom_spaces.size()),
+ : HeapBase(platform, options.custom_spaces.size(), options.stack_support),
gc_invoker_(this, platform_.get(), options.stack_support),
growing_(&gc_invoker_, stats_collector_.get(),
options.resource_constraints) {}
@@ -84,14 +85,54 @@ Heap::Heap(std::shared_ptr<cppgc::Platform> platform,
Heap::~Heap() {
NoGCScope no_gc(*this);
// Finish already running GC if any, but don't finalize live objects.
- sweeper_.Finish();
+ sweeper_.FinishIfRunning();
}
void Heap::CollectGarbage(Config config) {
+ DCHECK_EQ(Config::MarkingType::kAtomic, config.marking_type);
CheckConfig(config);
if (in_no_gc_scope()) return;
+ config_ = config;
+
+ if (!gc_in_progress_) StartGarbageCollection(config);
+
+ DCHECK(marker_);
+
+ FinalizeGarbageCollection(config.stack_state);
+}
+
+void Heap::StartIncrementalGarbageCollection(Config config) {
+ DCHECK_NE(Config::MarkingType::kAtomic, config.marking_type);
+ CheckConfig(config);
+
+ if (gc_in_progress_ || in_no_gc_scope()) return;
+
+ config_ = config;
+
+ StartGarbageCollection(config);
+}
+
+void Heap::FinalizeIncrementalGarbageCollectionIfRunning(Config config) {
+ if (!gc_in_progress_) return;
+
+ DCHECK(!in_no_gc_scope());
+
+ DCHECK_NE(Config::MarkingType::kAtomic, config_.marking_type);
+ config_ = config;
+ FinalizeGarbageCollection(config.stack_state);
+}
+
+void Heap::StartGarbageCollection(Config config) {
+ DCHECK(!gc_in_progress_);
+
+ DCHECK(!in_no_gc_scope());
+
+ // Finish sweeping in case it is still running.
+ sweeper_.FinishIfRunning();
+
+ gc_in_progress_ = true;
epoch_++;
#if defined(CPPGC_YOUNG_GENERATION)
@@ -99,13 +140,18 @@ void Heap::CollectGarbage(Config config) {
Unmarker unmarker(&raw_heap());
#endif
- // "Marking".
- marker_ = std::make_unique<Marker>(AsBase());
const Marker::MarkingConfig marking_config{
config.collection_type, config.stack_state, config.marking_type};
- marker_->StartMarking(marking_config);
- marker_->FinishMarking(marking_config);
- // "Sweeping and finalization".
+ marker_ = MarkerFactory::CreateAndStartMarking<Marker>(
+ AsBase(), platform_.get(), marking_config);
+}
+
+void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
+ DCHECK(gc_in_progress_);
+ DCHECK(!in_no_gc_scope());
+ config_.stack_state = stack_state;
+ DCHECK(marker_);
+ marker_->FinishMarking(stack_state);
{
// Pre finalizers are forbidden from allocating objects.
ObjectAllocator::NoAllocationScope no_allocation_scope_(object_allocator_);
@@ -115,13 +161,16 @@ void Heap::CollectGarbage(Config config) {
marker_.reset();
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
- VerifyMarking(config.stack_state);
+ VerifyMarking(stack_state);
#endif
{
NoGCScope no_gc(*this);
- sweeper_.Start(config.sweeping_type);
+ sweeper_.Start(config_.sweeping_type);
}
+ gc_in_progress_ = false;
}
+void Heap::DisableHeapGrowingForTesting() { growing_.DisableForTesting(); }
+
} // namespace internal
} // namespace cppgc
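The heap.cc changes above split the old single CollectGarbage() into a Start/Finalize pair guarded by a gc_in_progress_ flag: an atomic collection starts a cycle (if none is running) and finalizes it immediately, while an incremental collection only starts marking and leaves finalization to a later task or to the next explicit collection. A simplified, hypothetical single-threaded model of that flow:

// Illustrative sketch only; GCDriver is a hypothetical model of the flow.
#include <cassert>

class GCDriver {
 public:
  // Atomic GC: reuse a running cycle if there is one, then finalize.
  void CollectGarbage() {
    if (!gc_in_progress_) Start();
    Finalize();
  }

  // Incremental GC: only start marking; finalization happens later.
  void StartIncrementalGarbageCollection() {
    if (gc_in_progress_) return;
    Start();
  }

  void FinalizeIfRunning() {
    if (gc_in_progress_) Finalize();
  }

 private:
  void Start() {
    assert(!gc_in_progress_);
    gc_in_progress_ = true;
    // StartMarking() would go here.
  }
  void Finalize() {
    assert(gc_in_progress_);
    // FinishMarking(), prefinalizers and sweeping would go here.
    gc_in_progress_ = false;
  }

  bool gc_in_progress_ = false;
};

int main() {
  GCDriver heap;
  heap.StartIncrementalGarbageCollection();  // Marking begins incrementally.
  heap.CollectGarbage();  // Joins the running cycle and finalizes it.
}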
diff --git a/deps/v8/src/heap/cppgc/heap.h b/deps/v8/src/heap/cppgc/heap.h
index 2512afd1fd..cd4d3d2bfe 100644
--- a/deps/v8/src/heap/cppgc/heap.h
+++ b/deps/v8/src/heap/cppgc/heap.h
@@ -32,14 +32,28 @@ class V8_EXPORT_PRIVATE Heap final : public HeapBase,
HeapBase& AsBase() { return *this; }
const HeapBase& AsBase() const { return *this; }
- void CollectGarbage(Config config) final;
+ void CollectGarbage(Config) final;
+ void StartIncrementalGarbageCollection(Config) final;
+ void FinalizeIncrementalGarbageCollectionIfRunning(Config);
size_t epoch() const final { return epoch_; }
+ void DisableHeapGrowingForTesting();
+
private:
+ void StartGarbageCollection(Config);
+ void FinalizeGarbageCollection(Config::StackState);
+
+ void FinalizeIncrementalGarbageCollectionIfNeeded(
+ Config::StackState stack_state) final {
+ FinalizeGarbageCollection(stack_state);
+ }
+
+ Config config_;
GCInvoker gc_invoker_;
HeapGrowing growing_;
+ bool gc_in_progress_ = false;
size_t epoch_ = 0;
};
diff --git a/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc b/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
new file mode 100644
index 0000000000..7e1ff951ab
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
@@ -0,0 +1,74 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/incremental-marking-schedule.h"
+
+#include <cmath>
+
+#include "src/heap/cppgc/globals.h"
+
+namespace cppgc {
+namespace internal {
+
+const double IncrementalMarkingSchedule::kEstimatedMarkingTimeMs = 500.0;
+const size_t IncrementalMarkingSchedule::kMinimumMarkedBytesPerIncrementalStep =
+ 64 * kKB;
+
+void IncrementalMarkingSchedule::NotifyIncrementalMarkingStart() {
+ DCHECK(incremental_marking_start_time_.IsNull());
+ incremental_marking_start_time_ = v8::base::TimeTicks::Now();
+}
+
+void IncrementalMarkingSchedule::UpdateIncrementalMarkedBytes(
+ size_t overall_marked_bytes) {
+ DCHECK(!incremental_marking_start_time_.IsNull());
+ incrementally_marked_bytes_ = overall_marked_bytes;
+}
+
+void IncrementalMarkingSchedule::AddConcurrentlyMarkedBytes(
+ size_t marked_bytes) {
+ DCHECK(!incremental_marking_start_time_.IsNull());
+ concurrently_marked_bytes_.fetch_add(marked_bytes, std::memory_order_relaxed);
+}
+
+size_t IncrementalMarkingSchedule::GetOverallMarkedBytes() {
+ return incrementally_marked_bytes_ +
+ concurrently_marked_bytes_.load(std::memory_order_relaxed);
+}
+
+double IncrementalMarkingSchedule::GetElapsedTimeInMs(
+ v8::base::TimeTicks start_time) {
+ if (elapsed_time_for_testing_ != kNoSetElapsedTimeForTesting) {
+ double elapsed_time = elapsed_time_for_testing_;
+ elapsed_time_for_testing_ = kNoSetElapsedTimeForTesting;
+ return elapsed_time;
+ }
+ return (v8::base::TimeTicks::Now() - start_time).InMillisecondsF();
+}
+
+size_t IncrementalMarkingSchedule::GetNextIncrementalStepDuration(
+ size_t estimated_live_bytes) {
+ DCHECK(!incremental_marking_start_time_.IsNull());
+ double elapsed_time_in_ms =
+ GetElapsedTimeInMs(incremental_marking_start_time_);
+ size_t actual_marked_bytes = GetOverallMarkedBytes();
+ size_t expected_marked_bytes = std::ceil(
+ estimated_live_bytes * elapsed_time_in_ms / kEstimatedMarkingTimeMs);
+ if (expected_marked_bytes < actual_marked_bytes) {
+ // Marking is ahead of schedule, incremental marking should do the minimum.
+ return kMinimumMarkedBytesPerIncrementalStep;
+ }
+ // Assuming marking will take |kEstimatedMarkingTime|, overall there will
+ // be |estimated_live_bytes| live bytes to mark, and that marking speed is
+ // constant, after |elapsed_time| the number of marked_bytes should be
+ // |estimated_live_bytes| * (|elapsed_time| / |kEstimatedMarkingTime|),
+ // denoted as |expected_marked_bytes|. If |actual_marked_bytes| is less,
+ // i.e. marking is behind schedule, incremental marking should help "catch
+ // up" by marking (|expected_marked_bytes| - |actual_marked_bytes|).
+ return std::max(kMinimumMarkedBytesPerIncrementalStep,
+ expected_marked_bytes - actual_marked_bytes);
+}
+
+} // namespace internal
+} // namespace cppgc
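The scheduling rule in GetNextIncrementalStepDuration() above assumes marking proceeds at a constant rate and finishes within kEstimatedMarkingTimeMs; a step then has to mark whatever the schedule says should already have been marked but has not been. A condensed standalone sketch, using a hypothetical NextStepBytes helper:

// Illustrative sketch only; NextStepBytes is a hypothetical helper.
#include <algorithm>
#include <cmath>
#include <cstddef>

constexpr double kEstimatedMarkingTimeMs = 500.0;
constexpr size_t kMinimumMarkedBytesPerIncrementalStep = 64 * 1024;

size_t NextStepBytes(size_t estimated_live_bytes, size_t actual_marked_bytes,
                     double elapsed_ms) {
  // Expected progress after elapsed_ms under the constant-rate assumption.
  size_t expected_marked_bytes = static_cast<size_t>(std::ceil(
      estimated_live_bytes * elapsed_ms / kEstimatedMarkingTimeMs));
  if (expected_marked_bytes < actual_marked_bytes)
    return kMinimumMarkedBytesPerIncrementalStep;  // Ahead of schedule.
  // Behind schedule: the next step makes up the difference.
  return std::max(kMinimumMarkedBytesPerIncrementalStep,
                  expected_marked_bytes - actual_marked_bytes);
}

For instance, with 10 MB of estimated live bytes, 100 ms elapsed and only 1 MB marked so far (mutator plus concurrent markers), the expected progress is 2 MB, so the next incremental step is asked to mark about 1 MB.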
diff --git a/deps/v8/src/heap/cppgc/incremental-marking-schedule.h b/deps/v8/src/heap/cppgc/incremental-marking-schedule.h
new file mode 100644
index 0000000000..3c8a9e1a01
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/incremental-marking-schedule.h
@@ -0,0 +1,53 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_INCREMENTAL_MARKING_SCHEDULE_H_
+#define V8_HEAP_CPPGC_INCREMENTAL_MARKING_SCHEDULE_H_
+
+#include <atomic>
+
+#include "src/base/platform/time.h"
+
+namespace cppgc {
+namespace internal {
+
+class V8_EXPORT_PRIVATE IncrementalMarkingSchedule {
+ public:
+ // Estimated duration of GC cycle in milliseconds.
+ static const double kEstimatedMarkingTimeMs;
+
+ // Minimum number of bytes that should be marked during an incremental
+ // marking step.
+ static const size_t kMinimumMarkedBytesPerIncrementalStep;
+
+ void NotifyIncrementalMarkingStart();
+
+ void UpdateIncrementalMarkedBytes(size_t);
+ void AddConcurrentlyMarkedBytes(size_t);
+
+ size_t GetOverallMarkedBytes();
+
+ size_t GetNextIncrementalStepDuration(size_t);
+
+ void SetElapsedTimeForTesting(double elapsed_time) {
+ elapsed_time_for_testing_ = elapsed_time;
+ }
+
+ private:
+ double GetElapsedTimeInMs(v8::base::TimeTicks);
+
+ v8::base::TimeTicks incremental_marking_start_time_;
+
+ size_t incrementally_marked_bytes_ = 0;
+ std::atomic_size_t concurrently_marked_bytes_{0};
+
+ // Using -1 as a sentinel to denote that no elapsed time was set for testing.
+ static constexpr double kNoSetElapsedTimeForTesting = -1;
+ double elapsed_time_for_testing_ = kNoSetElapsedTimeForTesting;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_INCREMENTAL_MARKING_SCHEDULE_H_
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index 104d4d2041..0d044588b6 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -7,6 +7,7 @@
#include <memory>
#include "include/cppgc/internal/process-heap.h"
+#include "include/cppgc/platform.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-visitor.h"
@@ -25,28 +26,32 @@ namespace internal {
namespace {
-void EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
+bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
HeapBase& heap) {
if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
config.marking_type ==
Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
ProcessHeap::EnterIncrementalOrConcurrentMarking();
- }
#if defined(CPPGC_CAGED_HEAP)
- heap.caged_heap().local_data().is_marking_in_progress = true;
+ heap.caged_heap().local_data().is_marking_in_progress = true;
#endif
+ return true;
+ }
+ return false;
}
-void ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
+bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
HeapBase& heap) {
if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
config.marking_type ==
Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
ProcessHeap::ExitIncrementalOrConcurrentMarking();
- }
#if defined(CPPGC_CAGED_HEAP)
- heap.caged_heap().local_data().is_marking_in_progress = false;
+ heap.caged_heap().local_data().is_marking_in_progress = false;
#endif
+ return true;
+ }
+ return false;
}
// Visit remembered set that was recorded in the generational barrier.
@@ -78,26 +83,42 @@ void ResetRememberedSet(HeapBase& heap) {
#endif
}
-template <typename Worklist, typename Callback>
-bool DrainWorklistWithDeadline(v8::base::TimeTicks deadline, Worklist* worklist,
- Callback callback, int task_id) {
- const size_t kDeadlineCheckInterval = 1250;
+static constexpr size_t kDefaultDeadlineCheckInterval = 150u;
+template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
+ typename WorklistLocal, typename Callback, typename Predicate>
+bool DrainWorklistWithDeadline(Predicate should_yield,
+ WorklistLocal& worklist_local,
+ Callback callback) {
size_t processed_callback_count = 0;
- typename Worklist::View view(worklist, task_id);
- typename Worklist::EntryType item;
- while (view.Pop(&item)) {
+ typename WorklistLocal::ItemType item;
+ while (worklist_local.Pop(&item)) {
callback(item);
- if (++processed_callback_count == kDeadlineCheckInterval) {
- if (deadline <= v8::base::TimeTicks::Now()) {
+ if (processed_callback_count-- == 0) {
+ if (should_yield()) {
return false;
}
- processed_callback_count = 0;
+ processed_callback_count = kDeadlineCheckInterval;
}
}
return true;
}
+template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
+ typename WorklistLocal, typename Callback>
+bool DrainWorklistWithBytesAndTimeDeadline(MarkingState& marking_state,
+ size_t marked_bytes_deadline,
+ v8::base::TimeTicks time_deadline,
+ WorklistLocal& worklist_local,
+ Callback callback) {
+ return DrainWorklistWithDeadline(
+ [&marking_state, marked_bytes_deadline, time_deadline]() {
+ return (marked_bytes_deadline <= marking_state.marked_bytes()) ||
+ (time_deadline <= v8::base::TimeTicks::Now());
+ },
+ worklist_local, callback);
+}
+
void TraceMarkedObject(Visitor* visitor, const HeapObjectHeader* header) {
DCHECK(header);
DCHECK(!header->IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>());
@@ -107,15 +128,64 @@ void TraceMarkedObject(Visitor* visitor, const HeapObjectHeader* header) {
gcinfo.trace(visitor, header->Payload());
}
+size_t GetNextIncrementalStepDuration(IncrementalMarkingSchedule& schedule,
+ HeapBase& heap) {
+ return schedule.GetNextIncrementalStepDuration(
+ heap.stats_collector()->allocated_object_size());
+}
+
} // namespace
-MarkerBase::MarkerBase(HeapBase& heap)
+constexpr v8::base::TimeDelta MarkerBase::kMaximumIncrementalStepDuration;
+
+MarkerBase::IncrementalMarkingTask::IncrementalMarkingTask(
+ MarkerBase* marker, MarkingConfig::StackState stack_state)
+ : marker_(marker),
+ stack_state_(stack_state),
+ handle_(Handle::NonEmptyTag{}) {}
+
+// static
+MarkerBase::IncrementalMarkingTask::Handle
+MarkerBase::IncrementalMarkingTask::Post(cppgc::TaskRunner* runner,
+ MarkerBase* marker) {
+ // Incremental GC is possible only via the GCInvoker, so getting here
+ // guarantees that either non-nestable tasks or conservative stack
+ // scanning is supported. This is required so that the incremental
+ // task can safely finalize GC if needed.
+ DCHECK_IMPLIES(marker->heap().stack_support() !=
+ HeapBase::StackSupport::kSupportsConservativeStackScan,
+ runner->NonNestableTasksEnabled());
+ MarkingConfig::StackState stack_state_for_task =
+ runner->NonNestableTasksEnabled()
+ ? MarkingConfig::StackState::kNoHeapPointers
+ : MarkingConfig::StackState::kMayContainHeapPointers;
+ auto task =
+ std::make_unique<IncrementalMarkingTask>(marker, stack_state_for_task);
+ auto handle = task->handle_;
+ if (runner->NonNestableTasksEnabled()) {
+ runner->PostNonNestableTask(std::move(task));
+ } else {
+ runner->PostTask(std::move(task));
+ }
+ return handle;
+}
+
+void MarkerBase::IncrementalMarkingTask::Run() {
+ if (handle_.IsCanceled()) return;
+
+ if (marker_->IncrementalMarkingStep(stack_state_)) {
+ // Incremental marking is done so should finalize GC.
+ marker_->heap().FinalizeIncrementalGarbageCollectionIfNeeded(stack_state_);
+ }
+}
+
+MarkerBase::MarkerBase(Key, HeapBase& heap, cppgc::Platform* platform,
+ MarkingConfig config)
: heap_(heap),
- mutator_marking_state_(
- heap, marking_worklists_.marking_worklist(),
- marking_worklists_.not_fully_constructed_worklist(),
- marking_worklists_.weak_callback_worklist(),
- MarkingWorklists::kMutatorThreadId) {}
+ config_(config),
+ platform_(platform),
+ foreground_task_runner_(platform_->GetForegroundTaskRunner()),
+ mutator_marking_state_(heap, marking_worklists_) {}
MarkerBase::~MarkerBase() {
// The fixed point iteration may have found not-fully-constructed objects.
@@ -125,10 +195,9 @@ MarkerBase::~MarkerBase() {
#if DEBUG
DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state);
HeapObjectHeader* header;
- MarkingWorklists::NotFullyConstructedWorklist::View view(
- marking_worklists_.not_fully_constructed_worklist(),
- MarkingWorklists::kMutatorThreadId);
- while (view.Pop(&header)) {
+ MarkingWorklists::NotFullyConstructedWorklist::Local& local =
+ mutator_marking_state_.not_fully_constructed_worklist();
+ while (local.Pop(&header)) {
DCHECK(header->IsMarked());
}
#else
@@ -137,37 +206,52 @@ MarkerBase::~MarkerBase() {
}
}
-void MarkerBase::StartMarking(MarkingConfig config) {
+void MarkerBase::StartMarking() {
heap().stats_collector()->NotifyMarkingStarted();
- config_ = config;
- VisitRoots();
- EnterIncrementalMarkingIfNeeded(config, heap());
+ is_marking_started_ = true;
+ if (EnterIncrementalMarkingIfNeeded(config_, heap())) {
+ // Performing incremental or concurrent marking.
+ schedule_.NotifyIncrementalMarkingStart();
+ // Scanning the stack is expensive so we only do it at the atomic pause.
+ VisitRoots(MarkingConfig::StackState::kNoHeapPointers);
+ ScheduleIncrementalMarkingTask();
+ }
}
-void MarkerBase::EnterAtomicPause(MarkingConfig config) {
- ExitIncrementalMarkingIfNeeded(config_, heap());
- config_ = config;
+void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
+ if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
+ // Cancel remaining incremental tasks.
+ if (incremental_marking_handle_) incremental_marking_handle_.Cancel();
+ }
+ config_.stack_state = stack_state;
+ config_.marking_type = MarkingConfig::MarkingType::kAtomic;
// VisitRoots also resets the LABs.
- VisitRoots();
+ VisitRoots(config_.stack_state);
if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
- marking_worklists_.FlushNotFullyConstructedObjects();
+ mutator_marking_state_.FlushNotFullyConstructedObjects();
} else {
MarkNotFullyConstructedObjects();
}
}
void MarkerBase::LeaveAtomicPause() {
+ DCHECK(!incremental_marking_handle_);
ResetRememberedSet(heap());
heap().stats_collector()->NotifyMarkingCompleted(
- mutator_marking_state_.marked_bytes());
+ // GetOverallMarkedBytes also includes concurrently marked bytes.
+ schedule_.GetOverallMarkedBytes());
}
-void MarkerBase::FinishMarking(MarkingConfig config) {
- EnterAtomicPause(config);
- AdvanceMarkingWithDeadline(v8::base::TimeDelta::Max());
+void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
+ DCHECK(is_marking_started_);
+ EnterAtomicPause(stack_state);
+ ProcessWorklistsWithDeadline(std::numeric_limits<size_t>::max(),
+ v8::base::TimeTicks::Max());
+ mutator_marking_state_.Publish();
LeaveAtomicPause();
+ is_marking_started_ = false;
}
void MarkerBase::ProcessWeakness() {
@@ -176,23 +260,22 @@ void MarkerBase::ProcessWeakness() {
// Call weak callbacks on objects that may now be pointing to dead objects.
MarkingWorklists::WeakCallbackItem item;
LivenessBroker broker = LivenessBrokerFactory::Create();
- MarkingWorklists::WeakCallbackWorklist::View view(
- marking_worklists_.weak_callback_worklist(),
- MarkingWorklists::kMutatorThreadId);
- while (view.Pop(&item)) {
+ MarkingWorklists::WeakCallbackWorklist::Local& local =
+ mutator_marking_state_.weak_callback_worklist();
+ while (local.Pop(&item)) {
item.callback(broker, item.parameter);
}
// Weak callbacks should not add any new objects for marking.
DCHECK(marking_worklists_.marking_worklist()->IsEmpty());
}
-void MarkerBase::VisitRoots() {
+void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
// Reset LABs before scanning roots. LABs are cleared to allow
// ObjectStartBitmap handling without considering LABs.
heap().object_allocator().ResetLinearAllocationBuffers();
heap().GetStrongPersistentRegion().Trace(&visitor());
- if (config_.stack_state != MarkingConfig::StackState::kNoHeapPointers) {
+ if (stack_state != MarkingConfig::StackState::kNoHeapPointers) {
heap().stack()->IteratePointers(&stack_visitor());
}
if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
@@ -200,25 +283,79 @@ void MarkerBase::VisitRoots() {
}
}
-bool MarkerBase::AdvanceMarkingWithDeadline(v8::base::TimeDelta duration) {
- v8::base::TimeTicks deadline = v8::base::TimeTicks::Now() + duration;
+void MarkerBase::ScheduleIncrementalMarkingTask() {
+ if (!platform_ || !foreground_task_runner_ || incremental_marking_handle_)
+ return;
+ incremental_marking_handle_ =
+ IncrementalMarkingTask::Post(foreground_task_runner_.get(), this);
+}
+
+bool MarkerBase::IncrementalMarkingStepForTesting(
+ MarkingConfig::StackState stack_state) {
+ return IncrementalMarkingStep(stack_state);
+}
+
+bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) {
+ if (stack_state == MarkingConfig::StackState::kNoHeapPointers) {
+ mutator_marking_state_.FlushNotFullyConstructedObjects();
+ }
+ config_.stack_state = stack_state;
+
+ return AdvanceMarkingWithDeadline();
+}
+
+bool MarkerBase::AdvanceMarkingOnAllocation() {
+ bool is_done = AdvanceMarkingWithDeadline();
+ if (is_done) {
+ // Schedule another incremental task for finalizing without a stack.
+ ScheduleIncrementalMarkingTask();
+ }
+ return is_done;
+}
+
+bool MarkerBase::AdvanceMarkingWithMaxDuration(
+ v8::base::TimeDelta max_duration) {
+ return AdvanceMarkingWithDeadline(max_duration);
+}
+
+bool MarkerBase::AdvanceMarkingWithDeadline(v8::base::TimeDelta max_duration) {
+ bool is_done = false;
+ if (!incremental_marking_disabled_for_testing_) {
+ size_t step_size_in_bytes =
+ GetNextIncrementalStepDuration(schedule_, heap_);
+ is_done = ProcessWorklistsWithDeadline(
+ mutator_marking_state_.marked_bytes() + step_size_in_bytes,
+ v8::base::TimeTicks::Now() + max_duration);
+ }
+ schedule_.UpdateIncrementalMarkedBytes(mutator_marking_state_.marked_bytes());
+ if (!is_done) {
+ // If marking is atomic, |is_done| should always be true.
+ DCHECK_NE(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
+ ScheduleIncrementalMarkingTask();
+ }
+ mutator_marking_state_.Publish();
+ return is_done;
+}
+bool MarkerBase::ProcessWorklistsWithDeadline(
+ size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
do {
// Convert |previously_not_fully_constructed_worklist_| to
// |marking_worklist_|. This merely re-adds items with the proper
// callbacks.
- if (!DrainWorklistWithDeadline(
- deadline,
- marking_worklists_.previously_not_fully_constructed_worklist(),
+ if (!DrainWorklistWithBytesAndTimeDeadline(
+ mutator_marking_state_, marked_bytes_deadline, time_deadline,
+ mutator_marking_state_.previously_not_fully_constructed_worklist(),
[this](HeapObjectHeader* header) {
TraceMarkedObject(&visitor(), header);
mutator_marking_state_.AccountMarkedBytes(*header);
- },
- MarkingWorklists::kMutatorThreadId))
+ })) {
return false;
+ }
- if (!DrainWorklistWithDeadline(
- deadline, marking_worklists_.marking_worklist(),
+ if (!DrainWorklistWithBytesAndTimeDeadline(
+ mutator_marking_state_, marked_bytes_deadline, time_deadline,
+ mutator_marking_state_.marking_worklist(),
[this](const MarkingWorklists::MarkingItem& item) {
const HeapObjectHeader& header =
HeapObjectHeader::FromPayload(item.base_object_payload);
@@ -228,30 +365,28 @@ bool MarkerBase::AdvanceMarkingWithDeadline(v8::base::TimeDelta duration) {
header.IsMarked<HeapObjectHeader::AccessMode::kNonAtomic>());
item.callback(&visitor(), item.base_object_payload);
mutator_marking_state_.AccountMarkedBytes(header);
- },
- MarkingWorklists::kMutatorThreadId))
+ })) {
return false;
+ }
- if (!DrainWorklistWithDeadline(
- deadline, marking_worklists_.write_barrier_worklist(),
+ if (!DrainWorklistWithBytesAndTimeDeadline(
+ mutator_marking_state_, marked_bytes_deadline, time_deadline,
+ mutator_marking_state_.write_barrier_worklist(),
[this](HeapObjectHeader* header) {
TraceMarkedObject(&visitor(), header);
mutator_marking_state_.AccountMarkedBytes(*header);
- },
- MarkingWorklists::kMutatorThreadId))
+ })) {
return false;
- } while (!marking_worklists_.marking_worklist()->IsLocalViewEmpty(
- MarkingWorklists::kMutatorThreadId));
-
+ }
+ } while (!mutator_marking_state_.marking_worklist().IsLocalAndGlobalEmpty());
return true;
}
void MarkerBase::MarkNotFullyConstructedObjects() {
HeapObjectHeader* header;
- MarkingWorklists::NotFullyConstructedWorklist::View view(
- marking_worklists_.not_fully_constructed_worklist(),
- MarkingWorklists::kMutatorThreadId);
- while (view.Pop(&header)) {
+ MarkingWorklists::NotFullyConstructedWorklist::Local& local =
+ mutator_marking_state_.not_fully_constructed_worklist();
+ while (local.Pop(&header)) {
DCHECK(header);
DCHECK(header->IsMarked<HeapObjectHeader::AccessMode::kNonAtomic>());
// TraceConservativelyIfNeeded will either push to a worklist
@@ -264,8 +399,13 @@ void MarkerBase::ClearAllWorklistsForTesting() {
marking_worklists_.ClearForTesting();
}
-Marker::Marker(HeapBase& heap)
- : MarkerBase(heap),
+void MarkerBase::DisableIncrementalMarkingForTesting() {
+ incremental_marking_disabled_for_testing_ = true;
+}
+
+Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
+ MarkingConfig config)
+ : MarkerBase(key, heap, platform, config),
marking_visitor_(heap, mutator_marking_state_),
conservative_marking_visitor_(heap, mutator_marking_state_,
marking_visitor_) {}
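DrainWorklistWithDeadline() above factors the yield decision into a predicate and only evaluates it every kDeadlineCheckInterval callbacks, and DrainWorklistWithBytesAndTimeDeadline() supplies a predicate that checks both a marked-bytes budget and a wall-clock deadline. A minimal standalone sketch of the pattern, with std::deque standing in for a worklist's local view:

// Illustrative sketch only; std::deque stands in for a worklist Local view.
#include <chrono>
#include <cstddef>
#include <deque>

template <size_t kCheckInterval = 150, typename T, typename Callback,
          typename Predicate>
bool DrainWithDeadline(Predicate should_yield, std::deque<T>& worklist,
                       Callback callback) {
  size_t processed = 0;
  while (!worklist.empty()) {
    T item = worklist.front();
    worklist.pop_front();
    callback(item);
    // Evaluating the predicate (e.g. reading the clock) is comparatively
    // expensive, so only do it every kCheckInterval callbacks.
    if (++processed == kCheckInterval) {
      if (should_yield()) return false;  // Bail out; the caller reschedules.
      processed = 0;
    }
  }
  return true;  // Fully drained within the deadline.
}

int main() {
  std::deque<int> work{1, 2, 3};
  const auto deadline =
      std::chrono::steady_clock::now() + std::chrono::milliseconds(2);
  return DrainWithDeadline(
             [deadline] { return std::chrono::steady_clock::now() >= deadline; },
             work, [](int) { /* trace the object here */ }) ? 0 : 1;
}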
diff --git a/deps/v8/src/heap/cppgc/marker.h b/deps/v8/src/heap/cppgc/marker.h
index 80a056c436..47ce9998b4 100644
--- a/deps/v8/src/heap/cppgc/marker.h
+++ b/deps/v8/src/heap/cppgc/marker.h
@@ -11,20 +11,24 @@
#include "include/cppgc/visitor.h"
#include "src/base/macros.h"
#include "src/base/platform/time.h"
+#include "src/heap/base/worklist.h"
#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/incremental-marking-schedule.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/marking-worklists.h"
-#include "src/heap/cppgc/worklist.h"
+#include "src/heap/cppgc/task-handle.h"
namespace cppgc {
namespace internal {
class HeapBase;
+class MarkerFactory;
// Marking algorithm. Example for a valid call sequence creating the marking
// phase:
-// 1. StartMarking()
+// 1. StartMarking() [Called implicitly when creating a Marker using
+// MarkerFactory]
// 2. AdvanceMarkingWithDeadline() [Optional, depending on environment.]
// 3. EnterAtomicPause()
// 4. AdvanceMarkingWithDeadline()
@@ -47,9 +51,9 @@ class V8_EXPORT_PRIVATE MarkerBase {
static constexpr MarkingConfig Default() { return {}; }
- CollectionType collection_type = CollectionType::kMajor;
+ const CollectionType collection_type = CollectionType::kMajor;
StackState stack_state = StackState::kMayContainHeapPointers;
- MarkingType marking_type = MarkingType::kAtomic;
+ MarkingType marking_type = MarkingType::kIncremental;
};
virtual ~MarkerBase();
@@ -57,18 +61,19 @@ class V8_EXPORT_PRIVATE MarkerBase {
MarkerBase(const MarkerBase&) = delete;
MarkerBase& operator=(const MarkerBase&) = delete;
- // Initialize marking according to the given config. This method will
- // trigger incremental/concurrent marking if needed.
- void StartMarking(MarkingConfig config);
-
// Signals entering the atomic marking pause. The method
// - stops incremental/concurrent marking;
// - flushes back any in-construction worklists if needed;
// - Updates the MarkingConfig if the stack state has changed;
- void EnterAtomicPause(MarkingConfig config);
+ void EnterAtomicPause(MarkingConfig::StackState);
// Makes marking progress.
- virtual bool AdvanceMarkingWithDeadline(v8::base::TimeDelta);
+ // TODO(chromium:1056170): Remove TimeDelta argument when unified heap no
+ // longer uses it.
+ bool AdvanceMarkingWithMaxDuration(v8::base::TimeDelta);
+
+ // Makes marking progress when allocating a new LAB (linear allocation buffer).
+ bool AdvanceMarkingOnAllocation();
// Signals leaving the atomic marking pause. This method expects no more
// objects to be marked and merely updates marking states if needed.
@@ -78,7 +83,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
// - EnterAtomicPause()
// - AdvanceMarkingWithDeadline()
// - LeaveAtomicPause()
- void FinishMarking(MarkingConfig config);
+ void FinishMarking(MarkingConfig::StackState);
void ProcessWeakness();
@@ -92,27 +97,98 @@ class V8_EXPORT_PRIVATE MarkerBase {
cppgc::Visitor& VisitorForTesting() { return visitor(); }
void ClearAllWorklistsForTesting();
+ bool IncrementalMarkingStepForTesting(MarkingConfig::StackState);
+
+ class IncrementalMarkingTask final : public cppgc::Task {
+ public:
+ using Handle = SingleThreadedHandle;
+
+ IncrementalMarkingTask(MarkerBase*, MarkingConfig::StackState);
+
+ static Handle Post(cppgc::TaskRunner*, MarkerBase*);
+
+ private:
+ void Run() final;
+
+ MarkerBase* const marker_;
+ MarkingConfig::StackState stack_state_;
+ // TODO(chromium:1056170): Change to CancelableTask.
+ Handle handle_;
+ };
+
+ void DisableIncrementalMarkingForTesting();
+
protected:
- explicit MarkerBase(HeapBase& heap);
+ static constexpr v8::base::TimeDelta kMaximumIncrementalStepDuration =
+ v8::base::TimeDelta::FromMilliseconds(2);
+
+ class Key {
+ private:
+ Key() = default;
+ friend class MarkerFactory;
+ };
+
+ MarkerBase(Key, HeapBase&, cppgc::Platform*, MarkingConfig);
+
+ // Initialize marking according to the given config. This method will
+ // trigger incremental/concurrent marking if needed.
+ void StartMarking();
virtual cppgc::Visitor& visitor() = 0;
virtual ConservativeTracingVisitor& conservative_visitor() = 0;
virtual heap::base::StackVisitor& stack_visitor() = 0;
- void VisitRoots();
+ // Makes marking progress.
+ // TODO(chromium:1056170): Remove TimeDelta argument when unified heap no
+ // longer uses it.
+ bool AdvanceMarkingWithDeadline(
+ v8::base::TimeDelta = kMaximumIncrementalStepDuration);
+
+ bool ProcessWorklistsWithDeadline(size_t, v8::base::TimeTicks);
+
+ void VisitRoots(MarkingConfig::StackState);
void MarkNotFullyConstructedObjects();
+ void ScheduleIncrementalMarkingTask();
+
+ bool IncrementalMarkingStep(MarkingConfig::StackState);
+
HeapBase& heap_;
MarkingConfig config_ = MarkingConfig::Default();
+ cppgc::Platform* platform_;
+ std::shared_ptr<cppgc::TaskRunner> foreground_task_runner_;
+ IncrementalMarkingTask::Handle incremental_marking_handle_;
+
MarkingWorklists marking_worklists_;
MarkingState mutator_marking_state_;
+ bool is_marking_started_ = false;
+
+ IncrementalMarkingSchedule schedule_;
+
+ bool incremental_marking_disabled_for_testing_{false};
+
+ friend class MarkerFactory;
+};
+
+class V8_EXPORT_PRIVATE MarkerFactory {
+ public:
+ template <typename T, typename... Args>
+ static std::unique_ptr<T> CreateAndStartMarking(Args&&... args) {
+ static_assert(std::is_base_of<MarkerBase, T>::value,
+ "MarkerFactory can only create subclasses of MarkerBase");
+ std::unique_ptr<T> marker =
+ std::make_unique<T>(MarkerBase::Key(), std::forward<Args>(args)...);
+ marker->StartMarking();
+ return marker;
+ }
};
class V8_EXPORT_PRIVATE Marker final : public MarkerBase {
public:
- explicit Marker(HeapBase&);
+ Marker(Key, HeapBase&, cppgc::Platform*,
+ MarkingConfig = MarkingConfig::Default());
protected:
cppgc::Visitor& visitor() final { return marking_visitor_; }
@@ -129,18 +205,11 @@ class V8_EXPORT_PRIVATE Marker final : public MarkerBase {
};
void MarkerBase::WriteBarrierForInConstructionObject(HeapObjectHeader& header) {
- MarkingWorklists::NotFullyConstructedWorklist::View
- not_fully_constructed_worklist(
- marking_worklists_.not_fully_constructed_worklist(),
- MarkingWorklists::kMutatorThreadId);
- not_fully_constructed_worklist.Push(&header);
+ mutator_marking_state_.not_fully_constructed_worklist().Push(&header);
}
void MarkerBase::WriteBarrierForObject(HeapObjectHeader& header) {
- MarkingWorklists::WriteBarrierWorklist::View write_barrier_worklist(
- marking_worklists_.write_barrier_worklist(),
- MarkingWorklists::kMutatorThreadId);
- write_barrier_worklist.Push(&header);
+ mutator_marking_state_.write_barrier_worklist().Push(&header);
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/marking-state.cc b/deps/v8/src/heap/cppgc/marking-state.cc
new file mode 100644
index 0000000000..0cc160bd0d
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/marking-state.cc
@@ -0,0 +1,20 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/marking-state.h"
+
+namespace cppgc {
+namespace internal {
+
+void MarkingState::FlushNotFullyConstructedObjects() {
+ not_fully_constructed_worklist().Publish();
+ if (!not_fully_constructed_worklist_.IsGlobalEmpty()) {
+ previously_not_fully_constructed_worklist_.Merge(
+ &not_fully_constructed_worklist_);
+ }
+ DCHECK(not_fully_constructed_worklist_.IsGlobalEmpty());
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/marking-state.h b/deps/v8/src/heap/cppgc/marking-state.h
index b27956964f..526633d455 100644
--- a/deps/v8/src/heap/cppgc/marking-state.h
+++ b/deps/v8/src/heap/cppgc/marking-state.h
@@ -18,9 +18,7 @@ namespace internal {
// C++ marking implementation.
class MarkingState {
public:
- inline MarkingState(HeapBase& heap, MarkingWorklists::MarkingWorklist*,
- MarkingWorklists::NotFullyConstructedWorklist*,
- MarkingWorklists::WeakCallbackWorklist*, int);
+ inline MarkingState(HeapBase& heap, MarkingWorklists&);
MarkingState(const MarkingState&) = delete;
MarkingState& operator=(const MarkingState&) = delete;
@@ -44,31 +42,64 @@ class MarkingState {
inline void AccountMarkedBytes(const HeapObjectHeader&);
size_t marked_bytes() const { return marked_bytes_; }
+ void Publish() {
+ marking_worklist_.Publish();
+ not_fully_constructed_worklist_.Publish();
+ previously_not_fully_constructed_worklist_.Publish();
+ weak_callback_worklist_.Publish();
+ write_barrier_worklist_.Publish();
+ }
+
+ // Moves objects in not_fully_constructed_worklist_ to
+ // previously_not_fully_constructed_worklist_.
+ void FlushNotFullyConstructedObjects();
+
+ MarkingWorklists::MarkingWorklist::Local& marking_worklist() {
+ return marking_worklist_;
+ }
+ MarkingWorklists::NotFullyConstructedWorklist::Local&
+ not_fully_constructed_worklist() {
+ return not_fully_constructed_worklist_;
+ }
+ MarkingWorklists::NotFullyConstructedWorklist::Local&
+ previously_not_fully_constructed_worklist() {
+ return previously_not_fully_constructed_worklist_;
+ }
+ MarkingWorklists::WeakCallbackWorklist::Local& weak_callback_worklist() {
+ return weak_callback_worklist_;
+ }
+ MarkingWorklists::WriteBarrierWorklist::Local& write_barrier_worklist() {
+ return write_barrier_worklist_;
+ }
+
private:
#ifdef DEBUG
HeapBase& heap_;
#endif // DEBUG
- MarkingWorklists::MarkingWorklist::View marking_worklist_;
- MarkingWorklists::NotFullyConstructedWorklist::View
+ MarkingWorklists::MarkingWorklist::Local marking_worklist_;
+ MarkingWorklists::NotFullyConstructedWorklist::Local
not_fully_constructed_worklist_;
- MarkingWorklists::WeakCallbackWorklist::View weak_callback_worklist_;
+ MarkingWorklists::NotFullyConstructedWorklist::Local
+ previously_not_fully_constructed_worklist_;
+ MarkingWorklists::WeakCallbackWorklist::Local weak_callback_worklist_;
+ MarkingWorklists::WriteBarrierWorklist::Local write_barrier_worklist_;
size_t marked_bytes_ = 0;
};
-MarkingState::MarkingState(
- HeapBase& heap, MarkingWorklists::MarkingWorklist* marking_worklist,
- MarkingWorklists::NotFullyConstructedWorklist*
- not_fully_constructed_worklist,
- MarkingWorklists::WeakCallbackWorklist* weak_callback_worklist, int task_id)
+MarkingState::MarkingState(HeapBase& heap, MarkingWorklists& marking_worklists)
:
#ifdef DEBUG
heap_(heap),
#endif // DEBUG
- marking_worklist_(marking_worklist, task_id),
- not_fully_constructed_worklist_(not_fully_constructed_worklist, task_id),
- weak_callback_worklist_(weak_callback_worklist, task_id) {
+ marking_worklist_(marking_worklists.marking_worklist()),
+ not_fully_constructed_worklist_(
+ marking_worklists.not_fully_constructed_worklist()),
+ previously_not_fully_constructed_worklist_(
+ marking_worklists.previously_not_fully_constructed_worklist()),
+ weak_callback_worklist_(marking_worklists.weak_callback_worklist()),
+ write_barrier_worklist_(marking_worklists.write_barrier_worklist()) {
}
void MarkingState::MarkAndPush(const void* object, TraceDescriptor desc) {
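
MarkingState now owns Worklist::Local handles instead of View(worklist, task_id) pairs: pushes and pops go to a private buffer, and Publish() is the only operation that makes entries visible through the shared worklist. The toy worklist below is only meant to illustrate that Local/Publish contract; it is not heap::base::Worklist and none of the names come from the V8 sources.

#include <cstdio>
#include <mutex>
#include <vector>

// Toy worklist: Local handles buffer entries privately; only Publish()
// moves them into the shared pool that other parties can observe.
template <typename T>
class ToyWorklist {
 public:
  class Local {
   public:
    explicit Local(ToyWorklist& global) : global_(global) {}

    void Push(T value) { buffer_.push_back(value); }

    bool Pop(T* out) {
      if (buffer_.empty()) return false;
      *out = buffer_.back();
      buffer_.pop_back();
      return true;
    }

    // Hand the private buffer over to the shared pool.
    void Publish() {
      std::lock_guard<std::mutex> guard(global_.mutex_);
      global_.pool_.insert(global_.pool_.end(), buffer_.begin(), buffer_.end());
      buffer_.clear();
    }

   private:
    ToyWorklist& global_;
    std::vector<T> buffer_;
  };

  bool IsGlobalEmpty() {
    std::lock_guard<std::mutex> guard(mutex_);
    return pool_.empty();
  }

 private:
  std::mutex mutex_;
  std::vector<T> pool_;
};

int main() {
  ToyWorklist<int> worklist;
  ToyWorklist<int>::Local local(worklist);
  local.Push(1);
  local.Push(2);
  std::printf("global empty before Publish: %d\n", worklist.IsGlobalEmpty());
  local.Publish();
  std::printf("global empty after Publish:  %d\n", worklist.IsGlobalEmpty());
  return 0;
}
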
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.cc b/deps/v8/src/heap/cppgc/marking-verifier.cc
index 58c1368f99..4238709ae1 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.cc
+++ b/deps/v8/src/heap/cppgc/marking-verifier.cc
@@ -16,8 +16,11 @@ MarkingVerifier::MarkingVerifier(HeapBase& heap,
: cppgc::Visitor(VisitorFactory::CreateKey()),
ConservativeTracingVisitor(heap, *heap.page_backend(), *this) {
Traverse(&heap.raw_heap());
- if (stack_state == Heap::Config::StackState::kMayContainHeapPointers)
+ if (stack_state == Heap::Config::StackState::kMayContainHeapPointers) {
+ in_construction_objects_ = &in_construction_objects_stack_;
heap.stack()->IteratePointers(this);
+ CHECK_EQ(in_construction_objects_stack_, in_construction_objects_heap_);
+ }
}
void MarkingVerifier::Visit(const void* object, TraceDescriptor desc) {
@@ -42,6 +45,8 @@ void MarkingVerifier::VerifyChild(const void* base_object_payload) {
void MarkingVerifier::VisitConservatively(
HeapObjectHeader& header, TraceConservativelyCallback callback) {
CHECK(header.IsMarked());
+ in_construction_objects_->insert(&header);
+ callback(this, header);
}
void MarkingVerifier::VisitPointer(const void* address) {
@@ -54,8 +59,14 @@ bool MarkingVerifier::VisitHeapObjectHeader(HeapObjectHeader* header) {
DCHECK(!header->IsFree());
- GlobalGCInfoTable::GCInfoFromIndex(header->GetGCInfoIndex())
- .trace(this, header->Payload());
+ if (!header->IsInConstruction()) {
+ GlobalGCInfoTable::GCInfoFromIndex(header->GetGCInfoIndex())
+ .trace(this, header->Payload());
+ } else {
+ // Dispatches to conservative tracing implementation.
+ TraceConservativelyIfNeeded(*header);
+ }
+
return true;
}
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.h b/deps/v8/src/heap/cppgc/marking-verifier.h
index 440b198dd5..45661bd465 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.h
+++ b/deps/v8/src/heap/cppgc/marking-verifier.h
@@ -5,6 +5,8 @@
#ifndef V8_HEAP_CPPGC_MARKING_VERIFIER_H_
#define V8_HEAP_CPPGC_MARKING_VERIFIER_H_
+#include <unordered_set>
+
#include "src/heap/base/stack.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
@@ -34,6 +36,11 @@ class V8_EXPORT_PRIVATE MarkingVerifier final
void VisitPointer(const void*) final;
bool VisitHeapObjectHeader(HeapObjectHeader*);
+
+ std::unordered_set<const HeapObjectHeader*> in_construction_objects_heap_;
+ std::unordered_set<const HeapObjectHeader*> in_construction_objects_stack_;
+ std::unordered_set<const HeapObjectHeader*>* in_construction_objects_ =
+ &in_construction_objects_heap_;
};
} // namespace internal
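
The two in_construction_objects_* sets plus the in_construction_objects_ pointer implement a simple cross-check: in-construction objects found during the heap traversal are recorded in one set, the pointer is switched to the second set before the stack scan, and the CHECK_EQ in marking-verifier.cc requires both sets to end up equal. A toy rendition of that pointer-switch pattern, assuming made-up object ids rather than HeapObjectHeader pointers:

#include <cassert>
#include <unordered_set>

// Record object ids into whichever set the 'active' pointer currently
// designates, then compare the two sets, mirroring the heap-walk/stack-scan
// cross-check above.
struct Recorder {
  std::unordered_set<int> found_via_heap;
  std::unordered_set<int> found_via_stack;
  std::unordered_set<int>* active = &found_via_heap;

  void Record(int object_id) { active->insert(object_id); }
};

int main() {
  Recorder recorder;
  recorder.Record(1);  // Heap traversal phase.
  recorder.Record(2);
  recorder.active = &recorder.found_via_stack;  // Switch before the stack scan.
  recorder.Record(2);
  recorder.Record(1);
  // Mirrors the CHECK_EQ(in_construction_objects_stack_, ..._heap_) above.
  assert(recorder.found_via_heap == recorder.found_via_stack);
  return 0;
}
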
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.h b/deps/v8/src/heap/cppgc/marking-visitor.h
index 0bfc96b7d0..408fa2514c 100644
--- a/deps/v8/src/heap/cppgc/marking-visitor.h
+++ b/deps/v8/src/heap/cppgc/marking-visitor.h
@@ -23,7 +23,7 @@ class V8_EXPORT_PRIVATE MarkingVisitor : public VisitorBase {
MarkingVisitor(HeapBase&, MarkingState&);
~MarkingVisitor() override = default;
- private:
+ protected:
void Visit(const void*, TraceDescriptor) final;
void VisitWeak(const void*, TraceDescriptor, WeakCallback, const void*) final;
void VisitRoot(const void*, TraceDescriptor) final;
diff --git a/deps/v8/src/heap/cppgc/marking-worklists.cc b/deps/v8/src/heap/cppgc/marking-worklists.cc
index ecbfe48d82..15d78fd4cf 100644
--- a/deps/v8/src/heap/cppgc/marking-worklists.cc
+++ b/deps/v8/src/heap/cppgc/marking-worklists.cc
@@ -18,15 +18,5 @@ void MarkingWorklists::ClearForTesting() {
weak_callback_worklist_.Clear();
}
-void MarkingWorklists::FlushNotFullyConstructedObjects() {
- if (!not_fully_constructed_worklist_.IsLocalViewEmpty(kMutatorThreadId)) {
- not_fully_constructed_worklist_.FlushToGlobal(kMutatorThreadId);
- previously_not_fully_constructed_worklist_.MergeGlobalPool(
- &not_fully_constructed_worklist_);
- }
- DCHECK(not_fully_constructed_worklist_.IsLocalViewEmpty(
- MarkingWorklists::kMutatorThreadId));
-}
-
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/marking-worklists.h b/deps/v8/src/heap/cppgc/marking-worklists.h
index b38d77780d..96d11eef53 100644
--- a/deps/v8/src/heap/cppgc/marking-worklists.h
+++ b/deps/v8/src/heap/cppgc/marking-worklists.h
@@ -6,7 +6,7 @@
#define V8_HEAP_CPPGC_MARKING_WORKLISTS_H_
#include "include/cppgc/visitor.h"
-#include "src/heap/cppgc/worklist.h"
+#include "src/heap/base/worklist.h"
namespace cppgc {
namespace internal {
@@ -14,9 +14,6 @@ namespace internal {
class HeapObjectHeader;
class MarkingWorklists {
- static constexpr int kNumConcurrentMarkers = 0;
- static constexpr int kNumMarkers = 1 + kNumConcurrentMarkers;
-
public:
static constexpr int kMutatorThreadId = 0;
@@ -29,13 +26,13 @@ class MarkingWorklists {
// Segment size of 512 entries necessary to avoid throughput regressions.
// Since the work list is currently a temporary object this is not a problem.
using MarkingWorklist =
- Worklist<MarkingItem, 512 /* local entries */, kNumMarkers>;
+ heap::base::Worklist<MarkingItem, 512 /* local entries */>;
using NotFullyConstructedWorklist =
- Worklist<HeapObjectHeader*, 16 /* local entries */, kNumMarkers>;
+ heap::base::Worklist<HeapObjectHeader*, 16 /* local entries */>;
using WeakCallbackWorklist =
- Worklist<WeakCallbackItem, 64 /* local entries */, kNumMarkers>;
+ heap::base::Worklist<WeakCallbackItem, 64 /* local entries */>;
using WriteBarrierWorklist =
- Worklist<HeapObjectHeader*, 64 /*local entries */, kNumMarkers>;
+ heap::base::Worklist<HeapObjectHeader*, 64 /* local entries */>;
MarkingWorklist* marking_worklist() { return &marking_worklist_; }
NotFullyConstructedWorklist* not_fully_constructed_worklist() {
@@ -51,10 +48,6 @@ class MarkingWorklists {
return &weak_callback_worklist_;
}
- // Moves objects in not_fully_constructed_worklist_ to
- // previously_not_full_constructed_worklists_.
- void FlushNotFullyConstructedObjects();
-
void ClearForTesting();
private:
diff --git a/deps/v8/src/heap/cppgc/object-allocator.cc b/deps/v8/src/heap/cppgc/object-allocator.cc
index 131ab60401..fdc50d0cab 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.cc
+++ b/deps/v8/src/heap/cppgc/object-allocator.cc
@@ -110,6 +110,7 @@ void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace* space, size_t size,
GCInfoIndex gcinfo) {
void* memory = OutOfLineAllocateImpl(space, size, gcinfo);
stats_collector_->NotifySafePointForConservativeCollection();
+ raw_heap_->heap()->AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
return memory;
}
@@ -136,7 +137,7 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace* space,
// TODO(chromium:1056170): Add lazy sweep.
// 4. Complete sweeping.
- raw_heap_->heap()->sweeper().Finish();
+ raw_heap_->heap()->sweeper().FinishIfRunning();
// 5. Add a new page to this heap.
auto* new_page = NormalPage::Create(page_backend_, space);
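
The new call in OutOfLineAllocate means every slow-path allocation now nudges incremental garbage collection forward. A toy sketch of that hook point, with an invented ToyHeap and step counter standing in for the real heap and marking schedule:

#include <cstddef>
#include <cstdio>
#include <new>

// Invented stand-in for the heap: each slow-path allocation performs one
// pending incremental marking step, as the new call above does for cppgc.
struct ToyHeap {
  int pending_marking_steps = 3;

  void AdvanceIncrementalGCIfNeeded() {
    if (pending_marking_steps > 0) {
      --pending_marking_steps;
      std::printf("marking step done, %d left\n", pending_marking_steps);
    }
  }
};

void* SlowPathAllocate(ToyHeap& heap, std::size_t size) {
  void* memory = ::operator new(size);  // Stand-in for the real allocation.
  heap.AdvanceIncrementalGCIfNeeded();  // The newly added hook point.
  return memory;
}

int main() {
  ToyHeap heap;
  for (int i = 0; i < 4; ++i) {
    void* memory = SlowPathAllocate(heap, 64);
    ::operator delete(memory);
  }
  return 0;
}
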
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
index 97dca47dac..c20b5ec721 100644
--- a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
@@ -25,7 +25,7 @@ void PreFinalizerRegistrationDispatcher::RegisterPrefinalizer(
}
bool PreFinalizerRegistrationDispatcher::PreFinalizer::operator==(
- const PreFinalizer& other) {
+ const PreFinalizer& other) const {
return (object == other.object) && (callback == other.callback);
}
diff --git a/deps/v8/src/heap/cppgc/stats-collector.cc b/deps/v8/src/heap/cppgc/stats-collector.cc
index a92aba021d..dd6d678877 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.cc
+++ b/deps/v8/src/heap/cppgc/stats-collector.cc
@@ -80,13 +80,25 @@ void StatsCollector::NotifyMarkingCompleted(size_t marked_bytes) {
DCHECK_EQ(GarbageCollectionState::kMarking, gc_state_);
gc_state_ = GarbageCollectionState::kSweeping;
current_.marked_bytes = marked_bytes;
- allocated_bytes_since_end_of_marking_ = 0;
allocated_bytes_since_safepoint_ = 0;
explicitly_freed_bytes_since_safepoint_ = 0;
ForAllAllocationObservers([marked_bytes](AllocationObserver* observer) {
observer->ResetAllocatedObjectSize(marked_bytes);
});
+
+ // HeapGrowing uses the fields below to estimate the allocation rate during
+ // the execution of ResetAllocatedObjectSize.
+ allocated_bytes_since_end_of_marking_ = 0;
+ time_of_last_end_of_marking_ = v8::base::TimeTicks::Now();
+}
+
+double StatsCollector::GetRecentAllocationSpeedInBytesPerMs() const {
+ v8::base::TimeTicks current_time = v8::base::TimeTicks::Now();
+ DCHECK_LE(time_of_last_end_of_marking_, current_time);
+ if (time_of_last_end_of_marking_ == current_time) return 0;
+ return allocated_bytes_since_end_of_marking_ /
+ (current_time - time_of_last_end_of_marking_).InMillisecondsF();
}
const StatsCollector::Event& StatsCollector::NotifySweepingCompleted() {
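
GetRecentAllocationSpeedInBytesPerMs is a plain rate: bytes allocated since the end of marking divided by the milliseconds elapsed since then, guarded against a zero interval. A stand-alone arithmetic check with made-up numbers (none of this is cppgc API):

#include <cstdio>

// Same rate computation as above, with the zero-interval guard, but on
// made-up inputs instead of StatsCollector state.
double RecentAllocationSpeedInBytesPerMs(long long allocated_bytes,
                                         double elapsed_ms) {
  if (elapsed_ms == 0) return 0;  // Marking ended "just now": no rate yet.
  return static_cast<double>(allocated_bytes) / elapsed_ms;
}

int main() {
  // 4 MiB allocated in the 250 ms since marking ended: ~16777 bytes/ms.
  std::printf("%.0f bytes/ms\n",
              RecentAllocationSpeedInBytesPerMs(4 * 1024 * 1024, 250.0));
  return 0;
}
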
diff --git a/deps/v8/src/heap/cppgc/stats-collector.h b/deps/v8/src/heap/cppgc/stats-collector.h
index cc122a17dd..795832ba47 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.h
+++ b/deps/v8/src/heap/cppgc/stats-collector.h
@@ -11,6 +11,7 @@
#include <vector>
#include "src/base/macros.h"
+#include "src/base/platform/time.h"
namespace cppgc {
namespace internal {
@@ -79,6 +80,8 @@ class V8_EXPORT_PRIVATE StatsCollector final {
// bytes and the bytes allocated since last marking.
size_t allocated_object_size() const;
+ double GetRecentAllocationSpeedInBytesPerMs() const;
+
private:
enum class GarbageCollectionState : uint8_t {
kNotRunning,
@@ -97,6 +100,7 @@ class V8_EXPORT_PRIVATE StatsCollector final {
// an object was explicitly freed that was marked as live in the previous
// cycle.
int64_t allocated_bytes_since_end_of_marking_ = 0;
+ v8::base::TimeTicks time_of_last_end_of_marking_ = v8::base::TimeTicks::Now();
// Counters for allocation and free. The individual values are never negative
// but their delta may be because of the same reason the overall
// allocated_bytes_since_end_of_marking_ may be negative. Keep integer
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index 9595ae30cf..986ea6f4fa 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -384,14 +384,14 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
cppgc::Platform* platform_;
};
-class ConcurrentSweepTask final : public v8::JobTask,
+class ConcurrentSweepTask final : public cppgc::JobTask,
private HeapVisitor<ConcurrentSweepTask> {
friend class HeapVisitor<ConcurrentSweepTask>;
public:
explicit ConcurrentSweepTask(SpaceStates* states) : states_(states) {}
- void Run(v8::JobDelegate* delegate) final {
+ void Run(cppgc::JobDelegate* delegate) final {
for (SpaceState& state : *states_) {
while (auto page = state.unswept_pages.Pop()) {
Traverse(*page);
@@ -401,7 +401,7 @@ class ConcurrentSweepTask final : public v8::JobTask,
is_completed_.store(true, std::memory_order_relaxed);
}
- size_t GetMaxConcurrency() const final {
+ size_t GetMaxConcurrency(size_t /* active_worker_count */) const final {
return is_completed_.load(std::memory_order_relaxed) ? 0 : 1;
}
@@ -499,9 +499,15 @@ class Sweeper::SweeperImpl final {
}
}
- void Finish() {
+ void FinishIfRunning() {
if (!is_in_progress_) return;
+ Finish();
+ }
+
+ void Finish() {
+ DCHECK(is_in_progress_);
+
// First, call finalizers on the mutator thread.
SweepFinalizer finalizer(platform_);
finalizer.FinalizeHeap(&space_states_);
@@ -519,14 +525,14 @@ class Sweeper::SweeperImpl final {
}
private:
- class IncrementalSweepTask : public v8::IdleTask {
+ class IncrementalSweepTask : public cppgc::IdleTask {
public:
using Handle = SingleThreadedHandle;
explicit IncrementalSweepTask(SweeperImpl* sweeper)
: sweeper_(sweeper), handle_(Handle::NonEmptyTag{}) {}
- static Handle Post(SweeperImpl* sweeper, v8::TaskRunner* runner) {
+ static Handle Post(SweeperImpl* sweeper, cppgc::TaskRunner* runner) {
auto task = std::make_unique<IncrementalSweepTask>(sweeper);
auto handle = task->GetHandle();
runner->PostIdleTask(std::move(task));
@@ -567,7 +573,7 @@ class Sweeper::SweeperImpl final {
if (!platform_) return;
concurrent_sweeper_handle_ = platform_->PostJob(
- v8::TaskPriority::kUserVisible,
+ cppgc::TaskPriority::kUserVisible,
std::make_unique<ConcurrentSweepTask>(&space_states_));
}
@@ -587,9 +593,9 @@ class Sweeper::SweeperImpl final {
StatsCollector* stats_collector_;
SpaceStates space_states_;
cppgc::Platform* platform_;
- std::shared_ptr<v8::TaskRunner> foreground_task_runner_;
+ std::shared_ptr<cppgc::TaskRunner> foreground_task_runner_;
IncrementalSweepTask::Handle incremental_sweeper_handle_;
- std::unique_ptr<v8::JobHandle> concurrent_sweeper_handle_;
+ std::unique_ptr<cppgc::JobHandle> concurrent_sweeper_handle_;
bool is_in_progress_ = false;
};
@@ -600,7 +606,7 @@ Sweeper::Sweeper(RawHeap* heap, cppgc::Platform* platform,
Sweeper::~Sweeper() = default;
void Sweeper::Start(Config config) { impl_->Start(config); }
-void Sweeper::Finish() { impl_->Finish(); }
+void Sweeper::FinishIfRunning() { impl_->FinishIfRunning(); }
} // namespace internal
} // namespace cppgc
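
ConcurrentSweepTask reports its remaining usefulness through GetMaxConcurrency(): 1 while unswept pages remain, 0 once the completion flag is set, which lets the job system retire the worker. A toy, single-threaded rendition of that contract, with no real job system and invented names throughout:

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

// Invented task type: like ConcurrentSweepTask it advertises a concurrency
// of 1 while work remains and 0 once the completion flag is set.
class ToySweepTask {
 public:
  explicit ToySweepTask(std::vector<int> pages) : pages_(std::move(pages)) {}

  void Run() {
    while (!pages_.empty()) {
      std::printf("sweeping page %d\n", pages_.back());
      pages_.pop_back();
    }
    completed_.store(true, std::memory_order_relaxed);
  }

  std::size_t GetMaxConcurrency() const {
    return completed_.load(std::memory_order_relaxed) ? 0 : 1;
  }

 private:
  std::vector<int> pages_;
  std::atomic<bool> completed_{false};
};

int main() {
  ToySweepTask task({1, 2, 3});
  while (task.GetMaxConcurrency() > 0) task.Run();  // "Job system" loop.
  return 0;
}
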
diff --git a/deps/v8/src/heap/cppgc/sweeper.h b/deps/v8/src/heap/cppgc/sweeper.h
index 6ce17ea8fc..e94036521e 100644
--- a/deps/v8/src/heap/cppgc/sweeper.h
+++ b/deps/v8/src/heap/cppgc/sweeper.h
@@ -30,7 +30,7 @@ class V8_EXPORT_PRIVATE Sweeper final {
// Sweeper::Start assumes the heap holds no linear allocation buffers.
void Start(Config);
- void Finish();
+ void FinishIfRunning();
private:
class SweeperImpl;
diff --git a/deps/v8/src/heap/cppgc/worklist.h b/deps/v8/src/heap/cppgc/worklist.h
deleted file mode 100644
index 5993d6a04e..0000000000
--- a/deps/v8/src/heap/cppgc/worklist.h
+++ /dev/null
@@ -1,473 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_CPPGC_WORKLIST_H_
-#define V8_HEAP_CPPGC_WORKLIST_H_
-
-#include <cstddef>
-#include <utility>
-
-#include "src/base/atomic-utils.h"
-#include "src/base/logging.h"
-#include "src/base/platform/mutex.h"
-#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
-
-namespace cppgc {
-namespace internal {
-
-// A concurrent worklist based on segments. Each tasks gets private
-// push and pop segments. Empty pop segments are swapped with their
-// corresponding push segments. Full push segments are published to a global
-// pool of segments and replaced with empty segments.
-//
-// Work stealing is best effort, i.e., there is no way to inform other tasks
-// of the need of items.
-template <typename EntryType_, int SEGMENT_SIZE, int max_num_tasks = 8>
-class Worklist {
- using WorklistType = Worklist<EntryType_, SEGMENT_SIZE, max_num_tasks>;
-
- public:
- using EntryType = EntryType_;
- static constexpr int kMaxNumTasks = max_num_tasks;
- static constexpr size_t kSegmentCapacity = SEGMENT_SIZE;
-
- class View {
- public:
- View(WorklistType* worklist, int task_id)
- : worklist_(worklist), task_id_(task_id) {}
-
- // Pushes an entry onto the worklist.
- bool Push(EntryType entry) { return worklist_->Push(task_id_, entry); }
-
- // Pops an entry from the worklist.
- bool Pop(EntryType* entry) { return worklist_->Pop(task_id_, entry); }
-
- // Returns true if the local portion of the worklist is empty.
- bool IsLocalEmpty() const { return worklist_->IsLocalEmpty(task_id_); }
-
- // Returns true if the worklist is empty. Can only be used from the main
- // thread without concurrent access.
- bool IsEmpty() const { return worklist_->IsEmpty(); }
-
- bool IsGlobalPoolEmpty() const { return worklist_->IsGlobalPoolEmpty(); }
-
- // Returns true if the local portion and the global pool are empty (i.e.
- // whether the current view cannot pop anymore).
- bool IsLocalViewEmpty() const {
- return worklist_->IsLocalViewEmpty(task_id_);
- }
-
- void FlushToGlobal() { worklist_->FlushToGlobal(task_id_); }
-
- void* operator new(size_t, void* location) = delete;
- void* operator new(size_t) = delete;
-
- private:
- WorklistType* const worklist_;
- const int task_id_;
- };
-
- Worklist() : Worklist(kMaxNumTasks) {}
-
- explicit Worklist(int num_tasks) : num_tasks_(num_tasks) {
- DCHECK_LE(num_tasks_, kMaxNumTasks);
- for (int i = 0; i < num_tasks_; i++) {
- private_push_segment(i) = NewSegment();
- private_pop_segment(i) = NewSegment();
- }
- }
-
- ~Worklist() {
- CHECK(IsEmpty());
- for (int i = 0; i < num_tasks_; i++) {
- DCHECK_NOT_NULL(private_push_segment(i));
- DCHECK_NOT_NULL(private_pop_segment(i));
- delete private_push_segment(i);
- delete private_pop_segment(i);
- }
- }
-
- // Swaps content with the given worklist. Local buffers need to
- // be empty, not thread safe.
- void Swap(Worklist<EntryType, SEGMENT_SIZE>& other) {
- CHECK(AreLocalsEmpty());
- CHECK(other.AreLocalsEmpty());
-
- global_pool_.Swap(other.global_pool_);
- }
-
- bool Push(int task_id, EntryType entry) {
- DCHECK_LT(task_id, num_tasks_);
- DCHECK_NOT_NULL(private_push_segment(task_id));
- if (!private_push_segment(task_id)->Push(entry)) {
- PublishPushSegmentToGlobal(task_id);
- bool success = private_push_segment(task_id)->Push(entry);
- USE(success);
- DCHECK(success);
- }
- return true;
- }
-
- bool Pop(int task_id, EntryType* entry) {
- DCHECK_LT(task_id, num_tasks_);
- DCHECK_NOT_NULL(private_pop_segment(task_id));
- if (!private_pop_segment(task_id)->Pop(entry)) {
- if (!private_push_segment(task_id)->IsEmpty()) {
- Segment* tmp = private_pop_segment(task_id);
- private_pop_segment(task_id) = private_push_segment(task_id);
- private_push_segment(task_id) = tmp;
- } else if (!StealPopSegmentFromGlobal(task_id)) {
- return false;
- }
- bool success = private_pop_segment(task_id)->Pop(entry);
- USE(success);
- DCHECK(success);
- }
- return true;
- }
-
- size_t LocalPushSegmentSize(int task_id) const {
- return private_push_segment(task_id)->Size();
- }
-
- bool IsLocalEmpty(int task_id) const {
- return private_pop_segment(task_id)->IsEmpty() &&
- private_push_segment(task_id)->IsEmpty();
- }
-
- bool IsGlobalPoolEmpty() const { return global_pool_.IsEmpty(); }
-
- bool IsEmpty() const {
- if (!AreLocalsEmpty()) return false;
- return IsGlobalPoolEmpty();
- }
-
- bool AreLocalsEmpty() const {
- for (int i = 0; i < num_tasks_; i++) {
- if (!IsLocalEmpty(i)) return false;
- }
- return true;
- }
-
- bool IsLocalViewEmpty(int task_id) const {
- return IsLocalEmpty(task_id) && IsGlobalPoolEmpty();
- }
-
- size_t LocalSize(int task_id) const {
- return private_pop_segment(task_id)->Size() +
- private_push_segment(task_id)->Size();
- }
-
- // Thread-safe but may return an outdated result.
- size_t GlobalPoolSize() const { return global_pool_.Size(); }
-
- // Clears all segments. Frees the global segment pool.
- //
- // Assumes that no other tasks are running.
- void Clear() {
- for (int i = 0; i < num_tasks_; i++) {
- private_pop_segment(i)->Clear();
- private_push_segment(i)->Clear();
- }
- global_pool_.Clear();
- }
-
- // Calls the specified callback on each element of the deques and replaces
- // the element with the result of the callback.
- // The signature of the callback is
- // bool Callback(EntryType old, EntryType* new).
- // If the callback returns |false| then the element is removed from the
- // worklist. Otherwise the |new| entry is updated.
- //
- // Assumes that no other tasks are running.
- template <typename Callback>
- void Update(Callback callback) {
- for (int i = 0; i < num_tasks_; i++) {
- private_pop_segment(i)->Update(callback);
- private_push_segment(i)->Update(callback);
- }
- global_pool_.Update(callback);
- }
-
- // Calls the specified callback on each element of the deques.
- // The signature of the callback is:
- // void Callback(EntryType entry).
- //
- // Assumes that no other tasks are running.
- template <typename Callback>
- void Iterate(Callback callback) {
- for (int i = 0; i < num_tasks_; i++) {
- private_pop_segment(i)->Iterate(callback);
- private_push_segment(i)->Iterate(callback);
- }
- global_pool_.Iterate(callback);
- }
-
- template <typename Callback>
- void IterateGlobalPool(Callback callback) {
- global_pool_.Iterate(callback);
- }
-
- void FlushToGlobal(int task_id) {
- PublishPushSegmentToGlobal(task_id);
- PublishPopSegmentToGlobal(task_id);
- }
-
- void MergeGlobalPool(Worklist* other) {
- global_pool_.Merge(&other->global_pool_);
- }
-
- private:
- FRIEND_TEST(CppgcWorkListTest, SegmentCreate);
- FRIEND_TEST(CppgcWorkListTest, SegmentPush);
- FRIEND_TEST(CppgcWorkListTest, SegmentPushPop);
- FRIEND_TEST(CppgcWorkListTest, SegmentIsEmpty);
- FRIEND_TEST(CppgcWorkListTest, SegmentIsFull);
- FRIEND_TEST(CppgcWorkListTest, SegmentClear);
- FRIEND_TEST(CppgcWorkListTest, SegmentFullPushFails);
- FRIEND_TEST(CppgcWorkListTest, SegmentEmptyPopFails);
- FRIEND_TEST(CppgcWorkListTest, SegmentUpdateFalse);
- FRIEND_TEST(CppgcWorkListTest, SegmentUpdate);
-
- class Segment {
- public:
- static const size_t kCapacity = kSegmentCapacity;
-
- Segment() : index_(0) {}
-
- bool Push(EntryType entry) {
- if (IsFull()) return false;
- entries_[index_++] = entry;
- return true;
- }
-
- bool Pop(EntryType* entry) {
- if (IsEmpty()) return false;
- *entry = entries_[--index_];
- return true;
- }
-
- size_t Size() const { return index_; }
- bool IsEmpty() const { return index_ == 0; }
- bool IsFull() const { return index_ == kCapacity; }
- void Clear() { index_ = 0; }
-
- template <typename Callback>
- void Update(Callback callback) {
- size_t new_index = 0;
- for (size_t i = 0; i < index_; i++) {
- if (callback(entries_[i], &entries_[new_index])) {
- new_index++;
- }
- }
- index_ = new_index;
- }
-
- template <typename Callback>
- void Iterate(Callback callback) const {
- for (size_t i = 0; i < index_; i++) {
- callback(entries_[i]);
- }
- }
-
- Segment* next() const { return next_; }
- void set_next(Segment* segment) { next_ = segment; }
-
- private:
- Segment* next_;
- size_t index_;
- EntryType entries_[kCapacity];
- };
-
- struct PrivateSegmentHolder {
- Segment* private_push_segment;
- Segment* private_pop_segment;
- char cache_line_padding[64];
- };
-
- class GlobalPool {
- public:
- GlobalPool() : top_(nullptr) {}
-
- // Swaps contents, not thread safe.
- void Swap(GlobalPool& other) {
- Segment* temp = top_;
- set_top(other.top_);
- other.set_top(temp);
- size_t other_size = other.size_.exchange(
- size_.load(std::memory_order_relaxed), std::memory_order_relaxed);
- size_.store(other_size, std::memory_order_relaxed);
- }
-
- V8_INLINE void Push(Segment* segment) {
- v8::base::MutexGuard guard(&lock_);
- segment->set_next(top_);
- set_top(segment);
- size_.fetch_add(1, std::memory_order_relaxed);
- }
-
- V8_INLINE bool Pop(Segment** segment) {
- v8::base::MutexGuard guard(&lock_);
- if (top_) {
- DCHECK_LT(0U, size_);
- size_.fetch_sub(1, std::memory_order_relaxed);
- *segment = top_;
- set_top(top_->next());
- return true;
- }
- return false;
- }
-
- V8_INLINE bool IsEmpty() const {
- return v8::base::AsAtomicPtr(&top_)->load(std::memory_order_relaxed) ==
- nullptr;
- }
-
- V8_INLINE size_t Size() const {
- // It is safe to read |size_| without a lock since this variable is
- // atomic, keeping in mind that threads may not immediately see the new
- // value when it is updated.
- return size_.load(std::memory_order_relaxed);
- }
-
- void Clear() {
- v8::base::MutexGuard guard(&lock_);
- size_.store(0, std::memory_order_relaxed);
- Segment* current = top_;
- while (current) {
- Segment* tmp = current;
- current = current->next();
- delete tmp;
- }
- set_top(nullptr);
- }
-
- // See Worklist::Update.
- template <typename Callback>
- void Update(Callback callback) {
- v8::base::MutexGuard guard(&lock_);
- Segment* prev = nullptr;
- Segment* current = top_;
- while (current) {
- current->Update(callback);
- if (current->IsEmpty()) {
- DCHECK_LT(0U, size_);
- size_.fetch_sub(1, std::memory_order_relaxed);
- if (!prev) {
- top_ = current->next();
- } else {
- prev->set_next(current->next());
- }
- Segment* tmp = current;
- current = current->next();
- delete tmp;
- } else {
- prev = current;
- current = current->next();
- }
- }
- }
-
- // See Worklist::Iterate.
- template <typename Callback>
- void Iterate(Callback callback) {
- v8::base::MutexGuard guard(&lock_);
- for (Segment* current = top_; current; current = current->next()) {
- current->Iterate(callback);
- }
- }
-
- void Merge(GlobalPool* other) {
- Segment* top = nullptr;
- size_t other_size = 0;
- {
- v8::base::MutexGuard guard(&other->lock_);
- if (!other->top_) return;
- top = other->top_;
- other_size = other->size_.load(std::memory_order_relaxed);
- other->size_.store(0, std::memory_order_relaxed);
- other->set_top(nullptr);
- }
-
- // It's safe to iterate through these segments because the top was
- // extracted from |other|.
- Segment* end = top;
- while (end->next()) end = end->next();
-
- {
- v8::base::MutexGuard guard(&lock_);
- size_.fetch_add(other_size, std::memory_order_relaxed);
- end->set_next(top_);
- set_top(top);
- }
- }
-
- void* operator new(size_t, void* location) = delete;
- void* operator new(size_t) = delete;
-
- private:
- void set_top(Segment* segment) {
- v8::base::AsAtomicPtr(&top_)->store(segment, std::memory_order_relaxed);
- }
-
- v8::base::Mutex lock_;
- Segment* top_;
- std::atomic<size_t> size_{0};
- };
-
- V8_INLINE Segment*& private_push_segment(int task_id) {
- return private_segments_[task_id].private_push_segment;
- }
-
- V8_INLINE Segment* const& private_push_segment(int task_id) const {
- return private_segments_[task_id].private_push_segment;
- }
-
- V8_INLINE Segment*& private_pop_segment(int task_id) {
- return private_segments_[task_id].private_pop_segment;
- }
-
- V8_INLINE Segment* const& private_pop_segment(int task_id) const {
- return private_segments_[task_id].private_pop_segment;
- }
-
- V8_INLINE void PublishPushSegmentToGlobal(int task_id) {
- if (!private_push_segment(task_id)->IsEmpty()) {
- global_pool_.Push(private_push_segment(task_id));
- private_push_segment(task_id) = NewSegment();
- }
- }
-
- V8_INLINE void PublishPopSegmentToGlobal(int task_id) {
- if (!private_pop_segment(task_id)->IsEmpty()) {
- global_pool_.Push(private_pop_segment(task_id));
- private_pop_segment(task_id) = NewSegment();
- }
- }
-
- V8_INLINE bool StealPopSegmentFromGlobal(int task_id) {
- if (global_pool_.IsEmpty()) return false;
- Segment* new_segment = nullptr;
- if (global_pool_.Pop(&new_segment)) {
- delete private_pop_segment(task_id);
- private_pop_segment(task_id) = new_segment;
- return true;
- }
- return false;
- }
-
- V8_INLINE Segment* NewSegment() {
- // Bottleneck for filtering in crash dumps.
- return new Segment();
- }
-
- PrivateSegmentHolder private_segments_[kMaxNumTasks];
- GlobalPool global_pool_;
- int num_tasks_;
-};
-
-} // namespace internal
-} // namespace cppgc
-
-#endif // V8_HEAP_CPPGC_WORKLIST_H_
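
The header removed above documented the worklist discipline it implemented: each task keeps private push and pop segments, a dry pop segment is first swapped with the push segment, full or flushed push segments are published to a global pool, and only then does a task steal a segment from that pool. A toy, single-task sketch of that discipline, using std::vector in place of fixed-size segments (illustrative only, not the heap::base replacement):

#include <cstdio>
#include <utility>
#include <vector>

// std::vector stands in for a fixed-size segment; one task only.
struct ToySegmentedWorklist {
  std::vector<int> push_segment;
  std::vector<int> pop_segment;
  std::vector<std::vector<int>> global_pool;

  void Push(int value) { push_segment.push_back(value); }

  bool Pop(int* out) {
    if (pop_segment.empty()) {
      if (!push_segment.empty()) {
        std::swap(pop_segment, push_segment);         // Cheap local refill.
      } else if (!global_pool.empty()) {
        pop_segment = std::move(global_pool.back());  // Steal from the pool.
        global_pool.pop_back();
      } else {
        return false;
      }
    }
    *out = pop_segment.back();
    pop_segment.pop_back();
    return true;
  }

  void FlushToGlobal() {
    if (!push_segment.empty()) {
      global_pool.push_back(std::move(push_segment));
      push_segment.clear();
    }
  }
};

int main() {
  ToySegmentedWorklist worklist;
  worklist.Push(1);
  worklist.Push(2);
  worklist.FlushToGlobal();  // Make the first two entries stealable.
  worklist.Push(3);
  int value;
  // Prints 3 (served by the local swap), then 2 and 1 (stolen from the pool).
  while (worklist.Pop(&value)) std::printf("popped %d\n", value);
  return 0;
}
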