path: root/deps/v8/src/heap
author    Michaël Zasso <targos@protonmail.com>  2022-01-29 08:33:07 +0100
committer Michaël Zasso <targos@protonmail.com>  2022-02-02 17:23:18 +0100
commit    974ab4060fe3eff74dc0a62a5a27d516738f4c55 (patch)
tree      30fbcca796ca5cc7b4abf917e716e2b02899cb7a /deps/v8/src/heap
parent    4318b2348dbcd5003e0c4a14b5fe378cceec3c81 (diff)
download  node-new-974ab4060fe3eff74dc0a62a5a27d516738f4c55.tar.gz
deps: update V8 to 9.8.177.9
PR-URL: https://github.com/nodejs/node/pull/41610
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Antoine du Hamel <duhamelantoine1995@gmail.com>
Reviewed-By: Darshan Sen <raisinten@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps/v8/src/heap')
-rw-r--r--  deps/v8/src/heap/allocation-observer.cc | 4
-rw-r--r--  deps/v8/src/heap/base/worklist.h | 38
-rw-r--r--  deps/v8/src/heap/code-range.cc | 10
-rw-r--r--  deps/v8/src/heap/code-range.h | 2
-rw-r--r--  deps/v8/src/heap/code-stats.cc | 13
-rw-r--r--  deps/v8/src/heap/collection-barrier.cc | 23
-rw-r--r--  deps/v8/src/heap/collection-barrier.h | 23
-rw-r--r--  deps/v8/src/heap/concurrent-allocator.cc | 38
-rw-r--r--  deps/v8/src/heap/concurrent-allocator.h | 6
-rw-r--r--  deps/v8/src/heap/concurrent-marking.cc | 78
-rw-r--r--  deps/v8/src/heap/concurrent-marking.h | 1
-rw-r--r--  deps/v8/src/heap/cppgc-js/cpp-heap.cc | 100
-rw-r--r--  deps/v8/src/heap/cppgc-js/cpp-heap.h | 37
-rw-r--r--  deps/v8/src/heap/cppgc-js/cpp-snapshot.cc | 6
-rw-r--r--  deps/v8/src/heap/cppgc/caged-heap.h | 9
-rw-r--r--  deps/v8/src/heap/cppgc/compactor.cc | 10
-rw-r--r--  deps/v8/src/heap/cppgc/compactor.h | 1
-rw-r--r--  deps/v8/src/heap/cppgc/heap-base.cc | 9
-rw-r--r--  deps/v8/src/heap/cppgc/heap-base.h | 16
-rw-r--r--  deps/v8/src/heap/cppgc/heap-object-header.h | 35
-rw-r--r--  deps/v8/src/heap/cppgc/heap-page.cc | 11
-rw-r--r--  deps/v8/src/heap/cppgc/heap-space.h | 3
-rw-r--r--  deps/v8/src/heap/cppgc/heap-state.cc | 7
-rw-r--r--  deps/v8/src/heap/cppgc/heap.cc | 31
-rw-r--r--  deps/v8/src/heap/cppgc/heap.h | 3
-rw-r--r--  deps/v8/src/heap/cppgc/incremental-marking-schedule.cc | 1
-rw-r--r--  deps/v8/src/heap/cppgc/marker.cc | 37
-rw-r--r--  deps/v8/src/heap/cppgc/marker.h | 5
-rw-r--r--  deps/v8/src/heap/cppgc/marking-state.h | 18
-rw-r--r--  deps/v8/src/heap/cppgc/marking-verifier.cc | 10
-rw-r--r--  deps/v8/src/heap/cppgc/marking-visitor.cc | 3
-rw-r--r--  deps/v8/src/heap/cppgc/object-poisoner.h | 3
-rw-r--r--  deps/v8/src/heap/cppgc/object-size-trait.cc | 6
-rw-r--r--  deps/v8/src/heap/cppgc/object-view.h | 21
-rw-r--r--  deps/v8/src/heap/cppgc/persistent-node.cc | 12
-rw-r--r--  deps/v8/src/heap/cppgc/prefinalizer-handler.cc | 4
-rw-r--r--  deps/v8/src/heap/cppgc/sweeper.cc | 90
-rw-r--r--  deps/v8/src/heap/cppgc/visitor.cc | 2
-rw-r--r--  deps/v8/src/heap/embedder-tracing.cc | 72
-rw-r--r--  deps/v8/src/heap/embedder-tracing.h | 33
-rw-r--r--  deps/v8/src/heap/factory-base.cc | 128
-rw-r--r--  deps/v8/src/heap/factory-base.h | 33
-rw-r--r--  deps/v8/src/heap/factory.cc | 168
-rw-r--r--  deps/v8/src/heap/factory.h | 53
-rw-r--r--  deps/v8/src/heap/gc-tracer.cc | 50
-rw-r--r--  deps/v8/src/heap/gc-tracer.h | 16
-rw-r--r--  deps/v8/src/heap/heap-controller.cc | 4
-rw-r--r--  deps/v8/src/heap/heap-inl.h | 29
-rw-r--r--  deps/v8/src/heap/heap-layout-tracer.cc | 73
-rw-r--r--  deps/v8/src/heap/heap-layout-tracer.h | 33
-rw-r--r--  deps/v8/src/heap/heap-write-barrier-inl.h | 10
-rw-r--r--  deps/v8/src/heap/heap-write-barrier.cc | 13
-rw-r--r--  deps/v8/src/heap/heap-write-barrier.h | 3
-rw-r--r--  deps/v8/src/heap/heap.cc | 353
-rw-r--r--  deps/v8/src/heap/heap.h | 96
-rw-r--r--  deps/v8/src/heap/incremental-marking-inl.h | 8
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc | 33
-rw-r--r--  deps/v8/src/heap/incremental-marking.h | 20
-rw-r--r--  deps/v8/src/heap/invalidated-slots-inl.h | 4
-rw-r--r--  deps/v8/src/heap/large-spaces.cc | 16
-rw-r--r--  deps/v8/src/heap/linear-allocation-area.h | 8
-rw-r--r--  deps/v8/src/heap/local-allocator-inl.h | 2
-rw-r--r--  deps/v8/src/heap/local-factory.cc | 3
-rw-r--r--  deps/v8/src/heap/local-factory.h | 20
-rw-r--r--  deps/v8/src/heap/local-heap-inl.h | 20
-rw-r--r--  deps/v8/src/heap/local-heap.cc | 152
-rw-r--r--  deps/v8/src/heap/local-heap.h | 16
-rw-r--r--  deps/v8/src/heap/mark-compact-inl.h | 4
-rw-r--r--  deps/v8/src/heap/mark-compact.cc | 736
-rw-r--r--  deps/v8/src/heap/mark-compact.h | 107
-rw-r--r--  deps/v8/src/heap/marking-barrier-inl.h | 2
-rw-r--r--  deps/v8/src/heap/marking-barrier.cc | 14
-rw-r--r--  deps/v8/src/heap/marking-barrier.h | 1
-rw-r--r--  deps/v8/src/heap/marking-visitor-inl.h | 26
-rw-r--r--  deps/v8/src/heap/marking-visitor.h | 43
-rw-r--r--  deps/v8/src/heap/memory-allocator.cc | 14
-rw-r--r--  deps/v8/src/heap/memory-allocator.h | 10
-rw-r--r--  deps/v8/src/heap/memory-chunk.cc | 9
-rw-r--r--  deps/v8/src/heap/memory-chunk.h | 5
-rw-r--r--  deps/v8/src/heap/memory-measurement-inl.h | 1
-rw-r--r--  deps/v8/src/heap/memory-measurement.cc | 9
-rw-r--r--  deps/v8/src/heap/new-spaces-inl.h | 12
-rw-r--r--  deps/v8/src/heap/new-spaces.cc | 84
-rw-r--r--  deps/v8/src/heap/new-spaces.h | 23
-rw-r--r--  deps/v8/src/heap/object-stats.cc | 27
-rw-r--r--  deps/v8/src/heap/objects-visiting.h | 5
-rw-r--r--  deps/v8/src/heap/paged-spaces-inl.h | 21
-rw-r--r--  deps/v8/src/heap/paged-spaces.cc | 106
-rw-r--r--  deps/v8/src/heap/paged-spaces.h | 30
-rw-r--r--  deps/v8/src/heap/parked-scope.h | 2
-rw-r--r--  deps/v8/src/heap/read-only-spaces.cc | 11
-rw-r--r--  deps/v8/src/heap/remembered-set.h | 6
-rw-r--r--  deps/v8/src/heap/safepoint.cc | 267
-rw-r--r--  deps/v8/src/heap/safepoint.h | 109
-rw-r--r--  deps/v8/src/heap/scavenger-inl.h | 102
-rw-r--r--  deps/v8/src/heap/scavenger.cc | 62
-rw-r--r--  deps/v8/src/heap/scavenger.h | 73
-rw-r--r--  deps/v8/src/heap/setup-heap-internal.cc | 14
-rw-r--r--  deps/v8/src/heap/slot-set.h | 2
-rw-r--r--  deps/v8/src/heap/spaces.cc | 43
-rw-r--r--  deps/v8/src/heap/spaces.h | 24
-rw-r--r--  deps/v8/src/heap/sweeper.cc | 2
-rw-r--r--  deps/v8/src/heap/weak-object-worklists.cc | 21
-rw-r--r--  deps/v8/src/heap/weak-object-worklists.h | 22
-rw-r--r--  deps/v8/src/heap/worklist.h | 453
105 files changed, 2932 insertions, 1765 deletions
diff --git a/deps/v8/src/heap/allocation-observer.cc b/deps/v8/src/heap/allocation-observer.cc
index 94d5a2f833..d25734e349 100644
--- a/deps/v8/src/heap/allocation-observer.cc
+++ b/deps/v8/src/heap/allocation-observer.cc
@@ -60,8 +60,8 @@ void AllocationCounter::RemoveAllocationObserver(AllocationObserver* observer) {
} else {
size_t step_size = 0;
- for (AllocationObserverCounter& observer : observers_) {
- size_t left_in_step = observer.next_counter_ - current_counter_;
+ for (AllocationObserverCounter& observer_counter : observers_) {
+ size_t left_in_step = observer_counter.next_counter_ - current_counter_;
DCHECK_GT(left_in_step, 0);
step_size = step_size ? std::min(step_size, left_in_step) : left_in_step;
}
diff --git a/deps/v8/src/heap/base/worklist.h b/deps/v8/src/heap/base/worklist.h
index e2d33616ad..70c8a4f1ba 100644
--- a/deps/v8/src/heap/base/worklist.h
+++ b/deps/v8/src/heap/base/worklist.h
@@ -52,14 +52,17 @@ class Worklist {
bool Pop(Segment** segment);
// Returns true if the list of segments is empty.
- bool IsEmpty();
+ bool IsEmpty() const;
// Returns the number of segments in the list.
- size_t Size();
+ size_t Size() const;
// Moves the segments of the given marking worklist into this
// marking worklist.
void Merge(Worklist<EntryType, SegmentSize>* other);
+ // Swaps the segments with the given marking worklist.
+ void Swap(Worklist<EntryType, SegmentSize>* other);
+
// These functions are not thread-safe. They should be called only
// if all local marking worklists that use the current worklist have
// been published and are empty.
@@ -100,13 +103,13 @@ bool Worklist<EntryType, SegmentSize>::Pop(Segment** segment) {
}
template <typename EntryType, uint16_t SegmentSize>
-bool Worklist<EntryType, SegmentSize>::IsEmpty() {
+bool Worklist<EntryType, SegmentSize>::IsEmpty() const {
return v8::base::AsAtomicPtr(&top_)->load(std::memory_order_relaxed) ==
nullptr;
}
template <typename EntryType, uint16_t SegmentSize>
-size_t Worklist<EntryType, SegmentSize>::Size() {
+size_t Worklist<EntryType, SegmentSize>::Size() const {
// It is safe to read |size_| without a lock since this variable is
// atomic, keeping in mind that threads may not immediately see the new
// value when it is updated.
@@ -191,6 +194,17 @@ void Worklist<EntryType, SegmentSize>::Merge(
}
template <typename EntryType, uint16_t SegmentSize>
+void Worklist<EntryType, SegmentSize>::Swap(
+ Worklist<EntryType, SegmentSize>* other) {
+ Segment* top = top_;
+ set_top(other->top_);
+ other->set_top(top);
+ size_t other_size = other->size_.exchange(
+ size_.load(std::memory_order_relaxed), std::memory_order_relaxed);
+ size_.store(other_size, std::memory_order_relaxed);
+}
+
+template <typename EntryType, uint16_t SegmentSize>
class Worklist<EntryType, SegmentSize>::Segment : public internal::SegmentBase {
public:
static const uint16_t kSize = SegmentSize;
@@ -214,14 +228,14 @@ class Worklist<EntryType, SegmentSize>::Segment : public internal::SegmentBase {
friend class Worklist<EntryType, SegmentSize>::Local;
- FRIEND_TEST(CppgcWorkListTest, SegmentCreate);
- FRIEND_TEST(CppgcWorkListTest, SegmentPush);
- FRIEND_TEST(CppgcWorkListTest, SegmentPushPop);
- FRIEND_TEST(CppgcWorkListTest, SegmentIsEmpty);
- FRIEND_TEST(CppgcWorkListTest, SegmentIsFull);
- FRIEND_TEST(CppgcWorkListTest, SegmentClear);
- FRIEND_TEST(CppgcWorkListTest, SegmentUpdateFalse);
- FRIEND_TEST(CppgcWorkListTest, SegmentUpdate);
+ FRIEND_TEST(WorkListTest, SegmentCreate);
+ FRIEND_TEST(WorkListTest, SegmentPush);
+ FRIEND_TEST(WorkListTest, SegmentPushPop);
+ FRIEND_TEST(WorkListTest, SegmentIsEmpty);
+ FRIEND_TEST(WorkListTest, SegmentIsFull);
+ FRIEND_TEST(WorkListTest, SegmentClear);
+ FRIEND_TEST(WorkListTest, SegmentUpdateFalse);
+ FRIEND_TEST(WorkListTest, SegmentUpdate);
};
template <typename EntryType, uint16_t SegmentSize>
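A note on the new Swap(): it exchanges only the segment-list head and the approximate size counter, so two worklists can trade their entire contents in O(1). A minimal standalone sketch of the same pattern is shown below; TinyWorklist and Segment are illustrative names, not V8's classes, and std::atomic stands in for v8::base::AsAtomicPtr. The sketch assumes no concurrent mutation of either worklist during the swap.

#include <atomic>
#include <cstddef>

struct Segment { Segment* next = nullptr; };

class TinyWorklist {
 public:
  // Exchange the list head and the size with relaxed atomics, mirroring Worklist::Swap().
  void Swap(TinyWorklist* other) {
    Segment* top = top_.load(std::memory_order_relaxed);
    top_.store(other->top_.load(std::memory_order_relaxed),
               std::memory_order_relaxed);
    other->top_.store(top, std::memory_order_relaxed);
    std::size_t other_size = other->size_.exchange(
        size_.load(std::memory_order_relaxed), std::memory_order_relaxed);
    size_.store(other_size, std::memory_order_relaxed);
  }
  bool IsEmpty() const {
    return top_.load(std::memory_order_relaxed) == nullptr;
  }
  std::size_t Size() const { return size_.load(std::memory_order_relaxed); }

 private:
  std::atomic<Segment*> top_{nullptr};
  std::atomic<std::size_t> size_{0};
};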
diff --git a/deps/v8/src/heap/code-range.cc b/deps/v8/src/heap/code-range.cc
index 5d5f3f3014..5c5911d676 100644
--- a/deps/v8/src/heap/code-range.cc
+++ b/deps/v8/src/heap/code-range.cc
@@ -124,8 +124,16 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
: VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
params.base_bias_size = RoundUp(reserved_area, allocate_page_size);
params.page_size = MemoryChunk::kPageSize;
+ // V8_EXTERNAL_CODE_SPACE imposes additional alignment requirement for the
+ // base address, so make sure the hint calculation function takes that into
+ // account. Otherwise the allocated reservation might be outside of the
+ // preferred region (see Isolate::GetShortBuiltinsCallRegion()).
+ const size_t hint_alignment =
+ V8_EXTERNAL_CODE_SPACE_BOOL
+ ? RoundUp(params.base_alignment, allocate_page_size)
+ : allocate_page_size;
params.requested_start_hint =
- GetCodeRangeAddressHint()->GetAddressHint(requested, allocate_page_size);
+ GetCodeRangeAddressHint()->GetAddressHint(requested, hint_alignment);
if (!VirtualMemoryCage::InitReservation(params)) return false;
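The fix above hinges on rounding the requested base alignment up to an allocatable page boundary before asking for an address hint. A small sketch of that arithmetic, assuming a power-of-two alignment (RoundUp below is a local stand-in, not V8's helper):

#include <cstdint>

constexpr std::uint64_t RoundUp(std::uint64_t x, std::uint64_t alignment) {
  // Valid only for power-of-two alignments.
  return (x + alignment - 1) & ~(alignment - 1);
}

// E.g. 100 KiB is already 4 KiB-aligned; one byte more rounds up to 104 KiB.
static_assert(RoundUp(100 * 1024, 4 * 1024) == 100 * 1024, "already aligned");
static_assert(RoundUp(100 * 1024 + 1, 4 * 1024) == 104 * 1024, "rounded to the next page");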
diff --git a/deps/v8/src/heap/code-range.h b/deps/v8/src/heap/code-range.h
index 10e0bd5718..4fcea5f26f 100644
--- a/deps/v8/src/heap/code-range.h
+++ b/deps/v8/src/heap/code-range.h
@@ -21,7 +21,7 @@ namespace internal {
class CodeRangeAddressHint {
public:
// When near code range is enabled, an address within
- // kShortBuiltinCallsBoundary to the embedded blob is returned if
+ // kMaxPCRelativeCodeRangeInMB to the embedded blob is returned if
// there is enough space. Otherwise a random address is returned.
// When near code range is disabled, returns the most recently freed code
// range start address for the given size. If there is no such entry, then a
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
index abca2c75f9..1fff3e9484 100644
--- a/deps/v8/src/heap/code-stats.cc
+++ b/deps/v8/src/heap/code-stats.cc
@@ -17,21 +17,22 @@ namespace internal {
// Record code statistics.
void CodeStatistics::RecordCodeAndMetadataStatistics(HeapObject object,
Isolate* isolate) {
- if (object.IsScript()) {
+ PtrComprCageBase cage_base(isolate);
+ if (object.IsScript(cage_base)) {
Script script = Script::cast(object);
// Log the size of external source code.
- Object source = script.source();
- if (source.IsExternalString()) {
+ Object source = script.source(cage_base);
+ if (source.IsExternalString(cage_base)) {
ExternalString external_source_string = ExternalString::cast(source);
int size = isolate->external_script_source_size();
size += external_source_string.ExternalPayloadSize();
isolate->set_external_script_source_size(size);
}
- } else if (object.IsAbstractCode()) {
+ } else if (object.IsAbstractCode(cage_base)) {
// Record code+metadata statistics.
AbstractCode abstract_code = AbstractCode::cast(object);
int size = abstract_code.SizeIncludingMetadata();
- if (abstract_code.IsCode()) {
+ if (abstract_code.IsCode(cage_base)) {
size += isolate->code_and_metadata_size();
isolate->set_code_and_metadata_size(size);
} else {
@@ -42,7 +43,7 @@ void CodeStatistics::RecordCodeAndMetadataStatistics(HeapObject object,
#ifdef DEBUG
// Record code kind and code comment statistics.
isolate->code_kind_statistics()[static_cast<int>(abstract_code.kind())] +=
- abstract_code.Size();
+ abstract_code.Size(cage_base);
CodeStatistics::CollectCodeCommentStatistics(object, isolate);
#endif
}
diff --git a/deps/v8/src/heap/collection-barrier.cc b/deps/v8/src/heap/collection-barrier.cc
index 3a1a1e5947..3cf8f41c43 100644
--- a/deps/v8/src/heap/collection-barrier.cc
+++ b/deps/v8/src/heap/collection-barrier.cc
@@ -22,14 +22,17 @@ bool CollectionBarrier::WasGCRequested() {
return collection_requested_.load();
}
-void CollectionBarrier::RequestGC() {
+bool CollectionBarrier::TryRequestGC() {
base::MutexGuard guard(&mutex_);
+ if (shutdown_requested_) return false;
bool was_already_requested = collection_requested_.exchange(true);
if (!was_already_requested) {
CHECK(!timer_.IsStarted());
timer_.Start();
}
+
+ return true;
}
class BackgroundCollectionInterruptTask : public CancelableTask {
@@ -59,8 +62,19 @@ void CollectionBarrier::NotifyShutdownRequested() {
void CollectionBarrier::ResumeThreadsAwaitingCollection() {
base::MutexGuard guard(&mutex_);
+ DCHECK(!timer_.IsStarted());
+ collection_requested_.store(false);
+ block_for_collection_ = false;
+ collection_performed_ = true;
+ cv_wakeup_.NotifyAll();
+}
+
+void CollectionBarrier::CancelCollectionAndResumeThreads() {
+ base::MutexGuard guard(&mutex_);
+ if (timer_.IsStarted()) timer_.Stop();
collection_requested_.store(false);
block_for_collection_ = false;
+ collection_performed_ = false;
cv_wakeup_.NotifyAll();
}
@@ -72,6 +86,10 @@ bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) {
// set before the next GC.
base::MutexGuard guard(&mutex_);
if (shutdown_requested_) return false;
+
+ // Collection was cancelled by the main thread.
+ if (!collection_requested_.load()) return false;
+
first_thread = !block_for_collection_;
block_for_collection_ = true;
CHECK(timer_.IsStarted());
@@ -88,7 +106,8 @@ bool CollectionBarrier::AwaitCollectionBackground(LocalHeap* local_heap) {
cv_wakeup_.Wait(&mutex_);
}
- return true;
+ // Collection may have been cancelled while blocking for it.
+ return collection_performed_;
}
void CollectionBarrier::ActivateStackGuardAndPostTask() {
diff --git a/deps/v8/src/heap/collection-barrier.h b/deps/v8/src/heap/collection-barrier.h
index ee7fd33ad1..fd894324a6 100644
--- a/deps/v8/src/heap/collection-barrier.h
+++ b/deps/v8/src/heap/collection-barrier.h
@@ -27,8 +27,10 @@ class CollectionBarrier {
// Returns true when collection was requested.
bool WasGCRequested();
- // Requests a GC from the main thread.
- void RequestGC();
+ // Requests a GC from the main thread. Returns whether GC was successfully
+ // requested. Requesting a GC can fail when isolate shutdown was already
+ // initiated.
+ bool TryRequestGC();
// Resumes all threads waiting for GC when tear down starts.
void NotifyShutdownRequested();
@@ -39,7 +41,11 @@ class CollectionBarrier {
// Resumes threads waiting for collection.
void ResumeThreadsAwaitingCollection();
+ // Cancels collection if one was requested and resumes threads waiting for GC.
+ void CancelCollectionAndResumeThreads();
+
// This is the method used by background threads to request and wait for GC.
+ // Returns whether a GC was performed.
bool AwaitCollectionBackground(LocalHeap* local_heap);
private:
@@ -50,8 +56,21 @@ class CollectionBarrier {
base::Mutex mutex_;
base::ConditionVariable cv_wakeup_;
base::ElapsedTimer timer_;
+
+ // Flag that main thread checks whether a GC was requested from the background
+ // thread.
std::atomic<bool> collection_requested_{false};
+
+ // This flag is used to detect whether to block for the GC. Only set if the
+ // main thread was actually running and is unset when GC resumes background
+ // threads.
bool block_for_collection_ = false;
+
+ // Set to true when a GC was performed, false in case it was canceled because
+ // the main thread parked itself without running the GC.
+ bool collection_performed_ = false;
+
+ // Will be set as soon as Isolate starts tear down.
bool shutdown_requested_ = false;
};
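The reworked barrier distinguishes "GC performed" from "GC cancelled" and refuses new requests once shutdown has begun. Below is a condensed, standalone sketch of that protocol using std::mutex and std::condition_variable in place of V8's base primitives; BarrierSketch and its members are illustrative only.

#include <atomic>
#include <condition_variable>
#include <mutex>

class BarrierSketch {
 public:
  // Background thread: returns false if tear-down has already started.
  bool TryRequestGC() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (shutdown_requested_) return false;
    collection_requested_.store(true);
    return true;
  }

  // Background thread: blocks until the main thread resumes it; returns
  // whether a GC was actually performed.
  bool AwaitCollectionBackground() {
    std::unique_lock<std::mutex> guard(mutex_);
    if (shutdown_requested_ || !collection_requested_.load()) return false;
    block_for_collection_ = true;
    cv_.wait(guard, [this] { return !block_for_collection_; });
    return collection_performed_;
  }

  // Main thread: a GC ran; wake all waiters with a positive result.
  void ResumeThreadsAwaitingCollection() { Resume(/*performed=*/true); }
  // Main thread: the requested GC was cancelled; wake waiters with a negative result.
  void CancelCollectionAndResumeThreads() { Resume(/*performed=*/false); }

 private:
  void Resume(bool performed) {
    std::lock_guard<std::mutex> guard(mutex_);
    collection_requested_.store(false);
    block_for_collection_ = false;
    collection_performed_ = performed;
    cv_.notify_all();
  }

  std::mutex mutex_;
  std::condition_variable cv_;
  std::atomic<bool> collection_requested_{false};
  bool block_for_collection_ = false;
  bool collection_performed_ = false;
  bool shutdown_requested_ = false;
};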
diff --git a/deps/v8/src/heap/concurrent-allocator.cc b/deps/v8/src/heap/concurrent-allocator.cc
index 6f4bd625c6..bfdfaea7fe 100644
--- a/deps/v8/src/heap/concurrent-allocator.cc
+++ b/deps/v8/src/heap/concurrent-allocator.cc
@@ -36,7 +36,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
AllocationResult result = local_heap.AllocateRaw(
kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
- AllocationAlignment::kWordAligned);
+ AllocationAlignment::kTaggedAligned);
if (!result.IsRetry()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kSmallObjectSize,
@@ -47,7 +47,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
result = local_heap.AllocateRaw(kMediumObjectSize, AllocationType::kOld,
AllocationOrigin::kRuntime,
- AllocationAlignment::kWordAligned);
+ AllocationAlignment::kTaggedAligned);
if (!result.IsRetry()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kMediumObjectSize,
@@ -58,7 +58,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
result = local_heap.AllocateRaw(kLargeObjectSize, AllocationType::kOld,
AllocationOrigin::kRuntime,
- AllocationAlignment::kWordAligned);
+ AllocationAlignment::kTaggedAligned);
if (!result.IsRetry()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kLargeObjectSize,
@@ -81,10 +81,22 @@ void StressConcurrentAllocatorTask::Schedule(Isolate* isolate) {
}
void ConcurrentAllocator::FreeLinearAllocationArea() {
+ // The code page of the linear allocation area needs to be unprotected
+ // because we are going to write a filler into that memory area below.
+ base::Optional<CodePageMemoryModificationScope> optional_scope;
+ if (lab_.IsValid() && space_->identity() == CODE_SPACE) {
+ optional_scope.emplace(MemoryChunk::FromAddress(lab_.top()));
+ }
lab_.CloseAndMakeIterable();
}
void ConcurrentAllocator::MakeLinearAllocationAreaIterable() {
+ // The code page of the linear allocation area needs to be unprotected
+ // because we are going to write a filler into that memory area below.
+ base::Optional<CodePageMemoryModificationScope> optional_scope;
+ if (lab_.IsValid() && space_->identity() == CODE_SPACE) {
+ optional_scope.emplace(MemoryChunk::FromAddress(lab_.top()));
+ }
lab_.MakeIterable();
}
@@ -110,7 +122,7 @@ void ConcurrentAllocator::UnmarkLinearAllocationArea() {
AllocationResult ConcurrentAllocator::AllocateInLabSlow(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
if (!EnsureLab(origin)) {
- return AllocationResult::Retry(OLD_SPACE);
+ return AllocationResult::Retry(space_->identity());
}
AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
@@ -121,10 +133,10 @@ AllocationResult ConcurrentAllocator::AllocateInLabSlow(
bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
auto result = space_->RawRefillLabBackground(
- local_heap_, kLabSize, kMaxLabSize, kWordAligned, origin);
+ local_heap_, kLabSize, kMaxLabSize, kTaggedAligned, origin);
if (!result) return false;
- if (local_heap_->heap()->incremental_marking()->black_allocation()) {
+ if (IsBlackAllocationEnabled()) {
Address top = result->first;
Address limit = top + result->second;
Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
@@ -145,17 +157,23 @@ AllocationResult ConcurrentAllocator::AllocateOutsideLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
auto result = space_->RawRefillLabBackground(local_heap_, object_size,
object_size, alignment, origin);
- if (!result) return AllocationResult::Retry(OLD_SPACE);
+ if (!result) return AllocationResult::Retry(space_->identity());
HeapObject object = HeapObject::FromAddress(result->first);
- if (local_heap_->heap()->incremental_marking()->black_allocation()) {
- local_heap_->heap()->incremental_marking()->MarkBlackBackground(
- object, object_size);
+ if (IsBlackAllocationEnabled()) {
+ owning_heap()->incremental_marking()->MarkBlackBackground(object,
+ object_size);
}
return AllocationResult(object);
}
+bool ConcurrentAllocator::IsBlackAllocationEnabled() const {
+ return owning_heap()->incremental_marking()->black_allocation();
+}
+
+Heap* ConcurrentAllocator::owning_heap() const { return space_->heap(); }
+
} // namespace internal
} // namespace v8
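The base::Optional dance above enters the code-page modification scope only when a CODE_SPACE LAB is actually open. A generic sketch of that conditional-RAII idiom, with std::optional and std::lock_guard standing in for base::Optional and CodePageMemoryModificationScope (function and parameter names are illustrative):

#include <mutex>
#include <optional>

void MakeAreaIterable(std::mutex* page_lock, bool needs_scope) {
  // The guard is only constructed when needed; if emplace() never runs,
  // nothing is acquired and nothing is released at the end of the function.
  std::optional<std::lock_guard<std::mutex>> optional_scope;
  if (needs_scope) optional_scope.emplace(*page_lock);
  // ... write the filler / make the area iterable while the scope is active ...
}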
diff --git a/deps/v8/src/heap/concurrent-allocator.h b/deps/v8/src/heap/concurrent-allocator.h
index fe6144eb7e..bf596cf6de 100644
--- a/deps/v8/src/heap/concurrent-allocator.h
+++ b/deps/v8/src/heap/concurrent-allocator.h
@@ -63,6 +63,12 @@ class ConcurrentAllocator {
V8_EXPORT_PRIVATE AllocationResult AllocateOutsideLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin);
+ bool IsBlackAllocationEnabled() const;
+
+ // Returns the Heap of space_. This might differ from the LocalHeap's Heap for
+ // shared spaces.
+ Heap* owning_heap() const;
+
LocalHeap* const local_heap_;
PagedSpace* const space_;
LocalAllocationBuffer lab_;
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index fc19fe3f0d..f806c4eca6 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -23,7 +23,7 @@
#include "src/heap/memory-measurement.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
-#include "src/heap/worklist.h"
+#include "src/heap/weak-object-worklists.h"
#include "src/init/v8.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
@@ -41,8 +41,9 @@ namespace internal {
class ConcurrentMarkingState final
: public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
public:
- explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
- : memory_chunk_data_(memory_chunk_data) {}
+ ConcurrentMarkingState(PtrComprCageBase cage_base,
+ MemoryChunkDataMap* memory_chunk_data)
+ : MarkingStateBase(cage_base), memory_chunk_data_(memory_chunk_data) {}
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const BasicMemoryChunk* chunk) {
return chunk->marking_bitmap<AccessMode::ATOMIC>();
@@ -85,17 +86,17 @@ class ConcurrentMarkingVisitor final
public:
ConcurrentMarkingVisitor(int task_id,
MarkingWorklists::Local* local_marking_worklists,
- WeakObjects* weak_objects, Heap* heap,
+ WeakObjects::Local* local_weak_objects, Heap* heap,
unsigned mark_compact_epoch,
base::EnumSet<CodeFlushMode> code_flush_mode,
bool embedder_tracing_enabled,
bool should_keep_ages_unchanged,
MemoryChunkDataMap* memory_chunk_data)
- : MarkingVisitorBase(task_id, local_marking_worklists, weak_objects, heap,
+ : MarkingVisitorBase(local_marking_worklists, local_weak_objects, heap,
mark_compact_epoch, code_flush_mode,
embedder_tracing_enabled,
should_keep_ages_unchanged),
- marking_state_(memory_chunk_data),
+ marking_state_(heap->isolate(), memory_chunk_data),
memory_chunk_data_(memory_chunk_data) {}
template <typename T>
@@ -119,12 +120,19 @@ class ConcurrentMarkingVisitor final
int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
return VisitJSObjectSubclass(map, object);
}
+ int VisitWasmSuspenderObject(Map map, WasmSuspenderObject object) {
+ return VisitJSObjectSubclass(map, object);
+ }
#endif // V8_ENABLE_WEBASSEMBLY
int VisitJSWeakCollection(Map map, JSWeakCollection object) {
return VisitJSObjectSubclass(map, object);
}
+ int VisitJSFinalizationRegistry(Map map, JSFinalizationRegistry object) {
+ return VisitJSObjectSubclass(map, object);
+ }
+
int VisitConsString(Map map, ConsString object) {
return VisitFullyWithSnapshot(map, object);
}
@@ -159,7 +167,7 @@ class ConcurrentMarkingVisitor final
}
} else if (marking_state_.IsWhite(value)) {
- weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
+ local_weak_objects_->next_ephemerons_local.Push(Ephemeron{key, value});
}
return false;
}
@@ -169,6 +177,10 @@ class ConcurrentMarkingVisitor final
return marking_state_.GreyToBlack(object);
}
+ bool ShouldVisitUnaccounted(HeapObject object) {
+ return marking_state_.GreyToBlackUnaccounted(object);
+ }
+
private:
// Helper class for collecting in-object slot addresses and values.
class SlotSnapshottingVisitor final : public ObjectVisitorWithCageBases {
@@ -204,19 +216,21 @@ class ConcurrentMarkingVisitor final
void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
// This should never happen, because snapshotting is performed only on
- // JSObjects (and derived classes).
+ // some String subclasses.
UNREACHABLE();
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
// This should never happen, because snapshotting is performed only on
- // JSObjects (and derived classes).
+ // some String subclasses.
UNREACHABLE();
}
void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
- DCHECK(host.IsWeakCell() || host.IsJSWeakRef());
+ // This should never happen, because snapshotting is performed only on
+ // some String subclasses.
+ UNREACHABLE();
}
private:
@@ -248,11 +262,15 @@ class ConcurrentMarkingVisitor final
// The length() function checks that the length is a Smi.
// This is not necessarily the case if the array is being left-trimmed.
Object length = object.unchecked_length(kAcquireLoad);
- if (!ShouldVisit(object)) return 0;
+ // No accounting here to avoid re-reading the length which could already
+ // contain a non-SMI value when left-trimming happens concurrently.
+ if (!ShouldVisitUnaccounted(object)) return 0;
// The cached length must be the actual length as the array is not black.
// Left trimming marks the array black before over-writing the length.
DCHECK(length.IsSmi());
int size = T::SizeFor(Smi::ToInt(length));
+ marking_state_.IncrementLiveBytes(MemoryChunk::FromHeapObject(object),
+ size);
VisitMapPointer(object);
T::BodyDescriptor::IterateBody(map, object, size, this);
return size;
@@ -419,10 +437,6 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
: heap_(heap),
marking_worklists_(marking_worklists),
weak_objects_(weak_objects) {
-#ifndef V8_ATOMIC_MARKING_STATE
- // Concurrent and parallel marking require atomic marking state.
- CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
-#endif
#ifndef V8_ATOMIC_OBJECT_FIELD_WRITES
// Concurrent marking requires atomic object field writes.
CHECK(!FLAG_concurrent_marking);
@@ -438,8 +452,9 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
uint8_t task_id = delegate->GetTaskId() + 1;
TaskState* task_state = &task_state_[task_id];
MarkingWorklists::Local local_marking_worklists(marking_worklists_);
+ WeakObjects::Local local_weak_objects(weak_objects_);
ConcurrentMarkingVisitor visitor(
- task_id, &local_marking_worklists, weak_objects_, heap_,
+ task_id, &local_marking_worklists, &local_weak_objects, heap_,
mark_compact_epoch, code_flush_mode,
heap_->local_embedder_heap_tracer()->InUse(), should_keep_ages_unchanged,
&task_state->memory_chunk_data);
@@ -460,8 +475,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
{
Ephemeron ephemeron;
-
- while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
+ while (local_weak_objects.current_ephemerons_local.Pop(&ephemeron)) {
if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
another_ephemeron_iteration = true;
}
@@ -529,8 +543,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
if (done) {
Ephemeron ephemeron;
-
- while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
+ while (local_weak_objects.discovered_ephemerons_local.Pop(&ephemeron)) {
if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
another_ephemeron_iteration = true;
}
@@ -538,18 +551,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
}
local_marking_worklists.Publish();
- weak_objects_->transition_arrays.FlushToGlobal(task_id);
- weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
- weak_objects_->current_ephemerons.FlushToGlobal(task_id);
- weak_objects_->next_ephemerons.FlushToGlobal(task_id);
- weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
- weak_objects_->weak_references.FlushToGlobal(task_id);
- weak_objects_->js_weak_refs.FlushToGlobal(task_id);
- weak_objects_->weak_cells.FlushToGlobal(task_id);
- weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
- weak_objects_->code_flushing_candidates.FlushToGlobal(task_id);
- weak_objects_->baseline_flushing_candidates.FlushToGlobal(task_id);
- weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
+ local_weak_objects.Publish();
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
total_marked_bytes_ += marked_bytes;
@@ -570,10 +572,10 @@ size_t ConcurrentMarking::GetMaxConcurrency(size_t worker_count) {
marking_items += worklist.worklist->Size();
return std::min<size_t>(
kMaxTasks,
- worker_count + std::max<size_t>(
- {marking_items,
- weak_objects_->discovered_ephemerons.GlobalPoolSize(),
- weak_objects_->current_ephemerons.GlobalPoolSize()}));
+ worker_count +
+ std::max<size_t>({marking_items,
+ weak_objects_->discovered_ephemerons.Size(),
+ weak_objects_->current_ephemerons.Size()}));
}
void ConcurrentMarking::ScheduleJob(TaskPriority priority) {
@@ -594,8 +596,8 @@ void ConcurrentMarking::RescheduleJobIfNeeded(TaskPriority priority) {
if (heap_->IsTearingDown()) return;
if (marking_worklists_->shared()->IsEmpty() &&
- weak_objects_->current_ephemerons.IsGlobalPoolEmpty() &&
- weak_objects_->discovered_ephemerons.IsGlobalPoolEmpty()) {
+ weak_objects_->current_ephemerons.IsEmpty() &&
+ weak_objects_->discovered_ephemerons.IsEmpty()) {
return;
}
if (!job_handle_ || !job_handle_->IsValid()) {
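With WeakObjects::Local, each marking task now pushes into private buffers and publishes them to the shared state in a single step, replacing the per-worklist FlushToGlobal() calls. A simplified standalone sketch of that local-view/publish pattern (GlobalWorklist and LocalWorklist are illustrative, not V8's types):

#include <cstddef>
#include <iterator>
#include <mutex>
#include <utility>
#include <vector>

template <typename T>
class GlobalWorklist {
 public:
  void Merge(std::vector<T>&& entries) {
    std::lock_guard<std::mutex> guard(mutex_);
    entries_.insert(entries_.end(), std::make_move_iterator(entries.begin()),
                    std::make_move_iterator(entries.end()));
  }
  std::size_t Size() const {
    std::lock_guard<std::mutex> guard(mutex_);
    return entries_.size();
  }

 private:
  mutable std::mutex mutex_;
  std::vector<T> entries_;
};

template <typename T>
class LocalWorklist {
 public:
  explicit LocalWorklist(GlobalWorklist<T>* global) : global_(global) {}
  void Push(T entry) { buffer_.push_back(std::move(entry)); }
  // A single Publish() makes all locally collected entries globally visible.
  void Publish() {
    global_->Merge(std::move(buffer_));
    buffer_.clear();
  }

 private:
  GlobalWorklist<T>* global_;
  std::vector<T> buffer_;
};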
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 12ee70da56..caba9450b5 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -16,7 +16,6 @@
#include "src/heap/memory-measurement.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
-#include "src/heap/worklist.h"
#include "src/init/v8.h"
#include "src/tasks/cancelable-task.h"
#include "src/utils/allocation.h"
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.cc b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
index 6a7173a478..15737881ef 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -10,6 +10,7 @@
#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/platform.h"
+#include "include/v8-isolate.h"
#include "include/v8-local-handle.h"
#include "include/v8-platform.h"
#include "src/base/logging.h"
@@ -18,6 +19,7 @@
#include "src/base/platform/time.h"
#include "src/execution/isolate-inl.h"
#include "src/flags/flags.h"
+#include "src/handles/global-handles.h"
#include "src/handles/handles.h"
#include "src/heap/base/stack.h"
#include "src/heap/cppgc-js/cpp-snapshot.h"
@@ -220,10 +222,8 @@ void UnifiedHeapMarker::AddObject(void* object) {
void FatalOutOfMemoryHandlerImpl(const std::string& reason,
const SourceLocation&, HeapBase* heap) {
- FatalProcessOutOfMemory(
- reinterpret_cast<v8::internal::Isolate*>(
- static_cast<v8::internal::CppHeap*>(heap)->isolate()),
- reason.c_str());
+ FatalProcessOutOfMemory(static_cast<v8::internal::CppHeap*>(heap)->isolate(),
+ reason.c_str());
}
} // namespace
@@ -330,7 +330,9 @@ CppHeap::CppHeap(
: cppgc::internal::HeapBase(
std::make_shared<CppgcPlatformAdapter>(platform), custom_spaces,
cppgc::internal::HeapBase::StackSupport::
- kSupportsConservativeStackScan),
+ kSupportsConservativeStackScan,
+ cppgc::internal::HeapBase::MarkingType::kIncrementalAndConcurrent,
+ cppgc::internal::HeapBase::SweepingType::kIncrementalAndConcurrent),
wrapper_descriptor_(wrapper_descriptor) {
CHECK_NE(WrapperDescriptor::kUnknownEmbedderId,
wrapper_descriptor_.embedder_id_for_garbage_collected);
@@ -363,11 +365,8 @@ void CppHeap::AttachIsolate(Isolate* isolate) {
isolate_->heap_profiler()->AddBuildEmbedderGraphCallback(
&CppGraphBuilder::Run, this);
}
- isolate_->heap()->SetEmbedderHeapTracer(this);
- isolate_->heap()->local_embedder_heap_tracer()->SetWrapperDescriptor(
- wrapper_descriptor_);
SetMetricRecorder(std::make_unique<MetricRecorderAdapter>(*this));
- SetStackStart(base::Stack::GetStackStart());
+ isolate_->global_handles()->SetStackStart(base::Stack::GetStackStart());
oom_handler().SetCustomHandler(&FatalOutOfMemoryHandlerImpl);
no_gc_scope_--;
}
@@ -379,17 +378,20 @@ void CppHeap::DetachIsolate() {
// Delegate to existing EmbedderHeapTracer API to finish any ongoing garbage
// collection.
- FinalizeTracing();
+ if (isolate_->heap()->incremental_marking()->IsMarking()) {
+ isolate_->heap()->FinalizeIncrementalMarkingAtomically(
+ i::GarbageCollectionReason::kExternalFinalize);
+ }
sweeper_.FinishIfRunning();
- if (isolate_->heap_profiler()) {
- isolate_->heap_profiler()->RemoveBuildEmbedderGraphCallback(
- &CppGraphBuilder::Run, this);
+ auto* heap_profiler = isolate_->heap_profiler();
+ if (heap_profiler) {
+ heap_profiler->RemoveBuildEmbedderGraphCallback(&CppGraphBuilder::Run,
+ this);
}
SetMetricRecorder(nullptr);
isolate_ = nullptr;
// Any future garbage collections will ignore the V8->C++ references.
- isolate()->SetEmbedderHeapTracer(nullptr);
oom_handler().SetCustomHandler(nullptr);
// Enter no GC scope.
no_gc_scope_++;
@@ -408,36 +410,42 @@ void CppHeap::RegisterV8References(
namespace {
-bool ShouldReduceMemory(CppHeap::TraceFlags flags) {
- return (flags == CppHeap::TraceFlags::kReduceMemory) ||
- (flags == CppHeap::TraceFlags::kForced);
+bool IsMemoryReducingGC(CppHeap::GarbageCollectionFlags flags) {
+ return flags & CppHeap::GarbageCollectionFlagValues::kReduceMemory;
}
-} // namespace
+bool IsForceGC(CppHeap::GarbageCollectionFlags flags) {
+ return flags & CppHeap::GarbageCollectionFlagValues::kForced;
+}
-void CppHeap::TracePrologue(TraceFlags flags) {
+bool ShouldReduceMemory(CppHeap::GarbageCollectionFlags flags) {
+ return IsMemoryReducingGC(flags) || IsForceGC(flags);
+}
+
+} // namespace
+void CppHeap::TracePrologue(GarbageCollectionFlags gc_flags) {
CHECK(!sweeper_.IsSweepingInProgress());
#if defined(CPPGC_YOUNG_GENERATION)
cppgc::internal::SequentialUnmarker unmarker(raw_heap());
#endif // defined(CPPGC_YOUNG_GENERATION)
- current_flags_ = flags;
+ current_gc_flags_ = gc_flags;
+
const UnifiedHeapMarker::MarkingConfig marking_config{
UnifiedHeapMarker::MarkingConfig::CollectionType::kMajor,
cppgc::Heap::StackState::kNoHeapPointers,
- ((current_flags_ & TraceFlags::kForced) &&
- !force_incremental_marking_for_testing_)
+ (IsForceGC(current_gc_flags_) && !force_incremental_marking_for_testing_)
? UnifiedHeapMarker::MarkingConfig::MarkingType::kAtomic
: UnifiedHeapMarker::MarkingConfig::MarkingType::
kIncrementalAndConcurrent,
- flags & TraceFlags::kForced
+ IsForceGC(current_gc_flags_)
? UnifiedHeapMarker::MarkingConfig::IsForcedGC::kForced
: UnifiedHeapMarker::MarkingConfig::IsForcedGC::kNotForced};
DCHECK_IMPLIES(!isolate_, (cppgc::Heap::MarkingType::kAtomic ==
marking_config.marking_type) ||
force_incremental_marking_for_testing_);
- if (ShouldReduceMemory(flags)) {
+ if (ShouldReduceMemory(current_gc_flags_)) {
// Only enable compaction when in a memory reduction garbage collection as
// it may significantly increase the final garbage collection pause.
compactor_.InitializeIfShouldCompact(marking_config.marking_type,
@@ -450,7 +458,7 @@ void CppHeap::TracePrologue(TraceFlags flags) {
marking_done_ = false;
}
-bool CppHeap::AdvanceTracing(double deadline_in_ms) {
+bool CppHeap::AdvanceTracing(double max_duration) {
is_in_v8_marking_step_ = true;
cppgc::internal::StatsCollector::EnabledScope stats_scope(
stats_collector(),
@@ -458,7 +466,7 @@ bool CppHeap::AdvanceTracing(double deadline_in_ms) {
: cppgc::internal::StatsCollector::kIncrementalMark);
const v8::base::TimeDelta deadline =
in_atomic_pause_ ? v8::base::TimeDelta::Max()
- : v8::base::TimeDelta::FromMillisecondsD(deadline_in_ms);
+ : v8::base::TimeDelta::FromMillisecondsD(max_duration);
const size_t marked_bytes_limit = in_atomic_pause_ ? SIZE_MAX : 0;
DCHECK_NOT_NULL(marker_);
// TODO(chromium:1056170): Replace when unified heap transitions to
@@ -472,20 +480,18 @@ bool CppHeap::AdvanceTracing(double deadline_in_ms) {
bool CppHeap::IsTracingDone() { return marking_done_; }
-void CppHeap::EnterFinalPause(EmbedderStackState stack_state) {
+void CppHeap::EnterFinalPause(cppgc::EmbedderStackState stack_state) {
CHECK(!in_disallow_gc_scope());
in_atomic_pause_ = true;
if (override_stack_state_) {
stack_state = *override_stack_state_;
}
marker_->EnterAtomicPause(stack_state);
- if (compactor_.CancelIfShouldNotCompact(cppgc::Heap::MarkingType::kAtomic,
- stack_state)) {
- marker_->NotifyCompactionCancelled();
- }
+ compactor_.CancelIfShouldNotCompact(cppgc::Heap::MarkingType::kAtomic,
+ stack_state);
}
-void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
+void CppHeap::TraceEpilogue() {
CHECK(in_atomic_pause_);
CHECK(marking_done_);
{
@@ -523,12 +529,12 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
compactable_space_handling = compactor_.CompactSpacesIfEnabled();
const cppgc::internal::Sweeper::SweepingConfig sweeping_config{
// In case the GC was forced, also finalize sweeping right away.
- current_flags_ & TraceFlags::kForced
+ IsForceGC(current_gc_flags_)
? cppgc::internal::Sweeper::SweepingConfig::SweepingType::kAtomic
: cppgc::internal::Sweeper::SweepingConfig::SweepingType::
kIncrementalAndConcurrent,
compactable_space_handling,
- ShouldReduceMemory(current_flags_)
+ ShouldReduceMemory(current_gc_flags_)
? cppgc::internal::Sweeper::SweepingConfig::FreeMemoryHandling::
kDiscardWherePossible
: cppgc::internal::Sweeper::SweepingConfig::FreeMemoryHandling::
@@ -539,9 +545,6 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
sweeping_config.sweeping_type);
sweeper().Start(sweeping_config);
}
- DCHECK_NOT_NULL(trace_summary);
- trace_summary->allocated_size = SIZE_MAX;
- trace_summary->time = 0;
in_atomic_pause_ = false;
sweeper().NotifyDoneIfNeeded();
}
@@ -561,7 +564,7 @@ void CppHeap::ReportBufferedAllocationSizeIfPossible() {
// finalizations where not allowed.
// - Recursive sweeping.
// - GC forbidden scope.
- if (sweeper().IsSweepingOnMutatorThread() || in_no_gc_scope()) {
+ if (sweeper().IsSweepingOnMutatorThread() || in_no_gc_scope() || !isolate_) {
return;
}
@@ -571,10 +574,12 @@ void CppHeap::ReportBufferedAllocationSizeIfPossible() {
const int64_t bytes_to_report = buffered_allocated_bytes_;
buffered_allocated_bytes_ = 0;
+ auto* const tracer = isolate_->heap()->local_embedder_heap_tracer();
+ DCHECK_NOT_NULL(tracer);
if (bytes_to_report < 0) {
- DecreaseAllocatedSize(static_cast<size_t>(-bytes_to_report));
+ tracer->DecreaseAllocatedSize(static_cast<size_t>(-bytes_to_report));
} else {
- IncreaseAllocatedSize(static_cast<size_t>(bytes_to_report));
+ tracer->IncreaseAllocatedSize(static_cast<size_t>(bytes_to_report));
}
}
@@ -588,17 +593,16 @@ void CppHeap::CollectGarbageForTesting(
SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
if (isolate_) {
- // Go through EmbedderHeapTracer API and perform a unified heap collection.
- GarbageCollectionForTesting(stack_state);
+ reinterpret_cast<v8::Isolate*>(isolate_)
+ ->RequestGarbageCollectionForTesting(
+ v8::Isolate::kFullGarbageCollection, stack_state);
} else {
// Perform an atomic GC, with starting incremental/concurrent marking and
// immediately finalizing the garbage collection.
- if (!IsMarking()) TracePrologue(TraceFlags::kForced);
+ if (!IsMarking()) TracePrologue(GarbageCollectionFlagValues::kForced);
EnterFinalPause(stack_state);
AdvanceTracing(std::numeric_limits<double>::infinity());
- TraceSummary trace_summary;
- TraceEpilogue(&trace_summary);
- DCHECK_EQ(SIZE_MAX, trace_summary.allocated_size);
+ TraceEpilogue();
}
}
@@ -616,12 +620,12 @@ void CppHeap::StartIncrementalGarbageCollectionForTesting() {
DCHECK_NULL(isolate_);
if (IsMarking()) return;
force_incremental_marking_for_testing_ = true;
- TracePrologue(TraceFlags::kForced);
+ TracePrologue(GarbageCollectionFlagValues::kForced);
force_incremental_marking_for_testing_ = false;
}
void CppHeap::FinalizeIncrementalGarbageCollectionForTesting(
- EmbedderStackState stack_state) {
+ cppgc::EmbedderStackState stack_state) {
DCHECK(!in_no_gc_scope());
DCHECK_NULL(isolate_);
DCHECK(IsMarking());
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.h b/deps/v8/src/heap/cppgc-js/cpp-heap.h
index a2d11bcd39..3f9e8d9ec7 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.h
@@ -12,8 +12,8 @@ static_assert(
#include "include/v8-callbacks.h"
#include "include/v8-cppgc.h"
-#include "include/v8-embedder-heap.h"
#include "include/v8-metrics.h"
+#include "src/base/flags.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/stats-collector.h"
@@ -29,9 +29,16 @@ namespace internal {
class V8_EXPORT_PRIVATE CppHeap final
: public cppgc::internal::HeapBase,
public v8::CppHeap,
- public v8::EmbedderHeapTracer,
public cppgc::internal::StatsCollector::AllocationObserver {
public:
+ enum GarbageCollectionFlagValues : uint8_t {
+ kNoFlags = 0,
+ kReduceMemory = 1 << 1,
+ kForced = 1 << 2,
+ };
+
+ using GarbageCollectionFlags = base::Flags<GarbageCollectionFlagValues>;
+
class MetricRecorderAdapter final : public cppgc::internal::MetricRecorder {
public:
static constexpr int kMaxBatchedEvents = 16;
@@ -106,14 +113,13 @@ class V8_EXPORT_PRIVATE CppHeap final
void FinishSweepingIfRunning();
- // v8::EmbedderHeapTracer interface.
void RegisterV8References(
- const std::vector<std::pair<void*, void*> >& embedder_fields) final;
- void TracePrologue(TraceFlags flags) final;
- bool AdvanceTracing(double deadline_in_ms) final;
- bool IsTracingDone() final;
- void TraceEpilogue(TraceSummary* trace_summary) final;
- void EnterFinalPause(EmbedderStackState stack_state) final;
+ const std::vector<std::pair<void*, void*>>& embedder_fields);
+ void TracePrologue(GarbageCollectionFlags);
+ bool AdvanceTracing(double max_duration);
+ bool IsTracingDone();
+ void TraceEpilogue();
+ void EnterFinalPause(cppgc::EmbedderStackState stack_state);
// StatsCollector::AllocationObserver interface.
void AllocatedObjectSizeIncreased(size_t) final;
@@ -122,6 +128,12 @@ class V8_EXPORT_PRIVATE CppHeap final
MetricRecorderAdapter* GetMetricRecorder() const;
+ v8::WrapperDescriptor wrapper_descriptor() const {
+ return wrapper_descriptor_;
+ }
+
+ Isolate* isolate() const { return isolate_; }
+
private:
void FinalizeIncrementalGarbageCollectionIfNeeded(
cppgc::Heap::StackState) final {
@@ -132,11 +144,12 @@ class V8_EXPORT_PRIVATE CppHeap final
void ReportBufferedAllocationSizeIfPossible();
void StartIncrementalGarbageCollectionForTesting() final;
- void FinalizeIncrementalGarbageCollectionForTesting(EmbedderStackState) final;
+ void FinalizeIncrementalGarbageCollectionForTesting(
+ cppgc::EmbedderStackState) final;
Isolate* isolate_ = nullptr;
bool marking_done_ = false;
- TraceFlags current_flags_ = TraceFlags::kNoFlags;
+ GarbageCollectionFlags current_gc_flags_;
// Buffered allocated bytes. Reporting allocated bytes to V8 can trigger a GC
// atomic pause. Allocated bytes are buffered in case this is temporarily
@@ -153,6 +166,8 @@ class V8_EXPORT_PRIVATE CppHeap final
friend class MetricRecorderAdapter;
};
+DEFINE_OPERATORS_FOR_FLAGS(CppHeap::GarbageCollectionFlags)
+
} // namespace internal
} // namespace v8
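GarbageCollectionFlagValues is a bit-flag enum that base::Flags wraps into a type-safe set, with DEFINE_OPERATORS_FOR_FLAGS generating the combining operators. A minimal sketch of how helpers such as IsForceGC() test those bits, with a plain integer standing in for the base::Flags wrapper (names are illustrative):

#include <cstdint>

enum GCFlagValues : std::uint8_t {
  kNoFlags = 0,
  kReduceMemory = 1 << 1,
  kForced = 1 << 2,
};
using GCFlags = std::uint8_t;  // base::Flags provides a type-safe wrapper instead.

constexpr bool IsMemoryReducingGC(GCFlags flags) {
  return (flags & kReduceMemory) != 0;
}
constexpr bool IsForceGC(GCFlags flags) { return (flags & kForced) != 0; }

static_assert(IsForceGC(kForced | kReduceMemory), "combined flags keep each bit");
static_assert(!IsForceGC(kReduceMemory), "unrelated bits do not leak");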
diff --git a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
index 9b20b5c0a7..e1065376ea 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
@@ -463,7 +463,8 @@ class CppGraphBuilderImpl final {
void AddEdge(State& parent, const TracedReferenceBase& ref,
const std::string& edge_name) {
DCHECK(parent.IsVisibleNotDependent());
- v8::Local<v8::Value> v8_value = ref.Get(cpp_heap_.isolate());
+ v8::Local<v8::Value> v8_value =
+ ref.Get(reinterpret_cast<v8::Isolate*>(cpp_heap_.isolate()));
if (!v8_value.IsEmpty()) {
if (!parent.get_node()) {
parent.set_node(AddNode(*parent.header()));
@@ -836,7 +837,8 @@ void CppGraphBuilderImpl::VisitWeakContainerForVisibility(
void CppGraphBuilderImpl::VisitForVisibility(State& parent,
const TracedReferenceBase& ref) {
- v8::Local<v8::Value> v8_value = ref.Get(cpp_heap_.isolate());
+ v8::Local<v8::Value> v8_value =
+ ref.Get(reinterpret_cast<v8::Isolate*>(cpp_heap_.isolate()));
if (!v8_value.IsEmpty()) {
parent.MarkVisible();
}
diff --git a/deps/v8/src/heap/cppgc/caged-heap.h b/deps/v8/src/heap/cppgc/caged-heap.h
index 89b2f7f112..1d20bbcff3 100644
--- a/deps/v8/src/heap/cppgc/caged-heap.h
+++ b/deps/v8/src/heap/cppgc/caged-heap.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_CPPGC_CAGED_HEAP_H_
#define V8_HEAP_CPPGC_CAGED_HEAP_H_
+#include <limits>
#include <memory>
#include "include/cppgc/platform.h"
@@ -22,7 +23,11 @@ class CagedHeap final {
public:
using AllocatorType = v8::base::BoundedPageAllocator;
- static uintptr_t OffsetFromAddress(const void* address) {
+ template <typename RetType = uintptr_t>
+ static RetType OffsetFromAddress(const void* address) {
+ static_assert(
+ std::numeric_limits<RetType>::max() >= (kCagedHeapReservationSize - 1),
+ "The return type should be large enough");
return reinterpret_cast<uintptr_t>(address) &
(kCagedHeapReservationAlignment - 1);
}
@@ -52,6 +57,8 @@ class CagedHeap final {
reserved_area_.address();
}
+ void* base() const { return reserved_area_.address(); }
+
private:
const VirtualMemory reserved_area_;
std::unique_ptr<AllocatorType> bounded_allocator_;
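The templated OffsetFromAddress() lets callers pick a narrower return type, and the static_assert guarantees that type can represent every offset inside the cage. A condensed sketch of the idea; the 4 GiB constant and the function name are illustrative stand-ins, not V8's actual reservation parameters:

#include <cstdint>
#include <limits>

constexpr std::uint64_t kCageSize = std::uint64_t{4} * 1024 * 1024 * 1024;  // power of two

template <typename RetType = std::uint64_t>
RetType OffsetInCage(const void* address) {
  static_assert(std::numeric_limits<RetType>::max() >= kCageSize - 1,
                "return type too narrow to hold any cage offset");
  // Masking works because the cage base is aligned to its (power-of-two) size.
  return static_cast<RetType>(reinterpret_cast<std::uint64_t>(address) &
                              (kCageSize - 1));
}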
diff --git a/deps/v8/src/heap/cppgc/compactor.cc b/deps/v8/src/heap/cppgc/compactor.cc
index 91f30445a3..c300793515 100644
--- a/deps/v8/src/heap/cppgc/compactor.cc
+++ b/deps/v8/src/heap/cppgc/compactor.cc
@@ -474,6 +474,7 @@ void Compactor::InitializeIfShouldCompact(
compaction_worklists_ = std::make_unique<CompactionWorklists>();
is_enabled_ = true;
+ is_cancelled_ = false;
}
bool Compactor::CancelIfShouldNotCompact(
@@ -481,15 +482,16 @@ bool Compactor::CancelIfShouldNotCompact(
GarbageCollector::Config::StackState stack_state) {
if (!is_enabled_ || ShouldCompact(marking_type, stack_state)) return false;
- DCHECK_NOT_NULL(compaction_worklists_);
- compaction_worklists_->movable_slots_worklist()->Clear();
- compaction_worklists_.reset();
-
+ is_cancelled_ = true;
is_enabled_ = false;
return true;
}
Compactor::CompactableSpaceHandling Compactor::CompactSpacesIfEnabled() {
+ if (is_cancelled_ && compaction_worklists_) {
+ compaction_worklists_->movable_slots_worklist()->Clear();
+ compaction_worklists_.reset();
+ }
if (!is_enabled_) return CompactableSpaceHandling::kSweep;
StatsCollector::EnabledScope stats_scope(heap_.heap()->stats_collector(),
diff --git a/deps/v8/src/heap/cppgc/compactor.h b/deps/v8/src/heap/cppgc/compactor.h
index 46a8e1ef53..82ce5291bb 100644
--- a/deps/v8/src/heap/cppgc/compactor.h
+++ b/deps/v8/src/heap/cppgc/compactor.h
@@ -48,6 +48,7 @@ class V8_EXPORT_PRIVATE Compactor final {
std::unique_ptr<CompactionWorklists> compaction_worklists_;
bool is_enabled_ = false;
+ bool is_cancelled_ = false;
bool enable_for_next_gc_for_testing_ = false;
};
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index d2657ca417..a5c89b6218 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -36,7 +36,7 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
private:
static size_t ObjectSize(const HeapObjectHeader& header) {
- return ObjectView(header).Size();
+ return ObjectView<>(header).Size();
}
bool VisitHeapObjectHeader(HeapObjectHeader& header) {
@@ -53,7 +53,8 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
HeapBase::HeapBase(
std::shared_ptr<cppgc::Platform> platform,
const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
- StackSupport stack_support)
+ StackSupport stack_support, MarkingType marking_support,
+ SweepingType sweeping_support)
: raw_heap_(this, custom_spaces),
platform_(std::move(platform)),
oom_handler_(std::make_unique<FatalOutOfMemoryHandler>(this)),
@@ -81,7 +82,9 @@ HeapBase::HeapBase(
weak_persistent_region_(*oom_handler_.get()),
strong_cross_thread_persistent_region_(*oom_handler_.get()),
weak_cross_thread_persistent_region_(*oom_handler_.get()),
- stack_support_(stack_support) {
+ stack_support_(stack_support),
+ marking_support_(marking_support),
+ sweeping_support_(sweeping_support) {
stats_collector_->RegisterObserver(
&allocation_observer_for_PROCESS_HEAP_STATISTICS_);
}
diff --git a/deps/v8/src/heap/cppgc/heap-base.h b/deps/v8/src/heap/cppgc/heap-base.h
index b0848dc7b7..041f4cf3bd 100644
--- a/deps/v8/src/heap/cppgc/heap-base.h
+++ b/deps/v8/src/heap/cppgc/heap-base.h
@@ -62,10 +62,6 @@ class V8_EXPORT HeapHandle {
namespace internal {
-namespace testing {
-class TestWithHeap;
-} // namespace testing
-
class FatalOutOfMemoryHandler;
class PageBackend;
class PreFinalizerHandler;
@@ -75,6 +71,8 @@ class StatsCollector;
class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
public:
using StackSupport = cppgc::Heap::StackSupport;
+ using MarkingType = cppgc::Heap::MarkingType;
+ using SweepingType = cppgc::Heap::SweepingType;
static HeapBase& From(cppgc::HeapHandle& heap_handle) {
return static_cast<HeapBase&>(heap_handle);
@@ -85,7 +83,8 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
HeapBase(std::shared_ptr<cppgc::Platform> platform,
const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
- StackSupport stack_support);
+ StackSupport stack_support, MarkingType marking_support,
+ SweepingType sweeping_support);
virtual ~HeapBase();
HeapBase(const HeapBase&) = delete;
@@ -125,6 +124,7 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
}
MarkerBase* marker() const { return marker_.get(); }
+ std::unique_ptr<MarkerBase>& GetMarkerRefForTesting() { return marker_; }
Compactor& compactor() { return compactor_; }
@@ -206,6 +206,8 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
int GetCreationThreadId() const { return creation_thread_id_; }
+ MarkingType marking_support() const { return marking_support_; }
+
protected:
// Used by the incremental scheduler to finalize a GC if supported.
virtual void FinalizeIncrementalGarbageCollectionIfNeeded(
@@ -276,8 +278,10 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
int creation_thread_id_ = v8::base::OS::GetCurrentThreadId();
+ const MarkingType marking_support_;
+ const SweepingType sweeping_support_;
+
friend class MarkerBase::IncrementalMarkingTask;
- friend class testing::TestWithHeap;
friend class cppgc::subtle::DisallowGarbageCollectionScope;
friend class cppgc::subtle::NoGarbageCollectionScope;
friend class cppgc::testing::Heap;
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.h b/deps/v8/src/heap/cppgc/heap-object-header.h
index f1d67df8b5..e5a428a5a9 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.h
+++ b/deps/v8/src/heap/cppgc/heap-object-header.h
@@ -19,6 +19,10 @@
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/globals.h"
+#if defined(CPPGC_CAGED_HEAP)
+#include "src/heap/cppgc/caged-heap.h"
+#endif // defined(CPPGC_CAGED_HEAP)
+
namespace cppgc {
class Visitor;
@@ -102,6 +106,11 @@ class HeapObjectHeader {
inline bool IsFinalizable() const;
void Finalize();
+#if defined(CPPGC_CAGED_HEAP)
+ inline void SetNextUnfinalized(HeapObjectHeader* next);
+ inline HeapObjectHeader* GetNextUnfinalized(uintptr_t cage_base) const;
+#endif // defined(CPPGC_CAGED_HEAP)
+
V8_EXPORT_PRIVATE HeapObjectName GetName() const;
template <AccessMode = AccessMode::kNonAtomic>
@@ -140,7 +149,13 @@ class HeapObjectHeader {
inline void StoreEncoded(uint16_t bits, uint16_t mask);
#if defined(V8_TARGET_ARCH_64_BIT)
+ // If cage is enabled, to save on space required by sweeper metadata, we store
+ // the list of to-be-finalized objects inlined in HeapObjectHeader.
+#if defined(CPPGC_CAGED_HEAP)
+ uint32_t next_unfinalized_ = 0;
+#else // !defined(CPPGC_CAGED_HEAP)
uint32_t padding_ = 0;
+#endif // !defined(CPPGC_CAGED_HEAP)
#endif // defined(V8_TARGET_ARCH_64_BIT)
uint16_t encoded_high_;
uint16_t encoded_low_;
@@ -163,9 +178,9 @@ const HeapObjectHeader& HeapObjectHeader::FromObject(const void* object) {
}
HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
-#if defined(V8_TARGET_ARCH_64_BIT)
+#if defined(V8_TARGET_ARCH_64_BIT) && !defined(CPPGC_CAGED_HEAP)
USE(padding_);
-#endif // defined(V8_TARGET_ARCH_64_BIT)
+#endif // defined(V8_TARGET_ARCH_64_BIT) && !defined(CPPGC_CAGED_HEAP)
DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
DCHECK_EQ(0u, size & (sizeof(HeapObjectHeader) - 1));
DCHECK_GE(kMaxSize, size);
@@ -288,6 +303,22 @@ bool HeapObjectHeader::IsFinalizable() const {
return gc_info.finalize;
}
+#if defined(CPPGC_CAGED_HEAP)
+void HeapObjectHeader::SetNextUnfinalized(HeapObjectHeader* next) {
+ next_unfinalized_ = CagedHeap::OffsetFromAddress<uint32_t>(next);
+}
+
+HeapObjectHeader* HeapObjectHeader::GetNextUnfinalized(
+ uintptr_t cage_base) const {
+ DCHECK(cage_base);
+ DCHECK_EQ(0u,
+ CagedHeap::OffsetFromAddress(reinterpret_cast<void*>(cage_base)));
+ return next_unfinalized_ ? reinterpret_cast<HeapObjectHeader*>(
+ cage_base + next_unfinalized_)
+ : nullptr;
+}
+#endif // defined(CPPGC_CAGED_HEAP)
+
template <AccessMode mode>
void HeapObjectHeader::Trace(Visitor* visitor) const {
const GCInfo& gc_info =
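Storing the unfinalized list as 32-bit cage offsets reuses the header word that previously held only padding. A simplified sketch of encoding and decoding such a compressed link; HeaderSketch is illustrative, and V8's real header packs more state and derives the offset by masking rather than subtraction:

#include <cstdint>

struct HeaderSketch {
  std::uint32_t next_unfinalized = 0;  // 0 encodes "no next object".

  void SetNext(const HeaderSketch* next, std::uintptr_t cage_base) {
    next_unfinalized =
        next ? static_cast<std::uint32_t>(
                   reinterpret_cast<std::uintptr_t>(next) - cage_base)
             : 0;
  }
  HeaderSketch* GetNext(std::uintptr_t cage_base) const {
    return next_unfinalized
               ? reinterpret_cast<HeaderSketch*>(cage_base + next_unfinalized)
               : nullptr;
  }
};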
diff --git a/deps/v8/src/heap/cppgc/heap-page.cc b/deps/v8/src/heap/cppgc/heap-page.cc
index 567f152f94..c7af4e971e 100644
--- a/deps/v8/src/heap/cppgc/heap-page.cc
+++ b/deps/v8/src/heap/cppgc/heap-page.cc
@@ -8,6 +8,7 @@
#include "include/cppgc/internal/api-constants.h"
#include "src/base/logging.h"
+#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-space.h"
@@ -239,8 +240,14 @@ void LargePage::Destroy(LargePage* page) {
DCHECK(page);
#if DEBUG
const BaseSpace& space = page->space();
- DCHECK_EQ(space.end(), std::find(space.begin(), space.end(), page));
-#endif
+ {
+ // Destroy() happens on the mutator but another concurrent sweeper task may
+ // add a live object using `BaseSpace::AddPage()` while iterating the
+ // pages.
+ v8::base::LockGuard<v8::base::Mutex> guard(&space.pages_mutex());
+ DCHECK_EQ(space.end(), std::find(space.begin(), space.end(), page));
+ }
+#endif // DEBUG
page->~LargePage();
PageBackend* backend = page->heap().page_backend();
page->heap().stats_collector()->NotifyFreedMemory(
diff --git a/deps/v8/src/heap/cppgc/heap-space.h b/deps/v8/src/heap/cppgc/heap-space.h
index 0c640e653f..18fe7ba225 100644
--- a/deps/v8/src/heap/cppgc/heap-space.h
+++ b/deps/v8/src/heap/cppgc/heap-space.h
@@ -46,6 +46,7 @@ class V8_EXPORT_PRIVATE BaseSpace {
void AddPage(BasePage*);
void RemovePage(BasePage*);
Pages RemoveAllPages();
+ v8::base::Mutex& pages_mutex() const { return pages_mutex_; }
bool is_compactable() const { return is_compactable_; }
@@ -57,7 +58,7 @@ class V8_EXPORT_PRIVATE BaseSpace {
private:
RawHeap* heap_;
Pages pages_;
- v8::base::Mutex pages_mutex_;
+ mutable v8::base::Mutex pages_mutex_;
const size_t index_;
const PageType type_;
const bool is_compactable_;
diff --git a/deps/v8/src/heap/cppgc/heap-state.cc b/deps/v8/src/heap/cppgc/heap-state.cc
index 364f03c643..756c19aa8f 100644
--- a/deps/v8/src/heap/cppgc/heap-state.cc
+++ b/deps/v8/src/heap/cppgc/heap-state.cc
@@ -22,6 +22,13 @@ bool HeapState::IsSweeping(const HeapHandle& heap_handle) {
}
// static
+bool HeapState::IsSweepingOnOwningThread(const HeapHandle& heap_handle) {
+ return internal::HeapBase::From(heap_handle)
+ .sweeper()
+ .IsSweepingOnMutatorThread();
+}
+
+// static
bool HeapState::IsInAtomicPause(const HeapHandle& heap_handle) {
return internal::HeapBase::From(heap_handle).in_atomic_pause();
}
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index a453e847e3..26500a9ca8 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -62,8 +62,8 @@ namespace internal {
namespace {
-void CheckConfig(Heap::Config config, Heap::MarkingType marking_support,
- Heap::SweepingType sweeping_support) {
+void CheckConfig(Heap::Config config, HeapBase::MarkingType marking_support,
+ HeapBase::SweepingType sweeping_support) {
CHECK_WITH_MSG(
(config.collection_type != Heap::Config::CollectionType::kMinor) ||
(config.stack_state == Heap::Config::StackState::kNoHeapPointers),
@@ -78,23 +78,29 @@ void CheckConfig(Heap::Config config, Heap::MarkingType marking_support,
Heap::Heap(std::shared_ptr<cppgc::Platform> platform,
cppgc::Heap::HeapOptions options)
- : HeapBase(platform, options.custom_spaces, options.stack_support),
+ : HeapBase(platform, options.custom_spaces, options.stack_support,
+ options.marking_support, options.sweeping_support),
gc_invoker_(this, platform_.get(), options.stack_support),
growing_(&gc_invoker_, stats_collector_.get(),
options.resource_constraints, options.marking_support,
- options.sweeping_support),
- marking_support_(options.marking_support),
- sweeping_support_(options.sweeping_support) {
- CHECK_IMPLIES(options.marking_support != MarkingType::kAtomic,
+ options.sweeping_support) {
+ CHECK_IMPLIES(options.marking_support != HeapBase::MarkingType::kAtomic,
platform_->GetForegroundTaskRunner());
- CHECK_IMPLIES(options.sweeping_support != SweepingType::kAtomic,
+ CHECK_IMPLIES(options.sweeping_support != HeapBase::SweepingType::kAtomic,
platform_->GetForegroundTaskRunner());
}
Heap::~Heap() {
- subtle::NoGarbageCollectionScope no_gc(*this);
- // Finish already running GC if any, but don't finalize live objects.
- sweeper_.FinishIfRunning();
+ // Gracefully finish already running GC if any, but don't finalize live
+ // objects.
+ FinalizeIncrementalGarbageCollectionIfRunning(
+ {Config::CollectionType::kMajor,
+ Config::StackState::kMayContainHeapPointers,
+ Config::MarkingType::kAtomic, Config::SweepingType::kAtomic});
+ {
+ subtle::NoGarbageCollectionScope no_gc(*this);
+ sweeper_.FinishIfRunning();
+ }
}
void Heap::CollectGarbage(Config config) {
@@ -114,7 +120,7 @@ void Heap::CollectGarbage(Config config) {
void Heap::StartIncrementalGarbageCollection(Config config) {
DCHECK_NE(Config::MarkingType::kAtomic, config.marking_type);
- DCHECK_NE(marking_support_, MarkingType::kAtomic);
+ DCHECK_NE(marking_support_, Config::MarkingType::kAtomic);
CheckConfig(config, marking_support_, sweeping_support_);
if (IsMarking() || in_no_gc_scope()) return;
@@ -125,7 +131,6 @@ void Heap::StartIncrementalGarbageCollection(Config config) {
}
void Heap::FinalizeIncrementalGarbageCollectionIfRunning(Config config) {
- DCHECK_NE(marking_support_, MarkingType::kAtomic);
CheckConfig(config, marking_support_, sweeping_support_);
if (!IsMarking()) return;
diff --git a/deps/v8/src/heap/cppgc/heap.h b/deps/v8/src/heap/cppgc/heap.h
index b57e40b13b..c3504073bc 100644
--- a/deps/v8/src/heap/cppgc/heap.h
+++ b/deps/v8/src/heap/cppgc/heap.h
@@ -53,9 +53,6 @@ class V8_EXPORT_PRIVATE Heap final : public HeapBase,
GCInvoker gc_invoker_;
HeapGrowing growing_;
- const MarkingType marking_support_;
- const SweepingType sweeping_support_;
-
size_t epoch_ = 0;
};
diff --git a/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc b/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
index 0eae47e59d..ce7d1aadc3 100644
--- a/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
+++ b/deps/v8/src/heap/cppgc/incremental-marking-schedule.cc
@@ -30,7 +30,6 @@ void IncrementalMarkingSchedule::UpdateMutatorThreadMarkedBytes(
void IncrementalMarkingSchedule::AddConcurrentlyMarkedBytes(
size_t marked_bytes) {
- DCHECK(!incremental_marking_start_time_.IsNull());
concurrently_marked_bytes_.fetch_add(marked_bytes, std::memory_order_relaxed);
}
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index 0410a4eaea..e792c4c844 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -240,6 +240,7 @@ void MarkerBase::StartMarking() {
MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
mutator_marking_state_.Publish();
concurrent_marker_->Start();
+ concurrent_marking_active_ = true;
}
incremental_marking_allocation_observer_ =
std::make_unique<IncrementalMarkingAllocationObserver>(*this);
@@ -255,8 +256,9 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
StatsCollector::kMarkAtomicPrologue);
if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
- // Cancel remaining concurrent/incremental tasks.
- concurrent_marker_->Cancel();
+ // Cancel remaining incremental tasks. Concurrent marking jobs are left to
+ // run in parallel with the atomic pause until the mutator thread runs out
+ // of work.
incremental_marking_handle_.Cancel();
heap().stats_collector()->UnregisterObserver(
incremental_marking_allocation_observer_.get());
@@ -276,6 +278,17 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
MarkNotFullyConstructedObjects();
}
}
+ if (heap().marking_support() ==
+ MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ // Start parallel marking.
+ mutator_marking_state_.Publish();
+ if (concurrent_marking_active_) {
+ concurrent_marker_->NotifyIncrementalMutatorStepCompleted();
+ } else {
+ concurrent_marker_->Start();
+ concurrent_marking_active_ = true;
+ }
+ }
}
void MarkerBase::LeaveAtomicPause() {
@@ -414,6 +427,16 @@ void MarkerBase::AdvanceMarkingOnAllocation() {
}
}
+bool MarkerBase::CancelConcurrentMarkingIfNeeded() {
+ if (config_.marking_type != MarkingConfig::MarkingType::kAtomic ||
+ !concurrent_marking_active_)
+ return false;
+
+ concurrent_marker_->Cancel();
+ concurrent_marking_active_ = false;
+ return true;
+}
+
bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
size_t marked_bytes_limit) {
bool is_done = false;
@@ -433,6 +456,9 @@ bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
// adjustment.
is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
}
+ if (is_done && CancelConcurrentMarkingIfNeeded()) {
+ is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
+ }
schedule_.UpdateMutatorThreadMarkedBytes(
mutator_marking_state_.marked_bytes());
}
@@ -592,13 +618,6 @@ void MarkerBase::WaitForConcurrentMarkingForTesting() {
concurrent_marker_->JoinForTesting();
}
-void MarkerBase::NotifyCompactionCancelled() {
- // Compaction cannot be cancelled while concurrent marking is active.
- DCHECK_EQ(MarkingConfig::MarkingType::kAtomic, config_.marking_type);
- DCHECK_IMPLIES(concurrent_marker_, !concurrent_marker_->IsActive());
- mutator_marking_state_.NotifyCompactionCancelled();
-}
-
Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
MarkingConfig config)
: MarkerBase(key, heap, platform, config),
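A minimal sketch of the new atomic-pause flow: the concurrent job keeps running until the mutator drains its own worklist, and cancelling it may hand leftover items back, so the worklist is processed once more before marking reports completion. The Marker struct and int work items below are stand-ins, not the real MarkerBase.

#include <deque>

struct Marker {
  std::deque<int> mutator_worklist;
  std::deque<int> concurrent_worklist;
  bool concurrent_active = true;  // Assume a concurrent job was started.

  bool ProcessWorklists() {  // Drain the mutator-side work.
    while (!mutator_worklist.empty()) mutator_worklist.pop_front();
    return true;
  }

  bool CancelConcurrentIfNeeded() {  // Mirrors CancelConcurrentMarkingIfNeeded().
    if (!concurrent_active) return false;
    // Cancelling hands any leftover concurrent work back to the mutator.
    mutator_worklist.insert(mutator_worklist.end(), concurrent_worklist.begin(),
                            concurrent_worklist.end());
    concurrent_worklist.clear();
    concurrent_active = false;
    return true;
  }

  bool AdvanceMarking() {
    bool is_done = ProcessWorklists();
    if (is_done && CancelConcurrentIfNeeded()) {
      // The cancelled job may have republished items; drain them too.
      is_done = ProcessWorklists();
    }
    return is_done;
  }
};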
diff --git a/deps/v8/src/heap/cppgc/marker.h b/deps/v8/src/heap/cppgc/marker.h
index f1aa37965a..1f76583177 100644
--- a/deps/v8/src/heap/cppgc/marker.h
+++ b/deps/v8/src/heap/cppgc/marker.h
@@ -133,8 +133,6 @@ class V8_EXPORT_PRIVATE MarkerBase {
void WaitForConcurrentMarkingForTesting();
- void NotifyCompactionCancelled();
-
bool IsMarking() const { return is_marking_; }
protected:
@@ -173,6 +171,8 @@ class V8_EXPORT_PRIVATE MarkerBase {
void AdvanceMarkingOnAllocation();
+ bool CancelConcurrentMarkingIfNeeded();
+
HeapBase& heap_;
MarkingConfig config_ = MarkingConfig::Default();
@@ -189,6 +189,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
IncrementalMarkingSchedule schedule_;
std::unique_ptr<ConcurrentMarkerBase> concurrent_marker_{nullptr};
+ bool concurrent_marking_active_ = false;
bool main_marking_disabled_for_testing_{false};
bool visited_cross_thread_persistents_in_atomic_pause_{false};
diff --git a/deps/v8/src/heap/cppgc/marking-state.h b/deps/v8/src/heap/cppgc/marking-state.h
index 5f6f0aba37..513c781b96 100644
--- a/deps/v8/src/heap/cppgc/marking-state.h
+++ b/deps/v8/src/heap/cppgc/marking-state.h
@@ -110,12 +110,6 @@ class MarkingStateBase {
return movable_slots_worklist_.get();
}
- void NotifyCompactionCancelled() {
- DCHECK(IsCompactionEnabled());
- movable_slots_worklist_->Clear();
- movable_slots_worklist_.reset();
- }
-
bool DidDiscoverNewEphemeronPairs() const {
return discovered_new_ephemeron_pairs_;
}
@@ -415,15 +409,17 @@ void MutatorMarkingState::InvokeWeakRootsCallbackIfNeeded(
#if DEBUG
const HeapObjectHeader& header =
HeapObjectHeader::FromObject(desc.base_object_payload);
- DCHECK_IMPLIES(header.IsInConstruction(), header.IsMarked());
+ DCHECK_IMPLIES(header.IsInConstruction(),
+ header.IsMarked<AccessMode::kAtomic>());
#endif // DEBUG
weak_callback(LivenessBrokerFactory::Create(), parameter);
}
bool MutatorMarkingState::IsMarkedWeakContainer(HeapObjectHeader& header) {
- const bool result = weak_containers_worklist_.Contains(&header) &&
- !recently_retraced_weak_containers_.Contains(&header);
- DCHECK_IMPLIES(result, header.IsMarked());
+ const bool result =
+ weak_containers_worklist_.Contains<AccessMode::kAtomic>(&header) &&
+ !recently_retraced_weak_containers_.Contains(&header);
+ DCHECK_IMPLIES(result, header.IsMarked<AccessMode::kAtomic>());
DCHECK_IMPLIES(result, !header.IsInConstruction());
return result;
}
@@ -493,7 +489,7 @@ template <AccessMode mode>
void DynamicallyTraceMarkedObject(Visitor& visitor,
const HeapObjectHeader& header) {
DCHECK(!header.IsInConstruction<mode>());
- DCHECK(header.IsMarked<mode>());
+ DCHECK(header.IsMarked<AccessMode::kAtomic>());
header.Trace<mode>(&visitor);
}
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.cc b/deps/v8/src/heap/cppgc/marking-verifier.cc
index 05a6e23df8..feb009708d 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.cc
+++ b/deps/v8/src/heap/cppgc/marking-verifier.cc
@@ -45,6 +45,13 @@ void MarkingVerifierBase::Run(
Heap::Config::StackState stack_state, uintptr_t stack_end,
v8::base::Optional<size_t> expected_marked_bytes) {
Traverse(heap_.raw_heap());
+// Avoid verifying the stack when running with TSAN as the TSAN runtime changes
+// stack contents when e.g. working with locks. Specifically, the marker uses
+// locks in slow path operations which results in stack changes throughout
+// marking. This means that the conservative iteration below may find more
+// objects than the regular marker. The difference is benign as the delta of
+// objects is not reachable from user code but it prevents verification.
+#if !defined(THREAD_SANITIZER)
if (stack_state == Heap::Config::StackState::kMayContainHeapPointers) {
in_construction_objects_ = &in_construction_objects_stack_;
heap_.stack()->IteratePointersUnsafe(this, stack_end);
@@ -58,6 +65,7 @@ void MarkingVerifierBase::Run(
in_construction_objects_heap_.find(header));
}
}
+#endif // !defined(THREAD_SANITIZER)
if (expected_marked_bytes && verifier_found_marked_bytes_are_exact_) {
CHECK_EQ(expected_marked_bytes.value(), verifier_found_marked_bytes_);
}
@@ -124,7 +132,7 @@ bool MarkingVerifierBase::VisitHeapObjectHeader(HeapObjectHeader& header) {
}
verifier_found_marked_bytes_ +=
- ObjectView(header).Size() + sizeof(HeapObjectHeader);
+ ObjectView<>(header).Size() + sizeof(HeapObjectHeader);
verification_state_.SetCurrentParent(nullptr);
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.cc b/deps/v8/src/heap/cppgc/marking-visitor.cc
index a740d33a84..f2dff286cd 100644
--- a/deps/v8/src/heap/cppgc/marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc/marking-visitor.cc
@@ -4,6 +4,7 @@
#include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marking-state.h"
@@ -54,7 +55,7 @@ ConservativeMarkingVisitor::ConservativeMarkingVisitor(
void ConservativeMarkingVisitor::VisitFullyConstructedConservatively(
HeapObjectHeader& header) {
- if (header.IsMarked()) {
+ if (header.IsMarked<AccessMode::kAtomic>()) {
if (marking_state_.IsMarkedWeakContainer(header))
marking_state_.ReTraceMarkedWeakContainer(visitor_, header);
return;
diff --git a/deps/v8/src/heap/cppgc/object-poisoner.h b/deps/v8/src/heap/cppgc/object-poisoner.h
index 3b738eaeb6..2bcb3caf94 100644
--- a/deps/v8/src/heap/cppgc/object-poisoner.h
+++ b/deps/v8/src/heap/cppgc/object-poisoner.h
@@ -24,7 +24,8 @@ class UnmarkedObjectsPoisoner : public HeapVisitor<UnmarkedObjectsPoisoner> {
bool VisitHeapObjectHeader(HeapObjectHeader& header) {
if (header.IsFree() || header.IsMarked()) return true;
- ASAN_POISON_MEMORY_REGION(header.ObjectStart(), ObjectView(header).Size());
+ ASAN_POISON_MEMORY_REGION(header.ObjectStart(),
+ ObjectView<>(header).Size());
return true;
}
};
diff --git a/deps/v8/src/heap/cppgc/object-size-trait.cc b/deps/v8/src/heap/cppgc/object-size-trait.cc
index 000b8eef9d..c1713f2402 100644
--- a/deps/v8/src/heap/cppgc/object-size-trait.cc
+++ b/deps/v8/src/heap/cppgc/object-size-trait.cc
@@ -14,8 +14,8 @@ namespace internal {
// static
size_t BaseObjectSizeTrait::GetObjectSizeForGarbageCollected(
const void* object) {
- return ObjectView(HeapObjectHeader::FromObject(object))
- .Size<AccessMode::kAtomic>();
+ return ObjectView<AccessMode::kAtomic>(HeapObjectHeader::FromObject(object))
+ .Size();
}
// static
@@ -26,7 +26,7 @@ size_t BaseObjectSizeTrait::GetObjectSizeForGarbageCollectedMixin(
const auto& header =
BasePage::FromPayload(address)
->ObjectHeaderFromInnerAddress<AccessMode::kAtomic>(address);
- DCHECK(!header.IsLargeObject());
+ DCHECK(!header.IsLargeObject<AccessMode::kAtomic>());
return header.ObjectSize<AccessMode::kAtomic>();
}
diff --git a/deps/v8/src/heap/cppgc/object-view.h b/deps/v8/src/heap/cppgc/object-view.h
index 98b378c5ac..159ee901d1 100644
--- a/deps/v8/src/heap/cppgc/object-view.h
+++ b/deps/v8/src/heap/cppgc/object-view.h
@@ -15,13 +15,13 @@ namespace internal {
// ObjectView allows accessing a header within the bounds of the actual object.
// It is not exposed externally and does not keep the underlying object alive.
+template <AccessMode = AccessMode::kNonAtomic>
class ObjectView final {
public:
V8_INLINE explicit ObjectView(const HeapObjectHeader& header);
V8_INLINE Address Start() const;
V8_INLINE ConstAddress End() const;
- template <AccessMode = AccessMode::kNonAtomic>
V8_INLINE size_t Size() const;
private:
@@ -30,25 +30,30 @@ class ObjectView final {
const bool is_large_object_;
};
-ObjectView::ObjectView(const HeapObjectHeader& header)
+template <AccessMode access_mode>
+ObjectView<access_mode>::ObjectView(const HeapObjectHeader& header)
: header_(header),
base_page_(
BasePage::FromPayload(const_cast<HeapObjectHeader*>(&header_))),
- is_large_object_(header_.IsLargeObject()) {
+ is_large_object_(header_.IsLargeObject<access_mode>()) {
DCHECK_EQ(Start() + Size(), End());
}
-Address ObjectView::Start() const { return header_.ObjectStart(); }
+template <AccessMode access_mode>
+Address ObjectView<access_mode>::Start() const {
+ return header_.ObjectStart();
+}
-ConstAddress ObjectView::End() const {
+template <AccessMode access_mode>
+ConstAddress ObjectView<access_mode>::End() const {
return is_large_object_ ? LargePage::From(base_page_)->PayloadEnd()
: header_.ObjectEnd();
}
-template <AccessMode mode>
-size_t ObjectView::Size() const {
+template <AccessMode access_mode>
+size_t ObjectView<access_mode>::Size() const {
return is_large_object_ ? LargePage::From(base_page_)->ObjectSize()
- : header_.ObjectSize<mode>();
+ : header_.ObjectSize<access_mode>();
}
} // namespace internal
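Usage sketch for the templated ObjectView above, assuming the cppgc internal headers are available: the access mode is now chosen once at construction instead of per Size() call.

// Mutator-side callers keep the non-atomic default.
size_t SizeOnMutator(const HeapObjectHeader& header) {
  return ObjectView<>(header).Size();
}

// Concurrent callers (e.g. object-size-trait.cc above) pick atomic accesses
// for every field read done by the view.
size_t SizeOnConcurrentThread(const HeapObjectHeader& header) {
  return ObjectView<AccessMode::kAtomic>(header).Size();
}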
diff --git a/deps/v8/src/heap/cppgc/persistent-node.cc b/deps/v8/src/heap/cppgc/persistent-node.cc
index ac6ffb624a..4ff93958d7 100644
--- a/deps/v8/src/heap/cppgc/persistent-node.cc
+++ b/deps/v8/src/heap/cppgc/persistent-node.cc
@@ -63,10 +63,10 @@ size_t PersistentRegionBase::NodesInUse() const {
return nodes_in_use_;
}
-void PersistentRegionBase::EnsureNodeSlots() {
+void PersistentRegionBase::RefillFreeList() {
auto node_slots = std::make_unique<PersistentNodeSlots>();
if (!node_slots.get()) {
- oom_handler_("Oilpan: PersistentRegionBase::EnsureNodeSlots()");
+ oom_handler_("Oilpan: PersistentRegionBase::RefillFreeList()");
}
nodes_.push_back(std::move(node_slots));
for (auto& node : *nodes_.back()) {
@@ -75,6 +75,14 @@ void PersistentRegionBase::EnsureNodeSlots() {
}
}
+PersistentNode* PersistentRegionBase::RefillFreeListAndAllocateNode(
+ void* owner, TraceCallback trace) {
+ RefillFreeList();
+ auto* node = TryAllocateNodeFromFreeList(owner, trace);
+ CPPGC_DCHECK(node);
+ return node;
+}
+
void PersistentRegionBase::Trace(Visitor* visitor) {
free_list_head_ = nullptr;
for (auto& slots : nodes_) {
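A minimal sketch of the fast-path/slow-path split introduced by RefillFreeListAndAllocateNode() above: the common case stays a cheap free-list pop, and the refill plus retry only runs when the list is empty. Node, Region, and kSlotsPerChunk below are simplified stand-ins for the persistent-node machinery.

#include <cassert>
#include <memory>
#include <vector>

struct Node {
  Node* next = nullptr;
};

class Region {
 public:
  Node* Allocate() {
    if (Node* n = TryAllocateFromFreeList()) return n;  // fast path, inlineable
    return RefillFreeListAndAllocate();                 // slow path, out of line
  }

 private:
  Node* TryAllocateFromFreeList() {
    Node* n = free_list_head_;
    if (n) free_list_head_ = n->next;
    return n;
  }

  Node* RefillFreeListAndAllocate() {
    RefillFreeList();
    Node* n = TryAllocateFromFreeList();
    assert(n);  // The refill just populated the free list.
    return n;
  }

  void RefillFreeList() {
    slots_.push_back(std::make_unique<Node[]>(kSlotsPerChunk));
    Node* chunk = slots_.back().get();
    for (size_t i = 0; i < kSlotsPerChunk; ++i) {
      chunk[i].next = free_list_head_;
      free_list_head_ = &chunk[i];
    }
  }

  static constexpr size_t kSlotsPerChunk = 256;
  std::vector<std::unique_ptr<Node[]>> slots_;
  Node* free_list_head_ = nullptr;
};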
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
index 9f641d6f4b..1a4c60e3a2 100644
--- a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
@@ -52,7 +52,9 @@ void PreFinalizerHandler::RegisterPrefinalizer(PreFinalizer pre_finalizer) {
}
void PreFinalizerHandler::InvokePreFinalizers() {
- StatsCollector::DisabledScope stats_scope(
+ StatsCollector::EnabledScope stats_scope(heap_.stats_collector(),
+ StatsCollector::kAtomicSweep);
+ StatsCollector::EnabledScope nested_stats_scope(
heap_.stats_collector(), StatsCollector::kSweepInvokePreFinalizers);
DCHECK(CurrentThreadIsCreationThread());
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index 06f1ffcad0..b063b26f04 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -136,11 +136,15 @@ class ThreadSafeStack {
void Push(T t) {
v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
vector_.push_back(std::move(t));
+ is_empty_.store(false, std::memory_order_relaxed);
}
Optional<T> Pop() {
v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
- if (vector_.empty()) return v8::base::nullopt;
+ if (vector_.empty()) {
+ is_empty_.store(true, std::memory_order_relaxed);
+ return v8::base::nullopt;
+ }
T top = std::move(vector_.back());
vector_.pop_back();
// std::move is redundant but is needed to avoid the bug in gcc-7.
@@ -151,22 +155,28 @@ class ThreadSafeStack {
void Insert(It begin, It end) {
v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
vector_.insert(vector_.end(), begin, end);
+ is_empty_.store(false, std::memory_order_relaxed);
}
- bool IsEmpty() const {
- v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
- return vector_.empty();
- }
+ bool IsEmpty() const { return is_empty_.load(std::memory_order_relaxed); }
private:
std::vector<T> vector_;
mutable v8::base::Mutex mutex_;
+ std::atomic<bool> is_empty_{true};
};
struct SpaceState {
struct SweptPageState {
BasePage* page = nullptr;
+#if defined(CPPGC_CAGED_HEAP)
+ // The list of unfinalized objects may be extremely big. To save on space,
+ // if the cage is enabled, the list of unfinalized objects is stored inline in
+ // HeapObjectHeader.
+ HeapObjectHeader* unfinalized_objects_head = nullptr;
+#else // !defined(CPPGC_CAGED_HEAP)
std::vector<HeapObjectHeader*> unfinalized_objects;
+#endif // !defined(CPPGC_CAGED_HEAP)
FreeList cached_free_list;
std::vector<FreeList::Block> unfinalized_free_list;
bool is_empty = false;
@@ -230,7 +240,18 @@ class DeferredFinalizationBuilder final : public FreeHandler {
void AddFinalizer(HeapObjectHeader* header, size_t size) {
if (header->IsFinalizable()) {
+#if defined(CPPGC_CAGED_HEAP)
+ if (!current_unfinalized_) {
+ DCHECK_NULL(result_.unfinalized_objects_head);
+ current_unfinalized_ = header;
+ result_.unfinalized_objects_head = header;
+ } else {
+ current_unfinalized_->SetNextUnfinalized(header);
+ current_unfinalized_ = header;
+ }
+#else // !defined(CPPGC_CAGED_HEAP)
result_.unfinalized_objects.push_back({header});
+#endif // !defined(CPPGC_CAGED_HEAP)
found_finalizer_ = true;
} else {
SetMemoryInaccessible(header, size);
@@ -254,6 +275,7 @@ class DeferredFinalizationBuilder final : public FreeHandler {
private:
ResultType result_;
+ HeapObjectHeader* current_unfinalized_ = nullptr;
bool found_finalizer_ = false;
};
@@ -369,11 +391,27 @@ class SweepFinalizer final {
BasePage* page = page_state->page;
// Call finalizers.
- for (HeapObjectHeader* object : page_state->unfinalized_objects) {
- const size_t size = object->AllocatedSize();
- object->Finalize();
- SetMemoryInaccessible(object, size);
+ const auto finalize_header = [](HeapObjectHeader* header) {
+ const size_t size = header->AllocatedSize();
+ header->Finalize();
+ SetMemoryInaccessible(header, size);
+ };
+#if defined(CPPGC_CAGED_HEAP)
+ const uint64_t cage_base =
+ reinterpret_cast<uint64_t>(page->heap().caged_heap().base());
+ HeapObjectHeader* next_unfinalized = nullptr;
+
+ for (auto* unfinalized_header = page_state->unfinalized_objects_head;
+ unfinalized_header; unfinalized_header = next_unfinalized) {
+ next_unfinalized = unfinalized_header->GetNextUnfinalized(cage_base);
+ finalize_header(unfinalized_header);
+ }
+#else // !defined(CPPGC_CAGED_HEAP)
+ for (HeapObjectHeader* unfinalized_header :
+ page_state->unfinalized_objects) {
+ finalize_header(unfinalized_header);
}
+#endif // !defined(CPPGC_CAGED_HEAP)
// Unmap page if empty.
if (page_state->is_empty) {
@@ -576,10 +614,15 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
page.space().AddPage(&page);
return true;
}
+#if defined(CPPGC_CAGED_HEAP)
+ HeapObjectHeader* const unfinalized_objects =
+ header->IsFinalizable() ? page.ObjectHeader() : nullptr;
+#else // !defined(CPPGC_CAGED_HEAP)
std::vector<HeapObjectHeader*> unfinalized_objects;
if (header->IsFinalizable()) {
unfinalized_objects.push_back(page.ObjectHeader());
}
+#endif // !defined(CPPGC_CAGED_HEAP)
const size_t space_index = page.space().index();
DCHECK_GT(states_->size(), space_index);
SpaceState& state = (*states_)[space_index];
@@ -611,9 +654,15 @@ class PrepareForSweepVisitor final
PrepareForSweepVisitor(SpaceStates* states,
CompactableSpaceHandling compactable_space_handling)
: states_(states),
- compactable_space_handling_(compactable_space_handling) {}
+ compactable_space_handling_(compactable_space_handling) {
+ DCHECK_NOT_NULL(states);
+ }
- void Run(RawHeap& raw_heap) { Traverse(raw_heap); }
+ void Run(RawHeap& raw_heap) {
+ DCHECK(states_->empty());
+ *states_ = SpaceStates(raw_heap.size());
+ Traverse(raw_heap);
+ }
protected:
bool VisitNormalPageSpace(NormalPageSpace& space) {
@@ -655,9 +704,7 @@ class Sweeper::SweeperImpl final {
public:
SweeperImpl(RawHeap& heap, StatsCollector* stats_collector)
- : heap_(heap),
- stats_collector_(stats_collector),
- space_states_(heap.size()) {}
+ : heap_(heap), stats_collector_(stats_collector) {}
~SweeperImpl() { CancelSweepers(); }
@@ -704,14 +751,21 @@ class Sweeper::SweeperImpl final {
// allocate new memory.
if (is_sweeping_on_mutator_thread_) return false;
+ SpaceState& space_state = space_states_[space->index()];
+
+ // Bail out if there are no pages to be processed for the space at this
+ // moment.
+ if (space_state.swept_unfinalized_pages.IsEmpty() &&
+ space_state.unswept_pages.IsEmpty()) {
+ return false;
+ }
+
StatsCollector::EnabledScope stats_scope(stats_collector_,
StatsCollector::kIncrementalSweep);
StatsCollector::EnabledScope inner_scope(
stats_collector_, StatsCollector::kSweepOnAllocation);
MutatorThreadSweepingScope sweeping_in_progress(*this);
- SpaceState& space_state = space_states_[space->index()];
-
{
// First, process unfinalized pages as finalizing a page is faster than
// sweeping.
@@ -777,6 +831,10 @@ class Sweeper::SweeperImpl final {
void FinalizeSweep() {
// Synchronize with the concurrent sweeper and call remaining finalizers.
SynchronizeAndFinalizeConcurrentSweeping();
+
+ // Clear space taken up by sweeper metadata.
+ space_states_.clear();
+
platform_ = nullptr;
is_in_progress_ = false;
notify_done_pending_ = true;
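A minimal sketch of the ThreadSafeStack change above: IsEmpty() becomes a relaxed atomic flag so SweepForAllocationIfRunning() can skip spaces with no pending work without taking the lock; the flag can be momentarily stale, which is acceptable because it is only a hint and Pop() still locks. std::mutex/std::optional stand in for the v8::base types.

#include <atomic>
#include <mutex>
#include <optional>
#include <vector>

template <typename T>
class ThreadSafeStack {
 public:
  void Push(T value) {
    std::lock_guard<std::mutex> lock(mutex_);
    vector_.push_back(std::move(value));
    is_empty_.store(false, std::memory_order_relaxed);
  }

  std::optional<T> Pop() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (vector_.empty()) {
      is_empty_.store(true, std::memory_order_relaxed);
      return std::nullopt;
    }
    T top = std::move(vector_.back());
    vector_.pop_back();
    return top;
  }

  // Lock-free hint: may lag behind Push()/Pop() but never blocks the caller.
  bool IsEmpty() const { return is_empty_.load(std::memory_order_relaxed); }

 private:
  std::vector<T> vector_;
  mutable std::mutex mutex_;
  std::atomic<bool> is_empty_{true};
};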
diff --git a/deps/v8/src/heap/cppgc/visitor.cc b/deps/v8/src/heap/cppgc/visitor.cc
index 2f786b99ac..b4cdee7a53 100644
--- a/deps/v8/src/heap/cppgc/visitor.cc
+++ b/deps/v8/src/heap/cppgc/visitor.cc
@@ -32,7 +32,7 @@ namespace {
void TraceConservatively(ConservativeTracingVisitor* conservative_visitor,
const HeapObjectHeader& header) {
- const auto object_view = ObjectView(header);
+ const auto object_view = ObjectView<>(header);
Address* object = reinterpret_cast<Address*>(object_view.Start());
for (size_t i = 0; i < (object_view.Size() / sizeof(Address)); ++i) {
Address maybe_ptr = object[i];
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index b18e82d1c0..08738af3f0 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -15,6 +15,7 @@ namespace v8 {
namespace internal {
void LocalEmbedderHeapTracer::SetRemoteTracer(EmbedderHeapTracer* tracer) {
+ CHECK_NULL(cpp_heap_);
if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
remote_tracer_ = tracer;
@@ -23,21 +24,49 @@ void LocalEmbedderHeapTracer::SetRemoteTracer(EmbedderHeapTracer* tracer) {
remote_tracer_->isolate_ = reinterpret_cast<v8::Isolate*>(isolate_);
}
+void LocalEmbedderHeapTracer::SetCppHeap(CppHeap* cpp_heap) {
+ CHECK_NULL(remote_tracer_);
+ cpp_heap_ = cpp_heap;
+}
+
+namespace {
+CppHeap::GarbageCollectionFlags ConvertTraceFlags(
+ EmbedderHeapTracer::TraceFlags flags) {
+ CppHeap::GarbageCollectionFlags result;
+ if (flags & EmbedderHeapTracer::TraceFlags::kForced)
+ result |= CppHeap::GarbageCollectionFlagValues::kForced;
+ if (flags & EmbedderHeapTracer::TraceFlags::kReduceMemory)
+ result |= CppHeap::GarbageCollectionFlagValues::kReduceMemory;
+ return result;
+}
+} // namespace
+
void LocalEmbedderHeapTracer::TracePrologue(
EmbedderHeapTracer::TraceFlags flags) {
if (!InUse()) return;
embedder_worklist_empty_ = false;
- remote_tracer_->TracePrologue(flags);
+ if (cpp_heap_)
+ cpp_heap()->TracePrologue(ConvertTraceFlags(flags));
+ else
+ remote_tracer_->TracePrologue(flags);
}
void LocalEmbedderHeapTracer::TraceEpilogue() {
if (!InUse()) return;
- EmbedderHeapTracer::TraceSummary summary;
- remote_tracer_->TraceEpilogue(&summary);
- if (summary.allocated_size == SIZE_MAX) return;
- UpdateRemoteStats(summary.allocated_size, summary.time);
+ // Resetting to state unknown as there may be follow up garbage collections
+ // triggered from callbacks that have a different stack state.
+ embedder_stack_state_ =
+ EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers;
+
+ if (cpp_heap_) {
+ cpp_heap()->TraceEpilogue();
+ } else {
+ EmbedderHeapTracer::TraceSummary summary;
+ remote_tracer_->TraceEpilogue(&summary);
+ UpdateRemoteStats(summary.allocated_size, summary.time);
+ }
}
void LocalEmbedderHeapTracer::UpdateRemoteStats(size_t allocated_size,
@@ -55,21 +84,24 @@ void LocalEmbedderHeapTracer::UpdateRemoteStats(size_t allocated_size,
void LocalEmbedderHeapTracer::EnterFinalPause() {
if (!InUse()) return;
- remote_tracer_->EnterFinalPause(embedder_stack_state_);
- // Resetting to state unknown as there may be follow up garbage collections
- // triggered from callbacks that have a different stack state.
- embedder_stack_state_ =
- EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers;
+ if (cpp_heap_)
+ cpp_heap()->EnterFinalPause(embedder_stack_state_);
+ else
+ remote_tracer_->EnterFinalPause(embedder_stack_state_);
}
-bool LocalEmbedderHeapTracer::Trace(double deadline) {
+bool LocalEmbedderHeapTracer::Trace(double max_duration) {
if (!InUse()) return true;
- return remote_tracer_->AdvanceTracing(deadline);
+ if (cpp_heap_)
+ return cpp_heap()->AdvanceTracing(max_duration);
+ else
+ return remote_tracer_->AdvanceTracing(max_duration);
}
bool LocalEmbedderHeapTracer::IsRemoteTracingDone() {
- return !InUse() || remote_tracer_->IsTracingDone();
+ return !InUse() || (cpp_heap_ ? cpp_heap()->IsTracingDone()
+ : remote_tracer_->IsTracingDone());
}
void LocalEmbedderHeapTracer::SetEmbedderStackStateForNextFinalization(
@@ -107,13 +139,16 @@ bool ExtractWrappableInfo(Isolate* isolate, JSObject js_object,
LocalEmbedderHeapTracer::ProcessingScope::ProcessingScope(
LocalEmbedderHeapTracer* tracer)
- : tracer_(tracer), wrapper_descriptor_(tracer->wrapper_descriptor_) {
+ : tracer_(tracer), wrapper_descriptor_(tracer->wrapper_descriptor()) {
wrapper_cache_.reserve(kWrapperCacheSize);
}
LocalEmbedderHeapTracer::ProcessingScope::~ProcessingScope() {
if (!wrapper_cache_.empty()) {
- tracer_->remote_tracer()->RegisterV8References(std::move(wrapper_cache_));
+ if (tracer_->cpp_heap_)
+ tracer_->cpp_heap()->RegisterV8References(std::move(wrapper_cache_));
+ else
+ tracer_->remote_tracer_->RegisterV8References(std::move(wrapper_cache_));
}
}
@@ -121,7 +156,7 @@ LocalEmbedderHeapTracer::WrapperInfo
LocalEmbedderHeapTracer::ExtractWrapperInfo(Isolate* isolate,
JSObject js_object) {
WrapperInfo info;
- if (ExtractWrappableInfo(isolate, js_object, wrapper_descriptor_, &info)) {
+ if (ExtractWrappableInfo(isolate, js_object, wrapper_descriptor(), &info)) {
return info;
}
return {nullptr, nullptr};
@@ -140,7 +175,10 @@ void LocalEmbedderHeapTracer::ProcessingScope::TracePossibleWrapper(
void LocalEmbedderHeapTracer::ProcessingScope::FlushWrapperCacheIfFull() {
if (wrapper_cache_.size() == wrapper_cache_.capacity()) {
- tracer_->remote_tracer()->RegisterV8References(std::move(wrapper_cache_));
+ if (tracer_->cpp_heap_)
+ tracer_->cpp_heap()->RegisterV8References(std::move(wrapper_cache_));
+ else
+ tracer_->remote_tracer_->RegisterV8References(std::move(wrapper_cache_));
wrapper_cache_.clear();
wrapper_cache_.reserve(kWrapperCacheSize);
}
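A minimal sketch of the flag translation done by ConvertTraceFlags() above: the embedder-facing and CppHeap-facing flag sets use different bit positions, so each bit is mapped explicitly rather than reinterpreting the raw value. The enums below are hypothetical simplifications, not the real TraceFlags/GarbageCollectionFlags values.

#include <cstdint>

// Hypothetical flag sets standing in for EmbedderHeapTracer::TraceFlags and
// CppHeap::GarbageCollectionFlags.
enum EmbedderFlags : uint32_t { kNoFlags = 0, kForced = 1 << 1, kReduceMemory = 1 << 2 };
enum class HeapFlags : uint32_t { kNone = 0, kForced = 1 << 0, kReduceMemory = 1 << 1 };

inline HeapFlags operator|(HeapFlags a, HeapFlags b) {
  return static_cast<HeapFlags>(static_cast<uint32_t>(a) | static_cast<uint32_t>(b));
}
inline HeapFlags& operator|=(HeapFlags& a, HeapFlags b) { return a = a | b; }

// Bit positions differ between the two flag sets, so translate bit by bit
// instead of casting the raw value (the shape of ConvertTraceFlags()).
HeapFlags Convert(uint32_t embedder_flags) {
  HeapFlags result = HeapFlags::kNone;
  if (embedder_flags & kForced) result |= HeapFlags::kForced;
  if (embedder_flags & kReduceMemory) result |= HeapFlags::kReduceMemory;
  return result;
}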
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 1f15a7e826..6b08488aa6 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -9,7 +9,9 @@
#include "include/v8-embedder-heap.h"
#include "include/v8-traced-handle.h"
#include "src/common/globals.h"
+#include "src/execution/isolate.h"
#include "src/flags/flags.h"
+#include "src/heap/cppgc-js/cpp-heap.h"
namespace v8 {
namespace internal {
@@ -76,12 +78,19 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
~LocalEmbedderHeapTracer() {
if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
+ // CppHeap is not detached from Isolate here. Detaching is done explicitly
+ // on Isolate/Heap/CppHeap destruction.
}
- bool InUse() const { return remote_tracer_ != nullptr; }
- EmbedderHeapTracer* remote_tracer() const { return remote_tracer_; }
+ bool InUse() const { return cpp_heap_ || (remote_tracer_ != nullptr); }
+ // This method doesn't take CppHeap into account.
+ EmbedderHeapTracer* remote_tracer() const {
+ DCHECK_NULL(cpp_heap_);
+ return remote_tracer_;
+ }
void SetRemoteTracer(EmbedderHeapTracer* tracer);
+ void SetCppHeap(CppHeap* cpp_heap);
void TracePrologue(EmbedderHeapTracer::TraceFlags flags);
void TraceEpilogue();
void EnterFinalPause();
@@ -124,6 +133,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
WrapperInfo ExtractWrapperInfo(Isolate* isolate, JSObject js_object);
void SetWrapperDescriptor(const WrapperDescriptor& wrapper_descriptor) {
+ DCHECK_NULL(cpp_heap_);
wrapper_descriptor_ = wrapper_descriptor;
}
@@ -135,6 +145,10 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
void NotifyEmptyEmbedderStack();
+ EmbedderHeapTracer::EmbedderStackState embedder_stack_state() const {
+ return embedder_stack_state_;
+ }
+
private:
static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
@@ -150,8 +164,23 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
WrapperDescriptor::kUnknownEmbedderId);
}
+ CppHeap* cpp_heap() {
+ DCHECK_NOT_NULL(cpp_heap_);
+ DCHECK_NULL(remote_tracer_);
+ DCHECK_IMPLIES(isolate_, cpp_heap_ == isolate_->heap()->cpp_heap());
+ return cpp_heap_;
+ }
+
+ WrapperDescriptor wrapper_descriptor() {
+ if (cpp_heap_)
+ return cpp_heap()->wrapper_descriptor();
+ else
+ return wrapper_descriptor_;
+ }
+
Isolate* const isolate_;
EmbedderHeapTracer* remote_tracer_ = nullptr;
+ CppHeap* cpp_heap_ = nullptr;
DefaultEmbedderRootsHandler default_embedder_roots_handler_;
EmbedderHeapTracer::EmbedderStackState embedder_stack_state_ =
diff --git a/deps/v8/src/heap/factory-base.cc b/deps/v8/src/heap/factory-base.cc
index 794322e6dd..ff1056ee57 100644
--- a/deps/v8/src/heap/factory-base.cc
+++ b/deps/v8/src/heap/factory-base.cc
@@ -81,7 +81,10 @@ Handle<CodeDataContainer> FactoryBase<Impl>::NewCodeDataContainer(
SKIP_WRITE_BARRIER);
data_container.set_kind_specific_flags(flags, kRelaxedStore);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- impl()->SetExternalCodeSpaceInDataContainer(data_container);
+ Isolate* isolate_for_heap_sandbox = impl()->isolate_for_heap_sandbox();
+ data_container.AllocateExternalPointerEntries(isolate_for_heap_sandbox);
+ data_container.set_raw_code(Smi::zero(), SKIP_WRITE_BARRIER);
+ data_container.set_code_entry_point(isolate_for_heap_sandbox, kNullAddress);
}
data_container.clear_padding();
return handle(data_container, isolate());
@@ -306,6 +309,21 @@ Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfoForLiteral(
}
template <typename Impl>
+Handle<SharedFunctionInfo> FactoryBase<Impl>::CloneSharedFunctionInfo(
+ Handle<SharedFunctionInfo> other) {
+ Map map = read_only_roots().shared_function_info_map();
+
+ SharedFunctionInfo shared =
+ SharedFunctionInfo::cast(NewWithImmortalMap(map, AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+
+ shared.CopyFrom(*other);
+ shared.clear_padding();
+
+ return handle(shared, isolate());
+}
+
+template <typename Impl>
Handle<PreparseData> FactoryBase<Impl>::NewPreparseData(int data_length,
int children_length) {
int size = PreparseData::SizeFor(data_length, children_length);
@@ -340,6 +358,29 @@ FactoryBase<Impl>::NewUncompiledDataWithPreparseData(
}
template <typename Impl>
+Handle<UncompiledDataWithoutPreparseDataWithJob>
+FactoryBase<Impl>::NewUncompiledDataWithoutPreparseDataWithJob(
+ Handle<String> inferred_name, int32_t start_position,
+ int32_t end_position) {
+ return TorqueGeneratedFactory<
+ Impl>::NewUncompiledDataWithoutPreparseDataWithJob(inferred_name,
+ start_position,
+ end_position,
+ kNullAddress,
+ AllocationType::kOld);
+}
+
+template <typename Impl>
+Handle<UncompiledDataWithPreparseDataAndJob>
+FactoryBase<Impl>::NewUncompiledDataWithPreparseDataAndJob(
+ Handle<String> inferred_name, int32_t start_position, int32_t end_position,
+ Handle<PreparseData> preparse_data) {
+ return TorqueGeneratedFactory<Impl>::NewUncompiledDataWithPreparseDataAndJob(
+ inferred_name, start_position, end_position, preparse_data, kNullAddress,
+ AllocationType::kOld);
+}
+
+template <typename Impl>
Handle<SharedFunctionInfo> FactoryBase<Impl>::NewSharedFunctionInfo(
MaybeHandle<String> maybe_name, MaybeHandle<HeapObject> maybe_function_data,
Builtin builtin, FunctionKind kind) {
@@ -578,19 +619,22 @@ Handle<SeqTwoByteString> FactoryBase<Impl>::NewTwoByteInternalizedString(
}
template <typename Impl>
-MaybeHandle<SeqOneByteString> FactoryBase<Impl>::NewRawOneByteString(
- int length, AllocationType allocation) {
+template <typename SeqStringT>
+MaybeHandle<SeqStringT> FactoryBase<Impl>::NewRawStringWithMap(
+ int length, Map map, AllocationType allocation) {
+ DCHECK(SeqStringT::IsCompatibleMap(map, read_only_roots()));
+ DCHECK_IMPLIES(!StringShape(map).IsShared(),
+ RefineAllocationTypeForInPlaceInternalizableString(
+ allocation, map) == allocation);
if (length > String::kMaxLength || length < 0) {
- THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqOneByteString);
+ THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqStringT);
}
DCHECK_GT(length, 0); // Use Factory::empty_string() instead.
- int size = SeqOneByteString::SizeFor(length);
- DCHECK_GE(SeqOneByteString::kMaxSize, size);
+ int size = SeqStringT::SizeFor(length);
+ DCHECK_GE(SeqStringT::kMaxSize, size);
- Map map = read_only_roots().one_byte_string_map();
- SeqOneByteString string = SeqOneByteString::cast(AllocateRawWithImmortalMap(
- size, RefineAllocationTypeForInPlaceInternalizableString(allocation, map),
- map));
+ SeqStringT string =
+ SeqStringT::cast(AllocateRawWithImmortalMap(size, allocation, map));
DisallowGarbageCollection no_gc;
string.set_length(length);
string.set_raw_hash_field(String::kEmptyHashField);
@@ -599,24 +643,37 @@ MaybeHandle<SeqOneByteString> FactoryBase<Impl>::NewRawOneByteString(
}
template <typename Impl>
-MaybeHandle<SeqTwoByteString> FactoryBase<Impl>::NewRawTwoByteString(
+MaybeHandle<SeqOneByteString> FactoryBase<Impl>::NewRawOneByteString(
int length, AllocationType allocation) {
- if (length > String::kMaxLength || length < 0) {
- THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqTwoByteString);
- }
- DCHECK_GT(length, 0); // Use Factory::empty_string() instead.
- int size = SeqTwoByteString::SizeFor(length);
- DCHECK_GE(SeqTwoByteString::kMaxSize, size);
+ Map map = read_only_roots().one_byte_string_map();
+ return NewRawStringWithMap<SeqOneByteString>(
+ length, map,
+ RefineAllocationTypeForInPlaceInternalizableString(allocation, map));
+}
+template <typename Impl>
+MaybeHandle<SeqTwoByteString> FactoryBase<Impl>::NewRawTwoByteString(
+ int length, AllocationType allocation) {
Map map = read_only_roots().string_map();
- SeqTwoByteString string = SeqTwoByteString::cast(AllocateRawWithImmortalMap(
- size, RefineAllocationTypeForInPlaceInternalizableString(allocation, map),
- map));
- DisallowGarbageCollection no_gc;
- string.set_length(length);
- string.set_raw_hash_field(String::kEmptyHashField);
- DCHECK_EQ(size, string.Size());
- return handle(string, isolate());
+ return NewRawStringWithMap<SeqTwoByteString>(
+ length, map,
+ RefineAllocationTypeForInPlaceInternalizableString(allocation, map));
+}
+
+template <typename Impl>
+MaybeHandle<SeqOneByteString> FactoryBase<Impl>::NewRawSharedOneByteString(
+ int length) {
+ return NewRawStringWithMap<SeqOneByteString>(
+ length, read_only_roots().shared_one_byte_string_map(),
+ AllocationType::kSharedOld);
+}
+
+template <typename Impl>
+MaybeHandle<SeqTwoByteString> FactoryBase<Impl>::NewRawSharedTwoByteString(
+ int length) {
+ return NewRawStringWithMap<SeqTwoByteString>(
+ length, read_only_roots().shared_string_map(),
+ AllocationType::kSharedOld);
}
template <typename Impl>
@@ -964,9 +1021,11 @@ MaybeHandle<Map> FactoryBase<Impl>::GetInPlaceInternalizedStringMap(
MaybeHandle<Map> map;
switch (instance_type) {
case STRING_TYPE:
+ case SHARED_STRING_TYPE:
map = read_only_roots().internalized_string_map_handle();
break;
case ONE_BYTE_STRING_TYPE:
+ case SHARED_ONE_BYTE_STRING_TYPE:
map = read_only_roots().one_byte_internalized_string_map_handle();
break;
case EXTERNAL_STRING_TYPE:
@@ -984,6 +1043,25 @@ MaybeHandle<Map> FactoryBase<Impl>::GetInPlaceInternalizedStringMap(
}
template <typename Impl>
+Handle<Map> FactoryBase<Impl>::GetStringMigrationSentinelMap(
+ InstanceType from_string_type) {
+ Handle<Map> map;
+ switch (from_string_type) {
+ case SHARED_STRING_TYPE:
+ map = read_only_roots().seq_string_migration_sentinel_map_handle();
+ break;
+ case SHARED_ONE_BYTE_STRING_TYPE:
+ map =
+ read_only_roots().one_byte_seq_string_migration_sentinel_map_handle();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ DCHECK_EQ(map->instance_type(), from_string_type);
+ return map;
+}
+
+template <typename Impl>
AllocationType
FactoryBase<Impl>::RefineAllocationTypeForInPlaceInternalizableString(
AllocationType allocation, Map string_map) {
diff --git a/deps/v8/src/heap/factory-base.h b/deps/v8/src/heap/factory-base.h
index ba44404b32..c3aa816d0b 100644
--- a/deps/v8/src/heap/factory-base.h
+++ b/deps/v8/src/heap/factory-base.h
@@ -162,6 +162,11 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
FunctionLiteral* literal, Handle<Script> script, bool is_toplevel);
+ // Create a copy of a given SharedFunctionInfo for use as a placeholder in
+ // off-thread compilation
+ Handle<SharedFunctionInfo> CloneSharedFunctionInfo(
+ Handle<SharedFunctionInfo> other);
+
Handle<PreparseData> NewPreparseData(int data_length, int children_length);
Handle<UncompiledDataWithoutPreparseData>
@@ -173,6 +178,17 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<String> inferred_name, int32_t start_position,
int32_t end_position, Handle<PreparseData>);
+ Handle<UncompiledDataWithoutPreparseDataWithJob>
+ NewUncompiledDataWithoutPreparseDataWithJob(Handle<String> inferred_name,
+ int32_t start_position,
+ int32_t end_position);
+
+ Handle<UncompiledDataWithPreparseDataAndJob>
+ NewUncompiledDataWithPreparseDataAndJob(Handle<String> inferred_name,
+ int32_t start_position,
+ int32_t end_position,
+ Handle<PreparseData>);
+
// Allocates a FeedbackMetadata object and zeroes the data section.
Handle<FeedbackMetadata> NewFeedbackMetadata(
int slot_count, int create_closure_slot_count,
@@ -214,6 +230,11 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<String> left, Handle<String> right, int length, bool one_byte,
AllocationType allocation = AllocationType::kYoung);
+ V8_WARN_UNUSED_RESULT MaybeHandle<SeqOneByteString> NewRawSharedOneByteString(
+ int length);
+ V8_WARN_UNUSED_RESULT MaybeHandle<SeqTwoByteString> NewRawSharedTwoByteString(
+ int length);
+
// Allocates a new BigInt with {length} digits. Only to be used by
// MutableBigInt::New*.
Handle<FreshlyAllocatedBigInt> NewBigInt(
@@ -242,6 +263,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
MaybeHandle<Map> GetInPlaceInternalizedStringMap(Map from_string_map);
+ Handle<Map> GetStringMigrationSentinelMap(InstanceType from_string_type);
+
AllocationType RefineAllocationTypeForInPlaceInternalizableString(
AllocationType allocation, Map string_map);
@@ -259,7 +282,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
HeapObject AllocateRawWithImmortalMap(
int size, AllocationType allocation, Map map,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
HeapObject NewWithImmortalMap(Map map, AllocationType allocation);
Handle<FixedArray> NewFixedArrayWithFiller(Handle<Map> map, int length,
@@ -270,10 +293,14 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
MaybeHandle<String> maybe_name,
MaybeHandle<HeapObject> maybe_function_data, Builtin builtin,
- FunctionKind kind = kNormalFunction);
+ FunctionKind kind = FunctionKind::kNormalFunction);
Handle<String> MakeOrFindTwoCharacterString(uint16_t c1, uint16_t c2);
+ template <typename SeqStringT>
+ MaybeHandle<SeqStringT> NewRawStringWithMap(int length, Map map,
+ AllocationType allocation);
+
private:
friend class WebSnapshotDeserializer;
Impl* impl() { return static_cast<Impl*>(this); }
@@ -281,7 +308,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
ReadOnlyRoots read_only_roots() { return impl()->read_only_roots(); }
HeapObject AllocateRaw(int size, AllocationType allocation,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
friend TorqueGeneratedFactory<Impl>;
};
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index eddacd32c7..9e05c52472 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -378,10 +378,10 @@ HeapObject Factory::New(Handle<Map> map, AllocationType allocation) {
return result;
}
-Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
+Handle<HeapObject> Factory::NewFillerObject(int size,
+ AllocationAlignment alignment,
AllocationType allocation,
AllocationOrigin origin) {
- AllocationAlignment alignment = double_align ? kDoubleAligned : kWordAligned;
Heap* heap = isolate()->heap();
HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>(
size, allocation, origin, alignment);
@@ -877,12 +877,12 @@ namespace {
} // namespace
-StringInternalizationStrategy Factory::ComputeInternalizationStrategyForString(
+StringTransitionStrategy Factory::ComputeInternalizationStrategyForString(
Handle<String> string, MaybeHandle<Map>* internalized_map) {
// Do not internalize young strings in-place: This allows us to ignore both
// string table and stub cache on scavenges.
if (Heap::InYoungGeneration(*string)) {
- return StringInternalizationStrategy::kCopy;
+ return StringTransitionStrategy::kCopy;
}
DCHECK_NOT_NULL(internalized_map);
DisallowGarbageCollection no_gc;
@@ -892,12 +892,12 @@ StringInternalizationStrategy Factory::ComputeInternalizationStrategyForString(
Map map = string->map();
*internalized_map = GetInPlaceInternalizedStringMap(map);
if (!internalized_map->is_null()) {
- return StringInternalizationStrategy::kInPlace;
+ return StringTransitionStrategy::kInPlace;
}
if (InstanceTypeChecker::IsInternalizedString(map.instance_type())) {
- return StringInternalizationStrategy::kAlreadyInternalized;
+ return StringTransitionStrategy::kAlreadyTransitioned;
}
- return StringInternalizationStrategy::kCopy;
+ return StringTransitionStrategy::kCopy;
}
template <class StringClass>
@@ -921,6 +921,31 @@ template Handle<ExternalOneByteString>
template Handle<ExternalTwoByteString>
Factory::InternalizeExternalString<ExternalTwoByteString>(Handle<String>);
+StringTransitionStrategy Factory::ComputeSharingStrategyForString(
+ Handle<String> string, MaybeHandle<Map>* shared_map) {
+ DCHECK(FLAG_shared_string_table);
+ // Do not share young strings in-place: there is no shared young space.
+ if (Heap::InYoungGeneration(*string)) {
+ return StringTransitionStrategy::kCopy;
+ }
+ DCHECK_NOT_NULL(shared_map);
+ DisallowGarbageCollection no_gc;
+ InstanceType instance_type = string->map().instance_type();
+ if (StringShape(instance_type).IsShared()) {
+ return StringTransitionStrategy::kAlreadyTransitioned;
+ }
+ switch (instance_type) {
+ case STRING_TYPE:
+ *shared_map = read_only_roots().shared_string_map_handle();
+ return StringTransitionStrategy::kInPlace;
+ case ONE_BYTE_STRING_TYPE:
+ *shared_map = read_only_roots().shared_one_byte_string_map_handle();
+ return StringTransitionStrategy::kInPlace;
+ default:
+ return StringTransitionStrategy::kCopy;
+ }
+}
+
Handle<String> Factory::LookupSingleCharacterStringFromCode(uint16_t code) {
if (code <= unibrow::Latin1::kMaxChar) {
{
@@ -1346,14 +1371,6 @@ void Factory::AddToScriptList(Handle<Script> script) {
isolate()->heap()->set_script_list(*scripts);
}
-void Factory::SetExternalCodeSpaceInDataContainer(
- CodeDataContainer data_container) {
- DCHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- data_container.AllocateExternalPointerEntries(isolate());
- data_container.set_raw_code(Smi::zero(), SKIP_WRITE_BARRIER);
- data_container.set_code_entry_point(isolate(), kNullAddress);
-}
-
Handle<Script> Factory::CloneScript(Handle<Script> script) {
Heap* heap = isolate()->heap();
int script_id = isolate()->GetNextScriptId();
@@ -1445,20 +1462,36 @@ Handle<Foreign> Factory::NewForeign(Address addr) {
Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(
Address type_address, Handle<Map> opt_parent, int instance_size_bytes,
Handle<WasmInstanceObject> instance) {
- // We pretenure WasmTypeInfo objects because they are refererenced by Maps,
- // which are assumed to be long-lived. The supertypes list is constant
- // after initialization, so we pretenure that too.
- // The subtypes list, however, is expected to grow (and hence be replaced),
- // so we don't pretenure it.
+ // We pretenure WasmTypeInfo objects for two reasons:
+ // (1) They are referenced by Maps, which are assumed to be long-lived,
+ // so pretenuring the WTI is a bit more efficient.
+ // (2) The object visitors need to read the WasmTypeInfo to find tagged
+ // fields in Wasm structs; in the middle of a GC cycle that's only
+ // safe to do if the WTI is in old space.
+ // The supertypes list is constant after initialization, so we pretenure
+ // that too. The subtypes list, however, is expected to grow (and hence be
+ // replaced), so we don't pretenure it.
Handle<ArrayList> subtypes = ArrayList::New(isolate(), 0);
Handle<FixedArray> supertypes;
if (opt_parent.is_null()) {
- supertypes = NewFixedArray(0);
+ supertypes = NewFixedArray(wasm::kMinimumSupertypeArraySize);
+ for (int i = 0; i < supertypes->length(); i++) {
+ supertypes->set(i, *undefined_value());
+ }
} else {
- supertypes = CopyArrayAndGrow(
- handle(opt_parent->wasm_type_info().supertypes(), isolate()), 1,
- AllocationType::kOld);
- supertypes->set(supertypes->length() - 1, *opt_parent);
+ Handle<FixedArray> parent_supertypes =
+ handle(opt_parent->wasm_type_info().supertypes(), isolate());
+ int last_defined_index = parent_supertypes->length() - 1;
+ while (last_defined_index >= 0 &&
+ parent_supertypes->get(last_defined_index).IsUndefined()) {
+ last_defined_index--;
+ }
+ if (last_defined_index == parent_supertypes->length() - 1) {
+ supertypes = CopyArrayAndGrow(parent_supertypes, 1, AllocationType::kOld);
+ } else {
+ supertypes = CopyFixedArray(parent_supertypes);
+ }
+ supertypes->set(last_defined_index + 1, *opt_parent);
}
Map map = *wasm_type_info_map();
WasmTypeInfo result = WasmTypeInfo::cast(AllocateRawWithImmortalMap(
@@ -1466,7 +1499,7 @@ Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(
DisallowGarbageCollection no_gc;
result.AllocateExternalPointerEntries(isolate());
result.set_foreign_address(isolate(), type_address);
- result.set_supertypes(*supertypes, SKIP_WRITE_BARRIER);
+ result.set_supertypes(*supertypes);
result.set_subtypes(*subtypes);
result.set_instance_size(instance_size_bytes);
result.set_instance(*instance);
@@ -1479,7 +1512,7 @@ Handle<WasmApiFunctionRef> Factory::NewWasmApiFunctionRef(
auto result = WasmApiFunctionRef::cast(AllocateRawWithImmortalMap(
map.instance_size(), AllocationType::kOld, map));
DisallowGarbageCollection no_gc;
- result.set_foreign_address(isolate(), isolate()->isolate_root());
+ result.set_isolate_root(isolate()->isolate_root());
result.set_native_context(*isolate()->native_context());
if (!callable.is_null()) {
result.set_callable(*callable);
@@ -1489,43 +1522,55 @@ Handle<WasmApiFunctionRef> Factory::NewWasmApiFunctionRef(
return handle(result, isolate());
}
+Handle<WasmInternalFunction> Factory::NewWasmInternalFunction(
+ Address opt_call_target, Handle<HeapObject> ref, Handle<Map> rtt) {
+ HeapObject raw = AllocateRaw(rtt->instance_size(), AllocationType::kOld);
+ raw.set_map_after_allocation(*rtt);
+ WasmInternalFunction result = WasmInternalFunction::cast(raw);
+ DisallowGarbageCollection no_gc;
+ result.AllocateExternalPointerEntries(isolate());
+ result.set_foreign_address(isolate(), opt_call_target);
+ result.set_ref(*ref);
+ // Default values, will be overwritten by the caller.
+ result.set_code(isolate()->builtins()->code(Builtin::kAbort));
+ result.set_external(*undefined_value());
+ return handle(result, isolate());
+}
+
Handle<WasmJSFunctionData> Factory::NewWasmJSFunctionData(
Address opt_call_target, Handle<JSReceiver> callable, int return_count,
int parameter_count, Handle<PodArray<wasm::ValueType>> serialized_sig,
- Handle<Code> wrapper_code) {
+ Handle<Code> wrapper_code, Handle<Map> rtt) {
Handle<WasmApiFunctionRef> ref = NewWasmApiFunctionRef(callable);
+ Handle<WasmInternalFunction> internal =
+ NewWasmInternalFunction(opt_call_target, ref, rtt);
Map map = *wasm_js_function_data_map();
WasmJSFunctionData result =
WasmJSFunctionData::cast(AllocateRawWithImmortalMap(
map.instance_size(), AllocationType::kOld, map));
DisallowGarbageCollection no_gc;
- result.AllocateExternalPointerEntries(isolate());
- result.set_foreign_address(isolate(), opt_call_target);
- result.set_ref(*ref);
+ result.set_internal(*internal);
result.set_wrapper_code(*wrapper_code);
result.set_serialized_return_count(return_count);
result.set_serialized_parameter_count(parameter_count);
result.set_serialized_signature(*serialized_sig);
- // Default value, will be overwritten by the caller.
- result.set_wasm_to_js_wrapper_code(
- isolate()->builtins()->code(Builtin::kAbort));
return handle(result, isolate());
}
Handle<WasmExportedFunctionData> Factory::NewWasmExportedFunctionData(
Handle<Code> export_wrapper, Handle<WasmInstanceObject> instance,
Address call_target, Handle<Object> ref, int func_index,
- Address sig_address, int wrapper_budget) {
+ Address sig_address, int wrapper_budget, Handle<Map> rtt) {
Handle<Foreign> sig_foreign = NewForeign(sig_address);
+ Handle<WasmInternalFunction> internal =
+ NewWasmInternalFunction(call_target, Handle<HeapObject>::cast(ref), rtt);
Map map = *wasm_exported_function_data_map();
WasmExportedFunctionData result =
WasmExportedFunctionData::cast(AllocateRawWithImmortalMap(
map.instance_size(), AllocationType::kOld, map));
DisallowGarbageCollection no_gc;
- result.AllocateExternalPointerEntries(isolate());
- result.set_foreign_address(isolate(), call_target);
DCHECK(ref->IsWasmInstanceObject() || ref->IsWasmApiFunctionRef());
- result.set_ref(*ref);
+ result.set_internal(*internal);
result.set_wrapper_code(*export_wrapper);
result.set_instance(*instance);
result.set_function_index(func_index);
@@ -1539,17 +1584,17 @@ Handle<WasmExportedFunctionData> Factory::NewWasmExportedFunctionData(
Handle<WasmCapiFunctionData> Factory::NewWasmCapiFunctionData(
Address call_target, Handle<Foreign> embedder_data,
- Handle<Code> wrapper_code,
+ Handle<Code> wrapper_code, Handle<Map> rtt,
Handle<PodArray<wasm::ValueType>> serialized_sig) {
Handle<WasmApiFunctionRef> ref = NewWasmApiFunctionRef(Handle<JSReceiver>());
+ Handle<WasmInternalFunction> internal =
+ NewWasmInternalFunction(call_target, ref, rtt);
Map map = *wasm_capi_function_data_map();
WasmCapiFunctionData result =
WasmCapiFunctionData::cast(AllocateRawWithImmortalMap(
map.instance_size(), AllocationType::kOld, map));
DisallowGarbageCollection no_gc;
- result.AllocateExternalPointerEntries(isolate());
- result.set_foreign_address(isolate(), call_target);
- result.set_ref(*ref);
+ result.set_internal(*internal);
result.set_wrapper_code(*wrapper_code);
result.set_embedder_data(*embedder_data);
result.set_serialized_signature(*serialized_sig);
@@ -1617,7 +1662,8 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWasmJSFunction(
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWasmCapiFunction(
Handle<WasmCapiFunctionData> data) {
return NewSharedFunctionInfo(MaybeHandle<String>(), data,
- Builtin::kNoBuiltinId, kConciseMethod);
+ Builtin::kNoBuiltinId,
+ FunctionKind::kConciseMethod);
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -1672,8 +1718,9 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
PropertyCell cell = PropertyCell::cast(AllocateRawWithImmortalMap(
PropertyCell::kSize, allocation, *global_property_cell_map()));
DisallowGarbageCollection no_gc;
- cell.set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
- SKIP_WRITE_BARRIER);
+ cell.set_dependent_code(
+ DependentCode::empty_dependent_code(ReadOnlyRoots(isolate())),
+ SKIP_WRITE_BARRIER);
WriteBarrierMode mode = allocation == AllocationType::kYoung
? SKIP_WRITE_BARRIER
: UPDATE_WRITE_BARRIER;
@@ -1765,8 +1812,9 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
map.set_prototype_validity_cell(Smi::FromInt(Map::kPrototypeChainValid),
SKIP_WRITE_BARRIER);
}
- map.set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
- SKIP_WRITE_BARRIER);
+ map.set_dependent_code(
+ DependentCode::empty_dependent_code(ReadOnlyRoots(isolate())),
+ SKIP_WRITE_BARRIER);
map.set_raw_transitions(MaybeObject::FromSmi(Smi::zero()),
SKIP_WRITE_BARRIER);
map.SetInObjectUnusedPropertyFields(inobject_properties);
@@ -2163,6 +2211,12 @@ Handle<JSObject> Factory::NewExternal(void* value) {
return external;
}
+Handle<DeoptimizationLiteralArray> Factory::NewDeoptimizationLiteralArray(
+ int length) {
+ return Handle<DeoptimizationLiteralArray>::cast(
+ NewWeakFixedArray(length, AllocationType::kOld));
+}
+
Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Address off_heap_entry) {
CHECK_NOT_NULL(isolate()->embedded_blob_code());
@@ -2309,12 +2363,10 @@ Handle<JSObject> Factory::NewSlowJSObjectWithNullProto() {
}
Handle<JSObject> Factory::NewJSObjectWithNullProto() {
- Handle<JSObject> result = NewJSObject(isolate()->object_function());
- Handle<Map> new_map = Map::Copy(
- isolate(), Handle<Map>(result->map(), isolate()), "ObjectWithNullProto");
- Map::SetPrototype(isolate(), new_map, null_value());
- JSObject::MigrateToMap(isolate(), result, new_map);
- return result;
+ Handle<Map> map(isolate()->object_function()->initial_map(), isolate());
+ Handle<Map> map_with_null_proto =
+ Map::TransitionToPrototype(isolate(), map, null_value());
+ return NewJSObjectFromMap(map_with_null_proto);
}
Handle<JSGlobalObject> Factory::NewJSGlobalObject(
@@ -2350,8 +2402,8 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
for (InternalIndex i : map->IterateOwnDescriptors()) {
PropertyDetails details = descs->GetDetails(i);
// Only accessors are expected.
- DCHECK_EQ(kAccessor, details.kind());
- PropertyDetails d(kAccessor, details.attributes(),
+ DCHECK_EQ(PropertyKind::kAccessor, details.kind());
+ PropertyDetails d(PropertyKind::kAccessor, details.attributes(),
PropertyCellType::kMutable);
Handle<Name> name(descs->GetKey(i), isolate());
Handle<Object> value(descs->GetStrongValue(i), isolate());
@@ -2814,6 +2866,7 @@ Handle<JSArrayBufferView> Factory::NewJSArrayBufferView(
raw.set_buffer(*buffer, SKIP_WRITE_BARRIER);
raw.set_byte_offset(byte_offset);
raw.set_byte_length(byte_length);
+ raw.set_bit_field(0);
ZeroEmbedderFields(raw);
DCHECK_EQ(raw.GetEmbedderFieldCount(),
v8::ArrayBufferView::kEmbedderFieldCount);
@@ -2869,6 +2922,9 @@ Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
map, empty_fixed_array(), buffer, byte_offset, byte_length));
obj->set_data_pointer(
isolate(), static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
+ // TODO(v8:11111): Support creating length tracking DataViews via the API.
+ obj->set_is_length_tracking(false);
+ obj->set_is_backed_by_rab(!buffer->is_shared() && buffer->is_resizable());
return obj;
}
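
The two new setters above encode a simple invariant: a fixed-length DataView counts as RAB-backed only when its buffer is resizable and not shared. Below is a minimal standalone sketch of that predicate; BufferState and IsBackedByRab are illustrative names, not V8 API.

#include <cassert>

// Illustrative stand-in for the buffer state consulted above; not a V8 type.
struct BufferState {
  bool is_shared;     // shared (growable) buffer
  bool is_resizable;  // resizable ArrayBuffer (RAB) or growable shared buffer
};

// Mirrors the expression passed to set_is_backed_by_rab() in the hunk above.
constexpr bool IsBackedByRab(BufferState buffer) {
  return !buffer.is_shared && buffer.is_resizable;
}

int main() {
  BufferState resizable{false, true};
  BufferState shared_growable{true, true};
  BufferState fixed_length{false, false};
  assert(IsBackedByRab(resizable));        // resizable, non-shared: RAB-backed
  assert(!IsBackedByRab(shared_growable)); // growable shared buffer: not RAB
  assert(!IsBackedByRab(fixed_length));    // fixed-length buffer: not RAB
  return 0;
}
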
@@ -3763,7 +3819,7 @@ Handle<JSFunction> Factory::JSFunctionBuilder::Build() {
PrepareMap();
PrepareFeedbackCell();
- Handle<Code> code = handle(sfi_->GetCode(), isolate_);
+ Handle<Code> code = handle(FromCodeT(sfi_->GetCode()), isolate_);
Handle<JSFunction> result = BuildRaw(code);
if (code->kind() == CodeKind::BASELINE) {
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index f620f5eb3c..a5dd9ce5a9 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -278,15 +278,15 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Compute the internalization strategy for the input string.
//
- // Old-generation flat strings can be internalized by mutating their map
- // return kInPlace, along with the matching internalized string map for string
- // is stored in internalized_map.
+ // Old-generation sequential strings can be internalized by mutating their map
+ // and return kInPlace, along with the matching internalized string map for
+  // the string stored in internalized_map.
//
- // Internalized strings return kAlreadyInternalized.
+ // Internalized strings return kAlreadyTransitioned.
//
// All other strings are internalized by flattening and copying and return
// kCopy.
- V8_WARN_UNUSED_RESULT StringInternalizationStrategy
+ V8_WARN_UNUSED_RESULT StringTransitionStrategy
ComputeInternalizationStrategyForString(Handle<String> string,
MaybeHandle<Map>* internalized_map);
@@ -295,6 +295,20 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
template <class StringClass>
Handle<StringClass> InternalizeExternalString(Handle<String> string);
+ // Compute the sharing strategy for the input string.
+ //
+ // Old-generation sequential and thin strings can be shared by mutating their
+ // map and return kInPlace, along with the matching shared string map for the
+ // string stored in shared_map.
+ //
+ // Already-shared strings return kAlreadyTransitioned.
+ //
+ // All other strings are shared by flattening and copying into a sequential
+ // string then sharing that sequential string, and return kCopy.
+ V8_WARN_UNUSED_RESULT StringTransitionStrategy
+ ComputeSharingStrategyForString(Handle<String> string,
+ MaybeHandle<Map>* shared_map);
+
// Creates a single character string where the character has given code.
// A cache is used for Latin1 codes.
Handle<String> LookupSingleCharacterStringFromCode(uint16_t code);
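
The two comment blocks above describe the same three-way decision: transition the string in place, do nothing because it already transitioned, or flatten and copy. The following standalone model restates that decision with simplified inputs; StringState and ComputeStrategy are illustrative stand-ins for the real string and map checks, and only the enumerator names come from the header.

#include <cassert>

// Conceptual model of the transition strategy documented above.
enum class StringTransitionStrategy { kCopy, kInPlace, kAlreadyTransitioned };

// Simplified inputs standing in for the real heap/representation checks.
struct StringState {
  bool already_transitioned;  // already internalized / already shared
  bool sequential_or_thin;    // representation that permits a map swap
  bool in_old_generation;     // young strings may still move, so copy instead
};

StringTransitionStrategy ComputeStrategy(const StringState& s) {
  if (s.already_transitioned)
    return StringTransitionStrategy::kAlreadyTransitioned;
  if (s.in_old_generation && s.sequential_or_thin)
    return StringTransitionStrategy::kInPlace;
  return StringTransitionStrategy::kCopy;  // flatten + copy, then transition
}

int main() {
  StringState old_sequential{false, true, true};
  StringState young_cons{false, false, false};
  assert(ComputeStrategy(old_sequential) == StringTransitionStrategy::kInPlace);
  assert(ComputeStrategy(young_cons) == StringTransitionStrategy::kCopy);
  return 0;
}
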
@@ -430,7 +444,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Allocate a block of memory of the given AllocationType (filled with a
// filler). Used as a fall-back for generated code when the space is full.
Handle<HeapObject> NewFillerObject(
- int size, bool double_align, AllocationType allocation,
+ int size, AllocationAlignment alignment, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime);
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@@ -570,21 +584,24 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<Map> opt_parent,
int instance_size_bytes,
Handle<WasmInstanceObject> instance);
+ Handle<WasmInternalFunction> NewWasmInternalFunction(Address opt_call_target,
+ Handle<HeapObject> ref,
+ Handle<Map> rtt);
Handle<WasmCapiFunctionData> NewWasmCapiFunctionData(
Address call_target, Handle<Foreign> embedder_data,
- Handle<Code> wrapper_code,
+ Handle<Code> wrapper_code, Handle<Map> rtt,
Handle<PodArray<wasm::ValueType>> serialized_sig);
Handle<WasmExportedFunctionData> NewWasmExportedFunctionData(
Handle<Code> export_wrapper, Handle<WasmInstanceObject> instance,
Address call_target, Handle<Object> ref, int func_index,
- Address sig_address, int wrapper_budget);
+ Address sig_address, int wrapper_budget, Handle<Map> rtt);
Handle<WasmApiFunctionRef> NewWasmApiFunctionRef(Handle<JSReceiver> callable);
// {opt_call_target} is kNullAddress for JavaScript functions, and
// non-null for exported Wasm functions.
Handle<WasmJSFunctionData> NewWasmJSFunctionData(
Address opt_call_target, Handle<JSReceiver> callable, int return_count,
int parameter_count, Handle<PodArray<wasm::ValueType>> serialized_sig,
- Handle<Code> wrapper_code);
+ Handle<Code> wrapper_code, Handle<Map> rtt);
Handle<WasmStruct> NewWasmStruct(const wasm::StructType* type,
wasm::WasmValue* args, Handle<Map> map);
Handle<WasmArray> NewWasmArray(const wasm::ArrayType* type,
@@ -658,6 +675,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Create an External object for V8's external API.
Handle<JSObject> NewExternal(void* value);
+ Handle<DeoptimizationLiteralArray> NewDeoptimizationLiteralArray(int length);
+
// Allocates a new code object and initializes it as the trampoline to the
// given off-heap entry point.
Handle<Code> NewOffHeapTrampolineFor(Handle<Code> code,
@@ -723,7 +742,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<SharedFunctionInfo> NewSharedFunctionInfoForBuiltin(
MaybeHandle<String> name, Builtin builtin,
- FunctionKind kind = kNormalFunction);
+ FunctionKind kind = FunctionKind::kNormalFunction);
Handle<SharedFunctionInfo> NewSharedFunctionInfoForWebSnapshot();
@@ -973,7 +992,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// ------
// Customization points for FactoryBase
HeapObject AllocateRaw(int size, AllocationType allocation,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
Isolate* isolate() const {
// Downcast to the privately inherited sub-class using c-style casts to
@@ -982,12 +1001,22 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// NOLINTNEXTLINE (google-readability-casting)
return (Isolate*)this; // NOLINT(readability/casting)
}
+
+ // This is the real Isolate that will be used for allocating and accessing
+ // external pointer entries when V8_HEAP_SANDBOX is enabled.
+ Isolate* isolate_for_heap_sandbox() const {
+#ifdef V8_HEAP_SANDBOX
+ return isolate();
+#else
+ return nullptr;
+#endif // V8_HEAP_SANDBOX
+ }
+
bool CanAllocateInReadOnlySpace();
bool EmptyStringRootIsInitialized();
AllocationType AllocationTypeForInPlaceInternalizableString();
void AddToScriptList(Handle<Script> shared);
- void SetExternalCodeSpaceInDataContainer(CodeDataContainer data_container);
// ------
HeapObject AllocateRawWithAllocationSite(
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 8ddd177c6b..655930859a 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -93,7 +93,7 @@ GCTracer::Scope::~Scope() {
tracer_->AddScopeSample(scope_, duration_ms);
if (scope_ == ScopeId::MC_INCREMENTAL ||
scope_ == ScopeId::MC_INCREMENTAL_START ||
- scope_ == MC_INCREMENTAL_FINALIZE) {
+ scope_ == ScopeId::MC_INCREMENTAL_FINALIZE) {
auto* long_task_stats =
tracer_->heap_->isolate()->GetCurrentLongTaskStats();
long_task_stats->gc_full_incremental_wall_clock_duration_us +=
@@ -411,10 +411,11 @@ void GCTracer::Stop(GarbageCollector collector) {
heap_->UpdateTotalGCTime(duration);
- if ((current_.type == Event::SCAVENGER ||
- current_.type == Event::MINOR_MARK_COMPACTOR) &&
- FLAG_trace_gc_ignore_scavenger)
- return;
+ if (current_.type == Event::SCAVENGER ||
+ current_.type == Event::MINOR_MARK_COMPACTOR) {
+ ReportYoungCycleToRecorder();
+ if (FLAG_trace_gc_ignore_scavenger) return;
+ }
if (FLAG_trace_gc_nvp) {
PrintNVP();
@@ -562,11 +563,12 @@ void GCTracer::Print() const {
Output(
"[%d:%p] "
"%8.0f ms: "
- "%s%s %.1f (%.1f) -> %.1f (%.1f) MB, "
+ "%s%s%s %.1f (%.1f) -> %.1f (%.1f) MB, "
"%.1f / %.1f ms %s (average mu = %.3f, current mu = %.3f) %s %s\n",
base::OS::GetCurrentProcessId(),
reinterpret_cast<void*>(heap_->isolate()),
- heap_->isolate()->time_millis_since_init(), current_.TypeName(false),
+ heap_->isolate()->time_millis_since_init(),
+ heap_->IsShared() ? "Shared " : "", current_.TypeName(false),
current_.reduce_memory ? " (reduce)" : "",
static_cast<double>(current_.start_object_size) / MB,
static_cast<double>(current_.start_memory_size) / MB,
@@ -1444,5 +1446,39 @@ void GCTracer::ReportIncrementalMarkingStepToRecorder() {
}
}
+void GCTracer::ReportYoungCycleToRecorder() {
+ const std::shared_ptr<metrics::Recorder>& recorder =
+ heap_->isolate()->metrics_recorder();
+ DCHECK_NOT_NULL(recorder);
+ if (!recorder->HasEmbedderRecorder()) return;
+ v8::metrics::GarbageCollectionYoungCycle event;
+ // Total:
+ const double total_wall_clock_duration_in_us =
+ (current_.scopes[Scope::SCAVENGER] +
+ current_.scopes[Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL]) *
+ base::Time::kMicrosecondsPerMillisecond;
+ event.total_wall_clock_duration_in_us =
+ static_cast<int64_t>(total_wall_clock_duration_in_us);
+ // MainThread:
+ const double main_thread_wall_clock_duration_in_us =
+ current_.scopes[Scope::SCAVENGER] *
+ base::Time::kMicrosecondsPerMillisecond;
+ event.main_thread_wall_clock_duration_in_us =
+ static_cast<int64_t>(main_thread_wall_clock_duration_in_us);
+ // Collection Rate:
+ event.collection_rate_in_percent =
+ static_cast<double>(current_.survived_young_object_size) /
+ current_.young_object_size;
+ // Efficiency:
+ auto freed_bytes =
+ current_.young_object_size - current_.survived_young_object_size;
+ event.efficiency_in_bytes_per_us =
+ freed_bytes / total_wall_clock_duration_in_us;
+ event.main_thread_efficiency_in_bytes_per_us =
+ freed_bytes / main_thread_wall_clock_duration_in_us;
+
+ recorder->AddMainThreadEvent(event, GetContextId(heap_->isolate()));
+}
+
} // namespace internal
} // namespace v8
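
ReportYoungCycleToRecorder above is plain arithmetic over scope durations and byte counts: durations are converted from milliseconds to microseconds, the collection rate is the surviving fraction of the young generation, and efficiency is freed bytes per microsecond. A self-contained sketch of the same computations follows; the struct and function names are illustrative, not the v8::metrics types.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Illustrative result struct; not v8::metrics::GarbageCollectionYoungCycle.
struct YoungCycleMetrics {
  int64_t total_us;
  int64_t main_thread_us;
  double collection_rate;         // surviving fraction of the young generation
  double efficiency;              // freed bytes per microsecond, all threads
  double main_thread_efficiency;  // freed bytes per microsecond, main thread
};

YoungCycleMetrics ComputeYoungCycleMetrics(double total_ms, double main_thread_ms,
                                           size_t young_bytes,
                                           size_t survived_bytes) {
  const double kMicrosecondsPerMillisecond = 1000.0;
  const double total_us = total_ms * kMicrosecondsPerMillisecond;
  const double main_us = main_thread_ms * kMicrosecondsPerMillisecond;
  const double freed = static_cast<double>(young_bytes - survived_bytes);
  return {static_cast<int64_t>(total_us),
          static_cast<int64_t>(main_us),
          static_cast<double>(survived_bytes) / young_bytes,
          freed / total_us,
          freed / main_us};
}

int main() {
  // 2 MB young generation, 0.5 MB survives, 4 ms total of which 3 ms on main thread.
  YoungCycleMetrics m = ComputeYoungCycleMetrics(4.0, 3.0, 2u << 20, 512u << 10);
  assert(m.collection_rate == 0.25);
  assert(m.efficiency > 0 && m.main_thread_efficiency > m.efficiency);
  return 0;
}
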
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 6daeadc94b..2c9b7b01ec 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -75,8 +75,8 @@ class V8_EXPORT_PRIVATE GCTracer {
steps = 0;
}
- double duration;
- double longest_step;
+ double duration; // in ms
+ double longest_step; // in ms
int steps;
};
@@ -183,10 +183,11 @@ class V8_EXPORT_PRIVATE GCTracer {
// Bytes marked incrementally for INCREMENTAL_MARK_COMPACTOR
size_t incremental_marking_bytes;
- // Duration of incremental marking steps for INCREMENTAL_MARK_COMPACTOR.
+ // Duration (in ms) of incremental marking steps for
+ // INCREMENTAL_MARK_COMPACTOR.
double incremental_marking_duration;
- // Amounts of time spent in different scopes during GC.
+ // Amounts of time (in ms) spent in different scopes during GC.
double scopes[Scope::NUMBER_OF_SCOPES];
// Holds details for incremental marking scopes.
@@ -421,6 +422,7 @@ class V8_EXPORT_PRIVATE GCTracer {
void ReportFullCycleToRecorder();
void ReportIncrementalMarkingStepToRecorder();
+ void ReportYoungCycleToRecorder();
// Pointer to the heap that owns this tracer.
Heap* heap_;
@@ -436,8 +438,8 @@ class V8_EXPORT_PRIVATE GCTracer {
// the last mark compact GC.
size_t incremental_marking_bytes_;
- // Duration of incremental marking steps since the end of the last mark-
- // compact event.
+ // Duration (in ms) of incremental marking steps since the end of the last
+ // mark-compact event.
double incremental_marking_duration_;
double incremental_marking_start_time_;
@@ -460,7 +462,7 @@ class V8_EXPORT_PRIVATE GCTracer {
size_t old_generation_allocation_counter_bytes_;
size_t embedder_allocation_counter_bytes_;
- // Accumulated duration and allocated bytes since the last GC.
+ // Accumulated duration (in ms) and allocated bytes since the last GC.
double allocation_duration_since_gc_;
size_t new_space_allocation_in_bytes_since_gc_;
size_t old_generation_allocation_in_bytes_since_gc_;
diff --git a/deps/v8/src/heap/heap-controller.cc b/deps/v8/src/heap/heap-controller.cc
index 0854ceeb91..7651cebb24 100644
--- a/deps/v8/src/heap/heap-controller.cc
+++ b/deps/v8/src/heap/heap-controller.cc
@@ -145,10 +145,6 @@ size_t MemoryController<Trait>::CalculateAllocationLimit(
factor = 1.0 + FLAG_heap_growing_percent / 100.0;
}
- if (FLAG_heap_growing_percent > 0) {
- factor = 1.0 + FLAG_heap_growing_percent / 100.0;
- }
-
CHECK_LT(1.0, factor);
CHECK_LT(0, current_size);
const uint64_t limit =
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index edefe8e55d..68abf816b0 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -30,6 +30,7 @@
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/read-only-spaces.h"
+#include "src/heap/safepoint.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/third-party/heap-api.h"
#include "src/objects/allocation-site-inl.h"
@@ -206,7 +207,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap,
- alignment == AllocationAlignment::kWordAligned);
+ alignment == AllocationAlignment::kTaggedAligned);
DCHECK_EQ(gc_state(), NOT_IN_GC);
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
@@ -320,7 +321,7 @@ HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
DCHECK_EQ(gc_state(), NOT_IN_GC);
Heap* heap = isolate()->heap();
if (allocation == AllocationType::kYoung &&
- alignment == AllocationAlignment::kWordAligned &&
+ alignment == AllocationAlignment::kTaggedAligned &&
size <= MaxRegularHeapObjectSize(allocation) &&
V8_LIKELY(!FLAG_single_generation && FLAG_inline_new &&
FLAG_gc_interval == -1)) {
@@ -791,13 +792,15 @@ AlwaysAllocateScopeForTesting::AlwaysAllocateScopeForTesting(Heap* heap)
CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
: heap_(heap) {
+ DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
+ heap_->safepoint()->AssertActive();
if (heap_->write_protect_code_memory()) {
heap_->increment_code_space_memory_modification_scope_depth();
heap_->code_space()->SetCodeModificationPermissions();
LargePage* page = heap_->code_lo_space()->first_page();
while (page != nullptr) {
DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
+ DCHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetCodeModificationPermissions();
page = page->next_page();
}
@@ -811,7 +814,7 @@ CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
LargePage* page = heap_->code_lo_space()->first_page();
while (page != nullptr) {
DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
+ DCHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetDefaultCodePermissions();
page = page->next_page();
}
@@ -821,21 +824,17 @@ CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
CodePageCollectionMemoryModificationScope::
CodePageCollectionMemoryModificationScope(Heap* heap)
: heap_(heap) {
- if (heap_->write_protect_code_memory() &&
- !heap_->code_space_memory_modification_scope_depth()) {
- heap_->EnableUnprotectedMemoryChunksRegistry();
+ if (heap_->write_protect_code_memory()) {
heap_->IncrementCodePageCollectionMemoryModificationScopeDepth();
}
}
CodePageCollectionMemoryModificationScope::
~CodePageCollectionMemoryModificationScope() {
- if (heap_->write_protect_code_memory() &&
- !heap_->code_space_memory_modification_scope_depth()) {
+ if (heap_->write_protect_code_memory()) {
heap_->DecrementCodePageCollectionMemoryModificationScopeDepth();
if (heap_->code_page_collection_memory_modification_scope_depth() == 0) {
heap_->ProtectUnprotectedMemoryChunks();
- heap_->DisableUnprotectedMemoryChunksRegistry();
}
}
}
@@ -866,6 +865,16 @@ CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
}
}
+IgnoreLocalGCRequests::IgnoreLocalGCRequests(Heap* heap) : heap_(heap) {
+ DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
+ heap_->ignore_local_gc_requests_depth_++;
+}
+
+IgnoreLocalGCRequests::~IgnoreLocalGCRequests() {
+ DCHECK_GT(heap_->ignore_local_gc_requests_depth_, 0);
+ heap_->ignore_local_gc_requests_depth_--;
+}
+
} // namespace internal
} // namespace v8
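
IgnoreLocalGCRequests above is a straightforward RAII depth counter: the constructor bumps a per-heap counter, the destructor asserts it is positive and decrements it, and the flag is "on" while the depth is non-zero. Here is the same pattern in isolation, as a generic sketch rather than the V8 class.

#include <cassert>

// Generic RAII depth counter; the pointer target plays the role of
// ignore_local_gc_requests_depth_ in the hunk above.
class ScopedDepth {
 public:
  explicit ScopedDepth(int* depth) : depth_(depth) { ++*depth_; }
  ~ScopedDepth() {
    assert(*depth_ > 0);
    --*depth_;
  }
  ScopedDepth(const ScopedDepth&) = delete;
  ScopedDepth& operator=(const ScopedDepth&) = delete;

 private:
  int* depth_;
};

int main() {
  int depth = 0;
  {
    ScopedDepth outer(&depth);
    ScopedDepth inner(&depth);  // scopes nest; the flag stays set while depth > 0
    assert(depth == 2);
  }
  assert(depth == 0);  // requests are honored again once every scope has exited
  return 0;
}
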
diff --git a/deps/v8/src/heap/heap-layout-tracer.cc b/deps/v8/src/heap/heap-layout-tracer.cc
new file mode 100644
index 0000000000..53ac5726a7
--- /dev/null
+++ b/deps/v8/src/heap/heap-layout-tracer.cc
@@ -0,0 +1,73 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/heap-layout-tracer.h"
+
+#include <iostream>
+
+#include "src/heap/new-spaces.h"
+#include "src/heap/paged-spaces.h"
+#include "src/heap/read-only-spaces.h"
+#include "src/heap/spaces-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+void HeapLayoutTracer::GCProloguePrintHeapLayout(v8::Isolate* isolate,
+ v8::GCType gc_type,
+ v8::GCCallbackFlags flags,
+ void* data) {
+ Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
+ PrintF("Before GC:%d,", heap->gc_count());
+ PrintF("collector_name:%s\n", Heap::CollectorName(gc_type));
+ PrintHeapLayout(std::cout, heap);
+}
+
+// static
+void HeapLayoutTracer::GCEpiloguePrintHeapLayout(v8::Isolate* isolate,
+ v8::GCType gc_type,
+ v8::GCCallbackFlags flags,
+ void* data) {
+ Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
+ PrintF("After GC:%d,", heap->gc_count());
+ PrintF("collector_name:%s\n", Heap::CollectorName(gc_type));
+ PrintHeapLayout(std::cout, heap);
+}
+
+// static
+void HeapLayoutTracer::PrintBasicMemoryChunk(std::ostream& os,
+ BasicMemoryChunk* chunk,
+ const char* owner_name) {
+ os << "{owner:" << owner_name << ","
+ << "address:" << chunk << ","
+ << "size:" << chunk->size() << ","
+ << "allocated_bytes:" << chunk->allocated_bytes() << ","
+ << "wasted_memory:" << chunk->wasted_memory() << "}" << std::endl;
+}
+
+// static
+void HeapLayoutTracer::PrintHeapLayout(std::ostream& os, Heap* heap) {
+ for (PageIterator it = heap->new_space()->to_space().begin();
+ it != heap->new_space()->to_space().end(); ++it) {
+ PrintBasicMemoryChunk(os, *it, "to_space");
+ }
+
+ for (PageIterator it = heap->new_space()->from_space().begin();
+ it != heap->new_space()->from_space().end(); ++it) {
+ PrintBasicMemoryChunk(os, *it, "from_space");
+ }
+
+ OldGenerationMemoryChunkIterator it(heap);
+ MemoryChunk* chunk;
+ while ((chunk = it.next()) != nullptr) {
+ PrintBasicMemoryChunk(os, chunk, chunk->owner()->name());
+ }
+
+ for (ReadOnlyPage* page : heap->read_only_space()->pages()) {
+ PrintBasicMemoryChunk(os, page, "ro_space");
+ }
+}
+} // namespace internal
+} // namespace v8
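
For reference, the tracer's per-chunk output is a single key/value line. Below is a standalone approximation of PrintBasicMemoryChunk that uses a plain struct instead of BasicMemoryChunk*; ChunkInfo and its fields are illustrative stand-ins.

#include <cstddef>
#include <iostream>

// Illustrative stand-in for the chunk fields printed above.
struct ChunkInfo {
  const void* address;
  size_t size;
  size_t allocated_bytes;
  size_t wasted_memory;
};

// Emits one "{owner:...,address:...,...}" line per chunk, like the tracer.
void PrintChunk(std::ostream& os, const ChunkInfo& chunk,
                const char* owner_name) {
  os << "{owner:" << owner_name << ","
     << "address:" << chunk.address << ","
     << "size:" << chunk.size << ","
     << "allocated_bytes:" << chunk.allocated_bytes << ","
     << "wasted_memory:" << chunk.wasted_memory << "}" << std::endl;
}

int main() {
  int dummy = 0;
  ChunkInfo chunk{&dummy, 262144, 180224, 64};
  PrintChunk(std::cout, chunk, "old_space");
  return 0;
}
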
diff --git a/deps/v8/src/heap/heap-layout-tracer.h b/deps/v8/src/heap/heap-layout-tracer.h
new file mode 100644
index 0000000000..c7d677807e
--- /dev/null
+++ b/deps/v8/src/heap/heap-layout-tracer.h
@@ -0,0 +1,33 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_LAYOUT_TRACER_H_
+#define V8_HEAP_HEAP_LAYOUT_TRACER_H_
+
+#include "include/v8-callbacks.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class BasicMemoryChunk;
+
+class HeapLayoutTracer : AllStatic {
+ public:
+ static void GCProloguePrintHeapLayout(v8::Isolate* isolate,
+ v8::GCType gc_type,
+ v8::GCCallbackFlags flags, void* data);
+ static void GCEpiloguePrintHeapLayout(v8::Isolate* isolate,
+ v8::GCType gc_type,
+ v8::GCCallbackFlags flags, void* data);
+
+ private:
+ static void PrintBasicMemoryChunk(std::ostream& os, BasicMemoryChunk* chunk,
+ const char* owner_name);
+ static void PrintHeapLayout(std::ostream& os, Heap* heap);
+};
+} // namespace internal
+} // namespace v8
+#endif // V8_HEAP_HEAP_LAYOUT_TRACER_H_
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index d3815617ae..a1b03256af 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -207,7 +207,7 @@ inline bool IsReadOnlyHeapObject(HeapObject object) {
return chunk->InReadOnlySpace();
}
-inline bool IsCodeObject(HeapObject object) {
+inline bool IsCodeSpaceObject(HeapObject object) {
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
return chunk->InCodeSpace();
@@ -275,6 +275,14 @@ void WriteBarrier::MarkingFromGlobalHandle(Object value) {
MarkingSlowFromGlobalHandle(*heap, heap_value);
}
+// static
+void WriteBarrier::MarkingFromInternalFields(JSObject host) {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return;
+ auto heap = GetHeapIfMarking(host);
+ if (!heap) return;
+ MarkingSlowFromInternalFields(*heap, host);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap-write-barrier.cc b/deps/v8/src/heap/heap-write-barrier.cc
index e401df6f09..dce052f00e 100644
--- a/deps/v8/src/heap/heap-write-barrier.cc
+++ b/deps/v8/src/heap/heap-write-barrier.cc
@@ -4,9 +4,11 @@
#include "src/heap/heap-write-barrier.h"
+#include "src/heap/embedder-tracing.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/js-objects.h"
#include "src/objects/maybe-object.h"
#include "src/objects/slots-inl.h"
@@ -45,6 +47,17 @@ void WriteBarrier::MarkingSlowFromGlobalHandle(Heap* heap, HeapObject value) {
heap->marking_barrier()->WriteWithoutHost(value);
}
+// static
+void WriteBarrier::MarkingSlowFromInternalFields(Heap* heap, JSObject host) {
+ // We are not checking the mark bits of host here as (a) there's no
+ // synchronization with the marker and (b) we are writing into a live object
+ // (independent of the mark bits).
+ if (!heap->local_embedder_heap_tracer()->InUse()) return;
+ LocalEmbedderHeapTracer::ProcessingScope scope(
+ heap->local_embedder_heap_tracer());
+ scope.TracePossibleWrapper(host);
+}
+
void WriteBarrier::MarkingSlow(Heap* heap, Code host, RelocInfo* reloc_info,
HeapObject value) {
MarkingBarrier* marking_barrier = current_marking_barrier
diff --git a/deps/v8/src/heap/heap-write-barrier.h b/deps/v8/src/heap/heap-write-barrier.h
index e214dba680..b221fae2ed 100644
--- a/deps/v8/src/heap/heap-write-barrier.h
+++ b/deps/v8/src/heap/heap-write-barrier.h
@@ -8,6 +8,7 @@
#include "include/v8-internal.h"
#include "src/base/optional.h"
#include "src/common/globals.h"
+#include "src/objects/heap-object.h"
namespace v8 {
namespace internal {
@@ -57,6 +58,7 @@ class V8_EXPORT_PRIVATE WriteBarrier {
static int MarkingFromCode(Address raw_host, Address raw_slot);
// Invoked from global handles where no host object is available.
static inline void MarkingFromGlobalHandle(Object value);
+ static inline void MarkingFromInternalFields(JSObject host);
static void SetForThread(MarkingBarrier*);
static void ClearForThread(MarkingBarrier*);
@@ -74,6 +76,7 @@ class V8_EXPORT_PRIVATE WriteBarrier {
static void MarkingSlow(Heap* heap, DescriptorArray,
int number_of_own_descriptors);
static void MarkingSlowFromGlobalHandle(Heap* heap, HeapObject value);
+ static void MarkingSlowFromInternalFields(Heap* heap, JSObject host);
};
} // namespace internal
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index c3e549c29a..5f80f2fd4f 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -11,6 +11,7 @@
#include <unordered_map>
#include <unordered_set>
+#include "include/v8-locker.h"
#include "src/api/api-inl.h"
#include "src/base/bits.h"
#include "src/base/flags.h"
@@ -26,6 +27,7 @@
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/embedder-state.h"
#include "src/execution/isolate-utils-inl.h"
#include "src/execution/microtask-queue.h"
#include "src/execution/runtime-profiler.h"
@@ -49,6 +51,7 @@
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
+#include "src/heap/heap-layout-tracer.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/incremental-marking.h"
@@ -66,6 +69,7 @@
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/paged-spaces-inl.h"
+#include "src/heap/parked-scope.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/safepoint.h"
@@ -193,9 +197,6 @@ bool Heap::GCCallbackTuple::operator==(
return other.callback == callback && other.data == data;
}
-Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=(
- const Heap::GCCallbackTuple& other) V8_NOEXCEPT = default;
-
class ScavengeTaskObserver : public AllocationObserver {
public:
ScavengeTaskObserver(Heap* heap, intptr_t step_size)
@@ -496,6 +497,11 @@ void Heap::SetGCState(HeapState state) {
gc_state_.store(state, std::memory_order_relaxed);
}
+bool Heap::IsGCWithoutStack() const {
+ return local_embedder_heap_tracer()->embedder_stack_state() ==
+ cppgc::EmbedderStackState::kNoHeapPointers;
+}
+
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
PrintIsolate(isolate_,
@@ -932,11 +938,6 @@ void Heap::GarbageCollectionPrologue() {
} else {
maximum_size_scavenges_ = 0;
}
- if (FLAG_track_retaining_path) {
- retainer_.clear();
- ephemeron_retainer_.clear();
- retaining_root_.clear();
- }
memory_allocator()->unmapper()->PrepareForGC();
}
@@ -1331,12 +1332,19 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
collection_barrier_->ResumeThreadsAwaitingCollection();
}
-void Heap::GarbageCollectionEpilogue() {
+void Heap::GarbageCollectionEpilogue(GarbageCollector collector) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
AllowGarbageCollection for_the_rest_of_the_epilogue;
UpdateMaximumCommitted();
+ if (FLAG_track_retaining_path &&
+ collector == GarbageCollector::MARK_COMPACTOR) {
+ retainer_.clear();
+ ephemeron_retainer_.clear();
+ retaining_root_.clear();
+ }
+
isolate_->counters()->alive_after_last_gc()->Set(
static_cast<int>(SizeOfObjects()));
@@ -1389,13 +1397,13 @@ void Heap::HandleGCRequest() {
} else if (CollectionRequested()) {
CheckCollectionRequested();
} else if (incremental_marking()->request_type() ==
- IncrementalMarking::COMPLETE_MARKING) {
+ IncrementalMarking::GCRequestType::COMPLETE_MARKING) {
incremental_marking()->reset_request_type();
CollectAllGarbage(current_gc_flags_,
GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
current_gc_callback_flags_);
} else if (incremental_marking()->request_type() ==
- IncrementalMarking::FINALIZATION &&
+ IncrementalMarking::GCRequestType::FINALIZATION &&
incremental_marking()->IsMarking() &&
!incremental_marking()->finalize_marking_completed()) {
incremental_marking()->reset_request_type();
@@ -1641,20 +1649,6 @@ void Heap::ReportExternalMemoryPressure() {
int64_t Heap::external_memory_limit() { return external_memory_.limit(); }
-void Heap::EnsureFillerObjectAtTop() {
- // There may be an allocation memento behind objects in new space. Upon
- // evacuation of a non-full new space (or if we are on the last page) there
- // may be uninitialized memory behind top. We fill the remainder of the page
- // with a filler.
- if (!new_space_) return;
- Address to_top = new_space_->top();
- Page* page = Page::FromAddress(to_top - kTaggedSize);
- if (page->Contains(to_top)) {
- int remaining_in_page = static_cast<int>(page->area_end() - to_top);
- CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
- }
-}
-
Heap::DevToolsTraceEventScope::DevToolsTraceEventScope(Heap* heap,
const char* event_name,
const char* event_type)
@@ -1714,7 +1708,11 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
#endif
- EnsureFillerObjectAtTop();
+ // There may be an allocation memento behind objects in new space. Upon
+ // evacuation of a non-full new space (or if we are on the last page) there
+ // may be uninitialized memory behind top. We fill the remainder of the page
+ // with a filler.
+ if (new_space()) new_space()->MakeLinearAllocationAreaIterable();
if (IsYoungGenerationCollector(collector) &&
!incremental_marking()->IsStopped()) {
@@ -1765,9 +1763,22 @@ bool Heap::CollectGarbage(AllocationSpace space,
PROFILE(isolate_, CodeMovingGCEvent());
}
- GCType gc_type = collector == GarbageCollector::MARK_COMPACTOR
- ? kGCTypeMarkSweepCompact
- : kGCTypeScavenge;
+ GCType gc_type;
+
+ switch (collector) {
+ case GarbageCollector::MARK_COMPACTOR:
+ gc_type = kGCTypeMarkSweepCompact;
+ break;
+ case GarbageCollector::SCAVENGER:
+ gc_type = kGCTypeScavenge;
+ break;
+ case GarbageCollector::MINOR_MARK_COMPACTOR:
+ gc_type = kGCTypeMinorMarkCompact;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
{
GCCallbacksScope scope(this);
// Temporary override any embedder stack state as callbacks may create
@@ -1827,7 +1838,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
}
- GarbageCollectionEpilogue();
+ GarbageCollectionEpilogue(collector);
if (collector == GarbageCollector::MARK_COMPACTOR &&
FLAG_track_detached_contexts) {
isolate()->CheckDetachedContextsAfterGC();
@@ -1917,7 +1928,13 @@ void Heap::StartIncrementalMarking(int gc_flags,
CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
}
- SafepointScope safepoint(this);
+ base::Optional<SafepointScope> safepoint_scope;
+
+ {
+ AllowGarbageCollection allow_shared_gc;
+ IgnoreLocalGCRequests ignore_gc_requests(this);
+ safepoint_scope.emplace(this);
+ }
#ifdef DEBUG
VerifyCountersAfterSweeping();
@@ -2150,12 +2167,21 @@ size_t Heap::PerformGarbageCollection(
TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
- SafepointScope safepoint_scope(this);
+ base::Optional<SafepointScope> safepoint_scope;
+
+ {
+ AllowGarbageCollection allow_shared_gc;
+ IgnoreLocalGCRequests ignore_gc_requests(this);
+ safepoint_scope.emplace(this);
+ }
collection_barrier_->StopTimeToCollectionTimer();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
+ // We don't really perform a GC here but need this scope for the nested
+ // SafepointScope inside Verify().
+ AllowGarbageCollection allow_gc;
Verify();
}
#endif
@@ -2226,6 +2252,9 @@ size_t Heap::PerformGarbageCollection(
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
+ // We don't really perform a GC here but need this scope for the nested
+ // SafepointScope inside Verify().
+ AllowGarbageCollection allow_gc;
Verify();
}
#endif
@@ -2251,37 +2280,27 @@ void Heap::CollectSharedGarbage(GarbageCollectionReason gc_reason) {
void Heap::PerformSharedGarbageCollection(Isolate* initiator,
GarbageCollectionReason gc_reason) {
DCHECK(IsShared());
- base::MutexGuard guard(isolate()->client_isolate_mutex());
+
+ // Stop all client isolates attached to this isolate
+ GlobalSafepointScope global_safepoint(initiator);
+
+ // Migrate shared isolate to the main thread of the initiator isolate.
+ v8::Locker locker(reinterpret_cast<v8::Isolate*>(isolate()));
+ v8::Isolate::Scope isolate_scope(reinterpret_cast<v8::Isolate*>(isolate()));
const char* collector_reason = nullptr;
GarbageCollector collector = GarbageCollector::MARK_COMPACTOR;
tracer()->Start(collector, gc_reason, collector_reason);
- isolate()->IterateClientIsolates([initiator](Isolate* client) {
- DCHECK_NOT_NULL(client->shared_isolate());
- Heap* client_heap = client->heap();
+ DCHECK_NOT_NULL(isolate()->global_safepoint());
- IsolateSafepoint::StopMainThread stop_main_thread =
- initiator == client ? IsolateSafepoint::StopMainThread::kNo
- : IsolateSafepoint::StopMainThread::kYes;
-
- client_heap->safepoint()->EnterSafepointScope(stop_main_thread);
- DCHECK(client_heap->deserialization_complete());
-
- client_heap->shared_old_allocator_->FreeLinearAllocationArea();
- client_heap->shared_map_allocator_->FreeLinearAllocationArea();
+ isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
+ client->heap()->FreeSharedLinearAllocationAreas();
});
PerformGarbageCollection(GarbageCollector::MARK_COMPACTOR);
- isolate()->IterateClientIsolates([initiator](Isolate* client) {
- IsolateSafepoint::StopMainThread stop_main_thread =
- initiator == client ? IsolateSafepoint::StopMainThread::kNo
- : IsolateSafepoint::StopMainThread::kYes;
- client->heap()->safepoint()->LeaveSafepointScope(stop_main_thread);
- });
-
tracer()->Stop(collector);
}
@@ -2657,14 +2676,14 @@ void Heap::ComputeFastPromotionMode() {
void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk,
UnprotectMemoryOrigin origin) {
- if (unprotected_memory_chunks_registry_enabled_) {
- base::Optional<base::MutexGuard> guard;
- if (origin != UnprotectMemoryOrigin::kMainThread) {
- guard.emplace(&unprotected_memory_chunks_mutex_);
- }
+ if (!write_protect_code_memory()) return;
+ if (code_page_collection_memory_modification_scope_depth_ > 0) {
+ base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
if (unprotected_memory_chunks_.insert(chunk).second) {
chunk->SetCodeModificationPermissions();
}
+ } else {
+ DCHECK_GT(code_space_memory_modification_scope_depth_, 0);
}
}
@@ -2678,10 +2697,10 @@ void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
}
void Heap::ProtectUnprotectedMemoryChunks() {
- DCHECK(unprotected_memory_chunks_registry_enabled_);
+ base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
for (auto chunk = unprotected_memory_chunks_.begin();
chunk != unprotected_memory_chunks_.end(); chunk++) {
- CHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
+ DCHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
(*chunk)->SetDefaultCodePermissions();
}
unprotected_memory_chunks_.clear();
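
The registry touched above is a mutex-guarded set that is later drained by ProtectUnprotectedMemoryChunks. A miniature version of that bookkeeping follows, with placeholder types instead of MemoryChunk and real page permissions.

#include <mutex>
#include <unordered_set>

// Placeholder for a code page; "writable" stands in for the real permission flip.
struct Chunk {
  bool writable = false;
};

class UnprotectedChunkRegistry {
 public:
  void Unprotect(Chunk* chunk) {
    std::lock_guard<std::mutex> guard(mutex_);
    // Only flip permissions the first time a chunk is registered.
    if (chunks_.insert(chunk).second) chunk->writable = true;
  }

  void ProtectAll() {
    std::lock_guard<std::mutex> guard(mutex_);
    for (Chunk* chunk : chunks_) chunk->writable = false;
    chunks_.clear();
  }

 private:
  std::mutex mutex_;
  std::unordered_set<Chunk*> chunks_;
};

int main() {
  UnprotectedChunkRegistry registry;
  Chunk a, b;
  registry.Unprotect(&a);
  registry.Unprotect(&a);  // second registration is a no-op
  registry.Unprotect(&b);
  registry.ProtectAll();   // both chunks are read-only again and the set is empty
  return 0;
}
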
@@ -3019,13 +3038,12 @@ STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kTaggedSize));
STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kDoubleAlignment));
#endif
-#ifdef V8_HOST_ARCH_32_BIT
-STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
-#endif
+STATIC_ASSERT(!USE_ALLOCATION_ALIGNMENT_BOOL ||
+ (HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
switch (alignment) {
- case kWordAligned:
+ case kTaggedAligned:
return 0;
case kDoubleAligned:
case kDoubleUnaligned:
@@ -3320,6 +3338,10 @@ void Heap::OnMoveEvent(HeapObject target, HeapObject source,
LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source.address(),
target.address()));
} else if (target.IsNativeContext()) {
+ if (isolate_->current_embedder_state() != nullptr) {
+ isolate_->current_embedder_state()->OnMoveEvent(source.address(),
+ target.address());
+ }
PROFILE(isolate_,
NativeContextMoveEvent(source.address(), target.address()));
}
@@ -3412,7 +3434,14 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
if (FLAG_enable_slow_asserts) {
// Make sure the stack or other roots (e.g., Handles) don't contain pointers
// to the original FixedArray (which is now the filler object).
- SafepointScope scope(this);
+ base::Optional<SafepointScope> safepoint_scope;
+
+ {
+ AllowGarbageCollection allow_gc;
+ IgnoreLocalGCRequests ignore_gc_requests(this);
+ safepoint_scope.emplace(this);
+ }
+
LeftTrimmerVerifierRootVisitor root_visitor(object);
ReadOnlyRoots(this).Iterate(&root_visitor);
IterateRoots(&root_visitor, {});
@@ -3524,13 +3553,45 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
void Heap::MakeHeapIterable() {
mark_compact_collector()->EnsureSweepingCompleted();
- MakeLocalHeapLabsIterable();
+ safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+ local_heap->MakeLinearAllocationAreaIterable();
+ });
+
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
+ space->MakeLinearAllocationAreaIterable();
+ }
+
+ if (new_space()) new_space()->MakeLinearAllocationAreaIterable();
+}
+
+void Heap::FreeLinearAllocationAreas() {
+ safepoint()->IterateLocalHeaps(
+ [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
+
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
+ space->FreeLinearAllocationArea();
+ }
+
+ if (new_space()) new_space()->FreeLinearAllocationArea();
}
-void Heap::MakeLocalHeapLabsIterable() {
+void Heap::FreeSharedLinearAllocationAreas() {
+ if (!isolate()->shared_isolate()) return;
safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
- local_heap->MakeLinearAllocationAreaIterable();
+ local_heap->FreeSharedLinearAllocationArea();
});
+ FreeMainThreadSharedLinearAllocationAreas();
+}
+
+void Heap::FreeMainThreadSharedLinearAllocationAreas() {
+ if (!isolate()->shared_isolate()) return;
+ shared_old_allocator_->FreeLinearAllocationArea();
+ shared_map_allocator_->FreeLinearAllocationArea();
+ main_thread_local_heap()->FreeSharedLinearAllocationArea();
}
namespace {
@@ -3748,6 +3809,7 @@ void Heap::FinalizeIncrementalMarkingIncrementally(
TRACE_GC_EPOCH(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE,
ThreadKind::kMain);
+ IgnoreLocalGCRequests ignore_gc_requests(this);
SafepointScope safepoint(this);
InvokeIncrementalMarkingPrologueCallbacks();
incremental_marking()->FinalizeIncrementally();
@@ -3796,7 +3858,7 @@ class SlotCollectingVisitor final : public ObjectVisitor {
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
code_slots_.push_back(slot);
#endif
}
@@ -3812,14 +3874,14 @@ class SlotCollectingVisitor final : public ObjectVisitor {
int number_of_slots() { return static_cast<int>(slots_.size()); }
MaybeObjectSlot slot(int i) { return slots_[i]; }
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
CodeObjectSlot code_slot(int i) { return code_slots_[i]; }
int number_of_code_slots() { return static_cast<int>(code_slots_.size()); }
#endif
private:
std::vector<MaybeObjectSlot> slots_;
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
std::vector<CodeObjectSlot> code_slots_;
#endif
};
@@ -3827,16 +3889,18 @@ class SlotCollectingVisitor final : public ObjectVisitor {
void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
if (!FLAG_verify_heap) return;
+ PtrComprCageBase cage_base(isolate());
+
// Check that Heap::NotifyObjectLayoutChange was called for object transitions
// that are not safe for concurrent marking.
// If you see this check triggering for a freshly allocated object,
// use object->set_map_after_allocation() to initialize its map.
if (pending_layout_change_object_.is_null()) {
- if (object.IsJSObject()) {
+ if (object.IsJSObject(cage_base)) {
// Without double unboxing all in-object fields of a JSObject are tagged.
return;
}
- if (object.IsString() &&
+ if (object.IsString(cage_base) &&
(new_map == ReadOnlyRoots(this).thin_string_map() ||
new_map == ReadOnlyRoots(this).thin_one_byte_string_map())) {
// When transitioning a string to ThinString,
@@ -3844,7 +3908,7 @@ void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
// tagged fields are introduced.
return;
}
- if (FLAG_shared_string_table && object.IsString() &&
+ if (FLAG_shared_string_table && object.IsString(cage_base) &&
InstanceTypeChecker::IsInternalizedString(new_map.instance_type())) {
// In-place internalization does not change a string's fields.
//
@@ -3855,19 +3919,19 @@ void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
}
// Check that the set of slots before and after the transition match.
SlotCollectingVisitor old_visitor;
- object.IterateFast(&old_visitor);
- MapWord old_map_word = object.map_word(kRelaxedLoad);
+ object.IterateFast(cage_base, &old_visitor);
+ MapWord old_map_word = object.map_word(cage_base, kRelaxedLoad);
// Temporarily set the new map to iterate new slots.
object.set_map_word(MapWord::FromMap(new_map), kRelaxedStore);
SlotCollectingVisitor new_visitor;
- object.IterateFast(&new_visitor);
+ object.IterateFast(cage_base, &new_visitor);
// Restore the old map.
object.set_map_word(old_map_word, kRelaxedStore);
DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
for (int i = 0; i < new_visitor.number_of_slots(); i++) {
DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
}
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
DCHECK_EQ(new_visitor.number_of_code_slots(),
old_visitor.number_of_code_slots());
for (int i = 0; i < new_visitor.number_of_code_slots(); i++) {
@@ -4172,6 +4236,7 @@ std::unique_ptr<v8::MeasureMemoryDelegate> Heap::MeasureMemoryDelegate(
void Heap::CollectCodeStatistics() {
TRACE_EVENT0("v8", "Heap::CollectCodeStatistics");
+ IgnoreLocalGCRequests ignore_gc_requests(this);
SafepointScope safepoint_scope(this);
MakeHeapIterable();
CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
@@ -4385,6 +4450,7 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
#ifdef VERIFY_HEAP
void Heap::Verify() {
CHECK(HasBeenSetUp());
+ IgnoreLocalGCRequests ignore_gc_requests(this);
SafepointScope safepoint_scope(this);
HandleScope scope(isolate());
@@ -4582,8 +4648,9 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
// In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
chunk->mutex());
+ PtrComprCageBase cage_base(isolate());
Address start = object.address();
- Address end = start + object.Size();
+ Address end = start + object.Size(cage_base);
std::set<Address> old_to_new;
std::set<std::pair<SlotType, Address>> typed_old_to_new;
if (!InYoungGeneration(object)) {
@@ -4591,7 +4658,7 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
OldToNewSlotVerifyingVisitor visitor(isolate(), &old_to_new,
&typed_old_to_new,
&this->ephemeron_remembered_set_);
- object.IterateBody(&visitor);
+ object.IterateBody(cage_base, &visitor);
}
// TODO(v8:11797): Add old to old slot set verification once all weak objects
// have their own instance types and slots are recorded for all weak fields.
@@ -4600,8 +4667,6 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
#ifdef DEBUG
void Heap::VerifyCountersAfterSweeping() {
- MakeLocalHeapLabsIterable();
-
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
@@ -4685,7 +4750,13 @@ void Heap::IterateSmiRoots(RootVisitor* v) {
// the sweeper might actually free the underlying page).
class ClearStaleLeftTrimmedHandlesVisitor : public RootVisitor {
public:
- explicit ClearStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
+ explicit ClearStaleLeftTrimmedHandlesVisitor(Heap* heap)
+ : heap_(heap)
+#if V8_COMPRESS_POINTERS
+ ,
+ cage_base_(heap->isolate())
+#endif // V8_COMPRESS_POINTERS
+ {
USE(heap_);
}
@@ -4701,20 +4772,32 @@ class ClearStaleLeftTrimmedHandlesVisitor : public RootVisitor {
}
}
+ // The pointer compression cage base value used for decompression of all
+ // tagged values except references to Code objects.
+ PtrComprCageBase cage_base() const {
+#if V8_COMPRESS_POINTERS
+ return cage_base_;
+#else
+ return PtrComprCageBase{};
+#endif // V8_COMPRESS_POINTERS
+ }
+
private:
inline void FixHandle(FullObjectSlot p) {
if (!(*p).IsHeapObject()) return;
HeapObject current = HeapObject::cast(*p);
- if (!current.map_word(kRelaxedLoad).IsForwardingAddress() &&
- current.IsFreeSpaceOrFiller()) {
+ if (!current.map_word(cage_base(), kRelaxedLoad).IsForwardingAddress() &&
+ current.IsFreeSpaceOrFiller(cage_base())) {
#ifdef DEBUG
// We need to find a FixedArrayBase map after walking the fillers.
- while (!current.map_word(kRelaxedLoad).IsForwardingAddress() &&
- current.IsFreeSpaceOrFiller()) {
+ while (
+ !current.map_word(cage_base(), kRelaxedLoad).IsForwardingAddress() &&
+ current.IsFreeSpaceOrFiller(cage_base())) {
Address next = current.ptr();
- if (current.map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
+ if (current.map(cage_base()) ==
+ ReadOnlyRoots(heap_).one_pointer_filler_map()) {
next += kTaggedSize;
- } else if (current.map() ==
+ } else if (current.map(cage_base()) ==
ReadOnlyRoots(heap_).two_pointer_filler_map()) {
next += 2 * kTaggedSize;
} else {
@@ -4722,14 +4805,19 @@ class ClearStaleLeftTrimmedHandlesVisitor : public RootVisitor {
}
current = HeapObject::cast(Object(next));
}
- DCHECK(current.map_word(kRelaxedLoad).IsForwardingAddress() ||
- current.IsFixedArrayBase());
+ DCHECK(
+ current.map_word(cage_base(), kRelaxedLoad).IsForwardingAddress() ||
+ current.IsFixedArrayBase(cage_base()));
#endif // DEBUG
p.store(Smi::zero());
}
}
Heap* heap_;
+
+#if V8_COMPRESS_POINTERS
+ const PtrComprCageBase cage_base_;
+#endif // V8_COMPRESS_POINTERS
};
void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
@@ -4873,9 +4961,12 @@ void Heap::IterateRootsIncludingClients(RootVisitor* v,
base::EnumSet<SkipRoot> options) {
IterateRoots(v, options);
- isolate()->IterateClientIsolates([v, options](Isolate* client) {
- client->heap()->IterateRoots(v, options);
- });
+ if (isolate()->is_shared()) {
+ isolate()->global_safepoint()->IterateClientIsolates(
+ [v, options](Isolate* client) {
+ client->heap()->IterateRoots(v, options);
+ });
+ }
}
void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
@@ -4886,8 +4977,12 @@ void Heap::IterateBuiltins(RootVisitor* v) {
Builtins* builtins = isolate()->builtins();
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
- v->VisitRootPointer(Root::kBuiltins, Builtins::name(builtin),
- builtins->builtin_slot(builtin));
+ const char* name = Builtins::name(builtin);
+ v->VisitRootPointer(Root::kBuiltins, name, builtins->builtin_slot(builtin));
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ v->VisitRootPointer(Root::kBuiltins, name,
+ builtins->builtin_code_data_container_slot(builtin));
+ }
}
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLastTier0;
@@ -5420,7 +5515,7 @@ void Heap::DisableInlineAllocation() {
// Update inline allocation limit for old spaces.
PagedSpaceIterator spaces(this);
- CodeSpaceMemoryModificationScope modification_scope(this);
+ CodePageCollectionMemoryModificationScope modification_scope(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
base::MutexGuard guard(space->mutex());
@@ -5472,14 +5567,16 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
isolate()->counters()->gc_last_resort_from_handles()->Increment();
if (IsSharedAllocationType(allocation)) {
CollectSharedGarbage(GarbageCollectionReason::kLastResort);
+
+ AlwaysAllocateScope scope(isolate()->shared_isolate()->heap());
+ alloc = AllocateRaw(size, allocation, origin, alignment);
} else {
CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
- }
- {
AlwaysAllocateScope scope(this);
alloc = AllocateRaw(size, allocation, origin, alignment);
}
+
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
return result;
@@ -5570,6 +5667,19 @@ void Heap::SetUp(LocalHeap* main_thread_local_heap) {
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
space_[i] = nullptr;
}
+
+ // Set up layout tracing callback.
+ if (V8_UNLIKELY(FLAG_trace_gc_heap_layout)) {
+ v8::GCType gc_type = kGCTypeMarkSweepCompact;
+ if (V8_UNLIKELY(!FLAG_trace_gc_heap_layout_ignore_minor_gc)) {
+ gc_type = static_cast<v8::GCType>(gc_type | kGCTypeScavenge |
+ kGCTypeMinorMarkCompact);
+ }
+ AddGCPrologueCallback(HeapLayoutTracer::GCProloguePrintHeapLayout, gc_type,
+ nullptr);
+ AddGCEpilogueCallback(HeapLayoutTracer::GCEpiloguePrintHeapLayout, gc_type,
+ nullptr);
+ }
}
void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
@@ -5610,16 +5720,17 @@ class StressConcurrentAllocationObserver : public AllocationObserver {
Heap* heap_;
};
-void Heap::SetUpSpaces() {
+void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
+ LinearAllocationArea* old_allocation_info) {
  // Ensure SetUpFromReadOnlySpace has been run.
DCHECK_NOT_NULL(read_only_space_);
const bool has_young_gen = !FLAG_single_generation && !IsShared();
if (has_young_gen) {
- space_[NEW_SPACE] = new_space_ =
- new NewSpace(this, memory_allocator_->data_page_allocator(),
- initial_semispace_size_, max_semi_space_size_);
+ space_[NEW_SPACE] = new_space_ = new NewSpace(
+ this, memory_allocator_->data_page_allocator(), initial_semispace_size_,
+ max_semi_space_size_, new_allocation_info);
}
- space_[OLD_SPACE] = old_space_ = new OldSpace(this);
+ space_[OLD_SPACE] = old_space_ = new OldSpace(this, old_allocation_info);
space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
space_[MAP_SPACE] = map_space_ = new MapSpace(this);
space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this);
@@ -5818,11 +5929,13 @@ EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
void Heap::AttachCppHeap(v8::CppHeap* cpp_heap) {
CppHeap::From(cpp_heap)->AttachIsolate(isolate());
cpp_heap_ = cpp_heap;
+ local_embedder_heap_tracer()->SetCppHeap(CppHeap::From(cpp_heap));
}
void Heap::DetachCppHeap() {
CppHeap::From(cpp_heap_)->DetachIsolate();
cpp_heap_ = nullptr;
+ local_embedder_heap_tracer()->SetCppHeap(nullptr);
}
EmbedderHeapTracer::TraceFlags Heap::flags_for_embedder_tracer() const {
@@ -5873,11 +5986,18 @@ void Heap::StartTearDown() {
// threads finish.
collection_barrier_->NotifyShutdownRequested();
+ // Main thread isn't going to allocate anymore.
+ main_thread_local_heap()->FreeLinearAllocationArea();
+
+ FreeMainThreadSharedLinearAllocationAreas();
+
#ifdef VERIFY_HEAP
// {StartTearDown} is called fairly early during Isolate teardown, so it's
// a good time to run heap verification (if requested), before starting to
// tear down parts of the Isolate.
if (FLAG_verify_heap) {
+ AllowGarbageCollection allow_gc;
+ IgnoreLocalGCRequests ignore_gc_requests(this);
SafepointScope scope(this);
Verify();
}
@@ -6118,7 +6238,9 @@ void Heap::AddRetainedMap(Handle<NativeContext> context, Handle<Map> map) {
if (map->is_in_retained_map_list()) {
return;
}
- Handle<WeakArrayList> array(context->retained_maps(), isolate());
+
+ Handle<WeakArrayList> array(WeakArrayList::cast(context->retained_maps()),
+ isolate());
if (array->IsFull()) {
CompactRetainedMaps(*array);
}
@@ -6352,7 +6474,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
: ObjectVisitorWithCageBases(filter->heap_), filter_(filter) {}
void VisitMapPointer(HeapObject object) override {
- MarkHeapObject(Map::unchecked_cast(object.map()));
+ MarkHeapObject(Map::unchecked_cast(object.map(cage_base())));
}
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
@@ -6392,7 +6514,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
while (!marking_stack_.empty()) {
HeapObject obj = marking_stack_.back();
marking_stack_.pop_back();
- obj.Iterate(this);
+ obj.Iterate(cage_base(), this);
}
}
@@ -6819,7 +6941,7 @@ std::vector<WeakArrayList> Heap::FindAllRetainedMaps() {
Object context = native_contexts_list();
while (!context.IsUndefined(isolate())) {
NativeContext native_context = NativeContext::cast(context);
- result.push_back(native_context.retained_maps());
+ result.push_back(WeakArrayList::cast(native_context.retained_maps()));
context = native_context.next_context_link();
}
return result;
@@ -6846,6 +6968,7 @@ void VerifyPointersVisitor::VisitCodePointer(HeapObject host,
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
Object maybe_code = slot.load(code_cage_base());
HeapObject code;
+ // The slot might contain smi during CodeDataContainer creation.
if (maybe_code.GetHeapObject(&code)) {
VerifyCodeObjectImpl(code);
} else {
@@ -6900,7 +7023,7 @@ void VerifyPointersVisitor::VerifyPointers(HeapObject host,
// to one of objects in DATA_ONLY_VISITOR_ID_LIST. You can fix
// this by moving that object to POINTER_VISITOR_ID_LIST.
DCHECK_EQ(ObjectFields::kMaybePointers,
- Map::ObjectFieldsFrom(host.map().visitor_id()));
+ Map::ObjectFieldsFrom(host.map(cage_base()).visitor_id()));
VerifyPointersImpl(start, end);
}
@@ -6975,7 +7098,7 @@ Map Heap::GcSafeMapOfCodeSpaceObject(HeapObject object) {
PtrComprCageBase cage_base(isolate());
MapWord map_word = object.map_word(cage_base, kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
PtrComprCageBase code_cage_base(isolate()->code_cage_base());
#else
PtrComprCageBase code_cage_base = cage_base;
@@ -6995,7 +7118,8 @@ Code Heap::GcSafeCastToCode(HeapObject object, Address inner_pointer) {
bool Heap::GcSafeCodeContains(Code code, Address addr) {
Map map = GcSafeMapOfCodeSpaceObject(code);
DCHECK(map == ReadOnlyRoots(this).code_map());
- Builtin maybe_builtin = InstructionStream::TryLookupCode(isolate(), addr);
+ Builtin maybe_builtin =
+ OffHeapInstructionStream::TryLookupCode(isolate(), addr);
if (Builtins::IsBuiltinId(maybe_builtin) &&
code.builtin_id() == maybe_builtin) {
return true;
@@ -7007,7 +7131,7 @@ bool Heap::GcSafeCodeContains(Code code, Address addr) {
Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
Builtin maybe_builtin =
- InstructionStream::TryLookupCode(isolate(), inner_pointer);
+ OffHeapInstructionStream::TryLookupCode(isolate(), inner_pointer);
if (Builtins::IsBuiltinId(maybe_builtin)) {
return isolate()->builtins()->code(maybe_builtin);
}
@@ -7053,6 +7177,13 @@ Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
return code;
}
}
+ // TODO(1241665): Remove once the issue is solved.
+ isolate()->PushParamsAndDie(
+ reinterpret_cast<void*>(inner_pointer),
+ const_cast<uint8_t*>(isolate()->embedded_blob_code()),
+ const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlobCode()),
+ reinterpret_cast<void*>(Isolate::CurrentEmbeddedBlobCodeSize()));
+
UNREACHABLE();
}
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index dbe03936bf..ef8d912bfb 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -86,6 +86,7 @@ class HeapObjectsFilter;
class HeapStats;
class Isolate;
class JSFinalizationRegistry;
+class LinearAllocationArea;
class LocalEmbedderHeapTracer;
class LocalHeap;
class MarkingBarrier;
@@ -499,6 +500,20 @@ class Heap {
return "Unknown collector";
}
+ static inline const char* CollectorName(v8::GCType gc_type) {
+ switch (gc_type) {
+ case kGCTypeScavenge:
+ return "Scavenger";
+ case kGCTypeMarkSweepCompact:
+ return "Mark-Compact";
+ case kGCTypeMinorMarkCompact:
+ return "Minor Mark-Compact";
+ default:
+ break;
+ }
+ return "Unknown collector";
+ }
+
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
@@ -668,18 +683,6 @@ class Heap {
void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk);
V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks();
- void EnableUnprotectedMemoryChunksRegistry() {
- unprotected_memory_chunks_registry_enabled_ = true;
- }
-
- void DisableUnprotectedMemoryChunksRegistry() {
- unprotected_memory_chunks_registry_enabled_ = false;
- }
-
- bool unprotected_memory_chunks_registry_enabled() {
- return unprotected_memory_chunks_registry_enabled_;
- }
-
void IncrementCodePageCollectionMemoryModificationScopeDepth() {
code_page_collection_memory_modification_scope_depth_++;
}
@@ -699,8 +702,14 @@ class Heap {
bool IsTearingDown() const { return gc_state() == TEAR_DOWN; }
bool force_oom() const { return force_oom_; }
+ bool ignore_local_gc_requests() const {
+ return ignore_local_gc_requests_depth_ > 0;
+ }
+
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
+ bool IsGCWithoutStack() const;
+
// If an object has an AllocationMemento trailing it, return it, otherwise
// return a null AllocationMemento.
template <FindMementoMode mode>
@@ -848,7 +857,8 @@ class Heap {
void ReplaceReadOnlySpace(SharedReadOnlySpace* shared_ro_space);
// Sets up the heap memory without creating any objects.
- void SetUpSpaces();
+ void SetUpSpaces(LinearAllocationArea* new_allocation_info,
+ LinearAllocationArea* old_allocation_info);
// Prepares the heap, setting up for deserialization.
void InitializeMainThreadLocalHeap(LocalHeap* main_thread_local_heap);
@@ -880,6 +890,7 @@ class Heap {
NewSpace* new_space() { return new_space_; }
OldSpace* old_space() { return old_space_; }
+ OldSpace* shared_old_space() { return shared_old_space_; }
CodeSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
OldLargeObjectSpace* lo_space() { return lo_space_; }
@@ -902,7 +913,6 @@ class Heap {
}
inline Isolate* isolate();
- inline const Isolate* isolate() const;
MarkCompactCollector* mark_compact_collector() {
return mark_compact_collector_.get();
@@ -1068,7 +1078,7 @@ class Heap {
void IterateStackRoots(RootVisitor* v);
// ===========================================================================
- // Store buffer API. =========================================================
+ // Remembered set API. =======================================================
// ===========================================================================
// Used for query incremental marking status in generated code.
@@ -1078,10 +1088,6 @@ class Heap {
void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; }
- V8_EXPORT_PRIVATE Address* store_buffer_top_address();
- static intptr_t store_buffer_mask_constant();
- static Address store_buffer_overflow_function_address();
-
void ClearRecordedSlot(HeapObject object, ObjectSlot slot);
void ClearRecordedSlotRange(Address start, Address end);
static int InsertIntoRememberedSetFromCode(MemoryChunk* chunk, Address slot);
@@ -1666,6 +1672,10 @@ class Heap {
void UpdateEpochFull();
+ // Ensure that we have swept all spaces in such a way that we can iterate
+ // over all objects.
+ void MakeHeapIterable();
+
private:
using ExternalStringTableUpdaterCallback = String (*)(Heap* heap,
FullObjectSlot pointer);
@@ -1735,7 +1745,6 @@ class Heap {
: callback(callback), gc_type(gc_type), data(data) {}
bool operator==(const GCCallbackTuple& other) const;
- GCCallbackTuple& operator=(const GCCallbackTuple& other) V8_NOEXCEPT;
v8::Isolate::GCCallbackWithData callback;
GCType gc_type;
@@ -1790,17 +1799,14 @@ class Heap {
GarbageCollector SelectGarbageCollector(AllocationSpace space,
const char** reason);
- // Make sure there is a filler value behind the top of the new space
- // so that the GC does not confuse some unintialized/stale memory
- // with the allocation memento of the object at the top
- void EnsureFillerObjectAtTop();
+ // Free all LABs in the heap.
+ void FreeLinearAllocationAreas();
- // Ensure that we have swept all spaces in such a way that we can iterate
- // over all objects. May cause a GC.
- void MakeHeapIterable();
+ // Free all shared LABs.
+ void FreeSharedLinearAllocationAreas();
- // Ensure that LABs of local heaps are iterable.
- void MakeLocalHeapLabsIterable();
+ // Free all shared LABs of main thread.
+ void FreeMainThreadSharedLinearAllocationAreas();
// Performs garbage collection in a safepoint.
// Returns the number of freed global handles.
@@ -1943,7 +1949,7 @@ class Heap {
// reporting/verification activities when compiled with DEBUG set.
void GarbageCollectionPrologue();
void GarbageCollectionPrologueInSafepoint();
- void GarbageCollectionEpilogue();
+ void GarbageCollectionEpilogue(GarbageCollector collector);
void GarbageCollectionEpilogueInSafepoint(GarbageCollector collector);
// Performs a major collection in the whole heap.
@@ -2094,7 +2100,7 @@ class Heap {
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
// This method will try to allocate objects quickly (AllocationType::kYoung)
// otherwise it falls back to a slower path indicated by the mode.
@@ -2103,13 +2109,13 @@ class Heap {
V8_WARN_UNUSED_RESULT inline HeapObject AllocateRawWith(
int size, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
// Call AllocateRawWith with kRetryOrFail. Matches the method in LocalHeap.
V8_WARN_UNUSED_RESULT inline Address AllocateRawOrFail(
int size, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
// This method will try to perform an allocation of a given size of a given
// AllocationType. If the allocation fails, a regular full garbage collection
@@ -2118,7 +2124,7 @@ class Heap {
// returned.
V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithLightRetrySlowPath(
int size, AllocationType allocation, AllocationOrigin origin,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
// This method will try to perform an allocation of a given size of a given
// AllocationType. If the allocation fails, a regular full garbage collection
@@ -2128,7 +2134,7 @@ class Heap {
// If the allocation still fails after that a fatal error is thrown.
V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithRetryOrFailSlowPath(
int size, AllocationType allocation, AllocationOrigin origin,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
// Allocates a heap object based on the map.
V8_WARN_UNUSED_RESULT AllocationResult Allocate(Handle<Map> map,
@@ -2260,7 +2266,8 @@ class Heap {
uintptr_t code_space_memory_modification_scope_depth_ = 0;
// Holds the number of open CodePageCollectionMemoryModificationScopes.
- uintptr_t code_page_collection_memory_modification_scope_depth_ = 0;
+ std::atomic<uintptr_t> code_page_collection_memory_modification_scope_depth_{
+ 0};
std::atomic<HeapState> gc_state_{NOT_IN_GC};
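
The depth counter for CodePageCollectionMemoryModificationScope becomes a std::atomic<uintptr_t> above, so the scope can be entered off the main thread. A small sketch of that counter pattern; the relaxed ordering here is an assumption of the sketch (only the zero/non-zero state is observed), not a statement about the ordering V8 requires:

#include <atomic>
#include <cstdint>

std::atomic<uintptr_t> scope_depth{0};

void EnterScope() { scope_depth.fetch_add(1, std::memory_order_relaxed); }
void LeaveScope() { scope_depth.fetch_sub(1, std::memory_order_relaxed); }
bool InScope() { return scope_depth.load(std::memory_order_relaxed) > 0; }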
@@ -2459,6 +2466,8 @@ class Heap {
std::unique_ptr<CollectionBarrier> collection_barrier_;
+ int ignore_local_gc_requests_depth_ = 0;
+
int gc_callbacks_depth_ = 0;
bool deserialization_complete_ = false;
@@ -2476,7 +2485,6 @@ class Heap {
base::Mutex unprotected_memory_chunks_mutex_;
std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
- bool unprotected_memory_chunks_registry_enabled_ = false;
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
// If the --gc-interval flag is set to a positive value, this
@@ -2516,6 +2524,7 @@ class Heap {
friend class GCTracer;
friend class HeapObjectIterator;
friend class ScavengeTaskObserver;
+ friend class IgnoreLocalGCRequests;
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
friend class LargeObjectSpace;
@@ -2674,6 +2683,15 @@ class V8_NODISCARD CodePageMemoryModificationScope {
DISALLOW_GARBAGE_COLLECTION(no_heap_allocation_)
};
+class V8_NODISCARD IgnoreLocalGCRequests {
+ public:
+ explicit inline IgnoreLocalGCRequests(Heap* heap);
+ inline ~IgnoreLocalGCRequests();
+
+ private:
+ Heap* heap_;
+};
+
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
// point into the heap to a location that has a map pointer at its first word.
@@ -2773,8 +2791,6 @@ class V8_EXPORT_PRIVATE HeapObjectIterator {
private:
HeapObject NextObject();
- DISALLOW_GARBAGE_COLLECTION(no_heap_allocation_)
-
Heap* heap_;
std::unique_ptr<SafepointScope> safepoint_scope_;
HeapObjectsFiltering filtering_;
@@ -2783,6 +2799,8 @@ class V8_EXPORT_PRIVATE HeapObjectIterator {
SpaceIterator* space_iterator_;
// Object iterator for the space currently being iterated.
std::unique_ptr<ObjectIterator> object_iterator_;
+
+ DISALLOW_GARBAGE_COLLECTION(no_heap_allocation_)
};
// Abstract base class for checking whether a weak object should be retained.
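
IgnoreLocalGCRequests, declared earlier in this header, is a V8_NODISCARD scope object whose constructor and destructor adjust ignore_local_gc_requests_depth_ on the Heap. A minimal stand-alone sketch of that RAII shape, with a plain struct standing in for Heap and renamed identifiers:

// Simplified stand-in for Heap: only the field the guard touches.
struct FakeHeap {
  int ignore_local_gc_requests_depth = 0;
  bool ignore_local_gc_requests() const {
    return ignore_local_gc_requests_depth > 0;
  }
};

// RAII guard: local GC requests are ignored for exactly the guard's lifetime.
class [[nodiscard]] IgnoreLocalGcRequestsScope {
 public:
  explicit IgnoreLocalGcRequestsScope(FakeHeap* heap) : heap_(heap) {
    ++heap_->ignore_local_gc_requests_depth;
  }
  ~IgnoreLocalGcRequestsScope() { --heap_->ignore_local_gc_requests_depth; }

 private:
  FakeHeap* heap_;
};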
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index 92489422d4..2dc1555929 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -41,6 +41,14 @@ bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
return false;
}
+void IncrementalMarking::MarkRootObject(Root root, HeapObject obj) {
+ if (heap_->incremental_marking()->WhiteToGreyAndPush(obj)) {
+ if (V8_UNLIKELY(FLAG_track_retaining_path)) {
+ heap_->AddRetainingRoot(root, obj);
+ }
+ }
+}
+
void IncrementalMarking::RestartIfNotMarking() {
if (state_ == COMPLETE) {
state_ = MARKING;
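
The MarkRootObject helper added in the hunk above only records the retaining root when the object was newly pushed and --track_retaining_path is on. A reduced sketch of that guard-the-extra-bookkeeping structure, with hypothetical container types in place of V8's marking state:

#include <string>
#include <unordered_map>
#include <vector>

struct Obj { bool marked = false; };

// Returns true only the first time an object is reached, mirroring
// WhiteToGreyAndPush.
bool WhiteToGreyAndPush(Obj* obj, std::vector<Obj*>* worklist) {
  if (obj->marked) return false;
  obj->marked = true;
  worklist->push_back(obj);
  return true;
}

// Retaining-path bookkeeping happens only for newly marked objects and only
// when the (hypothetical) flag is enabled.
void MarkRootObject(const std::string& root, Obj* obj,
                    std::vector<Obj*>* worklist,
                    std::unordered_map<Obj*, std::string>* retaining_root,
                    bool track_retaining_path) {
  if (WhiteToGreyAndPush(obj, worklist) && track_retaining_path) {
    (*retaining_root)[obj] = root;
  }
}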
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 55f2d6998c..a653877f40 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -47,13 +47,15 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
incremental_marking_->EnsureBlackAllocated(addr, size);
}
-IncrementalMarking::IncrementalMarking(Heap* heap,
- WeakObjects* weak_objects)
+IncrementalMarking::IncrementalMarking(Heap* heap, WeakObjects* weak_objects)
: heap_(heap),
collector_(heap->mark_compact_collector()),
weak_objects_(weak_objects),
new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
- old_generation_observer_(this, kOldGenerationAllocatedThreshold) {
+ old_generation_observer_(this, kOldGenerationAllocatedThreshold),
+ marking_state_(heap->isolate()),
+ atomic_marking_state_(heap->isolate()),
+ non_atomic_marking_state_(heap->isolate()) {
SetState(STOPPED);
}
@@ -109,19 +111,19 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) override {
DCHECK(!MapWord::IsPacked((*p).ptr()));
- MarkObjectByPointer(p);
+ MarkObjectByPointer(root, p);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
for (FullObjectSlot p = start; p < end; ++p) {
DCHECK(!MapWord::IsPacked((*p).ptr()));
- MarkObjectByPointer(p);
+ MarkObjectByPointer(root, p);
}
}
private:
- void MarkObjectByPointer(FullObjectSlot p) {
+ void MarkObjectByPointer(Root root, FullObjectSlot p) {
Object object = *p;
if (!object.IsHeapObject()) return;
DCHECK(!MapWord::IsPacked(object.ptr()));
@@ -129,7 +131,7 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
BasicMemoryChunk* target_page =
BasicMemoryChunk::FromHeapObject(heap_object);
if (target_page->InSharedHeap()) return;
- heap_->incremental_marking()->WhiteToGreyAndPush(heap_object);
+ heap_->incremental_marking()->MarkRootObject(root, heap_object);
}
Heap* heap_;
@@ -231,7 +233,8 @@ void IncrementalMarking::StartMarking() {
heap_->InvokeIncrementalMarkingPrologueCallbacks();
- is_compacting_ = !FLAG_never_compact && collector_->StartCompaction();
+ is_compacting_ = collector_->StartCompaction(
+ MarkCompactCollector::StartCompactionMode::kIncremental);
collector_->StartMarking();
SetState(MARKING);
@@ -435,6 +438,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
collector_->local_marking_worklists()->Publish();
MarkingBarrier::PublishAll(heap());
+ PtrComprCageBase cage_base(heap_->isolate());
collector_->marking_worklists()->Update(
[
#ifdef DEBUG
@@ -444,11 +448,11 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
#ifdef ENABLE_MINOR_MC
minor_marking_state,
#endif
- filler_map](HeapObject obj, HeapObject* out) -> bool {
+ cage_base, filler_map](HeapObject obj, HeapObject* out) -> bool {
DCHECK(obj.IsHeapObject());
// Only pointers to from space have to be updated.
if (Heap::InFromPage(obj)) {
- MapWord map_word = obj.map_word(kRelaxedLoad);
+ MapWord map_word = obj.map_word(cage_base, kRelaxedLoad);
if (!map_word.IsForwardingAddress()) {
// There may be objects on the marking deque that do not exist
// anymore, e.g. left trimmed objects or objects from the root set
@@ -489,10 +493,10 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
return true;
}
DCHECK_IMPLIES(marking_state()->IsWhite(obj),
- obj.IsFreeSpaceOrFiller());
+ obj.IsFreeSpaceOrFiller(cage_base));
// Skip one word filler objects that appear on the
// stack when we perform in place array shift.
- if (obj.map() != filler_map) {
+ if (obj.map(cage_base) != filler_map) {
*out = obj;
return true;
}
@@ -500,6 +504,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
}
});
+ collector_->local_weak_objects()->Publish();
weak_objects_->UpdateAfterScavenge();
}
@@ -638,7 +643,7 @@ void IncrementalMarking::FinalizeMarking(CompletionAction action) {
"[IncrementalMarking] requesting finalization of incremental "
"marking.\n");
}
- request_type_ = FINALIZATION;
+ request_type_ = GCRequestType::FINALIZATION;
if (action == GC_VIA_STACK_GUARD) {
heap_->isolate()->stack_guard()->RequestGC();
}
@@ -708,7 +713,7 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Complete (normal).\n");
}
- request_type_ = COMPLETE_MARKING;
+ request_type_ = GCRequestType::COMPLETE_MARKING;
if (action == GC_VIA_STACK_GUARD) {
heap_->isolate()->stack_guard()->RequestGC();
}
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 40fb9b7dac..5ea92e6bad 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -33,7 +33,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };
- enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };
+ enum class GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };
using MarkingState = MarkCompactCollector::MarkingState;
using AtomicMarkingState = MarkCompactCollector::AtomicMarkingState;
@@ -81,11 +81,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
static constexpr size_t kEmbedderActivationThreshold = 0;
#endif
-#ifdef V8_ATOMIC_MARKING_STATE
static const AccessMode kAtomicity = AccessMode::ATOMIC;
-#else
- static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;
-#endif
IncrementalMarking(Heap* heap, WeakObjects* weak_objects);
@@ -123,17 +119,18 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
inline bool IsComplete() const { return state() == COMPLETE; }
inline bool IsReadyToOverApproximateWeakClosure() const {
- return request_type_ == FINALIZATION && !finalize_marking_completed_;
+ return request_type_ == GCRequestType::FINALIZATION &&
+ !finalize_marking_completed_;
}
inline bool NeedsFinalization() {
- return IsMarking() &&
- (request_type_ == FINALIZATION || request_type_ == COMPLETE_MARKING);
+ return IsMarking() && (request_type_ == GCRequestType::FINALIZATION ||
+ request_type_ == GCRequestType::COMPLETE_MARKING);
}
GCRequestType request_type() const { return request_type_; }
- void reset_request_type() { request_type_ = NONE; }
+ void reset_request_type() { request_type_ = GCRequestType::NONE; }
bool CanBeActivated();
@@ -181,6 +178,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
// from white to grey.
V8_INLINE bool WhiteToGreyAndPush(HeapObject obj);
+ // Marks object referenced from roots.
+ V8_INLINE void MarkRootObject(Root root, HeapObject obj);
+
// This function is used to color the object black before it undergoes an
// unsafe layout change. This is a part of synchronization protocol with
// the concurrent marker.
@@ -310,7 +310,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
bool finalize_marking_completed_ = false;
IncrementalMarkingJob incremental_marking_job_;
- std::atomic<GCRequestType> request_type_{NONE};
+ std::atomic<GCRequestType> request_type_{GCRequestType::NONE};
Observer new_generation_observer_;
Observer old_generation_observer_;
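
GCRequestType becomes a scoped enum in this header, which is why every comparison and the std::atomic initializer are now spelled with the GCRequestType:: prefix. A minimal sketch of the same combination (scoped enum stored in an atomic), with renamed enumerators:

#include <atomic>

enum class GcRequestType { kNone, kCompleteMarking, kFinalization };

// std::atomic works with scoped enums directly; all uses must qualify the
// enumerators, which is the point of the conversion.
std::atomic<GcRequestType> request_type{GcRequestType::kNone};

bool NeedsFinalization() {
  GcRequestType t = request_type.load(std::memory_order_relaxed);
  return t == GcRequestType::kFinalization ||
         t == GcRequestType::kCompleteMarking;
}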
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index 546667b2b2..7d28b750e2 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -9,9 +9,7 @@
#include "src/heap/invalidated-slots.h"
#include "src/heap/spaces.h"
-#include "src/objects/objects-body-descriptors-inl.h"
-#include "src/objects/objects-body-descriptors.h"
-#include "src/objects/objects.h"
+#include "src/objects/objects-inl.h"
#include "src/utils/allocation.h"
namespace v8 {
diff --git a/deps/v8/src/heap/large-spaces.cc b/deps/v8/src/heap/large-spaces.cc
index 63dc4b4e12..7d79c5cdd4 100644
--- a/deps/v8/src/heap/large-spaces.cc
+++ b/deps/v8/src/heap/large-spaces.cc
@@ -186,6 +186,9 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(object));
page->InitializationMemoryFence();
+ if (identity() == CODE_LO_SPACE) {
+ heap()->isolate()->AddCodeMemoryChunk(page);
+ }
return object;
}
@@ -264,7 +267,8 @@ void OldLargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
DCHECK(page->IsLargePage());
DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
- size_t object_size = static_cast<size_t>(page->GetObject().Size());
+ PtrComprCageBase cage_base(heap()->isolate());
+ size_t object_size = static_cast<size_t>(page->GetObject().Size(cage_base));
static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
page->ClearFlag(MemoryChunk::FROM_PAGE);
AddPage(page, object_size);
@@ -297,11 +301,12 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
// Right-trimming does not update the objects_size_ counter. We are lazily
// updating it after every GC.
size_t surviving_object_size = 0;
+ PtrComprCageBase cage_base(heap()->isolate());
while (current) {
LargePage* next_current = current->next_page();
HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
- size_t size = static_cast<size_t>(object.Size());
+ size_t size = static_cast<size_t>(object.Size(cage_base));
if (marking_state->IsBlack(object)) {
Address free_start;
surviving_object_size += size;
@@ -313,7 +318,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
current->size() - (free_start - current->address());
heap()->memory_allocator()->PartialFreeMemory(
current, free_start, bytes_to_free,
- current->area_start() + object.Size());
+ current->area_start() + object.Size(cage_base));
size_ -= bytes_to_free;
AccountUncommitted(bytes_to_free);
}
@@ -403,7 +408,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
// Byte arrays and strings don't have interior pointers.
if (object.IsAbstractCode(cage_base)) {
VerifyPointersVisitor code_visitor(heap());
- object.IterateBody(map, object.Size(), &code_visitor);
+ object.IterateBody(map, object.Size(cage_base), &code_visitor);
} else if (object.IsFixedArray(cage_base)) {
FixedArray array = FixedArray::cast(object);
for (int j = 0; j < array.length(); j++) {
@@ -517,11 +522,12 @@ void NewLargeObjectSpace::FreeDeadObjects(
bool is_marking = heap()->incremental_marking()->IsMarking();
size_t surviving_object_size = 0;
bool freed_pages = false;
+ PtrComprCageBase cage_base(heap()->isolate());
for (auto it = begin(); it != end();) {
LargePage* page = *it;
it++;
HeapObject object = page->GetObject();
- size_t size = static_cast<size_t>(object.Size());
+ size_t size = static_cast<size_t>(object.Size(cage_base));
if (is_dead(object)) {
freed_pages = true;
RemovePage(page, size);
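
The loops above now construct a PtrComprCageBase once and pass it into Size() instead of letting every call re-derive it. A generic, hedged sketch of hoisting that loop-invariant argument, with hypothetical types rather than V8's compressed-pointer machinery:

#include <cstddef>
#include <vector>

struct CageBase { const void* base; };

// Hypothetical object whose size computation needs the cage base (simplified
// here to a stored size).
struct Object {
  size_t size;
  size_t Size(CageBase /*cage_base*/) const { return size; }
};

size_t TotalSize(const std::vector<Object>& objects, const void* heap_base) {
  CageBase cage_base{heap_base};  // computed once, outside the loop
  size_t total = 0;
  for (const Object& obj : objects) total += obj.Size(cage_base);
  return total;
}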
diff --git a/deps/v8/src/heap/linear-allocation-area.h b/deps/v8/src/heap/linear-allocation-area.h
index a03285c046..93d0c16619 100644
--- a/deps/v8/src/heap/linear-allocation-area.h
+++ b/deps/v8/src/heap/linear-allocation-area.h
@@ -5,6 +5,8 @@
#ifndef V8_HEAP_LINEAR_ALLOCATION_AREA_H_
#define V8_HEAP_LINEAR_ALLOCATION_AREA_H_
+// This header file is included outside of src/heap/.
+// Avoid including src/heap/ internals.
#include "include/v8-internal.h"
#include "src/common/checks.h"
@@ -100,6 +102,8 @@ class LinearAllocationArea final {
#endif // DEBUG
}
+ static constexpr int kSize = 3 * kSystemPointerSize;
+
private:
// The start of the LAB. Initially coincides with `top_`. As top is moved
// ahead, the area [start_, top_[ denotes a range of new objects. This range
@@ -111,6 +115,10 @@ class LinearAllocationArea final {
Address limit_ = kNullAddress;
};
+static_assert(sizeof(LinearAllocationArea) == LinearAllocationArea::kSize,
+ "LinearAllocationArea's size must be small because it "
+ "is included in IsolateData.");
+
} // namespace internal
} // namespace v8
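
The static_assert added above pins LinearAllocationArea to three pointer-sized fields because the object is embedded in IsolateData. A sketch of the same size-pinning idiom with stand-in types (assuming, for the sketch, that uintptr_t matches the pointer size):

#include <cstdint>

using Address = uintptr_t;
constexpr int kSystemPointerSize = sizeof(Address);

class LinearAllocationAreaSketch final {
 public:
  static constexpr int kSize = 3 * kSystemPointerSize;

 private:
  Address start_ = 0;
  Address top_ = 0;
  Address limit_ = 0;
};

// Fails to compile if a field is added without updating kSize (and the layout
// assumptions of whatever embeds this object).
static_assert(sizeof(LinearAllocationAreaSketch) ==
                  LinearAllocationAreaSketch::kSize,
              "size must stay in sync with the embedding structure");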
diff --git a/deps/v8/src/heap/local-allocator-inl.h b/deps/v8/src/heap/local-allocator-inl.h
index d28d1a6464..3d769906a6 100644
--- a/deps/v8/src/heap/local-allocator-inl.h
+++ b/deps/v8/src/heap/local-allocator-inl.h
@@ -85,7 +85,7 @@ AllocationResult EvacuationAllocator::AllocateInLAB(
bool EvacuationAllocator::NewLocalAllocationBuffer() {
if (lab_allocation_will_fail_) return false;
AllocationResult result =
- new_space_->AllocateRawSynchronized(kLabSize, kWordAligned);
+ new_space_->AllocateRawSynchronized(kLabSize, kTaggedAligned);
if (result.IsRetry()) {
lab_allocation_will_fail_ = true;
return false;
diff --git a/deps/v8/src/heap/local-factory.cc b/deps/v8/src/heap/local-factory.cc
index a581cfee60..d8c2ce898a 100644
--- a/deps/v8/src/heap/local-factory.cc
+++ b/deps/v8/src/heap/local-factory.cc
@@ -40,7 +40,8 @@ void LocalFactory::AddToScriptList(Handle<Script> shared) {
HeapObject LocalFactory::AllocateRaw(int size, AllocationType allocation,
AllocationAlignment alignment) {
- DCHECK_EQ(allocation, AllocationType::kOld);
+ DCHECK(allocation == AllocationType::kOld ||
+ allocation == AllocationType::kSharedOld);
return HeapObject::FromAddress(isolate()->heap()->AllocateRawOrFail(
size, allocation, AllocationOrigin::kRuntime, alignment));
}
diff --git a/deps/v8/src/heap/local-factory.h b/deps/v8/src/heap/local-factory.h
index 4423e7ff45..8737e3bfa1 100644
--- a/deps/v8/src/heap/local-factory.h
+++ b/deps/v8/src/heap/local-factory.h
@@ -57,7 +57,7 @@ class V8_EXPORT_PRIVATE LocalFactory : public FactoryBase<LocalFactory> {
// ------
// Customization points for FactoryBase.
HeapObject AllocateRaw(int size, AllocationType allocation,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
LocalIsolate* isolate() {
// Downcast to the privately inherited sub-class using c-style casts to
@@ -66,19 +66,29 @@ class V8_EXPORT_PRIVATE LocalFactory : public FactoryBase<LocalFactory> {
// NOLINTNEXTLINE (google-readability-casting)
return (LocalIsolate*)this; // NOLINT(readability/casting)
}
+
+ // This is the real Isolate that will be used for allocating and accessing
+ // external pointer entries when V8_HEAP_SANDBOX is enabled.
+ Isolate* isolate_for_heap_sandbox() {
+#ifdef V8_HEAP_SANDBOX
+ return isolate_for_heap_sandbox_;
+#else
+ return nullptr;
+#endif // V8_HEAP_SANDBOX
+ }
+
inline bool CanAllocateInReadOnlySpace() { return false; }
inline bool EmptyStringRootIsInitialized() { return true; }
inline AllocationType AllocationTypeForInPlaceInternalizableString();
// ------
void AddToScriptList(Handle<Script> shared);
-
- void SetExternalCodeSpaceInDataContainer(CodeDataContainer data_container) {
- UNREACHABLE();
- }
// ------
ReadOnlyRoots roots_;
+#ifdef V8_HEAP_SANDBOX
+ Isolate* isolate_for_heap_sandbox_;
+#endif
#ifdef DEBUG
bool a_script_was_added_to_the_script_list_ = false;
#endif
diff --git a/deps/v8/src/heap/local-heap-inl.h b/deps/v8/src/heap/local-heap-inl.h
index a6dc45a161..030e5b1932 100644
--- a/deps/v8/src/heap/local-heap-inl.h
+++ b/deps/v8/src/heap/local-heap-inl.h
@@ -25,7 +25,7 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap,
- alignment == AllocationAlignment::kWordAligned);
+ alignment == AllocationAlignment::kTaggedAligned);
Heap::HeapState state = heap()->gc_state();
DCHECK(state == Heap::TEAR_DOWN || state == Heap::NOT_IN_GC);
DCHECK(IsRunning());
@@ -47,16 +47,24 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
}
HeapObject object;
if (alloc.To(&object) && !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ heap()->UnprotectAndRegisterMemoryChunk(
+ object, UnprotectMemoryOrigin::kMaybeOffMainThread);
heap()->ZapCodeObject(object.address(), size_in_bytes);
}
return alloc;
}
- CHECK_EQ(type, AllocationType::kOld);
- if (large_object)
- return heap()->lo_space()->AllocateRawBackground(this, size_in_bytes);
- else
- return old_space_allocator()->AllocateRaw(size_in_bytes, alignment, origin);
+ if (type == AllocationType::kOld) {
+ if (large_object)
+ return heap()->lo_space()->AllocateRawBackground(this, size_in_bytes);
+ else
+ return old_space_allocator()->AllocateRaw(size_in_bytes, alignment,
+ origin);
+ }
+
+ DCHECK_EQ(type, AllocationType::kSharedOld);
+ return shared_old_space_allocator()->AllocateRaw(size_in_bytes, alignment,
+ origin);
}
Address LocalHeap::AllocateRawOrFail(int object_size, AllocationType type,
diff --git a/deps/v8/src/heap/local-heap.cc b/deps/v8/src/heap/local-heap.cc
index e5edc993c9..0485158799 100644
--- a/deps/v8/src/heap/local-heap.cc
+++ b/deps/v8/src/heap/local-heap.cc
@@ -8,6 +8,7 @@
#include <memory>
#include "src/base/logging.h"
+#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
@@ -17,6 +18,7 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier.h"
+#include "src/heap/heap.h"
#include "src/heap/local-heap-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/parked-scope.h"
@@ -53,7 +55,9 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
next_(nullptr),
handles_(new LocalHandles),
persistent_handles_(std::move(persistent_handles)) {
+ DCHECK_IMPLIES(!is_main_thread(), heap_->deserialization_complete());
if (!is_main_thread()) SetUp();
+
heap_->safepoint()->AddLocalHeap(this, [this] {
if (!is_main_thread()) {
WriteBarrier::SetForThread(marking_barrier_.get());
@@ -108,6 +112,12 @@ void LocalHeap::SetUp() {
code_space_allocator_ =
std::make_unique<ConcurrentAllocator>(this, heap_->code_space());
+ DCHECK_NULL(shared_old_space_allocator_);
+ if (heap_->isolate()->shared_isolate()) {
+ shared_old_space_allocator_ =
+ std::make_unique<ConcurrentAllocator>(this, heap_->shared_old_space());
+ }
+
DCHECK_NULL(marking_barrier_);
marking_barrier_ = std::make_unique<MarkingBarrier>(this);
}
@@ -173,13 +183,42 @@ void LocalHeap::ParkSlowPath() {
DCHECK(current_state.IsRunning());
if (is_main_thread()) {
- DCHECK(current_state.IsCollectionRequested());
- heap_->CollectGarbageForBackground(this);
+ DCHECK(current_state.IsSafepointRequested() ||
+ current_state.IsCollectionRequested());
+
+ if (current_state.IsSafepointRequested()) {
+ ThreadState old_state = state_.SetParked();
+ heap_->safepoint()->NotifyPark();
+ if (old_state.IsCollectionRequested())
+ heap_->collection_barrier_->CancelCollectionAndResumeThreads();
+ return;
+ }
+
+ if (current_state.IsCollectionRequested()) {
+ if (!heap()->ignore_local_gc_requests()) {
+ heap_->CollectGarbageForBackground(this);
+ continue;
+ }
+
+ DCHECK(!current_state.IsSafepointRequested());
+
+ if (state_.CompareExchangeStrong(current_state,
+ current_state.SetParked())) {
+ heap_->collection_barrier_->CancelCollectionAndResumeThreads();
+ return;
+ } else {
+ continue;
+ }
+ }
} else {
DCHECK(current_state.IsSafepointRequested());
DCHECK(!current_state.IsCollectionRequested());
- CHECK(state_.CompareExchangeStrong(current_state,
- current_state.SetParked()));
+
+ ThreadState old_state = state_.SetParked();
+ CHECK(old_state.IsRunning());
+ CHECK(old_state.IsSafepointRequested());
+ CHECK(!old_state.IsCollectionRequested());
+
heap_->safepoint()->NotifyPark();
return;
}
@@ -196,52 +235,105 @@ void LocalHeap::UnparkSlowPath() {
DCHECK(current_state.IsParked());
if (is_main_thread()) {
- DCHECK(current_state.IsCollectionRequested());
- CHECK(state_.CompareExchangeStrong(current_state,
- current_state.SetRunning()));
- heap_->CollectGarbageForBackground(this);
- return;
+ DCHECK(current_state.IsSafepointRequested() ||
+ current_state.IsCollectionRequested());
+
+ if (current_state.IsSafepointRequested()) {
+ SleepInUnpark();
+ continue;
+ }
+
+ if (current_state.IsCollectionRequested()) {
+ DCHECK(!current_state.IsSafepointRequested());
+
+ if (!state_.CompareExchangeStrong(current_state,
+ current_state.SetRunning()))
+ continue;
+
+ if (!heap()->ignore_local_gc_requests()) {
+ heap_->CollectGarbageForBackground(this);
+ }
+
+ return;
+ }
} else {
DCHECK(current_state.IsSafepointRequested());
DCHECK(!current_state.IsCollectionRequested());
- TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_UNPARK,
- ThreadKind::kBackground);
- heap_->safepoint()->WaitInUnpark();
+
+ SleepInUnpark();
}
}
}
+void LocalHeap::SleepInUnpark() {
+ GCTracer::Scope::ScopeId scope_id;
+ ThreadKind thread_kind;
+
+ if (is_main_thread()) {
+ scope_id = GCTracer::Scope::UNPARK;
+ thread_kind = ThreadKind::kMain;
+ } else {
+ scope_id = GCTracer::Scope::BACKGROUND_UNPARK;
+ thread_kind = ThreadKind::kBackground;
+ }
+
+ TRACE_GC1(heap_->tracer(), scope_id, thread_kind);
+ heap_->safepoint()->WaitInUnpark();
+}
+
void LocalHeap::EnsureParkedBeforeDestruction() {
DCHECK_IMPLIES(!is_main_thread(), IsParked());
}
void LocalHeap::SafepointSlowPath() {
-#ifdef DEBUG
ThreadState current_state = state_.load_relaxed();
DCHECK(current_state.IsRunning());
-#endif
if (is_main_thread()) {
- DCHECK(current_state.IsCollectionRequested());
- heap_->CollectGarbageForBackground(this);
+ DCHECK(current_state.IsSafepointRequested() ||
+ current_state.IsCollectionRequested());
+
+ if (current_state.IsSafepointRequested()) {
+ SleepInSafepoint();
+ }
+
+ if (current_state.IsCollectionRequested()) {
+ heap_->CollectGarbageForBackground(this);
+ }
} else {
DCHECK(current_state.IsSafepointRequested());
DCHECK(!current_state.IsCollectionRequested());
- TRACE_GC1(heap_->tracer(), GCTracer::Scope::BACKGROUND_SAFEPOINT,
- ThreadKind::kBackground);
-
- // Parking the running thread here is an optimization. We do not need to
- // wake this thread up to reach the next safepoint.
- ThreadState old_state = state_.SetParked();
- CHECK(old_state.IsRunning());
- CHECK(old_state.IsSafepointRequested());
- CHECK(!old_state.IsCollectionRequested());
+ SleepInSafepoint();
+ }
+}
- heap_->safepoint()->WaitInSafepoint();
+void LocalHeap::SleepInSafepoint() {
+ GCTracer::Scope::ScopeId scope_id;
+ ThreadKind thread_kind;
- Unpark();
+ if (is_main_thread()) {
+ scope_id = GCTracer::Scope::SAFEPOINT;
+ thread_kind = ThreadKind::kMain;
+ } else {
+ scope_id = GCTracer::Scope::BACKGROUND_SAFEPOINT;
+ thread_kind = ThreadKind::kBackground;
}
+
+ TRACE_GC1(heap_->tracer(), scope_id, thread_kind);
+
+ // Parking the running thread here is an optimization. We do not need to
+ // wake this thread up to reach the next safepoint.
+ ThreadState old_state = state_.SetParked();
+ CHECK(old_state.IsRunning());
+ CHECK(old_state.IsSafepointRequested());
+ CHECK_IMPLIES(old_state.IsCollectionRequested(), is_main_thread());
+
+ heap_->safepoint()->WaitInSafepoint();
+
+ base::Optional<IgnoreLocalGCRequests> ignore_gc_requests;
+ if (is_main_thread()) ignore_gc_requests.emplace(heap());
+ Unpark();
}
void LocalHeap::FreeLinearAllocationArea() {
@@ -249,6 +341,10 @@ void LocalHeap::FreeLinearAllocationArea() {
code_space_allocator_->FreeLinearAllocationArea();
}
+void LocalHeap::FreeSharedLinearAllocationArea() {
+ shared_old_space_allocator_->FreeLinearAllocationArea();
+}
+
void LocalHeap::MakeLinearAllocationAreaIterable() {
old_space_allocator_->MakeLinearAllocationAreaIterable();
code_space_allocator_->MakeLinearAllocationAreaIterable();
@@ -270,7 +366,7 @@ bool LocalHeap::TryPerformCollection() {
return true;
} else {
DCHECK(IsRunning());
- heap_->collection_barrier_->RequestGC();
+ if (!heap_->collection_barrier_->TryRequestGC()) return false;
LocalHeap* main_thread = heap_->main_thread_local_heap();
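
ParkSlowPath and UnparkSlowPath above retry on a compare-exchange of the thread state and branch on which request bits (safepoint vs. collection) are set. A reduced sketch of that retry loop, with an int bitmask standing in for ThreadState; V8's real state and transitions are richer than this:

#include <atomic>

constexpr int kRunning = 1 << 0;
constexpr int kSafepointRequested = 1 << 1;

std::atomic<int> state{kRunning};

// Park the thread: retry until the Running bit is cleared, observing whatever
// request bits are set at that moment.
void Park() {
  int current = state.load(std::memory_order_relaxed);
  while (true) {
    int parked = current & ~kRunning;
    if (state.compare_exchange_strong(current, parked)) return;
    // On failure, compare_exchange_strong reloaded `current`; retry.
  }
}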
diff --git a/deps/v8/src/heap/local-heap.h b/deps/v8/src/heap/local-heap.h
index 8ea5a6f336..0b5e96ac1a 100644
--- a/deps/v8/src/heap/local-heap.h
+++ b/deps/v8/src/heap/local-heap.h
@@ -8,6 +8,7 @@
#include <atomic>
#include <memory>
+#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
@@ -99,6 +100,9 @@ class V8_EXPORT_PRIVATE LocalHeap {
ConcurrentAllocator* code_space_allocator() {
return code_space_allocator_.get();
}
+ ConcurrentAllocator* shared_old_space_allocator() {
+ return shared_old_space_allocator_.get();
+ }
void RegisterCodeObject(Handle<Code> code) {
heap()->RegisterCodeObject(code);
@@ -111,6 +115,9 @@ class V8_EXPORT_PRIVATE LocalHeap {
// Give up linear allocation areas. Used for mark-compact GC.
void FreeLinearAllocationArea();
+ // Free all shared LABs. Used by the shared mark-compact GC.
+ void FreeSharedLinearAllocationArea();
+
// Create filler object in linear allocation areas. Verifying requires
// iterable heap.
void MakeLinearAllocationAreaIterable();
@@ -130,14 +137,14 @@ class V8_EXPORT_PRIVATE LocalHeap {
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
// Allocates an uninitialized object and crashes when object
// cannot be allocated.
V8_WARN_UNUSED_RESULT inline Address AllocateRawOrFail(
int size_in_bytes, AllocationType allocation,
AllocationOrigin origin = AllocationOrigin::kRuntime,
- AllocationAlignment alignment = kWordAligned);
+ AllocationAlignment alignment = kTaggedAligned);
inline void CreateFillerObjectAt(Address addr, int size,
ClearRecordedSlots clear_slots_mode);
@@ -278,6 +285,8 @@ class V8_EXPORT_PRIVATE LocalHeap {
void UnparkSlowPath();
void EnsureParkedBeforeDestruction();
void SafepointSlowPath();
+ void SleepInSafepoint();
+ void SleepInUnpark();
void EnsurePersistentHandles();
@@ -305,13 +314,16 @@ class V8_EXPORT_PRIVATE LocalHeap {
std::unique_ptr<ConcurrentAllocator> old_space_allocator_;
std::unique_ptr<ConcurrentAllocator> code_space_allocator_;
+ std::unique_ptr<ConcurrentAllocator> shared_old_space_allocator_;
friend class CollectionBarrier;
friend class ConcurrentAllocator;
+ friend class GlobalSafepoint;
friend class IsolateSafepoint;
friend class Heap;
friend class Isolate;
friend class ParkedScope;
+ friend class SafepointScope;
friend class UnparkedScope;
};
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index a623360197..e945c34cef 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -45,7 +45,7 @@ void MarkCompactCollector::MarkRootObject(Root root, HeapObject obj) {
void MinorMarkCompactCollector::MarkRootObject(HeapObject obj) {
if (Heap::InYoungGeneration(obj) &&
non_atomic_marking_state_.WhiteToGrey(obj)) {
- worklist_->Push(kMainThreadTask, obj);
+ main_thread_worklist_local_.Push(obj);
}
}
@@ -89,7 +89,7 @@ void MarkCompactCollector::RecordSlot(MemoryChunk* source_page,
}
void MarkCompactCollector::AddTransitionArray(TransitionArray array) {
- weak_objects_.transition_arrays.Push(kMainThreadTask, array);
+ local_weak_objects()->transition_arrays_local.Push(array);
}
template <typename MarkingState>
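
The hunks above replace per-task pushes (kMainThreadTask) with thread-local worklist views that are later Publish()ed to the shared pool. A minimal sketch of that local-buffer-plus-publish pattern, using a mutex-protected vector as the global pool purely for illustration (V8's worklists are segment-based, not mutex-based):

#include <mutex>
#include <vector>

std::vector<int> global_worklist;
std::mutex global_mutex;

// Per-thread view: cheap unsynchronized pushes, one synchronized publish.
class LocalWorklist {
 public:
  void Push(int value) { local_.push_back(value); }

  void Publish() {
    std::lock_guard<std::mutex> guard(global_mutex);
    global_worklist.insert(global_worklist.end(), local_.begin(),
                           local_.end());
    local_.clear();
  }

 private:
  std::vector<int> local_;
};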
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 640b127d19..2977b4219d 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -6,6 +6,7 @@
#include <unordered_map>
+#include "src/base/logging.h"
#include "src/base/optional.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/compilation-cache.h"
@@ -20,6 +21,7 @@
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/gc-tracer.h"
+#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/index-generator.h"
#include "src/heap/invalidated-slots-inl.h"
@@ -39,7 +41,7 @@
#include "src/heap/safepoint.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
-#include "src/heap/worklist.h"
+#include "src/heap/weak-object-worklists.h"
#include "src/ic/stub-cache.h"
#include "src/init/v8.h"
#include "src/logging/tracing-flags.h"
@@ -118,7 +120,9 @@ class MarkingVerifier : public ObjectVisitorWithCageBases, public RootVisitor {
VerifyRootPointers(start, end);
}
- void VisitMapPointer(HeapObject object) override { VerifyMap(object.map()); }
+ void VisitMapPointer(HeapObject object) override {
+ VerifyMap(object.map(cage_base()));
+ }
void VerifyRoots();
void VerifyMarkingOnPage(const Page* page, Address start, Address end);
@@ -147,7 +151,7 @@ void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
if (current >= end) break;
CHECK(IsMarked(object));
CHECK(current >= next_object_must_be_here_or_later);
- object.Iterate(this);
+ object.Iterate(cage_base(), this);
next_object_must_be_here_or_later = current + size;
// The object is either part of a black area of black allocation or a
// regular black object
@@ -189,7 +193,7 @@ void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
LargeObjectSpaceObjectIterator it(lo_space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (IsBlackOrGrey(obj)) {
- obj.Iterate(this);
+ obj.Iterate(cage_base(), this);
}
}
}
@@ -240,6 +244,7 @@ class FullMarkingVerifier : public MarkingVerifier {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
Object maybe_code = slot.load(code_cage_base());
HeapObject code;
+ // The slot might contain smi during CodeDataContainer creation, so skip it.
if (maybe_code.GetHeapObject(&code)) {
VerifyHeapObjectImpl(code);
}
@@ -314,7 +319,9 @@ class EvacuationVerifier : public ObjectVisitorWithCageBases,
VerifyRootPointers(start, end);
}
- void VisitMapPointer(HeapObject object) override { VerifyMap(object.map()); }
+ void VisitMapPointer(HeapObject object) override {
+ VerifyMap(object.map(cage_base()));
+ }
protected:
explicit EvacuationVerifier(Heap* heap)
@@ -345,8 +352,10 @@ void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
Address current = start;
while (current < end) {
HeapObject object = HeapObject::FromAddress(current);
- if (!object.IsFreeSpaceOrFiller(cage_base())) object.Iterate(this);
- current += object.Size();
+ if (!object.IsFreeSpaceOrFiller(cage_base())) {
+ object.Iterate(cage_base(), this);
+ }
+ current += object.Size(cage_base());
}
}
@@ -419,6 +428,7 @@ class FullEvacuationVerifier : public EvacuationVerifier {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
Object maybe_code = slot.load(code_cage_base());
HeapObject code;
+ // The slot might contain smi during CodeDataContainer creation, so skip it.
if (maybe_code.GetHeapObject(&code)) {
VerifyHeapObjectImpl(code);
}
@@ -466,16 +476,12 @@ int MarkCompactCollectorBase::NumberOfParallelCompactionTasks() {
MarkCompactCollector::MarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap),
- page_parallel_job_semaphore_(0),
#ifdef DEBUG
state_(IDLE),
#endif
is_shared_heap_(heap->IsShared()),
- was_marked_incrementally_(false),
- evacuation_(false),
- compacting_(false),
- black_allocation_(false),
- have_code_to_deoptimize_(false),
+ marking_state_(heap->isolate()),
+ non_atomic_marking_state_(heap->isolate()),
sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
}
@@ -490,12 +496,13 @@ void MarkCompactCollector::SetUp() {
void MarkCompactCollector::TearDown() {
AbortCompaction();
- AbortWeakObjects();
if (heap()->incremental_marking()->IsMarking()) {
local_marking_worklists()->Publish();
heap()->marking_barrier()->Publish();
// Marking barriers of LocalHeaps will be published in their destructors.
marking_worklists()->Clear();
+ local_weak_objects()->Publish();
+ weak_objects()->Clear();
}
sweeper()->TearDown();
}
@@ -522,28 +529,32 @@ static void TraceFragmentation(PagedSpace* space) {
static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}
-bool MarkCompactCollector::StartCompaction() {
- if (!compacting_) {
- DCHECK(evacuation_candidates_.empty());
-
- if (FLAG_gc_experiment_less_compaction && !heap_->ShouldReduceMemory())
- return false;
+bool MarkCompactCollector::StartCompaction(StartCompactionMode mode) {
+ DCHECK(!compacting_);
+ DCHECK(evacuation_candidates_.empty());
- CollectEvacuationCandidates(heap()->old_space());
+ // Bailouts for completely disabled compaction.
+ if (!FLAG_compact ||
+ (mode == StartCompactionMode::kAtomic && !heap()->IsGCWithoutStack() &&
+ !FLAG_compact_with_stack) ||
+ (FLAG_gc_experiment_less_compaction && !heap_->ShouldReduceMemory())) {
+ return false;
+ }
- if (FLAG_compact_code_space) {
- CollectEvacuationCandidates(heap()->code_space());
- } else if (FLAG_trace_fragmentation) {
- TraceFragmentation(heap()->code_space());
- }
+ CollectEvacuationCandidates(heap()->old_space());
- if (FLAG_trace_fragmentation) {
- TraceFragmentation(heap()->map_space());
- }
+ if (FLAG_compact_code_space &&
+ (heap()->IsGCWithoutStack() || FLAG_compact_code_space_with_stack)) {
+ CollectEvacuationCandidates(heap()->code_space());
+ } else if (FLAG_trace_fragmentation) {
+ TraceFragmentation(heap()->code_space());
+ }
- compacting_ = !evacuation_candidates_.empty();
+ if (FLAG_trace_fragmentation) {
+ TraceFragmentation(heap()->map_space());
}
+ compacting_ = !evacuation_candidates_.empty();
return compacting_;
}
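
StartCompaction now takes a mode and bails out early when compaction is disabled outright or, in the atomic mode, when a stack is present and compacting with a stack is not allowed. A sketch of that gate structure with hypothetical flag names that only loosely follow the V8 flags:

enum class StartCompactionMode { kIncremental, kAtomic };

struct CompactionConfig {
  bool compact = true;
  bool compact_with_stack = false;
  bool gc_without_stack = false;
};

// Returns whether compaction was started; each disabling condition is an
// early return so the candidate selection below stays unconditional.
bool StartCompaction(StartCompactionMode mode, const CompactionConfig& cfg) {
  if (!cfg.compact) return false;
  if (mode == StartCompactionMode::kAtomic && !cfg.gc_without_stack &&
      !cfg.compact_with_stack) {
    return false;
  }
  // ... collect evacuation candidates here ...
  return true;
}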
@@ -561,9 +572,11 @@ void MarkCompactCollector::StartMarking() {
marking_worklists()->CreateContextWorklists(contexts);
local_marking_worklists_ =
std::make_unique<MarkingWorklists::Local>(marking_worklists());
+ local_weak_objects_ = std::make_unique<WeakObjects::Local>(weak_objects());
marking_visitor_ = std::make_unique<MarkingVisitor>(
- marking_state(), local_marking_worklists(), weak_objects(), heap_,
- epoch(), code_flush_mode(), heap_->local_embedder_heap_tracer()->InUse(),
+ marking_state(), local_marking_worklists(), local_weak_objects_.get(),
+ heap_, epoch(), code_flush_mode(),
+ heap_->local_embedder_heap_tracer()->InUse(),
heap_->ShouldCurrentGCKeepAgesUnchanged());
// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
@@ -731,7 +744,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
const bool in_standard_path =
!(FLAG_manual_evacuation_candidates_selection ||
FLAG_stress_compaction_random || FLAG_stress_compaction ||
- FLAG_always_compact);
+ FLAG_compact_on_every_full_gc);
// Those variables will only be initialized if |in_standard_path|, and are not
// used otherwise.
size_t max_evacuated_bytes;
@@ -843,7 +856,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
for (size_t i = 0; i < pages.size(); i++) {
size_t live_bytes = pages[i].first;
DCHECK_GE(area_size, live_bytes);
- if (FLAG_always_compact ||
+ if (FLAG_compact_on_every_full_gc ||
((total_live_bytes + live_bytes) <= max_evacuated_bytes)) {
candidate_count++;
total_live_bytes += live_bytes;
@@ -866,7 +879,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
DCHECK_LE(estimated_new_pages, candidate_count);
int estimated_released_pages = candidate_count - estimated_new_pages;
// Avoid (compact -> expand) cycles.
- if ((estimated_released_pages == 0) && !FLAG_always_compact) {
+ if ((estimated_released_pages == 0) && !FLAG_compact_on_every_full_gc) {
candidate_count = 0;
}
for (int i = 0; i < candidate_count; i++) {
@@ -906,7 +919,6 @@ void MarkCompactCollector::Prepare() {
state_ = PREPARE_GC;
#endif
- DCHECK(!FLAG_never_compact || !FLAG_always_compact);
DCHECK(!sweeping_in_progress());
if (!was_marked_incrementally_) {
@@ -915,22 +927,18 @@ void MarkCompactCollector::Prepare() {
heap_->local_embedder_heap_tracer()->TracePrologue(
heap_->flags_for_embedder_tracer());
}
- if (!FLAG_never_compact) {
- StartCompaction();
- }
+ StartCompaction(StartCompactionMode::kAtomic);
StartMarking();
}
+ heap_->FreeLinearAllocationAreas();
+
PagedSpaceIterator spaces(heap());
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
space->PrepareForMarkCompact();
}
- // Fill and reset all background thread LABs
- heap_->safepoint()->IterateLocalHeaps(
- [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
-
// All objects are guaranteed to be initialized in atomic pause
if (heap()->new_lo_space()) {
heap()->new_lo_space()->ResetPendingObject();
@@ -987,6 +995,8 @@ void MarkCompactCollector::Finish() {
CHECK(weak_objects_.current_ephemerons.IsEmpty());
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
+ local_weak_objects_->next_ephemerons_local.Publish();
+ local_weak_objects_.reset();
weak_objects_.next_ephemerons.Clear();
sweeper()->StartSweeperTasks();
@@ -1040,6 +1050,36 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
}
}
+ void VisitRunningCode(FullObjectSlot p) final {
+ Code code = Code::cast(*p);
+
+ // If Code is currently executing, then we must not remove its
+ // deoptimization literals, which it might need in order to successfully
+ // deoptimize.
+ //
+ // Must match behavior in RootsReferencesExtractor::VisitRunningCode, so
+ // that heap snapshots accurately describe the roots.
+ if (code.kind() != CodeKind::BASELINE) {
+ DeoptimizationData deopt_data =
+ DeoptimizationData::cast(code.deoptimization_data());
+ if (deopt_data.length() > 0) {
+ DeoptimizationLiteralArray literals = deopt_data.LiteralArray();
+ int literals_length = literals.length();
+ for (int i = 0; i < literals_length; ++i) {
+ MaybeObject maybe_literal = literals.Get(i);
+ HeapObject heap_literal;
+ if (maybe_literal.GetHeapObject(&heap_literal)) {
+ MarkObjectByPointer(Root::kStackRoots,
+ FullObjectSlot(&heap_literal));
+ }
+ }
+ }
+ }
+
+ // And then mark the Code itself.
+ VisitRootPointer(Root::kStackRoots, nullptr, p);
+ }
+
private:
V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p) {
Object object = *p;
@@ -1090,9 +1130,7 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- // At the moment, custom roots cannot contain CodeDataContainers - the only
- // objects that can contain Code pointers.
- UNREACHABLE();
+ MarkObject(host, slot.load(code_cage_base()));
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
@@ -1101,11 +1139,11 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
UNREACHABLE();
}
- // VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
MarkObject(host, target);
}
+
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
MarkObject(host, rinfo->target_object(cage_base()));
}
@@ -1119,6 +1157,70 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
MarkCompactCollector* const collector_;
};
+class MarkCompactCollector::SharedHeapObjectVisitor final
+ : public ObjectVisitorWithCageBases {
+ public:
+ explicit SharedHeapObjectVisitor(MarkCompactCollector* collector)
+ : ObjectVisitorWithCageBases(collector->isolate()),
+ collector_(collector) {}
+
+ void VisitPointer(HeapObject host, ObjectSlot p) final {
+ MarkObject(host, p.load(cage_base()));
+ }
+
+ void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
+ MaybeObject object = p.load(cage_base());
+ HeapObject heap_object;
+ if (object.GetHeapObject(&heap_object)) MarkObject(host, heap_object);
+ }
+
+ void VisitMapPointer(HeapObject host) final {
+ MarkObject(host, host.map(cage_base()));
+ }
+
+ void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
+ for (ObjectSlot p = start; p < end; ++p) {
+ // The map slot should be handled in VisitMapPointer.
+ DCHECK_NE(host.map_slot(), p);
+ DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
+ MarkObject(host, p.load(cage_base()));
+ }
+ }
+
+ void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ MarkObject(host, slot.load(code_cage_base()));
+ }
+
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ for (MaybeObjectSlot p = start; p < end; ++p) {
+ // The map slot should be handled in VisitMapPointer.
+ DCHECK_NE(host.map_slot(), ObjectSlot(p));
+ VisitPointer(host, p);
+ }
+ }
+
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ MarkObject(host, target);
+ }
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ MarkObject(host, rinfo->target_object(cage_base()));
+ }
+
+ private:
+ V8_INLINE void MarkObject(HeapObject host, Object object) {
+ DCHECK(!BasicMemoryChunk::FromHeapObject(host)->InSharedHeap());
+ if (!object.IsHeapObject()) return;
+ HeapObject heap_object = HeapObject::cast(object);
+ if (!BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap()) return;
+ collector_->MarkObject(host, heap_object);
+ }
+
+ MarkCompactCollector* const collector_;
+};
+
class InternalizedStringTableCleaner : public RootVisitor {
public:
explicit InternalizedStringTableCleaner(Heap* heap)
@@ -1389,6 +1491,14 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
protected:
enum MigrationMode { kFast, kObserved };
+ PtrComprCageBase cage_base() {
+#if V8_COMPRESS_POINTERS
+ return PtrComprCageBase{heap_->isolate()};
+#else
+ return PtrComprCageBase{};
+#endif // V8_COMPRESS_POINTERS
+ }
+
using MigrateFunction = void (*)(EvacuateVisitorBase* base, HeapObject dst,
HeapObject src, int size,
AllocationSpace dest);
@@ -1398,7 +1508,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
HeapObject src, int size, AllocationSpace dest) {
Address dst_addr = dst.address();
Address src_addr = src.address();
- DCHECK(base->heap_->AllowedToBeMigrated(src.map(), src, dest));
+ PtrComprCageBase cage_base = base->cage_base();
+ DCHECK(base->heap_->AllowedToBeMigrated(src.map(cage_base), src, dest));
DCHECK_NE(dest, LO_SPACE);
DCHECK_NE(dest, CODE_LO_SPACE);
if (dest == OLD_SPACE) {
@@ -1407,7 +1518,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
- dst.IterateBodyFast(dst.map(), size, base->record_visitor_);
+ dst.IterateBodyFast(dst.map(cage_base), size, base->record_visitor_);
if (V8_UNLIKELY(FLAG_minor_mc)) {
base->record_visitor_->MarkArrayBufferExtensionPromoted(dst);
}
@@ -1418,7 +1529,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
code.Relocate(dst_addr - src_addr);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
- dst.IterateBodyFast(dst.map(), size, base->record_visitor_);
+ dst.IterateBodyFast(dst.map(cage_base), size, base->record_visitor_);
} else {
DCHECK_OBJECT_SIZE(size);
DCHECK(dest == NEW_SPACE);
@@ -1447,7 +1558,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
if (FLAG_stress_compaction && AbortCompactionForTesting(object))
return false;
#endif // DEBUG
- Map map = object.map();
+ Map map = object.map(cage_base());
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation;
if (ShouldPromoteIntoSharedHeap(map)) {
@@ -1524,7 +1635,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
Heap* heap, EvacuationAllocator* local_allocator,
RecordMigratedSlotVisitor* record_visitor,
Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
- bool always_promote_young)
+ AlwaysPromoteYoung always_promote_young)
: EvacuateVisitorBase(heap, local_allocator, record_visitor),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
promoted_size_(0),
@@ -1537,7 +1648,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
if (TryEvacuateWithoutCopy(object)) return true;
HeapObject target_object;
- if (always_promote_young_) {
+ if (always_promote_young_ == AlwaysPromoteYoung::kYes) {
heap_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
@@ -1621,7 +1732,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
intptr_t semispace_copied_size_;
Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
bool is_incremental_marking_;
- bool always_promote_young_;
+ AlwaysPromoteYoung always_promote_young_;
};
template <PageEvacuationMode mode>
@@ -1656,7 +1767,9 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
heap_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
} else if (mode == NEW_TO_OLD) {
- object.IterateBodyFast(record_visitor_);
+ DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
+ PtrComprCageBase cage_base = GetPtrComprCageBase(object);
+ object.IterateBodyFast(cage_base, record_visitor_);
if (V8_UNLIKELY(FLAG_minor_mc)) {
record_visitor_->MarkArrayBufferExtensionPromoted(object);
}
@@ -1684,7 +1797,8 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
HeapObject target_object;
if (TryEvacuateObject(Page::FromHeapObject(object)->owner_identity(),
object, size, &target_object)) {
- DCHECK(object.map_word(kRelaxedLoad).IsForwardingAddress());
+ DCHECK(object.map_word(heap_->isolate(), kRelaxedLoad)
+ .IsForwardingAddress());
return true;
}
return false;
@@ -1698,7 +1812,9 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
inline bool Visit(HeapObject object, int size) override {
RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector(),
&heap_->ephemeron_remembered_set_);
- object.IterateBodyFast(&visitor);
+ DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
+ PtrComprCageBase cage_base = GetPtrComprCageBase(object);
+ object.IterateBodyFast(cage_base, &visitor);
return true;
}
@@ -1724,9 +1840,28 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
// Custom marking for top optimized frame.
ProcessTopOptimizedFrame(custom_root_body_visitor, isolate());
- isolate()->IterateClientIsolates(
- [this, custom_root_body_visitor](Isolate* client) {
- ProcessTopOptimizedFrame(custom_root_body_visitor, client);
+ if (isolate()->is_shared()) {
+ isolate()->global_safepoint()->IterateClientIsolates(
+ [this, custom_root_body_visitor](Isolate* client) {
+ ProcessTopOptimizedFrame(custom_root_body_visitor, client);
+ });
+ }
+}
+
+void MarkCompactCollector::MarkObjectsFromClientHeaps() {
+ if (!isolate()->is_shared()) return;
+
+ SharedHeapObjectVisitor visitor(this);
+
+ isolate()->global_safepoint()->IterateClientIsolates(
+ [&visitor](Isolate* client) {
+ Heap* heap = client->heap();
+ HeapObjectIterator iterator(heap, HeapObjectIterator::kNoFiltering);
+ PtrComprCageBase cage_base(client);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
+ obj.IterateFast(cage_base, &visitor);
+ }
});
}
@@ -1739,7 +1874,7 @@ void MarkCompactCollector::RevisitObject(HeapObject obj) {
DCHECK_IMPLIES(MemoryChunk::FromHeapObject(obj)->ProgressBar().IsEnabled(),
0u == MemoryChunk::FromHeapObject(obj)->ProgressBar().Value());
MarkingVisitor::RevisitScope revisit(marking_visitor_.get());
- marking_visitor_->Visit(obj.map(), obj);
+ marking_visitor_->Visit(obj.map(marking_visitor_->cage_base()), obj);
}
bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
@@ -1758,7 +1893,9 @@ bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
// Move ephemerons from next_ephemerons into current_ephemerons to
// drain them in this iteration.
- weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
+ DCHECK(
+ local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
+ weak_objects_.current_ephemerons.Swap(&weak_objects_.next_ephemerons);
heap()->concurrent_marking()->set_another_ephemeron_iteration(false);
{
@@ -1774,8 +1911,10 @@ bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
FinishConcurrentMarking();
}
- CHECK(weak_objects_.current_ephemerons.IsEmpty());
- CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
+ CHECK(
+ local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
+ CHECK(local_weak_objects()
+ ->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
++iterations;
} while (another_ephemeron_iteration_main_thread ||
@@ -1785,8 +1924,9 @@ bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
CHECK(local_marking_worklists()->IsEmpty());
- CHECK(weak_objects_.current_ephemerons.IsEmpty());
- CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
+ CHECK(local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
+ CHECK(local_weak_objects()
+ ->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
return true;
}
@@ -1796,7 +1936,7 @@ bool MarkCompactCollector::ProcessEphemerons() {
// Drain current_ephemerons and push ephemerons where key and value are still
// unreachable into next_ephemerons.
- while (weak_objects_.current_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
+ while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
another_ephemeron_iteration = true;
}
@@ -1815,15 +1955,15 @@ bool MarkCompactCollector::ProcessEphemerons() {
// Drain discovered_ephemerons (filled in the drain MarkingWorklist-phase
// before) and push ephemerons where key and value are still unreachable into
// next_ephemerons.
- while (weak_objects_.discovered_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
+ while (local_weak_objects()->discovered_ephemerons_local.Pop(&ephemeron)) {
if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
another_ephemeron_iteration = true;
}
}
// Flush local ephemerons for main task to global pool.
- weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThreadTask);
- weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
+ local_weak_objects()->ephemeron_hash_tables_local.Publish();
+ local_weak_objects()->next_ephemerons_local.Publish();
return another_ephemeron_iteration;
}
@@ -1835,10 +1975,10 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
std::unordered_multimap<HeapObject, HeapObject, Object::Hasher> key_to_values;
Ephemeron ephemeron;
- DCHECK(weak_objects_.current_ephemerons.IsEmpty());
- weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
-
- while (weak_objects_.current_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
+ DCHECK(
+ local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
+ weak_objects_.current_ephemerons.Swap(&weak_objects_.next_ephemerons);
+ while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
ProcessEphemeron(ephemeron.key, ephemeron.value);
if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
@@ -1865,8 +2005,7 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
kTrackNewlyDiscoveredObjects>(0);
}
- while (
- weak_objects_.discovered_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
+ while (local_weak_objects()->discovered_ephemerons_local.Pop(&ephemeron)) {
ProcessEphemeron(ephemeron.key, ephemeron.value);
if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
@@ -1877,6 +2016,7 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
if (ephemeron_marking_.newly_discovered_overflowed) {
// If newly_discovered was overflowed just visit all ephemerons in
// next_ephemerons.
+ local_weak_objects()->next_ephemerons_local.Publish();
weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
@@ -1904,7 +2044,8 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
work_to_do = !local_marking_worklists()->IsEmpty() ||
!local_marking_worklists()->IsEmbedderEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
- CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
+ CHECK(local_weak_objects()
+ ->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
}
ResetNewlyDiscovered();
@@ -1915,8 +2056,8 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
// Flush local ephemerons for main task to global pool.
- weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThreadTask);
- weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
+ local_weak_objects()->ephemeron_hash_tables_local.Publish();
+ local_weak_objects()->next_ephemerons_local.Publish();
}
void MarkCompactCollector::PerformWrapperTracing() {
@@ -2007,9 +2148,8 @@ bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
}
} else if (marking_state()->IsWhite(value)) {
- weak_objects_.next_ephemerons.Push(kMainThreadTask, Ephemeron{key, value});
+ local_weak_objects()->next_ephemerons_local.Push(Ephemeron{key, value});
}
-
return false;
}
@@ -2018,7 +2158,7 @@ void MarkCompactCollector::ProcessEphemeronMarking() {
// Incremental marking might leave ephemerons in main task's local
// buffer, flush it into global pool.
- weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
+ local_weak_objects()->next_ephemerons_local.Publish();
if (!ProcessEphemeronsUntilFixpoint()) {
// Fixpoint iteration needed too many iterations and was cancelled. Use the
@@ -2030,9 +2170,10 @@ void MarkCompactCollector::ProcessEphemeronMarking() {
if (FLAG_verify_heap) {
Ephemeron ephemeron;
- weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
-
- while (weak_objects_.current_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
+ DCHECK(
+ local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
+ weak_objects_.current_ephemerons.Swap(&weak_objects_.next_ephemerons);
+ while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
CHECK(!ProcessEphemeron(ephemeron.key, ephemeron.value));
}
}
@@ -2050,7 +2191,8 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor,
if (it.frame()->type() == StackFrame::OPTIMIZED) {
Code code = it.frame()->LookupCode();
if (!code.CanDeoptAt(isolate, it.frame()->pc())) {
- Code::BodyDescriptor::IterateBody(code.map(), code, visitor);
+ PtrComprCageBase cage_base(isolate);
+ Code::BodyDescriptor::IterateBody(code.map(cage_base), code, visitor);
}
return;
}
@@ -2116,6 +2258,11 @@ void MarkCompactCollector::MarkLiveObjects() {
}
{
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_CLIENT_HEAPS);
+ MarkObjectsFromClientHeaps();
+ }
+
+ {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
if (FLAG_parallel_marking) {
heap_->concurrent_marking()->RescheduleJobIfNeeded(
@@ -2279,8 +2426,8 @@ void MarkCompactCollector::ClearNonLiveReferences() {
void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
std::pair<HeapObject, Code> weak_object_in_code;
- while (weak_objects_.weak_objects_in_code.Pop(kMainThreadTask,
- &weak_object_in_code)) {
+ while (local_weak_objects()->weak_objects_in_code_local.Pop(
+ &weak_object_in_code)) {
HeapObject object = weak_object_in_code.first;
Code code = weak_object_in_code.second;
if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
@@ -2395,8 +2542,8 @@ void MarkCompactCollector::ProcessOldCodeCandidates() {
DCHECK(FLAG_flush_bytecode || FLAG_flush_baseline_code ||
weak_objects_.code_flushing_candidates.IsEmpty());
SharedFunctionInfo flushing_candidate;
- while (weak_objects_.code_flushing_candidates.Pop(kMainThreadTask,
- &flushing_candidate)) {
+ while (local_weak_objects()->code_flushing_candidates_local.Pop(
+ &flushing_candidate)) {
bool is_bytecode_live = non_atomic_marking_state()->IsBlackOrGrey(
flushing_candidate.GetBytecodeArray(isolate()));
if (FLAG_flush_baseline_code && flushing_candidate.HasBaselineCode()) {
@@ -2446,8 +2593,8 @@ void MarkCompactCollector::ProcessOldCodeCandidates() {
void MarkCompactCollector::ClearFlushedJsFunctions() {
DCHECK(FLAG_flush_bytecode || weak_objects_.flushed_js_functions.IsEmpty());
JSFunction flushed_js_function;
- while (weak_objects_.flushed_js_functions.Pop(kMainThreadTask,
- &flushed_js_function)) {
+ while (local_weak_objects()->flushed_js_functions_local.Pop(
+ &flushed_js_function)) {
auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
Object target) {
RecordSlot(object, slot, HeapObject::cast(target));
@@ -2460,8 +2607,8 @@ void MarkCompactCollector::ProcessFlushedBaselineCandidates() {
DCHECK(FLAG_flush_baseline_code ||
weak_objects_.baseline_flushing_candidates.IsEmpty());
JSFunction flushed_js_function;
- while (weak_objects_.baseline_flushing_candidates.Pop(kMainThreadTask,
- &flushed_js_function)) {
+ while (local_weak_objects()->baseline_flushing_candidates_local.Pop(
+ &flushed_js_function)) {
auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
Object target) {
RecordSlot(object, slot, HeapObject::cast(target));
@@ -2477,7 +2624,7 @@ void MarkCompactCollector::ProcessFlushedBaselineCandidates() {
void MarkCompactCollector::ClearFullMapTransitions() {
TransitionArray array;
- while (weak_objects_.transition_arrays.Pop(kMainThreadTask, &array)) {
+ while (local_weak_objects()->transition_arrays_local.Pop(&array)) {
int num_transitions = array.number_of_entries();
if (num_transitions > 0) {
Map map;
@@ -2655,8 +2802,7 @@ void MarkCompactCollector::TrimEnumCache(Map map, DescriptorArray descriptors) {
void MarkCompactCollector::ClearWeakCollections() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
EphemeronHashTable table;
-
- while (weak_objects_.ephemeron_hash_tables.Pop(kMainThreadTask, &table)) {
+ while (local_weak_objects()->ephemeron_hash_tables_local.Pop(&table)) {
for (InternalIndex i : table.IterateEntries()) {
HeapObject key = HeapObject::cast(table.KeyAt(i));
#ifdef VERIFY_HEAP
@@ -2689,7 +2835,7 @@ void MarkCompactCollector::ClearWeakReferences() {
std::pair<HeapObject, HeapObjectSlot> slot;
HeapObjectReference cleared_weak_ref =
HeapObjectReference::ClearedValue(isolate());
- while (weak_objects_.weak_references.Pop(kMainThreadTask, &slot)) {
+ while (local_weak_objects()->weak_references_local.Pop(&slot)) {
HeapObject value;
// The slot could have been overwritten, so we have to treat it
// as MaybeObjectSlot.
@@ -2712,7 +2858,7 @@ void MarkCompactCollector::ClearWeakReferences() {
void MarkCompactCollector::ClearJSWeakRefs() {
JSWeakRef weak_ref;
- while (weak_objects_.js_weak_refs.Pop(kMainThreadTask, &weak_ref)) {
+ while (local_weak_objects()->js_weak_refs_local.Pop(&weak_ref)) {
HeapObject target = HeapObject::cast(weak_ref.target());
if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
weak_ref.set_target(ReadOnlyRoots(isolate()).undefined_value());
@@ -2723,7 +2869,7 @@ void MarkCompactCollector::ClearJSWeakRefs() {
}
}
WeakCell weak_cell;
- while (weak_objects_.weak_cells.Pop(kMainThreadTask, &weak_cell)) {
+ while (local_weak_objects()->weak_cells_local.Pop(&weak_cell)) {
auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
Object target) {
if (target.IsHeapObject()) {
@@ -2777,21 +2923,6 @@ void MarkCompactCollector::ClearJSWeakRefs() {
heap()->PostFinalizationRegistryCleanupTaskIfNeeded();
}
-void MarkCompactCollector::AbortWeakObjects() {
- weak_objects_.transition_arrays.Clear();
- weak_objects_.ephemeron_hash_tables.Clear();
- weak_objects_.current_ephemerons.Clear();
- weak_objects_.next_ephemerons.Clear();
- weak_objects_.discovered_ephemerons.Clear();
- weak_objects_.weak_references.Clear();
- weak_objects_.weak_objects_in_code.Clear();
- weak_objects_.js_weak_refs.Clear();
- weak_objects_.weak_cells.Clear();
- weak_objects_.code_flushing_candidates.Clear();
- weak_objects_.baseline_flushing_candidates.Clear();
- weak_objects_.flushed_js_functions.Clear();
-}
-
bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
}
@@ -2834,6 +2965,12 @@ void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
HeapObject target) {
RecordRelocSlotInfo info = PrepareRecordRelocSlot(host, rinfo, target);
if (info.should_record) {
+    // Access to TypeSlots needs to be protected, since LocalHeaps might
+ // publish code in the background thread.
+ base::Optional<base::MutexGuard> opt_guard;
+ if (FLAG_concurrent_sparkplug) {
+ opt_guard.emplace(info.memory_chunk->mutex());
+ }
RememberedSet<OLD_TO_OLD>::InsertTyped(info.memory_chunk, info.slot_type,
info.offset);
}
@@ -2978,10 +3115,13 @@ static inline SlotCallbackResult UpdateStrongCodeSlot(
} // namespace
+static constexpr bool kClientHeap = true;
+
// Visitor for updating root pointers and to-space pointers.
// It does not expect to encounter pointers to dead objects.
-class PointersUpdatingVisitor : public ObjectVisitorWithCageBases,
- public RootVisitor {
+template <bool in_client_heap = false>
+class PointersUpdatingVisitor final : public ObjectVisitorWithCageBases,
+ public RootVisitor {
public:
explicit PointersUpdatingVisitor(Heap* heap)
: ObjectVisitorWithCageBases(heap) {}
@@ -3035,14 +3175,34 @@ class PointersUpdatingVisitor : public ObjectVisitorWithCageBases,
}
}
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- // This visitor nevers visits code objects.
- UNREACHABLE();
+ void VisitMapPointer(HeapObject object) override {
+ if (in_client_heap) {
+ UpdateStrongSlotInternal(cage_base(), object.map_slot());
+ } else {
+ UNREACHABLE();
+ }
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
- // This visitor nevers visits code objects.
- UNREACHABLE();
+ if (in_client_heap) {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ CHECK_WITH_MSG(!target.InSharedHeap(),
+ "refs into shared heap not yet supported here.");
+ } else {
+      // This visitor never visits code objects.
+ UNREACHABLE();
+ }
+ }
+
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
+ if (in_client_heap) {
+ HeapObject target = rinfo->target_object(cage_base());
+ CHECK_WITH_MSG(!target.InSharedHeap(),
+ "refs into shared heap not yet supported here.");
+ } else {
+      // This visitor never visits code objects.
+ UNREACHABLE();
+ }
}
private:
@@ -3120,13 +3280,13 @@ void MarkCompactCollector::EvacuatePrologue() {
}
void MarkCompactCollector::EvacuateEpilogue() {
- aborted_evacuation_candidates_.clear();
+ aborted_evacuation_candidates_due_to_oom_.clear();
+ aborted_evacuation_candidates_due_to_flags_.clear();
// New space.
if (heap()->new_space()) {
heap()->new_space()->set_age_mark(heap()->new_space()->top());
- DCHECK_IMPLIES(FLAG_always_promote_young_mc,
- heap()->new_space()->Size() == 0);
+ DCHECK_EQ(0, heap()->new_space()->Size());
}
// Deallocate unmarked large objects.
@@ -3195,7 +3355,8 @@ class Evacuator : public Malloced {
}
Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor,
- EvacuationAllocator* local_allocator, bool always_promote_young)
+ EvacuationAllocator* local_allocator,
+ AlwaysPromoteYoung always_promote_young)
: heap_(heap),
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
new_space_visitor_(heap_, local_allocator, record_visitor,
@@ -3307,7 +3468,7 @@ class FullEvacuator : public Evacuator {
public:
explicit FullEvacuator(MarkCompactCollector* collector)
: Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
- FLAG_always_promote_young_mc),
+ AlwaysPromoteYoung::kYes),
record_visitor_(collector, &ephemeron_remembered_set_),
local_allocator_(heap_,
CompactionSpaceKind::kCompactionSpaceForMarkCompact),
@@ -3386,8 +3547,8 @@ void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
} else {
// Aborted compaction page. Actual processing happens on the main
// thread for simplicity reasons.
- collector_->ReportAbortedEvacuationCandidate(failed_object.address(),
- chunk);
+ collector_->ReportAbortedEvacuationCandidateDueToOOM(
+ failed_object.address(), static_cast<Page*>(chunk));
}
}
break;
@@ -3486,13 +3647,14 @@ size_t MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
return wanted_num_tasks;
}
-bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes,
- bool always_promote_young) {
+bool MarkCompactCollectorBase::ShouldMovePage(
+ Page* p, intptr_t live_bytes, AlwaysPromoteYoung always_promote_young) {
const bool reduce_memory = heap()->ShouldReduceMemory();
const Address age_mark = heap()->new_space()->age_mark();
return !reduce_memory && !p->NeverEvacuate() &&
(live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
- (always_promote_young || !p->Contains(age_mark)) &&
+ (always_promote_young == AlwaysPromoteYoung::kYes ||
+ !p->Contains(age_mark)) &&
heap()->CanExpandOldGeneration(live_bytes);
}
@@ -3526,24 +3688,33 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
if (live_bytes_on_page == 0) continue;
live_bytes += live_bytes_on_page;
- if (ShouldMovePage(page, live_bytes_on_page,
- FLAG_always_promote_young_mc)) {
- if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) ||
- FLAG_always_promote_young_mc) {
- EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
- DCHECK_EQ(heap()->old_space(), page->owner());
- // The move added page->allocated_bytes to the old space, but we are
- // going to sweep the page and add page->live_byte_count.
- heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
- page);
- } else {
- EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
- }
+ if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kYes)) {
+ EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
+ DCHECK_EQ(heap()->old_space(), page->owner());
+ // The move added page->allocated_bytes to the old space, but we are
+ // going to sweep the page and add page->live_byte_count.
+ heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
+ page);
}
evacuation_items.emplace_back(ParallelWorkItem{}, page);
}
+ if (!heap()->IsGCWithoutStack()) {
+ if (!FLAG_compact_with_stack || !FLAG_compact_code_space_with_stack) {
+ for (Page* page : old_space_evacuation_pages_) {
+ if (!FLAG_compact_with_stack || page->owner_identity() == CODE_SPACE) {
+ ReportAbortedEvacuationCandidateDueToFlags(page->area_start(), page);
+ // Set this flag early on in this case to allow filtering such pages
+ // below.
+ page->SetFlag(Page::COMPACTION_WAS_ABORTED);
+ }
+ }
+ }
+ }
+
for (Page* page : old_space_evacuation_pages_) {
+ if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) continue;
+
live_bytes += non_atomic_marking_state()->live_bytes(page);
evacuation_items.emplace_back(ParallelWorkItem{}, page);
}
@@ -3567,16 +3738,16 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
}
}
- if (evacuation_items.empty()) return;
-
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "MarkCompactCollector::EvacuatePagesInParallel", "pages",
- evacuation_items.size());
-
const size_t pages_count = evacuation_items.size();
- const size_t wanted_num_tasks =
- CreateAndExecuteEvacuationTasks<FullEvacuator>(
- this, std::move(evacuation_items), nullptr);
+ size_t wanted_num_tasks = 0;
+ if (!evacuation_items.empty()) {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "MarkCompactCollector::EvacuatePagesInParallel", "pages",
+ evacuation_items.size());
+
+ wanted_num_tasks = CreateAndExecuteEvacuationTasks<FullEvacuator>(
+ this, std::move(evacuation_items), nullptr);
+ }
// After evacuation there might still be swept pages that weren't
// added to one of the compaction space but still reside in the
@@ -3866,10 +4037,10 @@ class ToSpaceUpdatingItem : public UpdatingItem {
void ProcessVisitAll() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"ToSpaceUpdatingItem::ProcessVisitAll");
- PointersUpdatingVisitor visitor(heap_);
+ PointersUpdatingVisitor<> visitor(heap_);
for (Address cur = start_; cur < end_;) {
HeapObject object = HeapObject::FromAddress(cur);
- Map map = object.map();
+ Map map = object.map(visitor.cage_base());
int size = object.SizeFromMap(map);
object.IterateBodyFast(map, size, &visitor);
cur += size;
@@ -3881,10 +4052,10 @@ class ToSpaceUpdatingItem : public UpdatingItem {
"ToSpaceUpdatingItem::ProcessVisitLive");
// For young generation evacuations we want to visit grey objects, for
// full MC, we need to visit black objects.
- PointersUpdatingVisitor visitor(heap_);
+ PointersUpdatingVisitor<> visitor(heap_);
for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
chunk_, marking_state_->bitmap(chunk_))) {
- object_and_size.first.IterateBodyFast(&visitor);
+ object_and_size.first.IterateBodyFast(visitor.cage_base(), &visitor);
}
}
@@ -3984,9 +4155,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
},
SlotSet::FREE_EMPTY_BUCKETS);
- DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR &&
- FLAG_always_promote_young_mc,
- slots == 0);
+ DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR, slots == 0);
if (slots == 0) {
chunk_->ReleaseSlotSet<OLD_TO_NEW>();
@@ -4010,9 +4179,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
},
SlotSet::FREE_EMPTY_BUCKETS);
- DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR &&
- FLAG_always_promote_young_mc,
- slots == 0);
+ DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR, slots == 0);
if (slots == 0) {
chunk_->ReleaseSweepingSlotSet();
@@ -4049,7 +4216,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
(chunk_->slot_set<OLD_TO_CODE, AccessMode::NON_ATOMIC>() !=
nullptr)) {
PtrComprCageBase cage_base = heap_->isolate();
-#if V8_EXTERNAL_CODE_SPACE
+#ifdef V8_EXTERNAL_CODE_SPACE
PtrComprCageBase code_cage_base(heap_->isolate()->code_cage_base());
#else
PtrComprCageBase code_cage_base = cage_base;
@@ -4110,12 +4277,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
RememberedSetUpdatingMode updating_mode_;
};
-std::unique_ptr<UpdatingItem> MarkCompactCollector::CreateToSpaceUpdatingItem(
- MemoryChunk* chunk, Address start, Address end) {
- return std::make_unique<ToSpaceUpdatingItem<NonAtomicMarkingState>>(
- heap(), chunk, start, end, non_atomic_marking_state());
-}
-
std::unique_ptr<UpdatingItem>
MarkCompactCollector::CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
@@ -4124,24 +4285,6 @@ MarkCompactCollector::CreateRememberedSetUpdatingItem(
heap(), non_atomic_marking_state(), chunk, updating_mode);
}
-int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
- std::vector<std::unique_ptr<UpdatingItem>>* items) {
- if (!heap()->new_space()) return 0;
-
- // Seed to space pages.
- const Address space_start = heap()->new_space()->first_allocatable_address();
- const Address space_end = heap()->new_space()->top();
- int pages = 0;
- for (Page* page : PageRange(space_start, space_end)) {
- Address start =
- page->Contains(space_start) ? space_start : page->area_start();
- Address end = page->Contains(space_end) ? space_end : page->area_end();
- items->emplace_back(CreateToSpaceUpdatingItem(page, start, end));
- pages++;
- }
- return pages;
-}
-
template <typename IterateableSpace>
int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
std::vector<std::unique_ptr<UpdatingItem>>* items, IterateableSpace* space,
@@ -4233,12 +4376,11 @@ class EphemeronTableUpdatingItem : public UpdatingItem {
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor updating_visitor(heap());
-
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
// The external string table is updated at the end.
+ PointersUpdatingVisitor<> updating_visitor(heap());
heap_->IterateRootsIncludingClients(
&updating_visitor,
base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable});
@@ -4246,6 +4388,12 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_CLIENT_HEAPS);
+ UpdatePointersInClientHeaps();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
std::vector<std::unique_ptr<UpdatingItem>> updating_items;
@@ -4260,7 +4408,11 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
RememberedSetUpdatingMode::ALL);
- CollectToSpaceUpdatingItems(&updating_items);
+ // Iterating to space may require a valid body descriptor for e.g.
+ // WasmStruct which races with updating a slot in Map. Since to space is
+ // empty after a full GC, such races can't happen.
+ DCHECK_IMPLIES(heap()->new_space(), heap()->new_space()->Size() == 0);
+
updating_items.push_back(
std::make_unique<EphemeronTableUpdatingItem>(heap()));
@@ -4285,52 +4437,88 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
}
}
-void MarkCompactCollector::ReportAbortedEvacuationCandidate(
- Address failed_start, MemoryChunk* chunk) {
+void MarkCompactCollector::UpdatePointersInClientHeaps() {
+ if (!isolate()->is_shared()) return;
+
+ PointersUpdatingVisitor<kClientHeap> visitor(heap());
+
+ isolate()->global_safepoint()->IterateClientIsolates(
+ [&visitor](Isolate* client) {
+ Heap* heap = client->heap();
+ HeapObjectIterator iterator(heap, HeapObjectIterator::kNoFiltering);
+ PtrComprCageBase cage_base(client);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
+ obj.IterateFast(cage_base, &visitor);
+ }
+ });
+}
+
+void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToOOM(
+ Address failed_start, Page* page) {
+ base::MutexGuard guard(&mutex_);
+ aborted_evacuation_candidates_due_to_oom_.push_back(
+ std::make_pair(failed_start, page));
+}
+
+void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToFlags(
+ Address failed_start, Page* page) {
base::MutexGuard guard(&mutex_);
+ aborted_evacuation_candidates_due_to_flags_.push_back(
+ std::make_pair(failed_start, page));
+}
- aborted_evacuation_candidates_.push_back(
- std::make_pair(failed_start, static_cast<Page*>(chunk)));
+namespace {
+
+void ReRecordPage(
+ Heap* heap,
+ v8::internal::MarkCompactCollector::NonAtomicMarkingState* marking_state,
+ Address failed_start, Page* page) {
+ page->SetFlag(Page::COMPACTION_WAS_ABORTED);
+ // Aborted compaction page. We have to record slots here, since we
+  // might not have recorded them in the first place.
+
+ // Remove outdated slots.
+ RememberedSetSweeping::RemoveRange(page, page->address(), failed_start,
+ SlotSet::FREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(), failed_start,
+ SlotSet::FREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
+ failed_start);
+
+ // Remove invalidated slots.
+ if (failed_start > page->area_start()) {
+ InvalidatedSlotsCleanup old_to_new_cleanup =
+ InvalidatedSlotsCleanup::OldToNew(page);
+ old_to_new_cleanup.Free(page->area_start(), failed_start);
+ }
+
+ // Recompute live bytes.
+ LiveObjectVisitor::RecomputeLiveBytes(page, marking_state);
+ // Re-record slots.
+ EvacuateRecordOnlyVisitor record_visitor(heap);
+ LiveObjectVisitor::VisitBlackObjectsNoFail(
+ page, marking_state, &record_visitor, LiveObjectVisitor::kKeepMarking);
+ // Array buffers will be processed during pointer updating.
}
+} // namespace
+
size_t MarkCompactCollector::PostProcessEvacuationCandidates() {
CHECK_IMPLIES(FLAG_crash_on_aborted_evacuation,
- aborted_evacuation_candidates_.empty());
-
- for (auto start_and_page : aborted_evacuation_candidates_) {
- Address failed_start = start_and_page.first;
- Page* page = start_and_page.second;
- page->SetFlag(Page::COMPACTION_WAS_ABORTED);
- // Aborted compaction page. We have to record slots here, since we
- // might not have recorded them in first place.
-
- // Remove outdated slots.
- RememberedSetSweeping::RemoveRange(page, page->address(), failed_start,
- SlotSet::FREE_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(), failed_start,
- SlotSet::FREE_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
- failed_start);
-
- // Remove invalidated slots.
- if (failed_start > page->area_start()) {
- InvalidatedSlotsCleanup old_to_new_cleanup =
- InvalidatedSlotsCleanup::OldToNew(page);
- old_to_new_cleanup.Free(page->area_start(), failed_start);
- }
-
- // Recompute live bytes.
- LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state());
- // Re-record slots.
- EvacuateRecordOnlyVisitor record_visitor(heap());
- LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
- &record_visitor,
- LiveObjectVisitor::kKeepMarking);
- // Array buffers will be processed during pointer updating.
- }
- const int aborted_pages =
- static_cast<int>(aborted_evacuation_candidates_.size());
- int aborted_pages_verified = 0;
+ aborted_evacuation_candidates_due_to_oom_.empty());
+ for (auto start_and_page : aborted_evacuation_candidates_due_to_oom_) {
+ ReRecordPage(heap(), non_atomic_marking_state(), start_and_page.first,
+ start_and_page.second);
+ }
+ for (auto start_and_page : aborted_evacuation_candidates_due_to_flags_) {
+ ReRecordPage(heap(), non_atomic_marking_state(), start_and_page.first,
+ start_and_page.second);
+ }
+ const size_t aborted_pages =
+ aborted_evacuation_candidates_due_to_oom_.size() +
+ aborted_evacuation_candidates_due_to_flags_.size();
+ size_t aborted_pages_verified = 0;
for (Page* p : old_space_evacuation_pages_) {
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
// After clearing the evacuation candidate flag the page is again in a
@@ -4547,8 +4735,12 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
}
void VerifyCodePointer(CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- Code code = Code::unchecked_cast(slot.load(code_cage_base()));
- VerifyHeapObjectImpl(code);
+ Object maybe_code = slot.load(code_cage_base());
+ HeapObject code;
+    // The slot might contain a Smi during CodeDataContainer creation, so skip it.
+ if (maybe_code.GetHeapObject(&code)) {
+ VerifyHeapObjectImpl(code);
+ }
}
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -4578,9 +4770,9 @@ class YoungGenerationMarkingVisitor final
public:
YoungGenerationMarkingVisitor(
Isolate* isolate, MinorMarkCompactCollector::MarkingState* marking_state,
- MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
+ MinorMarkCompactCollector::MarkingWorklist::Local* worklist_local)
: NewSpaceVisitor(isolate),
- worklist_(global_worklist, task_id),
+ worklist_local_(worklist_local),
marking_state_(marking_state) {}
V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
@@ -4649,11 +4841,11 @@ class YoungGenerationMarkingVisitor final
inline void MarkObjectViaMarkingWorklist(HeapObject object) {
if (marking_state_->WhiteToGrey(object)) {
// Marking deque overflow is unsupported for the young generation.
- CHECK(worklist_.Push(object));
+ worklist_local_->Push(object);
}
}
- MinorMarkCompactCollector::MarkingWorklist::View worklist_;
+ MinorMarkCompactCollector::MarkingWorklist::Local* worklist_local_;
MinorMarkCompactCollector::MarkingState* marking_state_;
};
@@ -4661,16 +4853,18 @@ void MinorMarkCompactCollector::SetUp() {}
void MinorMarkCompactCollector::TearDown() {}
+// static
+constexpr size_t MinorMarkCompactCollector::kMaxParallelTasks;
+
MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap),
worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
+ main_thread_worklist_local_(worklist_),
+ marking_state_(heap->isolate()),
+ non_atomic_marking_state_(heap->isolate()),
main_marking_visitor_(new YoungGenerationMarkingVisitor(
- heap->isolate(), marking_state(), worklist_, kMainMarker)),
- page_parallel_job_semaphore_(0) {
- static_assert(
- kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
- "more marker tasks than marking deque can handle");
-}
+ heap->isolate(), marking_state(), &main_thread_worklist_local_)),
+ page_parallel_job_semaphore_(0) {}
MinorMarkCompactCollector::~MinorMarkCompactCollector() {
delete worklist_;
@@ -4768,7 +4962,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor updating_visitor(heap());
+ PointersUpdatingVisitor<> updating_visitor(heap());
std::vector<std::unique_ptr<UpdatingItem>> updating_items;
// Create batches of global handles.
@@ -4925,7 +5119,8 @@ void MinorMarkCompactCollector::MakeIterable(
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
- Map map = object.map(kAcquireLoad);
+ PtrComprCageBase cage_base(p->heap()->isolate());
+ Map map = object.map(cage_base, kAcquireLoad);
int size = object.SizeFromMap(map);
free_start = free_end + size;
}
@@ -5054,6 +5249,22 @@ void MinorMarkCompactCollector::EvacuateEpilogue() {
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
+int MinorMarkCompactCollector::CollectToSpaceUpdatingItems(
+ std::vector<std::unique_ptr<UpdatingItem>>* items) {
+ // Seed to space pages.
+ const Address space_start = heap()->new_space()->first_allocatable_address();
+ const Address space_end = heap()->new_space()->top();
+ int pages = 0;
+ for (Page* page : PageRange(space_start, space_end)) {
+ Address start =
+ page->Contains(space_start) ? space_start : page->area_start();
+ Address end = page->Contains(space_end) ? space_end : page->area_end();
+ items->emplace_back(CreateToSpaceUpdatingItem(page, start, end));
+ pages++;
+ }
+ return pages;
+}
+
std::unique_ptr<UpdatingItem>
MinorMarkCompactCollector::CreateToSpaceUpdatingItem(MemoryChunk* chunk,
Address start,
@@ -5078,10 +5289,10 @@ class YoungGenerationMarkingTask {
public:
YoungGenerationMarkingTask(
Isolate* isolate, MinorMarkCompactCollector* collector,
- MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
- : marking_worklist_(global_worklist, task_id),
+ MinorMarkCompactCollector::MarkingWorklist* global_worklist)
+ : marking_worklist_local_(global_worklist),
marking_state_(collector->marking_state()),
- visitor_(isolate, marking_state_, global_worklist, task_id) {
+ visitor_(isolate, marking_state_, &marking_worklist_local_) {
local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
Page::kPageSize);
}
@@ -5097,7 +5308,7 @@ class YoungGenerationMarkingTask {
void EmptyMarkingWorklist() {
HeapObject object;
- while (marking_worklist_.Pop(&object)) {
+ while (marking_worklist_local_.Pop(&object)) {
const int size = visitor_.Visit(object);
IncrementLiveBytes(object, size);
}
@@ -5114,7 +5325,7 @@ class YoungGenerationMarkingTask {
}
private:
- MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
+ MinorMarkCompactCollector::MarkingWorklist::Local marking_worklist_local_;
MinorMarkCompactCollector::MarkingState* marking_state_;
YoungGenerationMarkingVisitor visitor_;
std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
@@ -5221,13 +5432,13 @@ class YoungGenerationMarkingJob : public v8::JobTask {
// the amount of marking that is required.
const int kPagesPerTask = 2;
size_t items = remaining_marking_items_.load(std::memory_order_relaxed);
- size_t num_tasks = std::max((items + 1) / kPagesPerTask,
- global_worklist_->GlobalPoolSize());
+ size_t num_tasks =
+ std::max((items + 1) / kPagesPerTask, global_worklist_->Size());
if (!FLAG_parallel_marking) {
num_tasks = std::min<size_t>(1, num_tasks);
}
- return std::min<size_t>(
- num_tasks, MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks);
+ return std::min<size_t>(num_tasks,
+ MinorMarkCompactCollector::kMaxParallelTasks);
}
private:
@@ -5235,8 +5446,7 @@ class YoungGenerationMarkingJob : public v8::JobTask {
double marking_time = 0.0;
{
TimedScope scope(&marking_time);
- YoungGenerationMarkingTask task(isolate_, collector_, global_worklist_,
- delegate->GetTaskId());
+ YoungGenerationMarkingTask task(isolate_, collector_, global_worklist_);
ProcessMarkingItems(&task);
task.EmptyMarkingWorklist();
task.FlushLiveBytes();
@@ -5303,7 +5513,7 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
// The main thread might hold local items, while GlobalPoolSize() == 0.
// Flush to ensure these items are visible globally and picked up by the
// job.
- worklist()->FlushToGlobal(kMainThreadTask);
+ main_thread_worklist_local_.Publish();
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
V8::GetCurrentPlatform()
->PostJob(v8::TaskPriority::kUserBlocking,
@@ -5312,6 +5522,7 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
->Join();
DCHECK(worklist()->IsEmpty());
+ DCHECK(main_thread_worklist_local_.IsLocalEmpty());
}
}
}
@@ -5348,17 +5559,16 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
}
void MinorMarkCompactCollector::DrainMarkingWorklist() {
- MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
PtrComprCageBase cage_base(isolate());
HeapObject object;
- while (marking_worklist.Pop(&object)) {
+ while (main_thread_worklist_local_.Pop(&object)) {
DCHECK(!object.IsFreeSpaceOrFiller(cage_base));
DCHECK(object.IsHeapObject());
DCHECK(heap()->Contains(object));
DCHECK(non_atomic_marking_state()->IsGrey(object));
main_marking_visitor()->Visit(object);
}
- DCHECK(marking_worklist.IsLocalEmpty());
+ DCHECK(main_thread_worklist_local_.IsLocalEmpty());
}
void MinorMarkCompactCollector::TraceFragmentation() {
@@ -5462,7 +5672,7 @@ class YoungGenerationEvacuator : public Evacuator {
public:
explicit YoungGenerationEvacuator(MinorMarkCompactCollector* collector)
: Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
- false),
+ AlwaysPromoteYoung::kNo),
record_visitor_(collector->heap()->mark_compact_collector()),
local_allocator_(
heap_, CompactionSpaceKind::kCompactionSpaceForMinorMarkCompact),
@@ -5550,7 +5760,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
if (live_bytes_on_page == 0) continue;
live_bytes += live_bytes_on_page;
- if (ShouldMovePage(page, live_bytes_on_page, false)) {
+ if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kNo)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
} else {
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 5a7a450e38..ecfb5adc64 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -8,6 +8,8 @@
#include <atomic>
#include <vector>
+#include "include/v8-internal.h"
+#include "src/heap/base/worklist.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
@@ -183,8 +185,9 @@ class LiveObjectVisitor : AllStatic {
static void RecomputeLiveBytes(MemoryChunk* chunk, MarkingState* state);
};
+enum class AlwaysPromoteYoung { kYes, kNo };
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
-enum MarkingTreatmentMode { KEEP, CLEAR };
+enum class MarkingTreatmentMode { KEEP, CLEAR };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
// Base class for minor and full MC collectors.
@@ -214,8 +217,6 @@ class MarkCompactCollectorBase {
virtual void Evacuate() = 0;
virtual void EvacuatePagesInParallel() = 0;
virtual void UpdatePointersAfterEvacuation() = 0;
- virtual std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(
- MemoryChunk* chunk, Address start, Address end) = 0;
virtual std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;
@@ -227,10 +228,9 @@ class MarkCompactCollectorBase {
MigrationObserver* migration_observer);
// Returns whether this page should be moved according to heuristics.
- bool ShouldMovePage(Page* p, intptr_t live_bytes, bool promote_young);
+ bool ShouldMovePage(Page* p, intptr_t live_bytes,
+ AlwaysPromoteYoung promote_young);
- int CollectToSpaceUpdatingItems(
- std::vector<std::unique_ptr<UpdatingItem>>* items);
template <typename IterateableSpace>
int CollectRememberedSetUpdatingItems(
std::vector<std::unique_ptr<UpdatingItem>>* items,
@@ -244,6 +244,9 @@ class MarkCompactCollectorBase {
class MinorMarkingState final
: public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
public:
+ explicit MinorMarkingState(PtrComprCageBase cage_base)
+ : MarkingStateBase(cage_base) {}
+
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return MemoryChunk::cast(chunk)
@@ -267,6 +270,9 @@ class MinorNonAtomicMarkingState final
: public MarkingStateBase<MinorNonAtomicMarkingState,
AccessMode::NON_ATOMIC> {
public:
+ explicit MinorNonAtomicMarkingState(PtrComprCageBase cage_base)
+ : MarkingStateBase(cage_base) {}
+
ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return MemoryChunk::cast(chunk)
@@ -293,6 +299,9 @@ class MinorNonAtomicMarkingState final
class MajorMarkingState final
: public MarkingStateBase<MajorMarkingState, AccessMode::ATOMIC> {
public:
+ explicit MajorMarkingState(PtrComprCageBase cage_base)
+ : MarkingStateBase(cage_base) {}
+
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return chunk->marking_bitmap<AccessMode::ATOMIC>();
@@ -318,6 +327,9 @@ class MajorMarkingState final
class MajorAtomicMarkingState final
: public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
public:
+ explicit MajorAtomicMarkingState(PtrComprCageBase cage_base)
+ : MarkingStateBase(cage_base) {}
+
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return chunk->marking_bitmap<AccessMode::ATOMIC>();
@@ -332,6 +344,9 @@ class MajorNonAtomicMarkingState final
: public MarkingStateBase<MajorNonAtomicMarkingState,
AccessMode::NON_ATOMIC> {
public:
+ explicit MajorNonAtomicMarkingState(PtrComprCageBase cage_base)
+ : MarkingStateBase(cage_base) {}
+
ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const BasicMemoryChunk* chunk) const {
return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
@@ -375,13 +390,13 @@ class MainMarkingVisitor final
MainMarkingVisitor(MarkingState* marking_state,
MarkingWorklists::Local* local_marking_worklists,
- WeakObjects* weak_objects, Heap* heap,
+ WeakObjects::Local* local_weak_objects, Heap* heap,
unsigned mark_compact_epoch,
base::EnumSet<CodeFlushMode> code_flush_mode,
bool embedder_tracing_enabled,
bool should_keep_ages_unchanged)
: MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
- kMainThreadTask, local_marking_worklists, weak_objects, heap,
+ local_marking_worklists, local_weak_objects, heap,
mark_compact_epoch, code_flush_mode, embedder_tracing_enabled,
should_keep_ages_unchanged),
marking_state_(marking_state),
@@ -429,11 +444,7 @@ class MainMarkingVisitor final
// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
public:
-#ifdef V8_ATOMIC_MARKING_STATE
using MarkingState = MajorMarkingState;
-#else
- using MarkingState = MajorNonAtomicMarkingState;
-#endif // V8_ATOMIC_MARKING_STATE
using AtomicMarkingState = MajorAtomicMarkingState;
using NonAtomicMarkingState = MajorNonAtomicMarkingState;
@@ -441,6 +452,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
class RootMarkingVisitor;
class CustomRootBodyMarkingVisitor;
+ class SharedHeapObjectVisitor;
enum IterationMode {
kKeepMarking,
@@ -452,6 +464,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
kTrackNewlyDiscoveredObjects
};
+ enum class StartCompactionMode {
+ kIncremental,
+ kAtomic,
+ };
+
MarkingState* marking_state() { return &marking_state_; }
NonAtomicMarkingState* non_atomic_marking_state() {
@@ -475,7 +492,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// it to complete as requested by |stop_request|).
void FinishConcurrentMarking();
- bool StartCompaction();
+ // Returns whether compaction is running.
+ bool StartCompaction(StartCompactionMode mode);
void AbortCompaction();
@@ -531,6 +549,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
WeakObjects* weak_objects() { return &weak_objects_; }
+ WeakObjects::Local* local_weak_objects() { return local_weak_objects_.get(); }
+
inline void AddTransitionArray(TransitionArray array);
void AddNewlyDiscovered(HeapObject object) {
@@ -616,6 +636,13 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void MarkRoots(RootVisitor* root_visitor,
ObjectVisitor* custom_root_body_visitor);
+  // Mark all objects that are directly referenced from one of the client
+ // heaps.
+ void MarkObjectsFromClientHeaps();
+
+ // Updates pointers to shared objects from client heaps.
+ void UpdatePointersInClientHeaps();
+
// Marks object reachable from harmony weak maps and wrapper tracing.
void ProcessEphemeronMarking();
@@ -698,8 +725,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// those with dead values.
void ClearJSWeakRefs();
- void AbortWeakObjects();
-
// Starts sweeping of spaces by contributing on the main thread and setting
// up other pages for sweeping. Does not start sweeper tasks.
void StartSweepSpaces();
@@ -711,17 +736,16 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void EvacuatePagesInParallel() override;
void UpdatePointersAfterEvacuation() override;
- std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(MemoryChunk* chunk,
- Address start,
- Address end) override;
std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
void ReleaseEvacuationCandidates();
// Returns number of aborted pages.
size_t PostProcessEvacuationCandidates();
- void ReportAbortedEvacuationCandidate(Address failed_start,
- MemoryChunk* chunk);
+ void ReportAbortedEvacuationCandidateDueToOOM(Address failed_start,
+ Page* page);
+ void ReportAbortedEvacuationCandidateDueToFlags(Address failed_start,
+ Page* page);
static const int kEphemeronChunkSize = 8 * KB;
@@ -730,7 +754,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void RightTrimDescriptorArray(DescriptorArray array, int descriptors_to_trim);
base::Mutex mutex_;
- base::Semaphore page_parallel_job_semaphore_;
+ base::Semaphore page_parallel_job_semaphore_{0};
#ifdef DEBUG
enum CollectorState{IDLE,
@@ -747,17 +771,13 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
const bool is_shared_heap_;
- bool was_marked_incrementally_;
-
- bool evacuation_;
-
+ bool was_marked_incrementally_ = false;
+ bool evacuation_ = false;
// True if we are collecting slots to perform evacuation from evacuation
// candidates.
- bool compacting_;
-
- bool black_allocation_;
-
- bool have_code_to_deoptimize_;
+ bool compacting_ = false;
+ bool black_allocation_ = false;
+ bool have_code_to_deoptimize_ = false;
MarkingWorklists marking_worklists_;
@@ -766,6 +786,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
std::unique_ptr<MarkingVisitor> marking_visitor_;
std::unique_ptr<MarkingWorklists::Local> local_marking_worklists_;
+ std::unique_ptr<WeakObjects::Local> local_weak_objects_;
NativeContextInferrer native_context_inferrer_;
NativeContextStats native_context_stats_;
@@ -774,13 +795,16 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Pages that are actually processed during evacuation.
std::vector<Page*> old_space_evacuation_pages_;
std::vector<Page*> new_space_evacuation_pages_;
- std::vector<std::pair<Address, Page*>> aborted_evacuation_candidates_;
-
- Sweeper* sweeper_;
+ std::vector<std::pair<Address, Page*>>
+ aborted_evacuation_candidates_due_to_oom_;
+ std::vector<std::pair<Address, Page*>>
+ aborted_evacuation_candidates_due_to_flags_;
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
+ Sweeper* sweeper_;
+
// Counts the number of major mark-compact collections. The counter is
// incremented right after marking. This is used for:
// - marking descriptor arrays. See NumberOfMarkedDescriptors. Only the lower
@@ -819,6 +843,8 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
using MarkingState = MinorMarkingState;
using NonAtomicMarkingState = MinorNonAtomicMarkingState;
+ static constexpr size_t kMaxParallelTasks = 8;
+
explicit MinorMarkCompactCollector(Heap* heap);
~MinorMarkCompactCollector() override;
@@ -837,7 +863,8 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void CleanupSweepToIteratePages();
private:
- using MarkingWorklist = Worklist<HeapObject, 64 /* segment size */>;
+ using MarkingWorklist =
+ ::heap::base::Worklist<HeapObject, 64 /* segment size */>;
class RootMarkingVisitor;
static const int kNumMarkers = 8;
@@ -864,22 +891,26 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(MemoryChunk* chunk,
Address start,
- Address end) override;
+ Address end);
std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
+ int CollectToSpaceUpdatingItems(
+ std::vector<std::unique_ptr<UpdatingItem>>* items);
+
void SweepArrayBufferExtensions();
MarkingWorklist* worklist_;
+ MarkingWorklist::Local main_thread_worklist_local_;
+
+ MarkingState marking_state_;
+ NonAtomicMarkingState non_atomic_marking_state_;
YoungGenerationMarkingVisitor* main_marking_visitor_;
base::Semaphore page_parallel_job_semaphore_;
std::vector<Page*> new_space_evacuation_pages_;
std::vector<Page*> sweep_to_iterate_pages_;
- MarkingState marking_state_;
- NonAtomicMarkingState non_atomic_marking_state_;
-
friend class YoungGenerationMarkingTask;
friend class YoungGenerationMarkingJob;
friend class YoungGenerationMarkingVisitor;
diff --git a/deps/v8/src/heap/marking-barrier-inl.h b/deps/v8/src/heap/marking-barrier-inl.h
index 03e89a68e4..656abe5883 100644
--- a/deps/v8/src/heap/marking-barrier-inl.h
+++ b/deps/v8/src/heap/marking-barrier-inl.h
@@ -28,6 +28,8 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
// visits the host object.
return false;
}
+ BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(value);
+ if (is_shared_heap_ != target_page->InSharedHeap()) return false;
if (WhiteToGreyAndPush(value)) {
if (is_main_thread_barrier_) {
incremental_marking_->RestartIfNotMarking();
diff --git a/deps/v8/src/heap/marking-barrier.cc b/deps/v8/src/heap/marking-barrier.cc
index 1b9931d2d5..6a7571af79 100644
--- a/deps/v8/src/heap/marking-barrier.cc
+++ b/deps/v8/src/heap/marking-barrier.cc
@@ -27,14 +27,18 @@ MarkingBarrier::MarkingBarrier(Heap* heap)
collector_(heap_->mark_compact_collector()),
incremental_marking_(heap_->incremental_marking()),
worklist_(collector_->marking_worklists()->shared()),
- is_main_thread_barrier_(true) {}
+ marking_state_(heap_->isolate()),
+ is_main_thread_barrier_(true),
+ is_shared_heap_(heap_->IsShared()) {}
MarkingBarrier::MarkingBarrier(LocalHeap* local_heap)
: heap_(local_heap->heap()),
collector_(heap_->mark_compact_collector()),
incremental_marking_(nullptr),
worklist_(collector_->marking_worklists()->shared()),
- is_main_thread_barrier_(false) {}
+ marking_state_(heap_->isolate()),
+ is_main_thread_barrier_(false),
+ is_shared_heap_(heap_->IsShared()) {}
MarkingBarrier::~MarkingBarrier() { DCHECK(worklist_.IsLocalEmpty()); }
@@ -156,6 +160,12 @@ void MarkingBarrier::Publish() {
worklist_.Publish();
for (auto& it : typed_slots_map_) {
MemoryChunk* memory_chunk = it.first;
+    // Access to TypeSlots needs to be protected, since LocalHeaps might
+ // publish code in the background thread.
+ base::Optional<base::MutexGuard> opt_guard;
+ if (FLAG_concurrent_sparkplug) {
+ opt_guard.emplace(memory_chunk->mutex());
+ }
std::unique_ptr<TypedSlots>& typed_slots = it.second;
RememberedSet<OLD_TO_OLD>::MergeTyped(memory_chunk,
std::move(typed_slots));
diff --git a/deps/v8/src/heap/marking-barrier.h b/deps/v8/src/heap/marking-barrier.h
index deb49a46d0..d7cc79315f 100644
--- a/deps/v8/src/heap/marking-barrier.h
+++ b/deps/v8/src/heap/marking-barrier.h
@@ -72,6 +72,7 @@ class MarkingBarrier {
bool is_compacting_ = false;
bool is_activated_ = false;
bool is_main_thread_barrier_;
+ bool is_shared_heap_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/marking-visitor-inl.h b/deps/v8/src/heap/marking-visitor-inl.h
index ab84a32b1a..8f65a61dab 100644
--- a/deps/v8/src/heap/marking-visitor-inl.h
+++ b/deps/v8/src/heap/marking-visitor-inl.h
@@ -63,7 +63,7 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::ProcessWeakHeapObject(
// If we do not know about liveness of the value, we have to process
// the reference when we know the liveness of the whole transitive
// closure.
- weak_objects_->weak_references.Push(task_id_, std::make_pair(host, slot));
+ local_weak_objects_->weak_references_local.Push(std::make_pair(host, slot));
}
}
@@ -114,8 +114,8 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEmbeddedPointer(
rinfo->target_object(ObjectVisitorWithCageBases::cage_base());
if (!concrete_visitor()->marking_state()->IsBlackOrGrey(object)) {
if (host.IsWeakObject(object)) {
- weak_objects_->weak_objects_in_code.Push(task_id_,
- std::make_pair(object, host));
+ local_weak_objects_->weak_objects_in_code_local.Push(
+ std::make_pair(object, host));
} else {
MarkObject(host, object);
}
@@ -155,7 +155,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSFunction(
int size = concrete_visitor()->VisitJSObjectSubclass(map, js_function);
if (js_function.ShouldFlushBaselineCode(code_flush_mode_)) {
DCHECK(IsBaselineCodeFlushingEnabled(code_flush_mode_));
- weak_objects_->baseline_flushing_candidates.Push(task_id_, js_function);
+ local_weak_objects_->baseline_flushing_candidates_local.Push(js_function);
} else {
VisitPointer(js_function, js_function.RawField(JSFunction::kCodeOffset));
// TODO(mythria): Consider updating the check for ShouldFlushBaselineCode to
@@ -163,7 +163,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSFunction(
// baseline code and remove this check here.
if (IsByteCodeFlushingEnabled(code_flush_mode_) &&
js_function.NeedsResetDueToFlushedBytecode()) {
- weak_objects_->flushed_js_functions.Push(task_id_, js_function);
+ local_weak_objects_->flushed_js_functions_local.Push(js_function);
}
}
return size;
@@ -194,11 +194,11 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitSharedFunctionInfo(
VisitPointer(baseline_code,
baseline_code.RawField(
Code::kDeoptimizationDataOrInterpreterDataOffset));
- weak_objects_->code_flushing_candidates.Push(task_id_, shared_info);
+ local_weak_objects_->code_flushing_candidates_local.Push(shared_info);
} else {
// In other cases, record as a flushing candidate since we have old
// bytecode.
- weak_objects_->code_flushing_candidates.Push(task_id_, shared_info);
+ local_weak_objects_->code_flushing_candidates_local.Push(shared_info);
}
return size;
}
@@ -306,7 +306,7 @@ template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEphemeronHashTable(
Map map, EphemeronHashTable table) {
if (!concrete_visitor()->ShouldVisit(table)) return 0;
- weak_objects_->ephemeron_hash_tables.Push(task_id_, table);
+ local_weak_objects_->ephemeron_hash_tables_local.Push(table);
for (InternalIndex i : table.IterateEntries()) {
ObjectSlot key_slot =
@@ -332,8 +332,8 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEphemeronHashTable(
// Revisit ephemerons with both key and value unreachable at end
// of concurrent marking cycle.
if (concrete_visitor()->marking_state()->IsWhite(value)) {
- weak_objects_->discovered_ephemerons.Push(task_id_,
- Ephemeron{key, value});
+ local_weak_objects_->discovered_ephemerons_local.Push(
+ Ephemeron{key, value});
}
}
}
@@ -357,7 +357,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSWeakRef(
} else {
// JSWeakRef points to a potentially dead object. We have to process
// them when we know the liveness of the whole transitive closure.
- weak_objects_->js_weak_refs.Push(task_id_, weak_ref);
+ local_weak_objects_->js_weak_refs_local.Push(weak_ref);
}
}
return size;
@@ -387,7 +387,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitWeakCell(
// WeakCell points to a potentially dead object or a dead unregister
// token. We have to process them when we know the liveness of the whole
// transitive closure.
- weak_objects_->weak_cells.Push(task_id_, weak_cell);
+ local_weak_objects_->weak_cells_local.Push(weak_cell);
}
return size;
}
@@ -505,7 +505,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitTransitionArray(
this->VisitMapPointer(array);
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
- weak_objects_->transition_arrays.Push(task_id_, array);
+ local_weak_objects_->transition_arrays_local.Push(array);
return size;
}
diff --git a/deps/v8/src/heap/marking-visitor.h b/deps/v8/src/heap/marking-visitor.h
index 8be5ab065b..26ebf5713f 100644
--- a/deps/v8/src/heap/marking-visitor.h
+++ b/deps/v8/src/heap/marking-visitor.h
@@ -12,7 +12,6 @@
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/weak-object-worklists.h"
-#include "src/heap/worklist.h"
namespace v8 {
namespace internal {
@@ -26,6 +25,23 @@ struct EphemeronMarking {
template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
public:
+ explicit MarkingStateBase(PtrComprCageBase cage_base)
+#if V8_COMPRESS_POINTERS
+ : cage_base_(cage_base)
+#endif
+ {
+ }
+
+ // The pointer compression cage base value used for decompression of all
+ // tagged values except references to Code objects.
+ V8_INLINE PtrComprCageBase cage_base() const {
+#if V8_COMPRESS_POINTERS
+ return cage_base_;
+#else
+ return PtrComprCageBase{};
+#endif // V8_COMPRESS_POINTERS
+ }
+
V8_INLINE MarkBit MarkBitFrom(HeapObject obj) {
return MarkBitFrom(BasicMemoryChunk::FromHeapObject(obj), obj.ptr());
}
@@ -73,14 +89,23 @@ class MarkingStateBase {
MarkBit markbit = MarkBitFrom(chunk, obj.address());
if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
static_cast<ConcreteState*>(this)->IncrementLiveBytes(
- MemoryChunk::cast(chunk), obj.Size());
+ MemoryChunk::cast(chunk), obj.Size(cage_base()));
return true;
}
+ V8_INLINE bool GreyToBlackUnaccounted(HeapObject obj) {
+ return Marking::GreyToBlack<access_mode>(MarkBitFrom(obj));
+ }
+
void ClearLiveness(MemoryChunk* chunk) {
static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
}
+
+ private:
+#if V8_COMPRESS_POINTERS
+ const PtrComprCageBase cage_base_;
+#endif // V8_COMPRESS_POINTERS
};
// The base class for all marking visitors. It implements marking logic with
@@ -101,18 +126,17 @@ class MarkingStateBase {
template <typename ConcreteVisitor, typename MarkingState>
class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
public:
- MarkingVisitorBase(int task_id,
- MarkingWorklists::Local* local_marking_worklists,
- WeakObjects* weak_objects, Heap* heap,
- unsigned mark_compact_epoch,
+ MarkingVisitorBase(MarkingWorklists::Local* local_marking_worklists,
+ WeakObjects::Local* local_weak_objects,
+ // WeakObjects* weak_objects,
+ Heap* heap, unsigned mark_compact_epoch,
base::EnumSet<CodeFlushMode> code_flush_mode,
bool is_embedder_tracing_enabled,
bool should_keep_ages_unchanged)
: HeapVisitor<int, ConcreteVisitor>(heap),
local_marking_worklists_(local_marking_worklists),
- weak_objects_(weak_objects),
+ local_weak_objects_(local_weak_objects),
heap_(heap),
- task_id_(task_id),
mark_compact_epoch_(mark_compact_epoch),
code_flush_mode_(code_flush_mode),
is_embedder_tracing_enabled_(is_embedder_tracing_enabled),
@@ -205,9 +229,8 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
MarkingWorklists::Local* const local_marking_worklists_;
- WeakObjects* const weak_objects_;
+ WeakObjects::Local* const local_weak_objects_;
Heap* const heap_;
- const int task_id_;
const unsigned mark_compact_epoch_;
const base::EnumSet<CodeFlushMode> code_flush_mode_;
const bool is_embedder_tracing_enabled_;
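The marking-visitor.h changes above do two things: MarkingStateBase now carries the pointer-compression cage base so object sizes are read via obj.Size(cage_base()), and the per-task WeakObjects worklists are replaced by thread-local WeakObjects::Local handles. A minimal standalone sketch of the conditional cage-base member pattern; USE_POINTER_COMPRESSION and CageBase below are hypothetical stand-ins for V8_COMPRESS_POINTERS and PtrComprCageBase, not the real V8 types:

#include <cstdint>

struct CageBase {  // stand-in for PtrComprCageBase
  uintptr_t base = 0;
};

class MarkingStateSketch {
 public:
  explicit MarkingStateSketch([[maybe_unused]] CageBase cage_base)
#ifdef USE_POINTER_COMPRESSION  // hypothetical stand-in for V8_COMPRESS_POINTERS
      : cage_base_(cage_base)
#endif
  {
  }

  // With compression enabled the stored base is returned; otherwise callers
  // get a default value and decompression is effectively a no-op.
  CageBase cage_base() const {
#ifdef USE_POINTER_COMPRESSION
    return cage_base_;
#else
    return CageBase{};
#endif
  }

 private:
#ifdef USE_POINTER_COMPRESSION
  const CageBase cage_base_;
#endif
};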
diff --git a/deps/v8/src/heap/memory-allocator.cc b/deps/v8/src/heap/memory-allocator.cc
index c2cff9fc66..d9552149c2 100644
--- a/deps/v8/src/heap/memory-allocator.cc
+++ b/deps/v8/src/heap/memory-allocator.cc
@@ -409,7 +409,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
MemoryChunk* chunk =
MemoryChunk::Initialize(basic_chunk, isolate_->heap(), executable);
+#ifdef DEBUG
if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
+#endif // DEBUG
return chunk;
}
@@ -458,7 +460,11 @@ void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
if (executable == EXECUTABLE) {
DCHECK_GE(size_executable_, size);
size_executable_ -= size;
+#ifdef DEBUG
UnregisterExecutableMemoryChunk(static_cast<MemoryChunk*>(chunk));
+#endif // DEBUG
+ chunk->heap()->UnregisterUnprotectedMemoryChunk(
+ static_cast<MemoryChunk*>(chunk));
}
chunk->SetFlag(MemoryChunk::UNREGISTERED);
}
@@ -578,10 +584,8 @@ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(size_t size,
ReadOnlySpace* owner) {
- BasicMemoryChunk* chunk = nullptr;
- if (chunk == nullptr) {
- chunk = AllocateBasicChunk(size, size, NOT_EXECUTABLE, owner);
- }
+ BasicMemoryChunk* chunk =
+ AllocateBasicChunk(size, size, NOT_EXECUTABLE, owner);
if (chunk == nullptr) return nullptr;
return owner->InitializePage(chunk);
}
@@ -679,7 +683,7 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
PageAllocator::kNoAccess)) {
// Commit the executable code body.
if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
- PageAllocator::kReadWrite)) {
+ MemoryChunk::GetCodeModificationPermission())) {
// Create the post-code guard page.
if (vm->SetPermissions(post_guard_page, page_size,
PageAllocator::kNoAccess)) {
diff --git a/deps/v8/src/heap/memory-allocator.h b/deps/v8/src/heap/memory-allocator.h
index d405aefa53..49b5a769cf 100644
--- a/deps/v8/src/heap/memory-allocator.h
+++ b/deps/v8/src/heap/memory-allocator.h
@@ -18,7 +18,6 @@
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#include "src/heap/code-range.h"
-#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
#include "src/tasks/cancelable-task.h"
@@ -226,11 +225,14 @@ class MemoryAllocator {
void PartialFreeMemory(BasicMemoryChunk* chunk, Address start_free,
size_t bytes_to_free, Address new_area_end);
+#ifdef DEBUG
// Checks if an allocated MemoryChunk was intended to be used for executable
// memory.
bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
+ base::MutexGuard guard(&executable_memory_mutex_);
return executable_memory_.find(chunk) != executable_memory_.end();
}
+#endif // DEBUG
// Commit memory region owned by given reservation object. Returns true if
// it succeeded and false otherwise.
@@ -311,6 +313,7 @@ class MemoryAllocator {
}
}
+#ifdef DEBUG
void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
base::MutexGuard guard(&executable_memory_mutex_);
DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
@@ -322,8 +325,8 @@ class MemoryAllocator {
base::MutexGuard guard(&executable_memory_mutex_);
DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
executable_memory_.erase(chunk);
- chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
}
+#endif // DEBUG
Isolate* isolate_;
@@ -359,9 +362,12 @@ class MemoryAllocator {
VirtualMemory last_chunk_;
Unmapper unmapper_;
+#ifdef DEBUG
// Data structure to remember allocated executable memory chunks.
+ // This data structure is used only in DCHECKs.
std::unordered_set<MemoryChunk*> executable_memory_;
base::Mutex executable_memory_mutex_;
+#endif // DEBUG
friend class heap::TestCodePageAllocatorScope;
friend class heap::TestMemoryAllocatorScope;
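The memory-allocator.h hunks above move the executable-chunk set, its mutex, and IsMemoryChunkExecutable() behind #ifdef DEBUG, since the set is now consulted only from assertions, and they add a mutex guard to the lookup itself. A standalone sketch of that debug-only, mutex-guarded registry pattern (plain C++, not the V8 classes; NDEBUG stands in for the inverse of V8's DEBUG):

#include <mutex>
#include <unordered_set>

struct Chunk {};

class AllocatorSketch {
 public:
  void RegisterExecutable([[maybe_unused]] Chunk* chunk) {
#ifndef NDEBUG
    std::lock_guard<std::mutex> guard(executable_mutex_);
    executable_.insert(chunk);
#endif
  }

  void UnregisterExecutable([[maybe_unused]] Chunk* chunk) {
#ifndef NDEBUG
    std::lock_guard<std::mutex> guard(executable_mutex_);
    executable_.erase(chunk);
#endif
  }

  bool IsExecutable([[maybe_unused]] Chunk* chunk) {
#ifndef NDEBUG
    std::lock_guard<std::mutex> guard(executable_mutex_);
    return executable_.find(chunk) != executable_.end();
#else
    return true;  // release builds keep no bookkeeping; only assertions ask
#endif
  }

 private:
#ifndef NDEBUG
  std::unordered_set<Chunk*> executable_;  // used only in assertions
  std::mutex executable_mutex_;
#endif
};

Taking the mutex inside the query mirrors the added base::MutexGuard in IsMemoryChunkExecutable(), which matters once the check can run off the main thread.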
diff --git a/deps/v8/src/heap/memory-chunk.cc b/deps/v8/src/heap/memory-chunk.cc
index 959501724f..d4d1116683 100644
--- a/deps/v8/src/heap/memory-chunk.cc
+++ b/deps/v8/src/heap/memory-chunk.cc
@@ -93,10 +93,9 @@ void MemoryChunk::SetCodeModificationPermissions() {
// We may use RWX pages to write code. Some CPUs have optimisations to push
// updates to code to the icache through a fast path, and they may filter
// updates based on the written memory being executable.
- CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
- FLAG_write_code_using_rwx
- ? PageAllocator::kReadWriteExecute
- : PageAllocator::kReadWrite));
+ CHECK(reservation_.SetPermissions(
+ unprotect_start, unprotect_size,
+ MemoryChunk::GetCodeModificationPermission()));
}
}
@@ -390,7 +389,7 @@ void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
}
- if (!FLAG_always_promote_young_mc || slot_set_[OLD_TO_NEW] != nullptr)
+ if (slot_set_[OLD_TO_NEW] != nullptr)
RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
}
diff --git a/deps/v8/src/heap/memory-chunk.h b/deps/v8/src/heap/memory-chunk.h
index 761ea9a83a..de6f09234b 100644
--- a/deps/v8/src/heap/memory-chunk.h
+++ b/deps/v8/src/heap/memory-chunk.h
@@ -189,6 +189,11 @@ class MemoryChunk : public BasicMemoryChunk {
// MemoryChunk::synchronized_heap() to simulate the barrier.
void InitializationMemoryFence();
+ static PageAllocator::Permission GetCodeModificationPermission() {
+ return FLAG_write_code_using_rwx ? PageAllocator::kReadWriteExecute
+ : PageAllocator::kReadWrite;
+ }
+
V8_EXPORT_PRIVATE void SetReadable();
V8_EXPORT_PRIVATE void SetReadAndExecutable();
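With GetCodeModificationPermission() added above, both MemoryAllocator::CommitExecutableMemory and MemoryChunk::SetCodeModificationPermissions ask a single helper whether writable code pages should be RWX (FLAG_write_code_using_rwx) or plain RW. A minimal standalone sketch of that flag-driven selection; the flag, enum, and SetPermissions below are simplified stand-ins, not the V8/PageAllocator API:

#include <cstddef>
#include <cstdio>

enum class Permission { kNoAccess, kReadWrite, kReadWriteExecute };

// Stand-in for the --write-code-using-rwx flag.
static bool g_write_code_using_rwx = true;

Permission CodeModificationPermission() {
  return g_write_code_using_rwx ? Permission::kReadWriteExecute
                                : Permission::kReadWrite;
}

bool SetPermissions(void* start, std::size_t size, Permission perm) {
  // A real implementation would call mprotect/VirtualProtect here.
  std::printf("set %zu bytes at %p to %d\n", size, start,
              static_cast<int>(perm));
  return true;
}

int main() {
  static char code_area[4096];
  // Every writable-code path asks the same helper, so the RWX-vs-RW decision
  // lives in exactly one place.
  return SetPermissions(code_area, sizeof(code_area),
                        CodeModificationPermission())
             ? 0
             : 1;
}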
diff --git a/deps/v8/src/heap/memory-measurement-inl.h b/deps/v8/src/heap/memory-measurement-inl.h
index f6c75b6ca6..6924bbf1b1 100644
--- a/deps/v8/src/heap/memory-measurement-inl.h
+++ b/deps/v8/src/heap/memory-measurement-inl.h
@@ -29,6 +29,7 @@ bool NativeContextInferrer::Infer(Isolate* isolate, Map map, HeapObject object,
native_context);
case kVisitJSApiObject:
case kVisitJSArrayBuffer:
+ case kVisitJSFinalizationRegistry:
case kVisitJSObject:
case kVisitJSObjectFast:
case kVisitJSTypedArray:
diff --git a/deps/v8/src/heap/memory-measurement.cc b/deps/v8/src/heap/memory-measurement.cc
index 0ef5d7550b..0aeef39910 100644
--- a/deps/v8/src/heap/memory-measurement.cc
+++ b/deps/v8/src/heap/memory-measurement.cc
@@ -338,11 +338,12 @@ std::unique_ptr<v8::MeasureMemoryDelegate> MemoryMeasurement::DefaultDelegate(
bool NativeContextInferrer::InferForContext(Isolate* isolate, Context context,
Address* native_context) {
- Map context_map = context.map(kAcquireLoad);
+ PtrComprCageBase cage_base(isolate);
+ Map context_map = context.map(cage_base, kAcquireLoad);
Object maybe_native_context =
TaggedField<Object, Map::kConstructorOrBackPointerOrNativeContextOffset>::
- Acquire_Load(isolate, context_map);
- if (maybe_native_context.IsNativeContext()) {
+ Acquire_Load(cage_base, context_map);
+ if (maybe_native_context.IsNativeContext(cage_base)) {
*native_context = maybe_native_context.ptr();
return true;
}
@@ -401,7 +402,7 @@ void NativeContextStats::IncrementExternalSize(Address context, Map map,
InstanceType instance_type = map.instance_type();
size_t external_size = 0;
if (instance_type == JS_ARRAY_BUFFER_TYPE) {
- external_size = JSArrayBuffer::cast(object).allocation_length();
+ external_size = JSArrayBuffer::cast(object).GetByteLength();
} else {
DCHECK(InstanceTypeChecker::IsExternalString(instance_type));
external_size = ExternalString::cast(object).ExternalPayloadSize();
diff --git a/deps/v8/src/heap/new-spaces-inl.h b/deps/v8/src/heap/new-spaces-inl.h
index c47c949388..72112d2426 100644
--- a/deps/v8/src/heap/new-spaces-inl.h
+++ b/deps/v8/src/heap/new-spaces-inl.h
@@ -96,7 +96,7 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationResult result;
- if (alignment != kWordAligned) {
+ if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
result = AllocateFastAligned(size_in_bytes, nullptr, alignment, origin);
} else {
result = AllocateFastUnaligned(size_in_bytes, origin);
@@ -111,11 +111,11 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
AllocationOrigin origin) {
- if (!allocation_info_.CanIncrementTop(size_in_bytes)) {
+ if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
return AllocationResult::Retry(NEW_SPACE);
}
HeapObject obj =
- HeapObject::FromAddress(allocation_info_.IncrementTop(size_in_bytes));
+ HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes));
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
@@ -130,15 +130,15 @@ AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
AllocationResult NewSpace::AllocateFastAligned(
int size_in_bytes, int* result_aligned_size_in_bytes,
AllocationAlignment alignment, AllocationOrigin origin) {
- Address top = allocation_info_.top();
+ Address top = allocation_info_->top();
int filler_size = Heap::GetFillToAlign(top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
- if (!allocation_info_.CanIncrementTop(aligned_size_in_bytes)) {
+ if (!allocation_info_->CanIncrementTop(aligned_size_in_bytes)) {
return AllocationResult::Retry(NEW_SPACE);
}
HeapObject obj = HeapObject::FromAddress(
- allocation_info_.IncrementTop(aligned_size_in_bytes));
+ allocation_info_->IncrementTop(aligned_size_in_bytes));
if (result_aligned_size_in_bytes)
*result_aligned_size_in_bytes = aligned_size_in_bytes;
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
diff --git a/deps/v8/src/heap/new-spaces.cc b/deps/v8/src/heap/new-spaces.cc
index 70cbbe1799..6155a06f77 100644
--- a/deps/v8/src/heap/new-spaces.cc
+++ b/deps/v8/src/heap/new-spaces.cc
@@ -387,7 +387,7 @@ void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
size_t NewSpace::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits()) return CommittedMemory();
- BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
size_t size = to_space_.CommittedPhysicalMemory();
if (from_space_.IsCommitted()) {
size += from_space_.CommittedPhysicalMemory();
@@ -400,8 +400,9 @@ size_t NewSpace::CommittedPhysicalMemory() {
NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
size_t initial_semispace_capacity,
- size_t max_semispace_capacity)
- : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList()),
+ size_t max_semispace_capacity,
+ LinearAllocationArea* allocation_info)
+ : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList(), allocation_info),
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace) {
DCHECK(initial_semispace_capacity <= max_semispace_capacity);
@@ -416,7 +417,7 @@ NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
}
void NewSpace::TearDown() {
- allocation_info_.Reset(kNullAddress, kNullAddress);
+ allocation_info_->Reset(kNullAddress, kNullAddress);
to_space_.TearDown();
from_space_.TearDown();
@@ -468,8 +469,8 @@ void NewSpace::UpdateLinearAllocationArea(Address known_top) {
AdvanceAllocationObservers();
Address new_top = known_top == 0 ? to_space_.page_low() : known_top;
- BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.Reset(new_top, to_space_.page_high());
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
+ allocation_info_->Reset(new_top, to_space_.page_high());
// The order of the following two stores is important.
// See the corresponding loads in ConcurrentMarking::Run.
{
@@ -499,7 +500,7 @@ void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
DCHECK_LE(top(), new_limit);
DCHECK_LE(new_limit, to_space_.page_high());
- allocation_info_.SetLimit(new_limit);
+ allocation_info_->SetLimit(new_limit);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
#if DEBUG
@@ -508,7 +509,7 @@ void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
}
bool NewSpace::AddFreshPage() {
- Address top = allocation_info_.top();
+ Address top = allocation_info_->top();
DCHECK(!OldSpace::IsAtPageStart(top));
// Clear remainder of current page.
@@ -566,7 +567,7 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment) {
AdvanceAllocationObservers();
- Address old_top = allocation_info_.top();
+ Address old_top = allocation_info_->top();
Address high = to_space_.page_high();
int filler_size = Heap::GetFillToAlign(old_top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
@@ -584,7 +585,7 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
return false;
}
- old_top = allocation_info_.top();
+ old_top = allocation_info_->top();
high = to_space_.page_high();
filler_size = Heap::GetFillToAlign(old_top, alignment);
aligned_size_in_bytes = size_in_bytes + filler_size;
@@ -595,8 +596,8 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
}
void NewSpace::MaybeFreeUnusedLab(LinearAllocationArea info) {
- if (allocation_info_.MergeIfAdjacent(info)) {
- original_top_.store(allocation_info_.top(), std::memory_order_release);
+ if (allocation_info_->MergeIfAdjacent(info)) {
+ original_top_.store(allocation_info_->top(), std::memory_order_release);
}
#if DEBUG
@@ -611,29 +612,19 @@ std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
AllocationResult NewSpace::AllocateRawSlow(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
-#ifdef V8_HOST_ARCH_32_BIT
- return alignment != kWordAligned
+ return USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
-#else
-#ifdef V8_COMPRESS_POINTERS
- // TODO(ishell, v8:8875): Consider using aligned allocations once the
- // allocation alignment inconsistency is fixed. For now we keep using
- // unaligned access since both x64 and arm64 architectures (where pointer
- // compression is supported) allow unaligned access to doubles and full words.
-#endif // V8_COMPRESS_POINTERS
- return AllocateRawUnaligned(size_in_bytes, origin);
-#endif
}
AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
- if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
+ if (!EnsureAllocation(size_in_bytes, kTaggedAligned)) {
return AllocationResult::Retry(NEW_SPACE);
}
- DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
+ DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
DCHECK(!result.IsRetry());
@@ -652,7 +643,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
return AllocationResult::Retry(NEW_SPACE);
}
- DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
+ DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
int aligned_size_in_bytes;
@@ -666,18 +657,33 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
return result;
}
+void NewSpace::MakeLinearAllocationAreaIterable() {
+ Address to_top = top();
+ Page* page = Page::FromAddress(to_top - kTaggedSize);
+ if (page->Contains(to_top)) {
+ int remaining_in_page = static_cast<int>(page->area_end() - to_top);
+ heap_->CreateFillerObjectAt(to_top, remaining_in_page,
+ ClearRecordedSlots::kNo);
+ }
+}
+
+void NewSpace::FreeLinearAllocationArea() {
+ MakeLinearAllocationAreaIterable();
+ UpdateInlineAllocationLimit(0);
+}
+
void NewSpace::VerifyTop() {
// Ensure validity of LAB: start <= top <= limit
- DCHECK_LE(allocation_info_.start(), allocation_info_.top());
- DCHECK_LE(allocation_info_.top(), allocation_info_.limit());
+ DCHECK_LE(allocation_info_->start(), allocation_info_->top());
+ DCHECK_LE(allocation_info_->top(), allocation_info_->limit());
// Ensure that original_top_ always >= LAB start. The delta between start_
// and top_ is still to be processed by allocation observers.
- DCHECK_GE(original_top_, allocation_info_.start());
+ DCHECK_GE(original_top_, allocation_info_->start());
// Ensure that limit() is <= original_limit_, original_limit_ always needs
  // to be the end of the current to-space page.
- DCHECK_LE(allocation_info_.limit(), original_limit_);
+ DCHECK_LE(allocation_info_->limit(), original_limit_);
DCHECK_EQ(original_limit_, to_space_.page_high());
}
@@ -698,6 +704,7 @@ void NewSpace::Verify(Isolate* isolate) {
external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
+ PtrComprCageBase cage_base(isolate);
while (current != top()) {
if (!Page::IsAlignedToPageSize(current)) {
// The allocation pointer should not be in the middle of an object.
@@ -708,26 +715,27 @@ void NewSpace::Verify(Isolate* isolate) {
// The first word should be a map, and we expect all map pointers to
// be in map space or read-only space.
- Map map = object.map();
- CHECK(map.IsMap());
+ Map map = object.map(cage_base);
+ CHECK(map.IsMap(cage_base));
CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// The object should not be code or a map.
- CHECK(!object.IsMap());
- CHECK(!object.IsAbstractCode());
+ CHECK(!object.IsMap(cage_base));
+ CHECK(!object.IsAbstractCode(cage_base));
// The object itself should look OK.
object.ObjectVerify(isolate);
// All the interior pointers should be contained in the heap.
VerifyPointersVisitor visitor(heap());
- int size = object.Size();
+ int size = object.Size(cage_base);
object.IterateBody(map, size, &visitor);
- if (object.IsExternalString()) {
+ if (object.IsExternalString(cage_base)) {
ExternalString external_string = ExternalString::cast(object);
- size_t size = external_string.ExternalPayloadSize();
- external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
+ size_t string_size = external_string.ExternalPayloadSize();
+ external_space_bytes[ExternalBackingStoreType::kExternalString] +=
+ string_size;
}
current += size;
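Two themes run through the new-spaces.cc hunks above: the space now dereferences a LinearAllocationArea it no longer owns (allocation_info_->...), and the 32-bit-only alignment #ifdefs are replaced by USE_ALLOCATION_ALIGNMENT_BOOL with kTaggedAligned. A standalone sketch of the aligned bump-pointer fast path that AllocateFastAligned implements (types and names below are simplified stand-ins, not V8 code):

#include <cstddef>
#include <cstdint>
#include <optional>

struct LinearArea {
  uintptr_t top = 0;
  uintptr_t limit = 0;

  bool CanIncrementTop(std::size_t bytes) const { return top + bytes <= limit; }
  uintptr_t IncrementTop(std::size_t bytes) {
    uintptr_t old_top = top;
    top += bytes;
    return old_top;
  }
};

// Bytes of filler needed so that addr + filler is `alignment`-aligned.
int FillToAlign(uintptr_t addr, std::size_t alignment) {
  return static_cast<int>((alignment - addr % alignment) % alignment);
}

std::optional<uintptr_t> AllocateAligned(LinearArea* lab, std::size_t size,
                                         std::size_t alignment) {
  int filler = FillToAlign(lab->top, alignment);
  std::size_t aligned_size = size + filler;
  if (!lab->CanIncrementTop(aligned_size)) return std::nullopt;  // retry/GC path
  uintptr_t start = lab->IncrementTop(aligned_size);
  // The real code writes a filler object over [start, start + filler) so the
  // space stays iterable; here we just return the aligned payload address.
  return start + filler;
}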
diff --git a/deps/v8/src/heap/new-spaces.h b/deps/v8/src/heap/new-spaces.h
index 45129acea1..b1bec1b032 100644
--- a/deps/v8/src/heap/new-spaces.h
+++ b/deps/v8/src/heap/new-spaces.h
@@ -233,7 +233,8 @@ class V8_EXPORT_PRIVATE NewSpace
using const_iterator = ConstPageIterator;
NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
- size_t initial_semispace_capacity, size_t max_semispace_capacity);
+ size_t initial_semispace_capacity, size_t max_semispace_capacity,
+ LinearAllocationArea* allocation_info);
~NewSpace() override { TearDown(); }
@@ -393,6 +394,10 @@ class V8_EXPORT_PRIVATE NewSpace
int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin = AllocationOrigin::kRuntime);
+ V8_WARN_UNUSED_RESULT AllocationResult
+ AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
+
// Reset the allocation pointer to the beginning of the active semispace.
void ResetLinearAllocationArea();
@@ -469,6 +474,12 @@ class V8_EXPORT_PRIVATE NewSpace
return &pending_allocation_mutex_;
}
+ // Creates a filler object in the linear allocation area.
+ void MakeLinearAllocationAreaIterable();
+
+ // Creates a filler object in the linear allocation area and closes it.
+ void FreeLinearAllocationArea();
+
private:
static const int kAllocationBufferParkingThreshold = 4 * KB;
@@ -505,10 +516,6 @@ class V8_EXPORT_PRIVATE NewSpace
AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin);
- V8_WARN_UNUSED_RESULT AllocationResult
- AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
-
V8_WARN_UNUSED_RESULT AllocationResult AllocateRawUnaligned(
int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
@@ -521,9 +528,9 @@ class V8_EXPORT_PRIVATE NewSpace
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
- SLOW_DCHECK((space).page_low() <= (info).top() && \
- (info).top() <= (space).page_high() && \
- (info).limit() <= (space).page_high())
+ SLOW_DCHECK((space).page_low() <= (info)->top() && \
+ (info)->top() <= (space).page_high() && \
+ (info)->limit() <= (space).page_high())
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 294bff0e1a..379356a797 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -23,6 +23,7 @@
#include "src/objects/literal-objects-inl.h"
#include "src/objects/slots.h"
#include "src/objects/templates.h"
+#include "src/objects/visitors.h"
#include "src/utils/memcopy.h"
#include "src/utils/ostreams.h"
@@ -31,14 +32,15 @@ namespace internal {
static base::LazyMutex object_stats_mutex = LAZY_MUTEX_INITIALIZER;
-class FieldStatsCollector : public ObjectVisitor {
+class FieldStatsCollector : public ObjectVisitorWithCageBases {
public:
- FieldStatsCollector(size_t* tagged_fields_count,
+ FieldStatsCollector(Heap* heap, size_t* tagged_fields_count,
size_t* embedder_fields_count,
size_t* inobject_smi_fields_count,
size_t* boxed_double_fields_count,
size_t* string_data_count, size_t* raw_fields_count)
- : tagged_fields_count_(tagged_fields_count),
+ : ObjectVisitorWithCageBases(heap),
+ tagged_fields_count_(tagged_fields_count),
embedder_fields_count_(embedder_fields_count),
inobject_smi_fields_count_(inobject_smi_fields_count),
boxed_double_fields_count_(boxed_double_fields_count),
@@ -47,16 +49,16 @@ class FieldStatsCollector : public ObjectVisitor {
void RecordStats(HeapObject host) {
size_t old_pointer_fields_count = *tagged_fields_count_;
- host.Iterate(this);
+ host.Iterate(cage_base(), this);
size_t tagged_fields_count_in_object =
*tagged_fields_count_ - old_pointer_fields_count;
- int object_size_in_words = host.Size() / kTaggedSize;
+ int object_size_in_words = host.Size(cage_base()) / kTaggedSize;
DCHECK_LE(tagged_fields_count_in_object, object_size_in_words);
size_t raw_fields_count_in_object =
object_size_in_words - tagged_fields_count_in_object;
- if (host.IsJSObject()) {
+ if (host.IsJSObject(cage_base())) {
JSObjectFieldStats field_stats = GetInobjectFieldStats(host.map());
// Embedder fields are already included into pointer words.
DCHECK_LE(field_stats.embedded_fields_count_,
@@ -69,11 +71,11 @@ class FieldStatsCollector : public ObjectVisitor {
tagged_fields_count_in_object -= field_stats.smi_fields_count_;
*tagged_fields_count_ -= field_stats.smi_fields_count_;
*inobject_smi_fields_count_ += field_stats.smi_fields_count_;
- } else if (host.IsHeapNumber()) {
+ } else if (host.IsHeapNumber(cage_base())) {
DCHECK_LE(kDoubleSize / kTaggedSize, raw_fields_count_in_object);
raw_fields_count_in_object -= kDoubleSize / kTaggedSize;
*boxed_double_fields_count_ += 1;
- } else if (host.IsSeqString()) {
+ } else if (host.IsSeqString(cage_base())) {
int string_data = SeqString::cast(host).length(kAcquireLoad) *
(String::cast(host).IsOneByteRepresentation() ? 1 : 2) /
kTaggedSize;
@@ -456,7 +458,7 @@ ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
marking_state_(
heap->mark_compact_collector()->non_atomic_marking_state()),
field_stats_collector_(
- &stats->tagged_fields_count_, &stats->embedder_fields_count_,
+ heap_, &stats->tagged_fields_count_, &stats->embedder_fields_count_,
&stats->inobject_smi_fields_count_,
&stats->boxed_double_fields_count_, &stats->string_data_count_,
&stats->raw_fields_count_) {}
@@ -1053,8 +1055,11 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code code) {
void ObjectStatsCollectorImpl::RecordVirtualContext(Context context) {
if (context.IsNativeContext()) {
RecordObjectStats(context, NATIVE_CONTEXT_TYPE, context.Size());
- RecordSimpleVirtualObjectStats(context, context.retained_maps(),
- ObjectStats::RETAINED_MAPS_TYPE);
+ if (context.retained_maps().IsWeakArrayList()) {
+ RecordSimpleVirtualObjectStats(
+ context, WeakArrayList::cast(context.retained_maps()),
+ ObjectStats::RETAINED_MAPS_TYPE);
+ }
} else if (context.IsFunctionContext()) {
RecordObjectStats(context, FUNCTION_CONTEXT_TYPE, context.Size());
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index cd85ef715c..858e279ec4 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -30,6 +30,7 @@ namespace internal {
V(FixedDoubleArray) \
V(JSArrayBuffer) \
V(JSDataView) \
+ V(JSFinalizationRegistry) \
V(JSFunction) \
V(JSObject) \
V(JSTypedArray) \
@@ -50,15 +51,17 @@ namespace internal {
V(Symbol) \
V(SyntheticModule) \
V(TransitionArray) \
+ IF_WASM(V, WasmApiFunctionRef) \
IF_WASM(V, WasmArray) \
IF_WASM(V, WasmCapiFunctionData) \
IF_WASM(V, WasmExportedFunctionData) \
IF_WASM(V, WasmFunctionData) \
IF_WASM(V, WasmIndirectFunctionTable) \
IF_WASM(V, WasmInstanceObject) \
+ IF_WASM(V, WasmInternalFunction) \
IF_WASM(V, WasmJSFunctionData) \
- IF_WASM(V, WasmApiFunctionRef) \
IF_WASM(V, WasmStruct) \
+ IF_WASM(V, WasmSuspenderObject) \
IF_WASM(V, WasmTypeInfo)
#define FORWARD_DECLARE(TypeName) class TypeName;
diff --git a/deps/v8/src/heap/paged-spaces-inl.h b/deps/v8/src/heap/paged-spaces-inl.h
index d59fd461e0..22b07c7442 100644
--- a/deps/v8/src/heap/paged-spaces-inl.h
+++ b/deps/v8/src/heap/paged-spaces-inl.h
@@ -29,7 +29,7 @@ HeapObject PagedSpaceObjectIterator::Next() {
HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
while (cur_addr_ != cur_end_) {
HeapObject obj = HeapObject::FromAddress(cur_addr_);
- const int obj_size = obj.Size();
+ const int obj_size = obj.Size(cage_base());
cur_addr_ += obj_size;
DCHECK_LE(cur_addr_, cur_end_);
if (!obj.IsFreeSpaceOrFiller(cage_base())) {
@@ -79,38 +79,39 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
}
bool PagedSpace::TryFreeLast(Address object_address, int object_size) {
- if (allocation_info_.top() != kNullAddress) {
- return allocation_info_.DecrementTopIfAdjacent(object_address, object_size);
+ if (allocation_info_->top() != kNullAddress) {
+ return allocation_info_->DecrementTopIfAdjacent(object_address,
+ object_size);
}
return false;
}
bool PagedSpace::EnsureLabMain(int size_in_bytes, AllocationOrigin origin) {
- if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
+ if (allocation_info_->top() + size_in_bytes <= allocation_info_->limit()) {
return true;
}
return RefillLabMain(size_in_bytes, origin);
}
AllocationResult PagedSpace::AllocateFastUnaligned(int size_in_bytes) {
- if (!allocation_info_.CanIncrementTop(size_in_bytes)) {
+ if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
return AllocationResult::Retry(identity());
}
return AllocationResult(
- HeapObject::FromAddress(allocation_info_.IncrementTop(size_in_bytes)));
+ HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes)));
}
AllocationResult PagedSpace::AllocateFastAligned(
int size_in_bytes, int* aligned_size_in_bytes,
AllocationAlignment alignment) {
- Address current_top = allocation_info_.top();
+ Address current_top = allocation_info_->top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
int aligned_size = filler_size + size_in_bytes;
- if (!allocation_info_.CanIncrementTop(aligned_size)) {
+ if (!allocation_info_->CanIncrementTop(aligned_size)) {
return AllocationResult::Retry(identity());
}
HeapObject obj =
- HeapObject::FromAddress(allocation_info_.IncrementTop(aligned_size));
+ HeapObject::FromAddress(allocation_info_->IncrementTop(aligned_size));
if (aligned_size_in_bytes) *aligned_size_in_bytes = aligned_size;
if (filler_size > 0) {
obj = heap()->PrecedeWithFiller(obj, filler_size);
@@ -176,7 +177,7 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
DCHECK(!FLAG_enable_third_party_heap);
AllocationResult result;
- if (alignment != kWordAligned) {
+ if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
result = AllocateFastAligned(size_in_bytes, nullptr, alignment);
} else {
result = AllocateFastUnaligned(size_in_bytes);
diff --git a/deps/v8/src/heap/paged-spaces.cc b/deps/v8/src/heap/paged-spaces.cc
index c8feac3e65..0db2d5f989 100644
--- a/deps/v8/src/heap/paged-spaces.cc
+++ b/deps/v8/src/heap/paged-spaces.cc
@@ -37,8 +37,8 @@ PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
cage_base_(heap->isolate())
#endif // V8_COMPRESS_POINTERS
{
- space_->MakeLinearAllocationAreaIterable();
- heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->MakeHeapIterable();
+ USE(space_);
}
PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
@@ -54,8 +54,7 @@ PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
cage_base_(heap->isolate())
#endif // V8_COMPRESS_POINTERS
{
- space_->MakeLinearAllocationAreaIterable();
- heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->MakeHeapIterable();
#ifdef DEBUG
AllocationSpace owner = page->owner_identity();
DCHECK(owner == OLD_SPACE || owner == MAP_SPACE || owner == CODE_SPACE);
@@ -91,8 +90,9 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable, FreeList* free_list,
+ LinearAllocationArea* allocation_info_,
CompactionSpaceKind compaction_space_kind)
- : SpaceWithLinearArea(heap, space, free_list),
+ : SpaceWithLinearArea(heap, space, free_list, allocation_info_),
executable_(executable),
compaction_space_kind_(compaction_space_kind) {
area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
@@ -212,7 +212,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
size_t PagedSpace::CommittedPhysicalMemory() {
if (!base::OS::HasLazyCommits()) return CommittedMemory();
- BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
base::MutexGuard guard(mutex());
size_t size = 0;
for (Page* page : *this) {
@@ -283,8 +283,8 @@ void PagedSpace::RemovePage(Page* page) {
void PagedSpace::SetTopAndLimit(Address top, Address limit) {
DCHECK(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
- BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.Reset(top, limit);
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
+ allocation_info_->Reset(top, limit);
base::Optional<base::SharedMutexGuard<base::kExclusive>> optional_guard;
if (!is_compaction_space())
@@ -309,7 +309,7 @@ void PagedSpace::ResetFreeList() {
void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(!heap()->deserialization_complete());
- BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
FreeLinearAllocationArea();
ResetFreeList();
for (Page* page : *this) {
@@ -483,7 +483,7 @@ void PagedSpace::ReleasePage(Page* page) {
free_list_->EvictFreeListItems(page);
- if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
+ if (Page::FromAllocationAreaAddress(allocation_info_->top()) == page) {
SetTopAndLimit(kNullAddress, kNullAddress);
}
@@ -499,7 +499,7 @@ void PagedSpace::ReleasePage(Page* page) {
void PagedSpace::SetReadable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
- CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
+ DCHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadable();
}
}
@@ -507,7 +507,7 @@ void PagedSpace::SetReadable() {
void PagedSpace::SetReadAndExecutable() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
- CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
+ DCHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetReadAndExecutable();
}
}
@@ -515,7 +515,7 @@ void PagedSpace::SetReadAndExecutable() {
void PagedSpace::SetCodeModificationPermissions() {
DCHECK(identity() == CODE_SPACE);
for (Page* page : *this) {
- CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
+ DCHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
page->SetCodeModificationPermissions();
}
}
@@ -559,7 +559,7 @@ bool PagedSpace::TryAllocationFromFreeListMain(size_t size_in_bytes,
Page* page = Page::FromHeapObject(new_node);
IncreaseAllocatedBytes(new_node_size, page);
- DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
+ DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
Address start = new_node.address();
Address end = new_node.address() + new_node_size;
Address limit = ComputeLimit(start, end, size_in_bytes);
@@ -603,23 +603,26 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
- // Now contribute to sweeping from background thread and then try to
- // reallocate.
- Sweeper::FreeSpaceMayContainInvalidatedSlots
- invalidated_slots_in_free_space =
- Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
-
- const int kMaxPagesToSweep = 1;
- int max_freed = collector->sweeper()->ParallelSweepSpace(
- identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
- invalidated_slots_in_free_space);
-
- RefillFreeList();
-
- if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
- result = TryAllocationFromFreeListBackground(
- local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
- if (result) return result;
+ if (IsSweepingAllowedOnThread(local_heap)) {
+ // Now contribute to sweeping from background thread and then try to
+ // reallocate.
+ Sweeper::FreeSpaceMayContainInvalidatedSlots
+ invalidated_slots_in_free_space =
+ Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
+
+ const int kMaxPagesToSweep = 1;
+ int max_freed = collector->sweeper()->ParallelSweepSpace(
+ identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
+ invalidated_slots_in_free_space);
+
+ RefillFreeList();
+
+ if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
+ result = TryAllocationFromFreeListBackground(
+ local_heap, min_size_in_bytes, max_size_in_bytes, alignment,
+ origin);
+ if (result) return result;
+ }
}
}
@@ -634,7 +637,9 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
if (collector->sweeping_in_progress()) {
// Complete sweeping for this space.
- collector->DrainSweepingWorklistForSpace(identity());
+ if (IsSweepingAllowedOnThread(local_heap)) {
+ collector->DrainSweepingWorklistForSpace(identity());
+ }
RefillFreeList();
@@ -683,12 +688,21 @@ PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
DCHECK_LE(limit, end);
DCHECK_LE(min_size_in_bytes, limit - start);
if (limit != end) {
+ if (identity() == CODE_SPACE) {
+ heap()->UnprotectAndRegisterMemoryChunk(
+ page, UnprotectMemoryOrigin::kMaybeOffMainThread);
+ }
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
return std::make_pair(start, used_size_in_bytes);
}
+bool PagedSpace::IsSweepingAllowedOnThread(LocalHeap* local_heap) {
+  // Code space sweeping is only allowed on the main thread.
+ return local_heap->is_main_thread() || identity() != CODE_SPACE;
+}
+
#ifdef DEBUG
void PagedSpace::Print() {}
#endif
@@ -696,7 +710,7 @@ void PagedSpace::Print() {}
#ifdef VERIFY_HEAP
void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
bool allocation_pointer_found_in_space =
- (allocation_info_.top() == allocation_info_.limit());
+ (allocation_info_->top() == allocation_info_->limit());
size_t external_space_bytes[kNumTypes];
size_t external_page_bytes[kNumTypes];
@@ -712,7 +726,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
- if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
+ if (page == Page::FromAllocationAreaAddress(allocation_info_->top())) {
allocation_pointer_found_in_space = true;
}
CHECK(page->SweepingDone());
@@ -725,8 +739,8 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
// The first word should be a map, and we expect all map pointers to
// be in map space.
- Map map = object.map();
- CHECK(map.IsMap());
+ Map map = object.map(cage_base);
+ CHECK(map.IsMap(cage_base));
CHECK(ReadOnlyHeap::Contains(map) ||
isolate->heap()->map_space()->Contains(map));
@@ -741,7 +755,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
}
// All the interior pointers should be contained in the heap.
- int size = object.Size();
+ int size = object.Size(cage_base);
object.IterateBody(map, size, visitor);
CHECK(object.address() + size <= top);
end_of_previous_object = object.address() + size;
@@ -780,6 +794,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
void PagedSpace::VerifyLiveBytes() {
IncrementalMarking::MarkingState* marking_state =
heap()->incremental_marking()->marking_state();
+ PtrComprCageBase cage_base(heap()->isolate());
for (Page* page : *this) {
CHECK(page->SweepingDone());
PagedSpaceObjectIterator it(heap(), this, page);
@@ -787,7 +802,7 @@ void PagedSpace::VerifyLiveBytes() {
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
// All the interior pointers should be contained in the heap.
if (marking_state->IsBlack(object)) {
- black_size += object.Size();
+ black_size += object.Size(cage_base);
}
}
CHECK_LE(black_size, marking_state->live_bytes(page));
@@ -799,6 +814,7 @@ void PagedSpace::VerifyLiveBytes() {
void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) {
size_t total_capacity = 0;
size_t total_allocated = 0;
+ PtrComprCageBase cage_base(heap->isolate());
for (Page* page : *this) {
DCHECK(page->SweepingDone());
total_capacity += page->area_size();
@@ -806,7 +822,7 @@ void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) {
size_t real_allocated = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
if (!object.IsFreeSpaceOrFiller()) {
- real_allocated += object.Size();
+ real_allocated += object.Size(cage_base);
}
}
total_allocated += page->allocated_bytes();
@@ -845,7 +861,7 @@ void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
// Ensure there are no unaccounted allocations.
- DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
+ DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
Address new_limit = ComputeLimit(top(), limit(), min_size);
DCHECK_LE(top(), new_limit);
@@ -857,10 +873,6 @@ void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
// OldSpace implementation
void PagedSpace::PrepareForMarkCompact() {
- // We don't have a linear allocation area while sweeping. It will be restored
- // on the first allocation after the sweep.
- FreeLinearAllocationArea();
-
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_->Reset();
}
@@ -984,14 +996,10 @@ AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
kGCCallbackScheduleIdleGarbageCollection);
}
-#ifdef V8_HOST_ARCH_32_BIT
AllocationResult result =
- alignment != kWordAligned
+ USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
? AllocateRawAligned(size_in_bytes, alignment, origin)
: AllocateRawUnaligned(size_in_bytes, origin);
-#else
- AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
-#endif
return result;
}
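The RawRefillLabBackground changes above gate the background sweeping contribution behind IsSweepingAllowedOnThread(), because code-space pages may only be swept on the main thread, and they unprotect code pages before freeing the unused tail of an off-main-thread LAB. A minimal standalone sketch of the guard; LocalHeapLike and SpaceId are stand-ins for the V8 types:

#include <cassert>

enum class SpaceId { kOldSpace, kMapSpace, kCodeSpace };

struct LocalHeapLike {  // stand-in for v8::internal::LocalHeap
  bool is_main_thread = false;
};

// Background threads may help sweep any space except the code space.
bool IsSweepingAllowedOnThread(const LocalHeapLike& local_heap, SpaceId space) {
  return local_heap.is_main_thread || space != SpaceId::kCodeSpace;
}

int main() {
  LocalHeapLike background{/*is_main_thread=*/false};
  assert(IsSweepingAllowedOnThread(background, SpaceId::kOldSpace));
  assert(!IsSweepingAllowedOnThread(background, SpaceId::kCodeSpace));
  return 0;
}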
diff --git a/deps/v8/src/heap/paged-spaces.h b/deps/v8/src/heap/paged-spaces.h
index fd101446d6..bdc4dee23f 100644
--- a/deps/v8/src/heap/paged-spaces.h
+++ b/deps/v8/src/heap/paged-spaces.h
@@ -86,7 +86,7 @@ class V8_EXPORT_PRIVATE PagedSpace
// Creates a space with an id.
PagedSpace(
Heap* heap, AllocationSpace id, Executability executable,
- FreeList* free_list,
+ FreeList* free_list, LinearAllocationArea* allocation_info_,
CompactionSpaceKind compaction_space_kind = CompactionSpaceKind::kNone);
~PagedSpace() override { TearDown(); }
@@ -357,6 +357,10 @@ class V8_EXPORT_PRIVATE PagedSpace
bool HasPages() { return first_page() != nullptr; }
+ // Returns whether sweeping of this space is safe on this thread. Code space
+ // sweeping is only allowed on the main thread.
+ bool IsSweepingAllowedOnThread(LocalHeap* local_heap);
+
// Cleans up the space, frees all pages in this space except those belonging
// to the initial chunk, uncommits addresses in the initial chunk.
void TearDown();
@@ -453,12 +457,15 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
CompactionSpaceKind compaction_space_kind)
: PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
- compaction_space_kind) {
+ &allocation_info_, compaction_space_kind) {
DCHECK(is_compaction_space());
}
const std::vector<Page*>& GetNewPages() { return new_pages_; }
+ private:
+ LinearAllocationArea allocation_info_;
+
protected:
V8_WARN_UNUSED_RESULT bool RefillLabMain(int size_in_bytes,
AllocationOrigin origin) override;
@@ -505,9 +512,9 @@ class OldSpace : public PagedSpace {
public:
// Creates an old space object. The constructor does not allocate pages
// from OS.
- explicit OldSpace(Heap* heap)
- : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
- FreeList::CreateFreeList()) {}
+ explicit OldSpace(Heap* heap, LinearAllocationArea* allocation_info)
+ : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(),
+ allocation_info) {}
static bool IsAtPageStart(Address addr) {
return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
@@ -529,7 +536,11 @@ class CodeSpace : public PagedSpace {
  // Creates a code space object. The constructor does not allocate pages
// from OS.
explicit CodeSpace(Heap* heap)
- : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
+ : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList(),
+ &paged_allocation_info_) {}
+
+ private:
+ LinearAllocationArea paged_allocation_info_;
};
// -----------------------------------------------------------------------------
@@ -539,8 +550,8 @@ class MapSpace : public PagedSpace {
public:
// Creates a map space object.
explicit MapSpace(Heap* heap)
- : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE,
- FreeList::CreateFreeList()) {}
+ : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(),
+ &paged_allocation_info_) {}
int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo(Map::kSize)) {
@@ -555,6 +566,9 @@ class MapSpace : public PagedSpace {
#ifdef VERIFY_HEAP
void VerifyObject(HeapObject obj) override;
#endif
+
+ private:
+ LinearAllocationArea paged_allocation_info_;
};
// Iterates over the chunks (pages and large object pages) that can contain
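The paged-spaces.h hunks above thread a LinearAllocationArea* through the PagedSpace constructor: OldSpace is handed an area owned elsewhere, while CodeSpace, MapSpace, and CompactionSpace keep a private area and pass its address up. A standalone sketch of that ownership split (simplified stand-in types, not the V8 classes):

#include <cstdint>

struct AllocationAreaSketch {
  uintptr_t top = 0;
  uintptr_t limit = 0;
};

class SpaceSketch {
 public:
  explicit SpaceSketch(AllocationAreaSketch* allocation_info)
      : allocation_info_(allocation_info) {}
  uintptr_t top() const { return allocation_info_->top; }

 protected:
  AllocationAreaSketch* const allocation_info_;  // not owned by the space
};

// Mirrors OldSpace/NewSpace: the creator owns the allocation area.
class SharedAreaSpace : public SpaceSketch {
 public:
  explicit SharedAreaSpace(AllocationAreaSketch* externally_owned_area)
      : SpaceSketch(externally_owned_area) {}
};

// Mirrors CodeSpace/MapSpace: the space owns its own area and passes its
// address to the base class (taking the address of a not-yet-initialized
// member is fine because the base constructor only stores the pointer).
class OwnAreaSpace : public SpaceSketch {
 public:
  OwnAreaSpace() : SpaceSketch(&own_area_) {}

 private:
  AllocationAreaSketch own_area_;
};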
diff --git a/deps/v8/src/heap/parked-scope.h b/deps/v8/src/heap/parked-scope.h
index c7bfa38ce1..76d863215e 100644
--- a/deps/v8/src/heap/parked-scope.h
+++ b/deps/v8/src/heap/parked-scope.h
@@ -44,6 +44,8 @@ class V8_NODISCARD UnparkedScope {
LocalHeap* const local_heap_;
};
+// Scope that automatically parks the thread while blocking on the given
+// base::Mutex.
class V8_NODISCARD ParkedMutexGuard {
public:
explicit ParkedMutexGuard(LocalIsolate* local_isolate, base::Mutex* mutex)
diff --git a/deps/v8/src/heap/read-only-spaces.cc b/deps/v8/src/heap/read-only-spaces.cc
index d88432bbbc..3fa267d26c 100644
--- a/deps/v8/src/heap/read-only-spaces.cc
+++ b/deps/v8/src/heap/read-only-spaces.cc
@@ -692,13 +692,10 @@ AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment) {
-#ifdef V8_HOST_ARCH_32_BIT
- AllocationResult result = alignment != kWordAligned
- ? AllocateRawAligned(size_in_bytes, alignment)
- : AllocateRawUnaligned(size_in_bytes);
-#else
- AllocationResult result = AllocateRawUnaligned(size_in_bytes);
-#endif
+ AllocationResult result =
+ USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned
+ ? AllocateRawAligned(size_in_bytes, alignment)
+ : AllocateRawUnaligned(size_in_bytes);
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj)) {
DCHECK(heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index 5eefec989c..13a6fedf47 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -11,12 +11,12 @@
#include "src/base/memory.h"
#include "src/codegen/reloc-info.h"
#include "src/common/globals.h"
+#include "src/heap/base/worklist.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
-#include "src/heap/worklist.h"
namespace v8 {
namespace internal {
@@ -180,7 +180,7 @@ class RememberedSet : public AllStatic {
template <typename Callback>
static int IterateAndTrackEmptyBuckets(
MemoryChunk* chunk, Callback callback,
- Worklist<MemoryChunk*, 64>::View empty_chunks) {
+ ::heap::base::Worklist<MemoryChunk*, 64>::Local* empty_chunks) {
SlotSet* slot_set = chunk->slot_set<type>();
int slots = 0;
if (slot_set != nullptr) {
@@ -189,7 +189,7 @@ class RememberedSet : public AllStatic {
slots += slot_set->IterateAndTrackEmptyBuckets(chunk->address(), 0,
chunk->buckets(), callback,
possibly_empty_buckets);
- if (!possibly_empty_buckets->IsEmpty()) empty_chunks.Push(chunk);
+ if (!possibly_empty_buckets->IsEmpty()) empty_chunks->Push(chunk);
}
return slots;
}
diff --git a/deps/v8/src/heap/safepoint.cc b/deps/v8/src/heap/safepoint.cc
index bf3e5eaf95..bd4c610004 100644
--- a/deps/v8/src/heap/safepoint.cc
+++ b/deps/v8/src/heap/safepoint.cc
@@ -7,6 +7,10 @@
#include <atomic>
#include "src/base/logging.h"
+#include "src/base/optional.h"
+#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
#include "src/handles/handles.h"
#include "src/handles/local-handles.h"
#include "src/handles/persistent-handles.h"
@@ -14,7 +18,9 @@
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap.h"
+#include "src/heap/parked-scope.h"
#include "src/logging/counters-scopes.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -22,22 +28,80 @@ namespace internal {
IsolateSafepoint::IsolateSafepoint(Heap* heap)
: heap_(heap), local_heaps_head_(nullptr), active_safepoint_scopes_(0) {}
-void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
- // Safepoints need to be initiated on the main thread.
- DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
+void IsolateSafepoint::EnterLocalSafepointScope() {
+ // Safepoints need to be initiated on some main thread.
DCHECK_NULL(LocalHeap::Current());
+ DCHECK(AllowGarbageCollection::IsAllowed());
+ LockMutex(heap_->isolate()->main_thread_local_heap());
if (++active_safepoint_scopes_ > 1) return;
+ // Local safepoint can only be initiated on the isolate's main thread.
+ DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
+
TimedHistogramScope timer(
heap_->isolate()->counters()->gc_time_to_safepoint());
TRACE_GC(heap_->tracer(), GCTracer::Scope::TIME_TO_SAFEPOINT);
- local_heaps_mutex_.Lock();
+ barrier_.Arm();
+ size_t running = SetSafepointRequestedFlags(IncludeMainThread::kNo);
+ barrier_.WaitUntilRunningThreadsInSafepoint(running);
+}
+
+class PerClientSafepointData final {
+ public:
+ explicit PerClientSafepointData(Isolate* isolate) : isolate_(isolate) {}
+
+ void set_locked_and_running(size_t running) {
+ locked_ = true;
+ running_ = running;
+ }
+
+ IsolateSafepoint* safepoint() const { return heap()->safepoint(); }
+ Heap* heap() const { return isolate_->heap(); }
+ Isolate* isolate() const { return isolate_; }
+
+ bool is_locked() const { return locked_; }
+ size_t running() const { return running_; }
+
+ private:
+ Isolate* const isolate_;
+ size_t running_ = 0;
+ bool locked_ = false;
+};
+void IsolateSafepoint::InitiateGlobalSafepointScope(
+ Isolate* initiator, PerClientSafepointData* client_data) {
+ IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
+ LockMutex(initiator->main_thread_local_heap());
+ InitiateGlobalSafepointScopeRaw(initiator, client_data);
+}
+
+void IsolateSafepoint::TryInitiateGlobalSafepointScope(
+ Isolate* initiator, PerClientSafepointData* client_data) {
+ if (!local_heaps_mutex_.TryLock()) return;
+ InitiateGlobalSafepointScopeRaw(initiator, client_data);
+}
+
+void IsolateSafepoint::InitiateGlobalSafepointScopeRaw(
+ Isolate* initiator, PerClientSafepointData* client_data) {
+ CHECK_EQ(++active_safepoint_scopes_, 1);
barrier_.Arm();
- int running = 0;
+ size_t running =
+ SetSafepointRequestedFlags(IncludeMainThreadUnlessInitiator(initiator));
+ client_data->set_locked_and_running(running);
+}
+
+IsolateSafepoint::IncludeMainThread
+IsolateSafepoint::IncludeMainThreadUnlessInitiator(Isolate* initiator) {
+ const bool is_initiator = heap_->isolate() == initiator;
+ return is_initiator ? IncludeMainThread::kNo : IncludeMainThread::kYes;
+}
+
+size_t IsolateSafepoint::SetSafepointRequestedFlags(
+ IncludeMainThread include_main_thread) {
+ size_t running = 0;
// There needs to be at least one LocalHeap for the main thread.
DCHECK_NOT_NULL(local_heaps_head_);
@@ -45,7 +109,7 @@ void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
for (LocalHeap* local_heap = local_heaps_head_; local_heap;
local_heap = local_heap->next_) {
if (local_heap->is_main_thread() &&
- stop_main_thread == StopMainThread::kNo) {
+ include_main_thread == IncludeMainThread::kNo) {
continue;
}
@@ -58,21 +122,42 @@ void IsolateSafepoint::EnterSafepointScope(StopMainThread stop_main_thread) {
CHECK(!old_state.IsSafepointRequested());
}
- barrier_.WaitUntilRunningThreadsInSafepoint(running);
+ return running;
}
-void IsolateSafepoint::LeaveSafepointScope(StopMainThread stop_main_thread) {
- // Safepoints need to be initiated on the main thread.
- DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
- DCHECK_NULL(LocalHeap::Current());
+void IsolateSafepoint::LockMutex(LocalHeap* local_heap) {
+ if (!local_heaps_mutex_.TryLock()) {
+ ParkedScope parked_scope(local_heap);
+ local_heaps_mutex_.Lock();
+ }
+}
+
+void IsolateSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {
+ local_heaps_mutex_.AssertHeld();
+ CHECK_EQ(--active_safepoint_scopes_, 0);
+ ClearSafepointRequestedFlags(IncludeMainThreadUnlessInitiator(initiator));
+ barrier_.Disarm();
+ local_heaps_mutex_.Unlock();
+}
+void IsolateSafepoint::LeaveLocalSafepointScope() {
+ local_heaps_mutex_.AssertHeld();
DCHECK_GT(active_safepoint_scopes_, 0);
- if (--active_safepoint_scopes_ > 0) return;
+ if (--active_safepoint_scopes_ == 0) {
+ ClearSafepointRequestedFlags(IncludeMainThread::kNo);
+ barrier_.Disarm();
+ }
+
+ local_heaps_mutex_.Unlock();
+}
+
+void IsolateSafepoint::ClearSafepointRequestedFlags(
+ IncludeMainThread include_main_thread) {
for (LocalHeap* local_heap = local_heaps_head_; local_heap;
local_heap = local_heap->next_) {
if (local_heap->is_main_thread() &&
- stop_main_thread == StopMainThread::kNo) {
+ include_main_thread == IncludeMainThread::kNo) {
continue;
}
@@ -84,10 +169,6 @@ void IsolateSafepoint::LeaveSafepointScope(StopMainThread stop_main_thread) {
CHECK_IMPLIES(old_state.IsCollectionRequested(),
local_heap->is_main_thread());
}
-
- barrier_.Disarm();
-
- local_heaps_mutex_.Unlock();
}
void IsolateSafepoint::WaitInSafepoint() { barrier_.WaitInSafepoint(); }
@@ -96,6 +177,11 @@ void IsolateSafepoint::WaitInUnpark() { barrier_.WaitInUnpark(); }
void IsolateSafepoint::NotifyPark() { barrier_.NotifyPark(); }
+void IsolateSafepoint::WaitUntilRunningThreadsInSafepoint(
+ const PerClientSafepointData* client_data) {
+ barrier_.WaitUntilRunningThreadsInSafepoint(client_data->running());
+}
+
void IsolateSafepoint::Barrier::Arm() {
base::MutexGuard guard(&mutex_);
DCHECK(!IsArmed());
@@ -112,7 +198,7 @@ void IsolateSafepoint::Barrier::Disarm() {
}
void IsolateSafepoint::Barrier::WaitUntilRunningThreadsInSafepoint(
- int running) {
+ size_t running) {
base::MutexGuard guard(&mutex_);
DCHECK(IsArmed());
while (stopped_ < running) {
@@ -147,16 +233,8 @@ void IsolateSafepoint::Barrier::WaitInUnpark() {
}
}
-SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
- safepoint_->EnterSafepointScope(IsolateSafepoint::StopMainThread::kNo);
-}
-
-SafepointScope::~SafepointScope() {
- safepoint_->LeaveSafepointScope(IsolateSafepoint::StopMainThread::kNo);
-}
-
bool IsolateSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
- base::MutexGuard guard(&local_heaps_mutex_);
+ base::RecursiveMutexGuard guard(&local_heaps_mutex_);
LocalHeap* current = local_heaps_head_;
while (current) {
@@ -168,7 +246,7 @@ bool IsolateSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
}
bool IsolateSafepoint::ContainsAnyLocalHeap() {
- base::MutexGuard guard(&local_heaps_mutex_);
+ base::RecursiveMutexGuard guard(&local_heaps_mutex_);
return local_heaps_head_ != nullptr;
}
@@ -180,5 +258,138 @@ void IsolateSafepoint::Iterate(RootVisitor* visitor) {
}
}
+void IsolateSafepoint::AssertMainThreadIsOnlyThread() {
+ DCHECK_EQ(local_heaps_head_, heap_->main_thread_local_heap());
+ DCHECK_NULL(heap_->main_thread_local_heap()->next_);
+}
+
+SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
+ safepoint_->EnterLocalSafepointScope();
+}
+
+SafepointScope::~SafepointScope() { safepoint_->LeaveLocalSafepointScope(); }
+
+GlobalSafepoint::GlobalSafepoint(Isolate* isolate)
+ : shared_isolate_(isolate), shared_heap_(isolate->heap()) {}
+
+void GlobalSafepoint::AppendClient(Isolate* client) {
+ clients_mutex_.AssertHeld();
+
+ DCHECK_NULL(client->global_safepoint_prev_client_isolate_);
+ DCHECK_NULL(client->global_safepoint_next_client_isolate_);
+ DCHECK_NE(clients_head_, client);
+
+ if (clients_head_) {
+ clients_head_->global_safepoint_prev_client_isolate_ = client;
+ }
+
+ client->global_safepoint_prev_client_isolate_ = nullptr;
+ client->global_safepoint_next_client_isolate_ = clients_head_;
+
+ clients_head_ = client;
+ client->shared_isolate_ = shared_isolate_;
+}
+
+void GlobalSafepoint::RemoveClient(Isolate* client) {
+ DCHECK_EQ(client->heap()->gc_state(), Heap::TEAR_DOWN);
+
+ // A shared heap may have already acquired the client mutex to perform a
+ // shared GC. We need to park the Isolate here to allow for a shared GC.
+ IgnoreLocalGCRequests ignore_gc_requests(client->heap());
+ ParkedMutexGuard guard(client->main_thread_local_heap(), &clients_mutex_);
+
+ if (client->global_safepoint_next_client_isolate_) {
+ client->global_safepoint_next_client_isolate_
+ ->global_safepoint_prev_client_isolate_ =
+ client->global_safepoint_prev_client_isolate_;
+ }
+
+ if (client->global_safepoint_prev_client_isolate_) {
+ client->global_safepoint_prev_client_isolate_
+ ->global_safepoint_next_client_isolate_ =
+ client->global_safepoint_next_client_isolate_;
+ } else {
+ DCHECK_EQ(clients_head_, client);
+ clients_head_ = client->global_safepoint_next_client_isolate_;
+ }
+
+ client->shared_isolate_ = nullptr;
+}
+
+void GlobalSafepoint::AssertNoClients() { DCHECK_NULL(clients_head_); }
+
+void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
+ // Safepoints need to be initiated on some main thread.
+ DCHECK_NULL(LocalHeap::Current());
+
+ if (!clients_mutex_.TryLock()) {
+ IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
+ ParkedScope parked_scope(initiator->main_thread_local_heap());
+ clients_mutex_.Lock();
+ }
+
+ TimedHistogramScope timer(
+ initiator->counters()->gc_time_to_global_safepoint());
+ TRACE_GC(initiator->heap()->tracer(),
+ GCTracer::Scope::TIME_TO_GLOBAL_SAFEPOINT);
+
+ std::vector<PerClientSafepointData> clients;
+
+ // Try to initiate safepoint for all clients. Fail immediately when the
+ // local_heaps_mutex_ can't be locked without blocking.
+ IterateClientIsolates([&clients, initiator](Isolate* client) {
+ clients.emplace_back(client);
+ client->heap()->safepoint()->TryInitiateGlobalSafepointScope(
+ initiator, &clients.back());
+ });
+
+ // Iterate all clients again to initiate the safepoint for all of them - even
+ // if that means blocking.
+ for (PerClientSafepointData& client : clients) {
+ if (client.is_locked()) continue;
+ client.safepoint()->InitiateGlobalSafepointScope(initiator, &client);
+ }
+
+#if DEBUG
+ for (const PerClientSafepointData& client : clients) {
+ DCHECK_EQ(client.isolate()->shared_isolate(), shared_isolate_);
+ DCHECK(client.heap()->deserialization_complete());
+ }
+#endif // DEBUG
+
+ // Now that safepoints were initiated for all clients, wait until all threads
+ // of all clients reached a safepoint.
+ for (const PerClientSafepointData& client : clients) {
+ DCHECK(client.is_locked());
+ client.safepoint()->WaitUntilRunningThreadsInSafepoint(&client);
+ }
+}
+
+void GlobalSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {
+ IterateClientIsolates([initiator](Isolate* client) {
+ Heap* client_heap = client->heap();
+ client_heap->safepoint()->LeaveGlobalSafepointScope(initiator);
+ });
+
+ clients_mutex_.Unlock();
+}
+
+GlobalSafepointScope::GlobalSafepointScope(Isolate* initiator)
+ : initiator_(initiator), shared_isolate_(initiator->shared_isolate()) {
+ if (shared_isolate_) {
+ shared_isolate_->global_safepoint()->EnterGlobalSafepointScope(initiator_);
+ } else {
+ initiator_->heap()->safepoint()->EnterLocalSafepointScope();
+ }
+}
+
+GlobalSafepointScope::~GlobalSafepointScope() {
+ if (shared_isolate_) {
+ shared_isolate_->global_safepoint()->LeaveGlobalSafepointScope(initiator_);
+ } else {
+ initiator_->heap()->safepoint()->LeaveLocalSafepointScope();
+ }
+}
+
} // namespace internal
} // namespace v8
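
The EnterGlobalSafepointScope implementation above acquires the clients in two passes: a non-blocking TryInitiateGlobalSafepointScope pass over every client, then a blocking pass over whichever clients were still contended, and only afterwards a wait until all running threads of all clients have reached their safepoints. The standalone sketch below models just that two-phase "try first, block later" acquisition with std::mutex stand-ins; ClientLock and the function names are illustrative, not V8 API.

// Illustrative model of the two-phase lock acquisition used when entering a
// global safepoint: an opportunistic try_lock over all clients, followed by a
// blocking pass over the ones that could not be locked immediately.
#include <mutex>
#include <vector>

struct ClientLock {          // hypothetical stand-in for one client isolate
  std::mutex heaps_mutex;    // models that client's local_heaps_mutex_
  bool locked = false;
};

void EnterGlobalSafepoint(std::vector<ClientLock>& clients) {
  // Pass 1: lock whatever can be locked without blocking.
  for (ClientLock& client : clients) {
    client.locked = client.heaps_mutex.try_lock();
  }
  // Pass 2: block on the remaining clients; most locks are already held, so
  // the initiator waits on as few contended mutexes as possible.
  for (ClientLock& client : clients) {
    if (client.locked) continue;
    client.heaps_mutex.lock();
    client.locked = true;
  }
  // Every client is now prevented from starting or stopping threads; the
  // caller can wait for all running threads to reach their safepoints.
}

void LeaveGlobalSafepoint(std::vector<ClientLock>& clients) {
  for (ClientLock& client : clients) {
    client.heaps_mutex.unlock();
    client.locked = false;
  }
}
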
diff --git a/deps/v8/src/heap/safepoint.h b/deps/v8/src/heap/safepoint.h
index 961bfdf001..8a6823c603 100644
--- a/deps/v8/src/heap/safepoint.h
+++ b/deps/v8/src/heap/safepoint.h
@@ -7,6 +7,7 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/local-heap.h"
#include "src/objects/visitors.h"
@@ -16,23 +17,15 @@ namespace internal {
class Heap;
class LocalHeap;
+class PerClientSafepointData;
class RootVisitor;
-// Used to bring all threads with heap access to a safepoint such that e.g. a
-// garbage collection can be performed.
+// Used to bring all threads with heap access in an isolate to a safepoint such
+// that e.g. a garbage collection can be performed.
class IsolateSafepoint final {
public:
explicit IsolateSafepoint(Heap* heap);
- // Wait until unpark operation is safe again
- void WaitInUnpark();
-
- // Enter the safepoint from a running thread
- void WaitInSafepoint();
-
- // Running thread reached a safepoint by parking itself.
- void NotifyPark();
-
V8_EXPORT_PRIVATE bool ContainsLocalHeap(LocalHeap* local_heap);
V8_EXPORT_PRIVATE bool ContainsAnyLocalHeap();
@@ -51,6 +44,8 @@ class IsolateSafepoint final {
void AssertActive() { local_heaps_mutex_.AssertHeld(); }
+ void AssertMainThreadIsOnlyThread();
+
private:
class Barrier {
base::Mutex mutex_;
@@ -58,7 +53,7 @@ class IsolateSafepoint final {
base::ConditionVariable cv_stopped_;
bool armed_;
- int stopped_ = 0;
+ size_t stopped_ = 0;
bool IsArmed() { return armed_; }
@@ -67,23 +62,53 @@ class IsolateSafepoint final {
void Arm();
void Disarm();
- void WaitUntilRunningThreadsInSafepoint(int running);
+ void WaitUntilRunningThreadsInSafepoint(size_t running);
void WaitInSafepoint();
void WaitInUnpark();
void NotifyPark();
};
- enum class StopMainThread { kYes, kNo };
+ enum class IncludeMainThread { kYes, kNo };
+
+ // Wait until unpark operation is safe again.
+ void WaitInUnpark();
+
+ // Enter the safepoint from a running thread.
+ void WaitInSafepoint();
+
+ // Running thread reached a safepoint by parking itself.
+ void NotifyPark();
+
+ // Methods for entering/leaving local safepoint scopes.
+ void EnterLocalSafepointScope();
+ void LeaveLocalSafepointScope();
+
+ // Methods for entering/leaving global safepoint scopes.
+ void TryInitiateGlobalSafepointScope(Isolate* initiator,
+ PerClientSafepointData* client_data);
+ void InitiateGlobalSafepointScope(Isolate* initiator,
+ PerClientSafepointData* client_data);
+ void InitiateGlobalSafepointScopeRaw(Isolate* initiator,
+ PerClientSafepointData* client_data);
+ void LeaveGlobalSafepointScope(Isolate* initiator);
+
+ // Blocks until all running threads reached a safepoint.
+ void WaitUntilRunningThreadsInSafepoint(
+ const PerClientSafepointData* client_data);
+
+ IncludeMainThread IncludeMainThreadUnlessInitiator(Isolate* initiator);
+
+ void LockMutex(LocalHeap* local_heap);
- void EnterSafepointScope(StopMainThread stop_main_thread);
- void LeaveSafepointScope(StopMainThread stop_main_thread);
+ size_t SetSafepointRequestedFlags(IncludeMainThread include_main_thread);
+ void ClearSafepointRequestedFlags(IncludeMainThread include_main_thread);
template <typename Callback>
void AddLocalHeap(LocalHeap* local_heap, Callback callback) {
// Safepoint holds this lock in order to stop threads from starting or
// stopping.
- base::MutexGuard guard(&local_heaps_mutex_);
+ base::RecursiveMutexGuard guard(&local_heaps_mutex_);
// Additional code protected from safepoint
callback();
@@ -97,7 +122,7 @@ class IsolateSafepoint final {
template <typename Callback>
void RemoveLocalHeap(LocalHeap* local_heap, Callback callback) {
- base::MutexGuard guard(&local_heaps_mutex_);
+ base::RecursiveMutexGuard guard(&local_heaps_mutex_);
// Additional code protected from safepoint
callback();
@@ -113,12 +138,16 @@ class IsolateSafepoint final {
Barrier barrier_;
Heap* heap_;
- base::Mutex local_heaps_mutex_;
+ // Mutex is used both for safepointing and adding/removing threads. A
+ // RecursiveMutex is needed since we need to support nested SafepointScopes.
+ base::RecursiveMutex local_heaps_mutex_;
LocalHeap* local_heaps_head_;
int active_safepoint_scopes_;
friend class Heap;
+ friend class GlobalSafepoint;
+ friend class GlobalSafepointScope;
friend class LocalHeap;
friend class PersistentHandles;
friend class SafepointScope;
@@ -133,6 +162,48 @@ class V8_NODISCARD SafepointScope {
IsolateSafepoint* safepoint_;
};
+// Used for reaching a global safepoint, a safepoint across all client isolates
+// of the shared isolate.
+class GlobalSafepoint final {
+ public:
+ explicit GlobalSafepoint(Isolate* isolate);
+
+ void AppendClient(Isolate* client);
+ void RemoveClient(Isolate* client);
+
+ template <typename Callback>
+ void IterateClientIsolates(Callback callback) {
+ for (Isolate* current = clients_head_; current;
+ current = current->global_safepoint_next_client_isolate_) {
+ callback(current);
+ }
+ }
+
+ void AssertNoClients();
+
+ private:
+ void EnterGlobalSafepointScope(Isolate* initiator);
+ void LeaveGlobalSafepointScope(Isolate* initiator);
+
+ Isolate* const shared_isolate_;
+ Heap* const shared_heap_;
+ base::Mutex clients_mutex_;
+ Isolate* clients_head_ = nullptr;
+
+ friend class GlobalSafepointScope;
+ friend class Isolate;
+};
+
+class V8_NODISCARD GlobalSafepointScope {
+ public:
+ V8_EXPORT_PRIVATE explicit GlobalSafepointScope(Isolate* initiator);
+ V8_EXPORT_PRIVATE ~GlobalSafepointScope();
+
+ private:
+ Isolate* const initiator_;
+ Isolate* const shared_isolate_;
+};
+
} // namespace internal
} // namespace v8
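
The header above changes local_heaps_mutex_ to a RecursiveMutex precisely so that safepoint scopes can nest on the same thread, as the new comment states. Below is a minimal standalone model of that nesting, assuming std::recursive_mutex and a simple scope counter; the class and method names are illustrative, not the V8 types.

// Minimal model of nested safepoint scopes backed by a recursive mutex: the
// inner scope on the same thread re-enters the mutex instead of deadlocking,
// and the safepoint is only torn down when the outermost scope exits.
#include <cassert>
#include <mutex>

class SafepointModel {
 public:
  void EnterScope() {
    mutex_.lock();                         // re-entrant on the same thread
    if (++active_scopes_ == 1) ArmBarrier();
  }
  void LeaveScope() {
    assert(active_scopes_ > 0);
    if (--active_scopes_ == 0) DisarmBarrier();
    mutex_.unlock();
  }

 private:
  void ArmBarrier() { /* stop other threads */ }
  void DisarmBarrier() { /* resume other threads */ }

  std::recursive_mutex mutex_;  // models base::RecursiveMutex
  int active_scopes_ = 0;       // models active_safepoint_scopes_
};

int main() {
  SafepointModel safepoint;
  safepoint.EnterScope();
  safepoint.EnterScope();   // nested scope: no deadlock with a recursive mutex
  safepoint.LeaveScope();
  safepoint.LeaveScope();   // barrier disarmed only here
}
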
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 152bc03613..8a0a1da96b 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -16,93 +16,59 @@
namespace v8 {
namespace internal {
-void Scavenger::PromotionList::View::PushRegularObject(HeapObject object,
- int size) {
- promotion_list_->PushRegularObject(task_id_, object, size);
+void Scavenger::PromotionList::Local::PushRegularObject(HeapObject object,
+ int size) {
+ regular_object_promotion_list_local_.Push({object, size});
}
-void Scavenger::PromotionList::View::PushLargeObject(HeapObject object, Map map,
- int size) {
- promotion_list_->PushLargeObject(task_id_, object, map, size);
+void Scavenger::PromotionList::Local::PushLargeObject(HeapObject object,
+ Map map, int size) {
+ large_object_promotion_list_local_.Push({object, map, size});
}
-bool Scavenger::PromotionList::View::IsEmpty() {
- return promotion_list_->IsEmpty();
+size_t Scavenger::PromotionList::Local::LocalPushSegmentSize() const {
+ return regular_object_promotion_list_local_.PushSegmentSize() +
+ large_object_promotion_list_local_.PushSegmentSize();
}
-size_t Scavenger::PromotionList::View::LocalPushSegmentSize() {
- return promotion_list_->LocalPushSegmentSize(task_id_);
-}
-
-bool Scavenger::PromotionList::View::Pop(struct PromotionListEntry* entry) {
- return promotion_list_->Pop(task_id_, entry);
-}
-
-void Scavenger::PromotionList::View::FlushToGlobal() {
- promotion_list_->FlushToGlobal(task_id_);
-}
-
-bool Scavenger::PromotionList::View::IsGlobalPoolEmpty() {
- return promotion_list_->IsGlobalPoolEmpty();
-}
-
-bool Scavenger::PromotionList::View::ShouldEagerlyProcessPromotionList() {
- return promotion_list_->ShouldEagerlyProcessPromotionList(task_id_);
-}
-
-void Scavenger::PromotionList::PushRegularObject(int task_id, HeapObject object,
- int size) {
- regular_object_promotion_list_.Push(task_id, ObjectAndSize(object, size));
-}
-
-void Scavenger::PromotionList::PushLargeObject(int task_id, HeapObject object,
- Map map, int size) {
- large_object_promotion_list_.Push(task_id, {object, map, size});
-}
-
-bool Scavenger::PromotionList::IsEmpty() {
- return regular_object_promotion_list_.IsEmpty() &&
- large_object_promotion_list_.IsEmpty();
-}
-
-size_t Scavenger::PromotionList::LocalPushSegmentSize(int task_id) {
- return regular_object_promotion_list_.LocalPushSegmentSize(task_id) +
- large_object_promotion_list_.LocalPushSegmentSize(task_id);
-}
-
-bool Scavenger::PromotionList::Pop(int task_id,
- struct PromotionListEntry* entry) {
+bool Scavenger::PromotionList::Local::Pop(struct PromotionListEntry* entry) {
ObjectAndSize regular_object;
- if (regular_object_promotion_list_.Pop(task_id, &regular_object)) {
+ if (regular_object_promotion_list_local_.Pop(&regular_object)) {
entry->heap_object = regular_object.first;
entry->size = regular_object.second;
entry->map = entry->heap_object.map();
return true;
}
- return large_object_promotion_list_.Pop(task_id, entry);
-}
-
-void Scavenger::PromotionList::FlushToGlobal(int task_id) {
- regular_object_promotion_list_.FlushToGlobal(task_id);
- large_object_promotion_list_.FlushToGlobal(task_id);
+ return large_object_promotion_list_local_.Pop(entry);
}
-size_t Scavenger::PromotionList::GlobalPoolSize() const {
- return regular_object_promotion_list_.GlobalPoolSize() +
- large_object_promotion_list_.GlobalPoolSize();
+void Scavenger::PromotionList::Local::Publish() {
+ regular_object_promotion_list_local_.Publish();
+ large_object_promotion_list_local_.Publish();
}
-bool Scavenger::PromotionList::IsGlobalPoolEmpty() {
- return regular_object_promotion_list_.IsGlobalPoolEmpty() &&
- large_object_promotion_list_.IsGlobalPoolEmpty();
+bool Scavenger::PromotionList::Local::IsGlobalPoolEmpty() const {
+ return regular_object_promotion_list_local_.IsGlobalEmpty() &&
+ large_object_promotion_list_local_.IsGlobalEmpty();
}
-bool Scavenger::PromotionList::ShouldEagerlyProcessPromotionList(int task_id) {
+bool Scavenger::PromotionList::Local::ShouldEagerlyProcessPromotionList()
+ const {
// Threshold when to prioritize processing of the promotion list. Right
// now we only look into the regular object list.
const int kProcessPromotionListThreshold =
kRegularObjectPromotionListSegmentSize / 2;
- return LocalPushSegmentSize(task_id) < kProcessPromotionListThreshold;
+ return LocalPushSegmentSize() < kProcessPromotionListThreshold;
+}
+
+bool Scavenger::PromotionList::IsEmpty() const {
+ return regular_object_promotion_list_.IsEmpty() &&
+ large_object_promotion_list_.IsEmpty();
+}
+
+size_t Scavenger::PromotionList::Size() const {
+ return regular_object_promotion_list_.Size() +
+ large_object_promotion_list_.Size();
}
void Scavenger::PageMemoryFence(MaybeObject object) {
@@ -169,7 +135,7 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
}
HeapObjectReference::Update(slot, target);
if (object_fields == ObjectFields::kMaybePointers) {
- copied_list_.Push(ObjectAndSize(target, object_size));
+ copied_list_local_.Push(ObjectAndSize(target, object_size));
}
copied_size_ += object_size;
return CopyAndForwardResult::SUCCESS_YOUNG_GENERATION;
@@ -217,7 +183,7 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
}
HeapObjectReference::Update(slot, target);
if (object_fields == ObjectFields::kMaybePointers) {
- promotion_list_.PushRegularObject(target, object_size);
+ promotion_list_local_.PushRegularObject(target, object_size);
}
promoted_size_ += object_size;
return CopyAndForwardResult::SUCCESS_OLD_GENERATION;
@@ -246,7 +212,7 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
surviving_new_large_objects_.insert({object, map});
promoted_size_ += object_size;
if (object_fields == ObjectFields::kMaybePointers) {
- promotion_list_.PushLargeObject(object, map, object_size);
+ promotion_list_local_.PushLargeObject(object, map, object_size);
}
}
return true;
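
The scavenger-inl.h changes above replace the task_id-indexed Worklist::View with Local handles of ::heap::base::Worklist: each Local owns a private push and pop segment, Publish() hands locally buffered segments to a shared global pool, and Pop() falls back to that pool when the local segments run dry (the old src/heap/worklist.h implementing the View scheme is deleted at the end of this patch). The standalone sketch below models that shape with standard containers; WorklistModel is an illustration of the pattern, not the real heap::base::Worklist.

// Sketch of a segmented worklist with per-thread Local handles: pushes go into
// a private segment, full segments are published to a shared pool, and pops
// fall back to stealing a published segment when the local ones run dry.
#include <cstddef>
#include <mutex>
#include <vector>

template <typename T, size_t kSegmentSize>
class WorklistModel {
 public:
  using Segment = std::vector<T>;

  class Local {
   public:
    explicit Local(WorklistModel* worklist) : worklist_(worklist) {}

    void Push(T value) {
      push_.push_back(std::move(value));
      if (push_.size() == kSegmentSize) Publish();  // full segment -> global pool
    }

    bool Pop(T* out) {
      if (pop_.empty()) {
        pop_.swap(push_);                            // reuse the private push segment
        if (pop_.empty() && !worklist_->Steal(&pop_)) return false;
      }
      *out = std::move(pop_.back());
      pop_.pop_back();
      return true;
    }

    // Make all locally buffered work visible to other threads.
    void Publish() {
      if (!push_.empty()) worklist_->Publish(std::move(push_));
      push_.clear();
    }

   private:
    WorklistModel* worklist_;
    Segment push_;  // private push segment
    Segment pop_;   // private pop segment
  };

  // Thread-safe but possibly stale count of published segments.
  size_t Size() const {
    std::lock_guard<std::mutex> guard(mutex_);
    return segments_.size();
  }

 private:
  void Publish(Segment segment) {
    std::lock_guard<std::mutex> guard(mutex_);
    segments_.push_back(std::move(segment));
  }

  bool Steal(Segment* out) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (segments_.empty()) return false;
    *out = std::move(segments_.back());
    segments_.pop_back();
    return true;
  }

  mutable std::mutex mutex_;
  std::vector<Segment> segments_;  // global pool of published segments
};
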
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 9faf71f9ee..3e3a67a5e6 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -212,9 +212,9 @@ size_t ScavengerCollector::JobTask::GetMaxConcurrency(
// GlobalPoolSize() of copied_list_ and promotion_list_.
return std::min<size_t>(
scavengers_->size(),
- std::max<size_t>(remaining_memory_chunks_.load(std::memory_order_relaxed),
- worker_count + copied_list_->GlobalPoolSize() +
- promotion_list_->GlobalPoolSize()));
+ std::max<size_t>(
+ remaining_memory_chunks_.load(std::memory_order_relaxed),
+ worker_count + copied_list_->Size() + promotion_list_->Size()));
}
void ScavengerCollector::JobTask::ProcessItems(JobDelegate* delegate,
@@ -272,11 +272,11 @@ void ScavengerCollector::CollectGarbage() {
DCHECK(surviving_new_large_objects_.empty());
std::vector<std::unique_ptr<Scavenger>> scavengers;
- Worklist<MemoryChunk*, 64> empty_chunks;
+ Scavenger::EmptyChunksList empty_chunks;
const int num_scavenge_tasks = NumberOfScavengeTasks();
- Scavenger::CopiedList copied_list(num_scavenge_tasks);
- Scavenger::PromotionList promotion_list(num_scavenge_tasks);
- EphemeronTableList ephemeron_table_list(num_scavenge_tasks);
+ Scavenger::CopiedList copied_list;
+ Scavenger::PromotionList promotion_list;
+ EphemeronTableList ephemeron_table_list;
{
Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();
@@ -341,7 +341,7 @@ void ScavengerCollector::CollectGarbage() {
heap_->IterateRoots(&root_scavenge_visitor, options);
isolate_->global_handles()->IterateYoungStrongAndDependentRoots(
&root_scavenge_visitor);
- scavengers[kMainThreadId]->Flush();
+ scavengers[kMainThreadId]->Publish();
}
{
// Parallel phase scavenging all copied and promoted objects.
@@ -428,9 +428,9 @@ void ScavengerCollector::CollectGarbage() {
{
TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_FREE_REMEMBERED_SET);
+ Scavenger::EmptyChunksList::Local empty_chunks_local(&empty_chunks);
MemoryChunk* chunk;
-
- while (empty_chunks.Pop(kMainThreadId, &chunk)) {
+ while (empty_chunks_local.Pop(&chunk)) {
// Since sweeping was already restarted only check chunks that already got
// swept.
if (chunk->SweepingDone()) {
@@ -534,16 +534,22 @@ int ScavengerCollector::NumberOfScavengeTasks() {
return tasks;
}
+Scavenger::PromotionList::Local::Local(Scavenger::PromotionList* promotion_list)
+ : regular_object_promotion_list_local_(
+ &promotion_list->regular_object_promotion_list_),
+ large_object_promotion_list_local_(
+ &promotion_list->large_object_promotion_list_) {}
+
Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
- Worklist<MemoryChunk*, 64>* empty_chunks,
- CopiedList* copied_list, PromotionList* promotion_list,
+ EmptyChunksList* empty_chunks, CopiedList* copied_list,
+ PromotionList* promotion_list,
EphemeronTableList* ephemeron_table_list, int task_id)
: collector_(collector),
heap_(heap),
- empty_chunks_(empty_chunks, task_id),
- promotion_list_(promotion_list, task_id),
- copied_list_(copied_list, task_id),
- ephemeron_table_list_(ephemeron_table_list, task_id),
+ empty_chunks_local_(empty_chunks),
+ promotion_list_local_(promotion_list),
+ copied_list_local_(copied_list),
+ ephemeron_table_list_local_(ephemeron_table_list),
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
copied_size_(0),
promoted_size_(0),
@@ -602,7 +608,7 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndScavengeObject(heap_, slot);
},
- empty_chunks_);
+ &empty_chunks_local_);
}
if (page->sweeping_slot_set<AccessMode::NON_ATOMIC>() != nullptr) {
@@ -641,24 +647,24 @@ void Scavenger::Process(JobDelegate* delegate) {
do {
done = true;
ObjectAndSize object_and_size;
- while (promotion_list_.ShouldEagerlyProcessPromotionList() &&
- copied_list_.Pop(&object_and_size)) {
+ while (promotion_list_local_.ShouldEagerlyProcessPromotionList() &&
+ copied_list_local_.Pop(&object_and_size)) {
scavenge_visitor.Visit(object_and_size.first);
done = false;
if (delegate && ((++objects % kInterruptThreshold) == 0)) {
- if (!copied_list_.IsGlobalPoolEmpty()) {
+ if (!copied_list_local_.IsEmpty()) {
delegate->NotifyConcurrencyIncrease();
}
}
}
struct PromotionListEntry entry;
- while (promotion_list_.Pop(&entry)) {
+ while (promotion_list_local_.Pop(&entry)) {
HeapObject target = entry.heap_object;
IterateAndScavengePromotedObject(target, entry.map, entry.size);
done = false;
if (delegate && ((++objects % kInterruptThreshold) == 0)) {
- if (!promotion_list_.IsGlobalPoolEmpty()) {
+ if (!promotion_list_local_.IsGlobalPoolEmpty()) {
delegate->NotifyConcurrencyIncrease();
}
}
@@ -735,8 +741,8 @@ void Scavenger::Finalize() {
heap()->IncrementPromotedObjectsSize(promoted_size_);
collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
allocator_.Finalize();
- empty_chunks_.FlushToGlobal();
- ephemeron_table_list_.FlushToGlobal();
+ empty_chunks_local_.Publish();
+ ephemeron_table_list_local_.Publish();
for (auto it = ephemeron_remembered_set_.begin();
it != ephemeron_remembered_set_.end(); ++it) {
auto insert_result = heap()->ephemeron_remembered_set_.insert(
@@ -747,13 +753,13 @@ void Scavenger::Finalize() {
}
}
-void Scavenger::Flush() {
- copied_list_.FlushToGlobal();
- promotion_list_.FlushToGlobal();
+void Scavenger::Publish() {
+ copied_list_local_.Publish();
+ promotion_list_local_.Publish();
}
void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
- ephemeron_table_list_.Push(table);
+ ephemeron_table_list_local_.Push(table);
}
void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
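
Scavenger::Process, shown in the hunk above, drains the two lists in an interleaved fashion: copied objects are visited only while the promotion backlog stays under a threshold (ShouldEagerlyProcessPromotionList), then promoted objects are processed, and the outer loop repeats until a full pass makes no progress. Below is a small standalone sketch of that interleaving using plain deques; the Task type and threshold are illustrative.

// Sketch of the interleaved drain loop: prefer the copied list while the
// promotion backlog is small, switch to promoted objects otherwise, and loop
// until a full pass makes no progress.
#include <cstddef>
#include <deque>

struct Task { /* payload omitted */ };

void Drain(std::deque<Task>& copied, std::deque<Task>& promoted,
           size_t promotion_threshold) {
  bool done;
  do {
    done = true;
    // Process copied objects eagerly only while the promotion backlog is small.
    while (promoted.size() < promotion_threshold && !copied.empty()) {
      copied.pop_front();   // visit the copied object here
      done = false;
    }
    // Then work through promoted objects, which may add more copied work.
    while (!promoted.empty()) {
      promoted.pop_front(); // iterate and scavenge the promoted object here
      done = false;
    }
  } while (!done);
}
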
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 63b3f314db..0eb12a5f3d 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -6,12 +6,12 @@
#define V8_HEAP_SCAVENGER_H_
#include "src/base/platform/condition-variable.h"
+#include "src/heap/base/worklist.h"
#include "src/heap/index-generator.h"
#include "src/heap/local-allocator.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/parallel-work-item.h"
#include "src/heap/slot-set.h"
-#include "src/heap/worklist.h"
namespace v8 {
namespace internal {
@@ -33,7 +33,7 @@ using SurvivingNewLargeObjectMapEntry = std::pair<HeapObject, Map>;
constexpr int kEphemeronTableListSegmentSize = 128;
using EphemeronTableList =
- Worklist<EphemeronHashTable, kEphemeronTableListSegmentSize>;
+ ::heap::base::Worklist<EphemeronHashTable, kEphemeronTableListSegmentSize>;
class ScavengerCollector;
@@ -47,58 +47,49 @@ class Scavenger {
class PromotionList {
public:
- class View {
+ static constexpr size_t kRegularObjectPromotionListSegmentSize = 256;
+ static constexpr size_t kLargeObjectPromotionListSegmentSize = 4;
+
+ using RegularObjectPromotionList =
+ ::heap::base::Worklist<ObjectAndSize,
+ kRegularObjectPromotionListSegmentSize>;
+ using LargeObjectPromotionList =
+ ::heap::base::Worklist<PromotionListEntry,
+ kLargeObjectPromotionListSegmentSize>;
+
+ class Local {
public:
- View(PromotionList* promotion_list, int task_id)
- : promotion_list_(promotion_list), task_id_(task_id) {}
+ explicit Local(PromotionList* promotion_list);
inline void PushRegularObject(HeapObject object, int size);
inline void PushLargeObject(HeapObject object, Map map, int size);
- inline bool IsEmpty();
- inline size_t LocalPushSegmentSize();
+ inline size_t LocalPushSegmentSize() const;
inline bool Pop(struct PromotionListEntry* entry);
- inline bool IsGlobalPoolEmpty();
- inline bool ShouldEagerlyProcessPromotionList();
- inline void FlushToGlobal();
+ inline bool IsGlobalPoolEmpty() const;
+ inline bool ShouldEagerlyProcessPromotionList() const;
+ inline void Publish();
private:
- PromotionList* promotion_list_;
- int task_id_;
+ RegularObjectPromotionList::Local regular_object_promotion_list_local_;
+ LargeObjectPromotionList::Local large_object_promotion_list_local_;
};
- explicit PromotionList(int num_tasks)
- : regular_object_promotion_list_(num_tasks),
- large_object_promotion_list_(num_tasks) {}
-
- inline void PushRegularObject(int task_id, HeapObject object, int size);
- inline void PushLargeObject(int task_id, HeapObject object, Map map,
- int size);
- inline bool IsEmpty();
- inline size_t GlobalPoolSize() const;
- inline size_t LocalPushSegmentSize(int task_id);
- inline bool Pop(int task_id, struct PromotionListEntry* entry);
- inline bool IsGlobalPoolEmpty();
- inline bool ShouldEagerlyProcessPromotionList(int task_id);
- inline void FlushToGlobal(int task_id);
+ inline bool IsEmpty() const;
+ inline size_t Size() const;
private:
- static const int kRegularObjectPromotionListSegmentSize = 256;
- static const int kLargeObjectPromotionListSegmentSize = 4;
-
- using RegularObjectPromotionList =
- Worklist<ObjectAndSize, kRegularObjectPromotionListSegmentSize>;
- using LargeObjectPromotionList =
- Worklist<PromotionListEntry, kLargeObjectPromotionListSegmentSize>;
-
RegularObjectPromotionList regular_object_promotion_list_;
LargeObjectPromotionList large_object_promotion_list_;
};
static const int kCopiedListSegmentSize = 256;
- using CopiedList = Worklist<ObjectAndSize, kCopiedListSegmentSize>;
+ using CopiedList =
+ ::heap::base::Worklist<ObjectAndSize, kCopiedListSegmentSize>;
+ using EmptyChunksList = ::heap::base::Worklist<MemoryChunk*, 64>;
+
Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
- Worklist<MemoryChunk*, 64>* empty_chunks, CopiedList* copied_list,
+ EmptyChunksList* empty_chunks, CopiedList* copied_list,
PromotionList* promotion_list,
EphemeronTableList* ephemeron_table_list, int task_id);
@@ -112,7 +103,7 @@ class Scavenger {
// Finalize the Scavenger. Needs to be called from the main thread.
void Finalize();
- void Flush();
+ void Publish();
void AddEphemeronHashTable(EphemeronHashTable table);
@@ -198,10 +189,10 @@ class Scavenger {
ScavengerCollector* const collector_;
Heap* const heap_;
- Worklist<MemoryChunk*, 64>::View empty_chunks_;
- PromotionList::View promotion_list_;
- CopiedList::View copied_list_;
- EphemeronTableList::View ephemeron_table_list_;
+ EmptyChunksList::Local empty_chunks_local_;
+ PromotionList::Local promotion_list_local_;
+ CopiedList::Local copied_list_local_;
+ EphemeronTableList::Local ephemeron_table_list_local_;
Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
size_t copied_size_;
size_t promoted_size_;
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 011fc5e53a..4e7b2afbdc 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -189,7 +189,7 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
void Heap::FinalizePartialMap(Map map) {
ReadOnlyRoots roots(this);
- map.set_dependent_code(DependentCode::cast(roots.empty_weak_fixed_array()));
+ map.set_dependent_code(DependentCode::empty_dependent_code(roots));
map.set_raw_transitions(MaybeObject::FromSmi(Smi::zero()));
map.SetInstanceDescriptors(isolate(), roots.empty_descriptor_array(), 0);
map.set_prototype(roots.null_value());
@@ -407,6 +407,9 @@ bool Heap::CreateInitialMaps() {
if (StringShape(entry.type).IsCons()) map.mark_unstable();
roots_table()[entry.index] = map.ptr();
}
+ ALLOCATE_VARSIZE_MAP(SHARED_STRING_TYPE, seq_string_migration_sentinel);
+ ALLOCATE_VARSIZE_MAP(SHARED_ONE_BYTE_STRING_TYPE,
+ one_byte_seq_string_migration_sentinel);
ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
roots.fixed_double_array_map().set_elements_kind(HOLEY_DOUBLE_ELEMENTS);
@@ -503,12 +506,14 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(CODE_DATA_CONTAINER_TYPE, CodeDataContainer::kSize,
code_data_container)
+ IF_WASM(ALLOCATE_MAP, WASM_API_FUNCTION_REF_TYPE, WasmApiFunctionRef::kSize,
+ wasm_api_function_ref)
IF_WASM(ALLOCATE_MAP, WASM_CAPI_FUNCTION_DATA_TYPE,
WasmCapiFunctionData::kSize, wasm_capi_function_data)
IF_WASM(ALLOCATE_MAP, WASM_EXPORTED_FUNCTION_DATA_TYPE,
WasmExportedFunctionData::kSize, wasm_exported_function_data)
- IF_WASM(ALLOCATE_MAP, WASM_API_FUNCTION_REF_TYPE, WasmApiFunctionRef::kSize,
- wasm_api_function_ref)
+ IF_WASM(ALLOCATE_MAP, WASM_INTERNAL_FUNCTION_TYPE,
+ WasmInternalFunction::kSize, wasm_internal_function)
IF_WASM(ALLOCATE_MAP, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData::kSize,
wasm_js_function_data)
IF_WASM(ALLOCATE_MAP, WASM_TYPE_INFO_TYPE, WasmTypeInfo::kSize,
@@ -804,6 +809,9 @@ void Heap::CreateInitialObjects() {
set_feedback_vectors_for_profiling_tools(roots.undefined_value());
set_pending_optimize_for_test_bytecode(roots.undefined_value());
set_shared_wasm_memories(roots.empty_weak_array_list());
+#ifdef V8_ENABLE_WEBASSEMBLY
+ set_active_continuation(roots.undefined_value());
+#endif // V8_ENABLE_WEBASSEMBLY
set_script_list(roots.empty_weak_array_list());
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index c36b02c22d..5e70cbc33d 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -12,11 +12,11 @@
#include "src/base/atomic-utils.h"
#include "src/base/bit-field.h"
#include "src/base/bits.h"
-#include "src/heap/worklist.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/slots.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 230d004fe3..43d01f3989 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -136,7 +136,7 @@ Address SkipFillers(PtrComprCageBase cage_base, HeapObject filler,
while (addr < end) {
filler = HeapObject::FromAddress(addr);
CHECK(filler.IsFreeSpaceOrFiller(cage_base));
- addr = filler.address() + filler.Size();
+ addr = filler.address() + filler.Size(cage_base);
}
return addr;
}
@@ -184,7 +184,7 @@ size_t Page::ShrinkToHighWaterMark() {
this, address() + size() - unused, unused, area_end() - unused);
if (filler.address() != area_end()) {
CHECK(filler.IsFreeSpaceOrFiller(cage_base));
- CHECK_EQ(filler.address() + filler.Size(), area_end());
+ CHECK_EQ(filler.address() + filler.Size(cage_base), area_end());
}
}
return unused;
@@ -270,7 +270,7 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
return start + min_size;
} else if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
// Ensure there are no unaccounted allocations.
- DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
+ DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
// Generated code may allocate inline from the linear allocation area.
// To make sure we can observe these allocations, we use a lower limit.
@@ -325,14 +325,7 @@ void LocalAllocationBuffer::MakeIterable() {
LocalAllocationBuffer::LocalAllocationBuffer(
Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT
: heap_(heap),
- allocation_info_(allocation_info) {
- if (IsValid()) {
- heap_->CreateFillerObjectAtBackground(
- allocation_info_.top(),
- static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
- ClearFreedMemoryMode::kDontClearFreedMemory);
- }
-}
+ allocation_info_(allocation_info) {}
LocalAllocationBuffer::LocalAllocationBuffer(LocalAllocationBuffer&& other)
V8_NOEXCEPT {
@@ -381,16 +374,16 @@ void SpaceWithLinearArea::ResumeAllocationObservers() {
}
void SpaceWithLinearArea::AdvanceAllocationObservers() {
- if (allocation_info_.top() &&
- allocation_info_.start() != allocation_info_.top()) {
- allocation_counter_.AdvanceAllocationObservers(allocation_info_.top() -
- allocation_info_.start());
+ if (allocation_info_->top() &&
+ allocation_info_->start() != allocation_info_->top()) {
+ allocation_counter_.AdvanceAllocationObservers(allocation_info_->top() -
+ allocation_info_->start());
MarkLabStartInitialized();
}
}
void SpaceWithLinearArea::MarkLabStartInitialized() {
- allocation_info_.ResetStart();
+ allocation_info_->ResetStart();
if (identity() == NEW_SPACE) {
heap()->new_space()->MoveOriginalTopForward();
@@ -420,12 +413,12 @@ void SpaceWithLinearArea::InvokeAllocationObservers(
if (allocation_size >= allocation_counter_.NextBytes()) {
// Only the first object in a LAB should reach the next step.
- DCHECK_EQ(soon_object,
- allocation_info_.start() + aligned_size_in_bytes - size_in_bytes);
+ DCHECK_EQ(soon_object, allocation_info_->start() + aligned_size_in_bytes -
+ size_in_bytes);
// Right now the LAB only contains that one object.
- DCHECK_EQ(allocation_info_.top() + allocation_size - aligned_size_in_bytes,
- allocation_info_.limit());
+ DCHECK_EQ(allocation_info_->top() + allocation_size - aligned_size_in_bytes,
+ allocation_info_->limit());
// Ensure that there is a valid object
if (identity() == CODE_SPACE) {
@@ -439,7 +432,7 @@ void SpaceWithLinearArea::InvokeAllocationObservers(
#if DEBUG
// Ensure that allocation_info_ isn't modified during one of the
// AllocationObserver::Step methods.
- LinearAllocationArea saved_allocation_info = allocation_info_;
+ LinearAllocationArea saved_allocation_info = *allocation_info_;
#endif
// Run AllocationObserver::Step through the AllocationCounter.
@@ -447,13 +440,13 @@ void SpaceWithLinearArea::InvokeAllocationObservers(
allocation_size);
// Ensure that start/top/limit didn't change.
- DCHECK_EQ(saved_allocation_info.start(), allocation_info_.start());
- DCHECK_EQ(saved_allocation_info.top(), allocation_info_.top());
- DCHECK_EQ(saved_allocation_info.limit(), allocation_info_.limit());
+ DCHECK_EQ(saved_allocation_info.start(), allocation_info_->start());
+ DCHECK_EQ(saved_allocation_info.top(), allocation_info_->top());
+ DCHECK_EQ(saved_allocation_info.limit(), allocation_info_->limit());
}
DCHECK_IMPLIES(allocation_counter_.IsActive(),
- (allocation_info_.limit() - allocation_info_.start()) <
+ (allocation_info_->limit() - allocation_info_->start()) <
allocation_counter_.NextBytes());
}
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index eb71467f78..3ac1e00208 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -61,8 +61,7 @@ class SemiSpace;
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
-// A store-buffer based write barrier is used to keep track of intergenerational
-// references. See heap/store-buffer.h.
+// A remembered set is used to keep track of intergenerational references.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
@@ -434,23 +433,24 @@ class LocalAllocationBuffer {
class SpaceWithLinearArea : public Space {
public:
- SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list)
- : Space(heap, id, free_list) {
- allocation_info_.Reset(kNullAddress, kNullAddress);
- }
+ SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list,
+ LinearAllocationArea* allocation_info)
+ : Space(heap, id, free_list), allocation_info_(allocation_info) {}
virtual bool SupportsAllocationObserver() = 0;
// Returns the allocation pointer in this space.
- Address top() { return allocation_info_.top(); }
- Address limit() { return allocation_info_.limit(); }
+ Address top() const { return allocation_info_->top(); }
+ Address limit() const { return allocation_info_->limit(); }
// The allocation top address.
- Address* allocation_top_address() { return allocation_info_.top_address(); }
+ Address* allocation_top_address() const {
+ return allocation_info_->top_address();
+ }
// The allocation limit address.
- Address* allocation_limit_address() {
- return allocation_info_.limit_address();
+ Address* allocation_limit_address() const {
+ return allocation_info_->limit_address();
}
// Methods needed for allocation observers.
@@ -484,7 +484,7 @@ class SpaceWithLinearArea : public Space {
protected:
// TODO(ofrobots): make these private after refactoring is complete.
- LinearAllocationArea allocation_info_;
+ LinearAllocationArea* const allocation_info_;
size_t allocations_origins_[static_cast<int>(
AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
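
In the spaces.h hunk above, SpaceWithLinearArea no longer embeds its LinearAllocationArea by value; it is handed a LinearAllocationArea* at construction and every start/top/limit access now goes through that pointer, so the underlying words can be owned, and shared, outside the space itself. A minimal sketch of that indirection, under the assumption that some longer-lived holder owns the area; the model types are illustrative.

// Sketch of a space that borrows its linear allocation area: the same
// start/top/limit words can then be shared with whoever owns the
// LinearAllocationAreaModel, without copying state in and out of the space.
#include <cstddef>
#include <cstdint>

struct LinearAllocationAreaModel {
  uintptr_t start = 0;
  uintptr_t top = 0;
  uintptr_t limit = 0;
};

class SpaceModel {
 public:
  explicit SpaceModel(LinearAllocationAreaModel* area) : allocation_info_(area) {}

  uintptr_t top() const { return allocation_info_->top; }
  uintptr_t limit() const { return allocation_info_->limit; }

  // Bump allocation straight out of the borrowed area.
  bool TryAllocate(size_t bytes, uintptr_t* result) {
    if (allocation_info_->top + bytes > allocation_info_->limit) return false;
    *result = allocation_info_->top;
    allocation_info_->top += bytes;
    return true;
  }

 private:
  LinearAllocationAreaModel* const allocation_info_;  // borrowed, not owned
};
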
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 7e18fc2895..1b9a9b4eb7 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -412,8 +412,8 @@ int Sweeper::RawSweep(
CleanupInvalidTypedSlotsOfFreeRanges(p, free_ranges_map);
ClearMarkBitsAndHandleLivenessStatistics(p, live_bytes, free_list_mode);
- p->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
if (code_object_registry) code_object_registry->Finalize();
+ p->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
if (free_list_mode == IGNORE_FREE_LIST) return 0;
return static_cast<int>(
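
The sweeper hunk above swaps two statements so that the page is marked kDone only after the code object registry has been finalized; a concurrent reader polling SweepingDone(), such as the remembered-set cleanup loop in the scavenger earlier in this patch, should never observe the done state while per-page side data is still being finalized. A tiny standalone model of the "publish the done flag last" idiom with an atomic release/acquire pair; the names are illustrative.

// Sketch of publishing a "sweeping done" flag only after all page-local state
// has been finalized, so a concurrent observer never sees done == true with a
// half-finalized registry.
#include <atomic>

struct PageModel {
  int finalized_entries = 0;            // stands in for the code object registry
  std::atomic<bool> sweeping_done{false};

  void FinishSweeping() {
    finalized_entries = 42;             // finalize side data first...
    sweeping_done.store(true, std::memory_order_release);  // ...then publish
  }

  bool SweepingDone() const {
    return sweeping_done.load(std::memory_order_acquire);
  }
};
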
diff --git a/deps/v8/src/heap/weak-object-worklists.cc b/deps/v8/src/heap/weak-object-worklists.cc
index 50e268ab91..951657456d 100644
--- a/deps/v8/src/heap/weak-object-worklists.cc
+++ b/deps/v8/src/heap/weak-object-worklists.cc
@@ -6,7 +6,6 @@
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
-#include "src/heap/worklist.h"
#include "src/objects/hash-table.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-function.h"
@@ -19,12 +18,32 @@ namespace v8 {
namespace internal {
+WeakObjects::Local::Local(WeakObjects* weak_objects)
+ : WeakObjects::UnusedBase()
+#define INIT_LOCAL_WORKLIST(_, name, __) , name##_local(&weak_objects->name)
+ WEAK_OBJECT_WORKLISTS(INIT_LOCAL_WORKLIST)
+#undef INIT_LOCAL_WORKLIST
+{
+}
+
+void WeakObjects::Local::Publish() {
+#define INVOKE_PUBLISH(_, name, __) name##_local.Publish();
+ WEAK_OBJECT_WORKLISTS(INVOKE_PUBLISH)
+#undef INVOKE_PUBLISH
+}
+
void WeakObjects::UpdateAfterScavenge() {
#define INVOKE_UPDATE(_, name, Name) Update##Name(name);
WEAK_OBJECT_WORKLISTS(INVOKE_UPDATE)
#undef INVOKE_UPDATE
}
+void WeakObjects::Clear() {
+#define INVOKE_CLEAR(_, name, __) name.Clear();
+ WEAK_OBJECT_WORKLISTS(INVOKE_CLEAR)
+#undef INVOKE_CLEAR
+}
+
// static
void WeakObjects::UpdateTransitionArrays(
WeakObjectWorklist<TransitionArray>& transition_arrays) {
diff --git a/deps/v8/src/heap/weak-object-worklists.h b/deps/v8/src/heap/weak-object-worklists.h
index c61b15a0e9..6da8a661fc 100644
--- a/deps/v8/src/heap/weak-object-worklists.h
+++ b/deps/v8/src/heap/weak-object-worklists.h
@@ -6,7 +6,7 @@
#define V8_HEAP_WEAK_OBJECT_WORKLISTS_H_
#include "src/common/globals.h"
-#include "src/heap/worklist.h"
+#include "src/heap/base/worklist.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-weak-refs.h"
@@ -61,16 +61,32 @@ class TransitionArray;
F(JSFunction, baseline_flushing_candidates, BaselineFlushingCandidates) \
F(JSFunction, flushed_js_functions, FlushedJSFunctions)
-class WeakObjects {
+class WeakObjects final {
+ private:
+ class UnusedBase {}; // Base class to allow using macro in initializer list.
+
public:
template <typename Type>
- using WeakObjectWorklist = Worklist<Type, 64>;
+ using WeakObjectWorklist = ::heap::base::Worklist<Type, 64>;
+
+ class Local final : public UnusedBase {
+ public:
+ explicit Local(WeakObjects* weak_objects);
+
+ V8_EXPORT_PRIVATE void Publish();
+
+#define DECLARE_WORKLIST(Type, name, _) \
+ WeakObjectWorklist<Type>::Local name##_local;
+ WEAK_OBJECT_WORKLISTS(DECLARE_WORKLIST)
+#undef DECLARE_WORKLIST
+ };
#define DECLARE_WORKLIST(Type, name, _) WeakObjectWorklist<Type> name;
WEAK_OBJECT_WORKLISTS(DECLARE_WORKLIST)
#undef DECLARE_WORKLIST
void UpdateAfterScavenge();
+ void Clear();
private:
#define DECLARE_UPDATE_METHODS(Type, _, Name) \
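
WeakObjects drives everything off a single X-macro: WEAK_OBJECT_WORKLISTS expands once into the shared worklist members, once into the per-thread Local members, once into the Local constructor's initializer list (hence the UnusedBase trick above), and once into Publish(). The condensed standalone model below reproduces that pattern with two dummy worklists; FOR_EACH_WORKLIST, WorklistStub, and the entry types are illustrative stand-ins.

// Sketch of the X-macro pattern: one list of (Type, name) pairs expands into
// the shared members, the per-thread Local members, the Local constructor's
// initializer list, and Publish().
template <typename T>
struct WorklistStub {            // stands in for WeakObjectWorklist<T>
  struct Local {
    explicit Local(WorklistStub* w) : worklist(w) {}
    void Publish() { worklist->published = true; }
    WorklistStub* worklist;
  };
  bool published = false;
};

#define FOR_EACH_WORKLIST(F) \
  F(int, transition_arrays)   \
  F(double, weak_references)

class WeakObjectsModel {
 private:
  struct UnusedBase {};  // lets the macro start the initializer list with a comma

 public:
  class Local final : public UnusedBase {
   public:
    explicit Local(WeakObjectsModel* weak_objects)
        : UnusedBase()
#define INIT_LOCAL(Type, name) , name##_local(&weak_objects->name)
          FOR_EACH_WORKLIST(INIT_LOCAL)
#undef INIT_LOCAL
    {}

    void Publish() {
#define PUBLISH(Type, name) name##_local.Publish();
      FOR_EACH_WORKLIST(PUBLISH)
#undef PUBLISH
    }

#define DECLARE_LOCAL(Type, name) WorklistStub<Type>::Local name##_local;
    FOR_EACH_WORKLIST(DECLARE_LOCAL)
#undef DECLARE_LOCAL
  };

#define DECLARE_WORKLIST(Type, name) WorklistStub<Type> name;
  FOR_EACH_WORKLIST(DECLARE_WORKLIST)
#undef DECLARE_WORKLIST
};
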
diff --git a/deps/v8/src/heap/worklist.h b/deps/v8/src/heap/worklist.h
deleted file mode 100644
index 0f5f13cdf7..0000000000
--- a/deps/v8/src/heap/worklist.h
+++ /dev/null
@@ -1,453 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_WORKLIST_H_
-#define V8_HEAP_WORKLIST_H_
-
-#include <cstddef>
-#include <utility>
-
-#include "src/base/atomic-utils.h"
-#include "src/base/logging.h"
-#include "src/base/macros.h"
-#include "src/base/platform/mutex.h"
-#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
-
-namespace v8 {
-namespace internal {
-
-// A concurrent worklist based on segments. Each task gets private
-// push and pop segments. Empty pop segments are swapped with their
-// corresponding push segments. Full push segments are published to a global
-// pool of segments and replaced with empty segments.
-//
-// Work stealing is best effort, i.e., there is no way to inform other tasks
-// of the need of items.
-template <typename EntryType, int SEGMENT_SIZE>
-class Worklist {
- public:
- class View {
- public:
- View(Worklist<EntryType, SEGMENT_SIZE>* worklist, int task_id)
- : worklist_(worklist), task_id_(task_id) {}
-
- // Pushes an entry onto the worklist.
- bool Push(EntryType entry) { return worklist_->Push(task_id_, entry); }
-
- // Pops an entry from the worklist.
- bool Pop(EntryType* entry) { return worklist_->Pop(task_id_, entry); }
-
- // Returns true if the local portion of the worklist is empty.
- bool IsLocalEmpty() { return worklist_->IsLocalEmpty(task_id_); }
-
- // Returns true if the worklist is empty. Can only be used from the main
- // thread without concurrent access.
- bool IsEmpty() { return worklist_->IsEmpty(); }
-
- bool IsGlobalPoolEmpty() { return worklist_->IsGlobalPoolEmpty(); }
-
- size_t LocalPushSegmentSize() {
- return worklist_->LocalPushSegmentSize(task_id_);
- }
-
- void FlushToGlobal() { worklist_->FlushToGlobal(task_id_); }
-
- private:
- Worklist<EntryType, SEGMENT_SIZE>* worklist_;
- int task_id_;
- };
-
- static const int kMaxNumTasks = 8;
- static const size_t kSegmentCapacity = SEGMENT_SIZE;
-
- Worklist() : Worklist(kMaxNumTasks) {}
-
- explicit Worklist(int num_tasks) : num_tasks_(num_tasks) {
- DCHECK_LE(num_tasks, kMaxNumTasks);
- for (int i = 0; i < num_tasks_; i++) {
- private_push_segment(i) = NewSegment();
- private_pop_segment(i) = NewSegment();
- }
- }
-
- ~Worklist() {
- CHECK(IsEmpty());
- for (int i = 0; i < num_tasks_; i++) {
- DCHECK_NOT_NULL(private_push_segment(i));
- DCHECK_NOT_NULL(private_pop_segment(i));
- delete private_push_segment(i);
- delete private_pop_segment(i);
- }
- }
-
- // Swaps content with the given worklist. Local buffers need to
- // be empty, not thread safe.
- void Swap(Worklist<EntryType, SEGMENT_SIZE>& other) {
- CHECK(AreLocalsEmpty());
- CHECK(other.AreLocalsEmpty());
-
- global_pool_.Swap(other.global_pool_);
- }
-
- bool Push(int task_id, EntryType entry) {
- DCHECK_LT(task_id, num_tasks_);
- DCHECK_NOT_NULL(private_push_segment(task_id));
- if (!private_push_segment(task_id)->Push(entry)) {
- PublishPushSegmentToGlobal(task_id);
- bool success = private_push_segment(task_id)->Push(entry);
- USE(success);
- DCHECK(success);
- }
- return true;
- }
-
- bool Pop(int task_id, EntryType* entry) {
- DCHECK_LT(task_id, num_tasks_);
- DCHECK_NOT_NULL(private_pop_segment(task_id));
- if (!private_pop_segment(task_id)->Pop(entry)) {
- if (!private_push_segment(task_id)->IsEmpty()) {
- Segment* tmp = private_pop_segment(task_id);
- private_pop_segment(task_id) = private_push_segment(task_id);
- private_push_segment(task_id) = tmp;
- } else if (!StealPopSegmentFromGlobal(task_id)) {
- return false;
- }
- bool success = private_pop_segment(task_id)->Pop(entry);
- USE(success);
- DCHECK(success);
- }
- return true;
- }
-
- size_t LocalPushSegmentSize(int task_id) {
- return private_push_segment(task_id)->Size();
- }
-
- bool IsLocalEmpty(int task_id) {
- return private_pop_segment(task_id)->IsEmpty() &&
- private_push_segment(task_id)->IsEmpty();
- }
-
- bool IsGlobalPoolEmpty() { return global_pool_.IsEmpty(); }
-
- bool IsEmpty() {
- if (!AreLocalsEmpty()) return false;
- return global_pool_.IsEmpty();
- }
-
- bool AreLocalsEmpty() {
- for (int i = 0; i < num_tasks_; i++) {
- if (!IsLocalEmpty(i)) return false;
- }
- return true;
- }
-
- size_t LocalSize(int task_id) {
- return private_pop_segment(task_id)->Size() +
- private_push_segment(task_id)->Size();
- }
-
- // Thread-safe but may return an outdated result.
- size_t GlobalPoolSize() const { return global_pool_.Size(); }
-
- // Clears all segments. Frees the global segment pool.
- //
- // Assumes that no other tasks are running.
- void Clear() {
- for (int i = 0; i < num_tasks_; i++) {
- private_pop_segment(i)->Clear();
- private_push_segment(i)->Clear();
- }
- global_pool_.Clear();
- }
-
- // Calls the specified callback on each element of the deques and replaces
- // the element with the result of the callback.
- // The signature of the callback is
- // bool Callback(EntryType old, EntryType* new).
- // If the callback returns |false| then the element is removed from the
- // worklist. Otherwise the |new| entry is updated.
- //
- // Assumes that no other tasks are running.
- template <typename Callback>
- void Update(Callback callback) {
- for (int i = 0; i < num_tasks_; i++) {
- private_pop_segment(i)->Update(callback);
- private_push_segment(i)->Update(callback);
- }
- global_pool_.Update(callback);
- }
-
- // Calls the specified callback on each element of the deques.
- // The signature of the callback is:
- // void Callback(EntryType entry).
- //
- // Assumes that no other tasks are running.
- template <typename Callback>
- void Iterate(Callback callback) {
- for (int i = 0; i < num_tasks_; i++) {
- private_pop_segment(i)->Iterate(callback);
- private_push_segment(i)->Iterate(callback);
- }
- global_pool_.Iterate(callback);
- }
-
- template <typename Callback>
- void IterateGlobalPool(Callback callback) {
- global_pool_.Iterate(callback);
- }
-
- void FlushToGlobal(int task_id) {
- PublishPushSegmentToGlobal(task_id);
- PublishPopSegmentToGlobal(task_id);
- }
-
- void MergeGlobalPool(Worklist* other) {
- global_pool_.Merge(&other->global_pool_);
- }
-
- private:
- FRIEND_TEST(WorkListTest, SegmentCreate);
- FRIEND_TEST(WorkListTest, SegmentPush);
- FRIEND_TEST(WorkListTest, SegmentPushPop);
- FRIEND_TEST(WorkListTest, SegmentIsEmpty);
- FRIEND_TEST(WorkListTest, SegmentIsFull);
- FRIEND_TEST(WorkListTest, SegmentClear);
- FRIEND_TEST(WorkListTest, SegmentFullPushFails);
- FRIEND_TEST(WorkListTest, SegmentEmptyPopFails);
- FRIEND_TEST(WorkListTest, SegmentUpdateFalse);
- FRIEND_TEST(WorkListTest, SegmentUpdate);
-
- class Segment {
- public:
- static const size_t kCapacity = kSegmentCapacity;
-
- Segment() : index_(0) {}
-
- bool Push(EntryType entry) {
- if (IsFull()) return false;
- entries_[index_++] = entry;
- return true;
- }
-
- bool Pop(EntryType* entry) {
- if (IsEmpty()) return false;
- *entry = entries_[--index_];
- return true;
- }
-
- size_t Size() const { return index_; }
- bool IsEmpty() const { return index_ == 0; }
- bool IsFull() const { return index_ == kCapacity; }
- void Clear() { index_ = 0; }
-
- template <typename Callback>
- void Update(Callback callback) {
- size_t new_index = 0;
- for (size_t i = 0; i < index_; i++) {
- if (callback(entries_[i], &entries_[new_index])) {
- new_index++;
- }
- }
- index_ = new_index;
- }
-
- template <typename Callback>
- void Iterate(Callback callback) const {
- for (size_t i = 0; i < index_; i++) {
- callback(entries_[i]);
- }
- }
-
- Segment* next() const { return next_; }
- void set_next(Segment* segment) { next_ = segment; }
-
- private:
- Segment* next_;
- size_t index_;
- EntryType entries_[kCapacity];
- };
-
- struct PrivateSegmentHolder {
- Segment* private_push_segment;
- Segment* private_pop_segment;
- char cache_line_padding[64];
- };
-
- class GlobalPool {
- public:
- GlobalPool() : top_(nullptr) {}
-
- // Swaps contents, not thread safe.
- void Swap(GlobalPool& other) {
- Segment* temp = top_;
- set_top(other.top_);
- other.set_top(temp);
- size_t other_size = other.size_.exchange(
- size_.load(std::memory_order_relaxed), std::memory_order_relaxed);
- size_.store(other_size, std::memory_order_relaxed);
- }
-
- V8_INLINE void Push(Segment* segment) {
- base::MutexGuard guard(&lock_);
- segment->set_next(top_);
- set_top(segment);
- size_.fetch_add(1, std::memory_order_relaxed);
- }
-
- V8_INLINE bool Pop(Segment** segment) {
- base::MutexGuard guard(&lock_);
- if (top_ != nullptr) {
- DCHECK_LT(0U, size_);
- size_.fetch_sub(1, std::memory_order_relaxed);
- *segment = top_;
- set_top(top_->next());
- return true;
- }
- return false;
- }
-
- V8_INLINE bool IsEmpty() {
- return base::AsAtomicPointer::Relaxed_Load(&top_) == nullptr;
- }
-
- V8_INLINE size_t Size() const {
- // It is safe to read |size_| without a lock since this variable is
- // atomic, keeping in mind that threads may not immediately see the new
- // value when it is updated.
- return size_.load(std::memory_order_relaxed);
- }
-
- void Clear() {
- base::MutexGuard guard(&lock_);
- size_.store(0, std::memory_order_relaxed);
- Segment* current = top_;
- while (current != nullptr) {
- Segment* tmp = current;
- current = current->next();
- delete tmp;
- }
- set_top(nullptr);
- }
-
- // See Worklist::Update.
- template <typename Callback>
- void Update(Callback callback) {
- base::MutexGuard guard(&lock_);
- Segment* prev = nullptr;
- Segment* current = top_;
- size_t num_deleted = 0;
- while (current != nullptr) {
- current->Update(callback);
- if (current->IsEmpty()) {
- DCHECK_LT(0U, size_);
- ++num_deleted;
- if (prev == nullptr) {
- top_ = current->next();
- } else {
- prev->set_next(current->next());
- }
- Segment* tmp = current;
- current = current->next();
- delete tmp;
- } else {
- prev = current;
- current = current->next();
- }
- }
- size_.fetch_sub(num_deleted, std::memory_order_relaxed);
- }
-
- // See Worklist::Iterate.
- template <typename Callback>
- void Iterate(Callback callback) {
- base::MutexGuard guard(&lock_);
- for (Segment* current = top_; current != nullptr;
- current = current->next()) {
- current->Iterate(callback);
- }
- }
-
- void Merge(GlobalPool* other) {
- Segment* top = nullptr;
- size_t other_size = 0;
- {
- base::MutexGuard guard(&other->lock_);
- if (!other->top_) return;
- top = other->top_;
- other_size = other->size_.load(std::memory_order_relaxed);
- other->size_.store(0, std::memory_order_relaxed);
- other->set_top(nullptr);
- }
-
- // It's safe to iterate through these segments because the top was
- // extracted from |other|.
- Segment* end = top;
- while (end->next()) end = end->next();
-
- {
- base::MutexGuard guard(&lock_);
- size_.fetch_add(other_size, std::memory_order_relaxed);
- end->set_next(top_);
- set_top(top);
- }
- }
-
- private:
- void set_top(Segment* segment) {
- base::AsAtomicPointer::Relaxed_Store(&top_, segment);
- }
-
- base::Mutex lock_;
- Segment* top_;
- std::atomic<size_t> size_{0};
- };
-
- V8_INLINE Segment*& private_push_segment(int task_id) {
- return private_segments_[task_id].private_push_segment;
- }
-
- V8_INLINE Segment*& private_pop_segment(int task_id) {
- return private_segments_[task_id].private_pop_segment;
- }
-
- V8_INLINE void PublishPushSegmentToGlobal(int task_id) {
- if (!private_push_segment(task_id)->IsEmpty()) {
- global_pool_.Push(private_push_segment(task_id));
- private_push_segment(task_id) = NewSegment();
- }
- }
-
- V8_INLINE void PublishPopSegmentToGlobal(int task_id) {
- if (!private_pop_segment(task_id)->IsEmpty()) {
- global_pool_.Push(private_pop_segment(task_id));
- private_pop_segment(task_id) = NewSegment();
- }
- }
-
- V8_INLINE bool StealPopSegmentFromGlobal(int task_id) {
- if (global_pool_.IsEmpty()) return false;
- Segment* new_segment = nullptr;
- if (global_pool_.Pop(&new_segment)) {
- delete private_pop_segment(task_id);
- private_pop_segment(task_id) = new_segment;
- return true;
- }
- return false;
- }
-
- V8_INLINE Segment* NewSegment() {
- // Bottleneck for filtering in crash dumps.
- return new Segment();
- }
-
- PrivateSegmentHolder private_segments_[kMaxNumTasks];
- GlobalPool global_pool_;
- int num_tasks_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_HEAP_WORKLIST_H_