Diffstat (limited to 'deps/v8/src/heap')
-rw-r--r--  deps/v8/src/heap/OWNERS | 3
-rw-r--r--  deps/v8/src/heap/allocation-observer.h | 131
-rw-r--r--  deps/v8/src/heap/allocation-result.h | 74
-rw-r--r--  deps/v8/src/heap/base/active-system-pages.cc | 71
-rw-r--r--  deps/v8/src/heap/base/active-system-pages.h | 51
-rw-r--r--  deps/v8/src/heap/base/asm/mips64/push_registers_asm.cc | 3
-rw-r--r--  deps/v8/src/heap/base/stack.cc | 25
-rw-r--r--  deps/v8/src/heap/base/stack.h | 11
-rw-r--r--  deps/v8/src/heap/basic-memory-chunk.h | 16
-rw-r--r--  deps/v8/src/heap/code-object-registry.h | 1
-rw-r--r--  deps/v8/src/heap/code-range.cc | 20
-rw-r--r--  deps/v8/src/heap/concurrent-allocator-inl.h | 10
-rw-r--r--  deps/v8/src/heap/concurrent-allocator.cc | 16
-rw-r--r--  deps/v8/src/heap/concurrent-marking.cc | 26
-rw-r--r--  deps/v8/src/heap/cppgc-js/cpp-heap.cc | 300
-rw-r--r--  deps/v8/src/heap/cppgc-js/cpp-heap.h | 25
-rw-r--r--  deps/v8/src/heap/cppgc-js/cpp-marking-state-inl.h | 47
-rw-r--r--  deps/v8/src/heap/cppgc-js/cpp-marking-state.h | 67
-rw-r--r--  deps/v8/src/heap/cppgc-js/cpp-snapshot.cc | 3
-rw-r--r--  deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc | 2
-rw-r--r--  deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h | 26
-rw-r--r--  deps/v8/src/heap/cppgc/default-platform.cc | 2
-rw-r--r--  deps/v8/src/heap/cppgc/explicit-management.cc | 30
-rw-r--r--  deps/v8/src/heap/cppgc/garbage-collector.h | 9
-rw-r--r--  deps/v8/src/heap/cppgc/gc-invoker.cc | 12
-rw-r--r--  deps/v8/src/heap/cppgc/gc-invoker.h | 2
-rw-r--r--  deps/v8/src/heap/cppgc/globals.h | 21
-rw-r--r--  deps/v8/src/heap/cppgc/heap-base.cc | 5
-rw-r--r--  deps/v8/src/heap/cppgc/heap-base.h | 12
-rw-r--r--  deps/v8/src/heap/cppgc/heap-object-header.h | 19
-rw-r--r--  deps/v8/src/heap/cppgc/heap-space.cc | 2
-rw-r--r--  deps/v8/src/heap/cppgc/heap-space.h | 1
-rw-r--r--  deps/v8/src/heap/cppgc/heap.cc | 7
-rw-r--r--  deps/v8/src/heap/cppgc/heap.h | 3
-rw-r--r--  deps/v8/src/heap/cppgc/marker.cc | 77
-rw-r--r--  deps/v8/src/heap/cppgc/marker.h | 40
-rw-r--r--  deps/v8/src/heap/cppgc/marking-state.h | 196
-rw-r--r--  deps/v8/src/heap/cppgc/marking-visitor.cc | 2
-rw-r--r--  deps/v8/src/heap/cppgc/marking-visitor.h | 6
-rw-r--r--  deps/v8/src/heap/cppgc/page-memory.h | 3
-rw-r--r--  deps/v8/src/heap/cppgc/platform.cc | 8
-rw-r--r--  deps/v8/src/heap/cppgc/prefinalizer-handler.cc | 17
-rw-r--r--  deps/v8/src/heap/cppgc/prefinalizer-handler.h | 13
-rw-r--r--  deps/v8/src/heap/cppgc/remembered-set.cc | 135
-rw-r--r--  deps/v8/src/heap/cppgc/remembered-set.h | 68
-rw-r--r--  deps/v8/src/heap/cppgc/sweeper.cc | 24
-rw-r--r--  deps/v8/src/heap/cppgc/sweeper.h | 1
-rw-r--r--  deps/v8/src/heap/cppgc/testing.cc | 8
-rw-r--r--  deps/v8/src/heap/cppgc/write-barrier.cc | 18
-rw-r--r--  deps/v8/src/heap/embedder-tracing-inl.h | 46
-rw-r--r--  deps/v8/src/heap/embedder-tracing.cc | 87
-rw-r--r--  deps/v8/src/heap/embedder-tracing.h | 52
-rw-r--r--  deps/v8/src/heap/evacuation-allocator-inl.h (renamed from deps/v8/src/heap/local-allocator-inl.h) | 36
-rw-r--r--  deps/v8/src/heap/evacuation-allocator.h (renamed from deps/v8/src/heap/local-allocator.h) | 12
-rw-r--r--  deps/v8/src/heap/factory-base.cc | 34
-rw-r--r--  deps/v8/src/heap/factory-base.h | 3
-rw-r--r--  deps/v8/src/heap/factory-inl.h | 4
-rw-r--r--  deps/v8/src/heap/factory.cc | 294
-rw-r--r--  deps/v8/src/heap/factory.h | 72
-rw-r--r--  deps/v8/src/heap/gc-tracer.cc | 542
-rw-r--r--  deps/v8/src/heap/gc-tracer.h | 136
-rw-r--r--  deps/v8/src/heap/heap-allocator-inl.h | 250
-rw-r--r--  deps/v8/src/heap/heap-allocator.cc | 163
-rw-r--r--  deps/v8/src/heap/heap-allocator.h | 119
-rw-r--r--  deps/v8/src/heap/heap-inl.h | 234
-rw-r--r--  deps/v8/src/heap/heap-layout-tracer.cc | 3
-rw-r--r--  deps/v8/src/heap/heap-write-barrier-inl.h | 17
-rw-r--r--  deps/v8/src/heap/heap-write-barrier.cc | 28
-rw-r--r--  deps/v8/src/heap/heap-write-barrier.h | 6
-rw-r--r--  deps/v8/src/heap/heap.cc | 1069
-rw-r--r--  deps/v8/src/heap/heap.h | 245
-rw-r--r--  deps/v8/src/heap/incremental-marking-job.cc | 5
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc | 171
-rw-r--r--  deps/v8/src/heap/incremental-marking.h | 4
-rw-r--r--  deps/v8/src/heap/invalidated-slots-inl.h | 13
-rw-r--r--  deps/v8/src/heap/invalidated-slots.cc | 11
-rw-r--r--  deps/v8/src/heap/invalidated-slots.h | 8
-rw-r--r--  deps/v8/src/heap/large-spaces.cc | 73
-rw-r--r--  deps/v8/src/heap/large-spaces.h | 4
-rw-r--r--  deps/v8/src/heap/local-factory.cc | 5
-rw-r--r--  deps/v8/src/heap/local-factory.h | 17
-rw-r--r--  deps/v8/src/heap/local-heap-inl.h | 3
-rw-r--r--  deps/v8/src/heap/local-heap.cc | 2
-rw-r--r--  deps/v8/src/heap/mark-compact-inl.h | 11
-rw-r--r--  deps/v8/src/heap/mark-compact.cc | 722
-rw-r--r--  deps/v8/src/heap/mark-compact.h | 37
-rw-r--r--  deps/v8/src/heap/marking-barrier.cc | 19
-rw-r--r--  deps/v8/src/heap/marking-visitor-inl.h | 72
-rw-r--r--  deps/v8/src/heap/marking-visitor.h | 67
-rw-r--r--  deps/v8/src/heap/marking-worklist-inl.h | 38
-rw-r--r--  deps/v8/src/heap/marking-worklist.cc | 30
-rw-r--r--  deps/v8/src/heap/marking-worklist.h | 44
-rw-r--r--  deps/v8/src/heap/memory-allocator.cc | 140
-rw-r--r--  deps/v8/src/heap/memory-allocator.h | 176
-rw-r--r--  deps/v8/src/heap/memory-chunk-layout.h | 7
-rw-r--r--  deps/v8/src/heap/memory-chunk.cc | 28
-rw-r--r--  deps/v8/src/heap/memory-chunk.h | 5
-rw-r--r--  deps/v8/src/heap/new-spaces-inl.h | 15
-rw-r--r--  deps/v8/src/heap/new-spaces.cc | 119
-rw-r--r--  deps/v8/src/heap/new-spaces.h | 34
-rw-r--r--  deps/v8/src/heap/object-stats.cc | 86
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h | 13
-rw-r--r--  deps/v8/src/heap/objects-visiting.h | 2
-rw-r--r--  deps/v8/src/heap/paged-spaces-inl.h | 23
-rw-r--r--  deps/v8/src/heap/paged-spaces.cc | 100
-rw-r--r--  deps/v8/src/heap/paged-spaces.h | 41
-rw-r--r--  deps/v8/src/heap/read-only-spaces.cc | 11
-rw-r--r--  deps/v8/src/heap/reference-summarizer.cc | 116
-rw-r--r--  deps/v8/src/heap/reference-summarizer.h | 55
-rw-r--r--  deps/v8/src/heap/remembered-set-inl.h | 16
-rw-r--r--  deps/v8/src/heap/remembered-set.h | 13
-rw-r--r--  deps/v8/src/heap/safepoint.cc | 34
-rw-r--r--  deps/v8/src/heap/safepoint.h | 12
-rw-r--r--  deps/v8/src/heap/scavenger-inl.h | 24
-rw-r--r--  deps/v8/src/heap/scavenger.cc | 70
-rw-r--r--  deps/v8/src/heap/scavenger.h | 12
-rw-r--r--  deps/v8/src/heap/setup-heap-internal.cc | 40
-rw-r--r--  deps/v8/src/heap/slot-set.cc | 2
-rw-r--r--  deps/v8/src/heap/slot-set.h | 53
-rw-r--r--  deps/v8/src/heap/spaces-inl.h | 33
-rw-r--r--  deps/v8/src/heap/spaces.cc | 43
-rw-r--r--  deps/v8/src/heap/spaces.h | 46
-rw-r--r--  deps/v8/src/heap/sweeper.cc | 70
-rw-r--r--  deps/v8/src/heap/sweeper.h | 13
-rw-r--r--  deps/v8/src/heap/third-party/heap-api-stub.cc | 12
125 files changed, 5338 insertions, 2603 deletions
diff --git a/deps/v8/src/heap/OWNERS b/deps/v8/src/heap/OWNERS
index e0f0a37128..857988c90a 100644
--- a/deps/v8/src/heap/OWNERS
+++ b/deps/v8/src/heap/OWNERS
@@ -4,5 +4,4 @@ hpayer@chromium.org
mlippautz@chromium.org
omerkatz@chromium.org
-per-file *factory*=leszeks@chromium.org
-per-file read-only-*=delphick@chromium.org
+per-file *factory*=file:../objects/OWNERS
diff --git a/deps/v8/src/heap/allocation-observer.h b/deps/v8/src/heap/allocation-observer.h
index 6a3826bf16..26559ed16a 100644
--- a/deps/v8/src/heap/allocation-observer.h
+++ b/deps/v8/src/heap/allocation-observer.h
@@ -14,48 +14,84 @@
namespace v8 {
namespace internal {
-class AllocationObserver;
-
-class AllocationCounter {
+// Observer for allocations that is aware of LAB-based allocation.
+class AllocationObserver {
public:
- AllocationCounter()
- : paused_(false),
- current_counter_(0),
- next_counter_(0),
- step_in_progress_(false) {}
- V8_EXPORT_PRIVATE void AddAllocationObserver(AllocationObserver* observer);
- V8_EXPORT_PRIVATE void RemoveAllocationObserver(AllocationObserver* observer);
+ explicit AllocationObserver(intptr_t step_size) : step_size_(step_size) {
+ DCHECK_LE(kTaggedSize, step_size);
+ }
+ virtual ~AllocationObserver() = default;
+ AllocationObserver(const AllocationObserver&) = delete;
+ AllocationObserver& operator=(const AllocationObserver&) = delete;
- bool IsActive() { return !IsPaused() && observers_.size() > 0; }
+ protected:
+ // Called when at least `step_size_` bytes have been allocated. `soon_object`
+ // points to the uninitialized memory that has just been allocated and is the
+ // result for a request of `size` bytes.
+ //
+ // Some caveats:
+ // 1. `soon_object` will be nullptr in cases where the allocation returns a
+ // filler object, which is e.g. needed at page boundaries.
+ // 2. `soon_object` may actually be the first object in an
+ // allocation-folding group. In such a case size is the size of the group
+ // rather than the first object.
+ // 3. `size` is the requested size at the time of allocation. Right-trimming
+ // may change the object size dynamically.
+ virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
- void Pause() {
- DCHECK(!paused_);
- DCHECK(!step_in_progress_);
- paused_ = true;
- }
+ // Subclasses can override this method to make step size dynamic.
+ virtual intptr_t GetNextStepSize() { return step_size_; }
- void Resume() {
- DCHECK(paused_);
- DCHECK(!step_in_progress_);
- paused_ = false;
- }
+ private:
+ const intptr_t step_size_;
+
+ friend class AllocationCounter;
+};
+
+// A global allocation counter observers can be added to.
+class AllocationCounter final {
+ public:
+ AllocationCounter() = default;
+
+ // Adds an observer. May be called from `AllocationObserver::Step()`.
+ V8_EXPORT_PRIVATE void AddAllocationObserver(AllocationObserver* observer);
+
+ // Removes an observer. May be called from `AllocationObserver::Step()`.
+ V8_EXPORT_PRIVATE void RemoveAllocationObserver(AllocationObserver* observer);
+ // Advances forward by `allocated` bytes. Does not invoke any observers.
V8_EXPORT_PRIVATE void AdvanceAllocationObservers(size_t allocated);
+
+ // Invokes observers via `AllocationObserver::Step()` and computes new step
+ // sizes. Does not advance the current allocation counter.
V8_EXPORT_PRIVATE void InvokeAllocationObservers(Address soon_object,
size_t object_size,
size_t aligned_object_size);
- size_t NextBytes() {
+ bool IsActive() const { return !IsPaused() && observers_.size() > 0; }
+
+ bool IsStepInProgress() const { return step_in_progress_; }
+
+ size_t NextBytes() const {
DCHECK(IsActive());
return next_counter_ - current_counter_;
}
- bool IsStepInProgress() { return step_in_progress_; }
+ void Pause() {
+ DCHECK(!step_in_progress_);
+ paused_++;
+ }
+
+ void Resume() {
+ DCHECK_NE(0, paused_);
+ DCHECK(!step_in_progress_);
+ paused_--;
+ }
private:
- bool IsPaused() { return paused_; }
+ bool IsPaused() const { return paused_; }
- struct AllocationObserverCounter {
+ struct AllocationObserverCounter final {
AllocationObserverCounter(AllocationObserver* observer, size_t prev_counter,
size_t next_counter)
: observer_(observer),
@@ -71,47 +107,10 @@ class AllocationCounter {
std::vector<AllocationObserverCounter> pending_added_;
std::unordered_set<AllocationObserver*> pending_removed_;
- bool paused_;
-
- size_t current_counter_;
- size_t next_counter_;
-
- bool step_in_progress_;
-};
-
-// -----------------------------------------------------------------------------
-// Allows observation of allocations.
-class AllocationObserver {
- public:
- explicit AllocationObserver(intptr_t step_size) : step_size_(step_size) {
- DCHECK_LE(kTaggedSize, step_size);
- }
- virtual ~AllocationObserver() = default;
- AllocationObserver(const AllocationObserver&) = delete;
- AllocationObserver& operator=(const AllocationObserver&) = delete;
-
- protected:
- // Pure virtual method provided by the subclasses that gets called when at
- // least step_size bytes have been allocated. soon_object is the address just
- // allocated (but not yet initialized.) size is the size of the object as
- // requested (i.e. w/o the alignment fillers). Some complexities to be aware
- // of:
- // 1) soon_object will be nullptr in cases where we end up observing an
- // allocation that happens to be a filler space (e.g. page boundaries.)
- // 2) size is the requested size at the time of allocation. Right-trimming
- // may change the object size dynamically.
- // 3) soon_object may actually be the first object in an allocation-folding
- // group. In such a case size is the size of the group rather than the
- // first object.
- virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
-
- // Subclasses can override this method to make step size dynamic.
- virtual intptr_t GetNextStepSize() { return step_size_; }
-
- private:
- intptr_t step_size_;
-
- friend class AllocationCounter;
+ size_t current_counter_ = 0;
+ size_t next_counter_ = 0;
+ bool step_in_progress_ = false;
+ int paused_ = 0;
};
class V8_EXPORT_PRIVATE V8_NODISCARD PauseAllocationObserversScope {
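Editor's note: a minimal sketch, not part of the commit, of how a subclass plugs into the reworked observer API above. The observer name and step size are hypothetical; MB, Address and kNullAddress are the usual V8-internal definitions, and Pause()/Resume() are assumed to be called via PauseAllocationObserversScope as before.

// Hypothetical observer that samples roughly once per megabyte of allocation.
class SamplingObserver final : public AllocationObserver {
 public:
  SamplingObserver() : AllocationObserver(1 * MB) {}

 protected:
  // Invoked once at least step_size_ bytes were allocated since the last step.
  void Step(int bytes_allocated, Address soon_object, size_t size) final {
    // `soon_object` is null for filler allocations, e.g. at page boundaries.
    if (soon_object == kNullAddress) return;
    // Record (soon_object, size) ...
  }
};

void RegisterSamplingObserver(AllocationCounter& counter,
                              SamplingObserver& observer) {
  // paused_ is now a nestable counter, so overlapping
  // PauseAllocationObserversScopes are allowed while observers are registered.
  counter.AddAllocationObserver(&observer);
  // ... the allocation path later calls AdvanceAllocationObservers(bytes) and
  // InvokeAllocationObservers(soon_object, size, aligned_size) ...
}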
diff --git a/deps/v8/src/heap/allocation-result.h b/deps/v8/src/heap/allocation-result.h
new file mode 100644
index 0000000000..04a618995b
--- /dev/null
+++ b/deps/v8/src/heap/allocation-result.h
@@ -0,0 +1,74 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_ALLOCATION_RESULT_H_
+#define V8_HEAP_ALLOCATION_RESULT_H_
+
+#include "src/common/globals.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/objects.h"
+#include "src/objects/smi.h"
+
+namespace v8 {
+namespace internal {
+
+enum class AllocationOrigin {
+ kGeneratedCode = 0,
+ kRuntime = 1,
+ kGC = 2,
+ kFirstAllocationOrigin = kGeneratedCode,
+ kLastAllocationOrigin = kGC,
+ kNumberOfAllocationOrigins = kLastAllocationOrigin + 1
+};
+
+// The result of an allocation attempt. Either represents a successful
+// allocation that can be turned into an object or a failed attempt.
+class AllocationResult final {
+ public:
+ static AllocationResult Failure() { return AllocationResult(); }
+
+ static AllocationResult FromObject(HeapObject heap_object) {
+ return AllocationResult(heap_object);
+ }
+
+ // Empty constructor creates a failed result. The callsite determines which
+ // GC to invoke based on the requested allocation.
+ AllocationResult() = default;
+
+ bool IsFailure() const { return object_.is_null(); }
+
+ template <typename T>
+ bool To(T* obj) const {
+ if (IsFailure()) return false;
+ *obj = T::cast(object_);
+ return true;
+ }
+
+ HeapObject ToObjectChecked() const {
+ CHECK(!IsFailure());
+ return HeapObject::cast(object_);
+ }
+
+ HeapObject ToObject() const {
+ DCHECK(!IsFailure());
+ return HeapObject::cast(object_);
+ }
+
+ Address ToAddress() const {
+ DCHECK(!IsFailure());
+ return HeapObject::cast(object_).address();
+ }
+
+ private:
+ explicit AllocationResult(HeapObject heap_object) : object_(heap_object) {}
+
+ HeapObject object_;
+};
+
+STATIC_ASSERT(sizeof(AllocationResult) == kSystemPointerSize);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_ALLOCATION_RESULT_H_
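Editor's note: a minimal sketch, not part of the commit, of the calling convention this new header establishes and that replaces the IsRetry()/Retry(space) pattern removed elsewhere in this diff. AllocateRawSomewhere is a hypothetical entry point standing in for any allocator that returns AllocationResult.

// Hypothetical allocation entry point (e.g. a LAB or space allocator).
AllocationResult AllocateRawSomewhere(int size_in_bytes);

HeapObject AllocateOrBail(int size_in_bytes) {
  AllocationResult result = AllocateRawSomewhere(size_in_bytes);
  HeapObject object;
  if (result.To(&object)) {
    return object;  // `object` is the still-uninitialized allocation.
  }
  DCHECK(result.IsFailure());
  // A failed result no longer names a space to retry in; the callsite picks
  // the GC to invoke based on the allocation it requested, then retries.
  return HeapObject();
}

// Producer side: success wraps the object, failure carries no payload.
//   return ok ? AllocationResult::FromObject(object)
//             : AllocationResult::Failure();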
diff --git a/deps/v8/src/heap/base/active-system-pages.cc b/deps/v8/src/heap/base/active-system-pages.cc
new file mode 100644
index 0000000000..8ad225461e
--- /dev/null
+++ b/deps/v8/src/heap/base/active-system-pages.cc
@@ -0,0 +1,71 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/base/active-system-pages.h"
+
+#include <climits>
+
+#include "src/base/bits.h"
+#include "src/base/macros.h"
+
+namespace heap {
+namespace base {
+
+size_t ActiveSystemPages::Init(size_t header_size, size_t page_size_bits,
+ size_t user_page_size) {
+#if DEBUG
+ size_t page_size = 1 << page_size_bits;
+ DCHECK_LE(RoundUp(user_page_size, page_size) >> page_size_bits,
+ ActiveSystemPages::kMaxPages);
+#endif // DEBUG
+ Clear();
+ return Add(0, header_size, page_size_bits);
+}
+
+size_t ActiveSystemPages::Add(uintptr_t start, uintptr_t end,
+ size_t page_size_bits) {
+ const size_t page_size = 1 << page_size_bits;
+
+ DCHECK_LE(start, end);
+ DCHECK_LE(end, kMaxPages * page_size);
+
+ // Make sure we actually get the bitcount as argument.
+ DCHECK_LT(page_size_bits, sizeof(uintptr_t) * CHAR_BIT);
+
+ const uintptr_t start_page_bit =
+ RoundDown(start, page_size) >> page_size_bits;
+ const uintptr_t end_page_bit = RoundUp(end, page_size) >> page_size_bits;
+ DCHECK_LE(start_page_bit, end_page_bit);
+
+ const uintptr_t bits = end_page_bit - start_page_bit;
+ DCHECK_LE(bits, kMaxPages);
+ const bitset_t mask = bits == kMaxPages
+ ? int64_t{-1}
+ : ((uint64_t{1} << bits) - 1) << start_page_bit;
+ const bitset_t added_pages = ~value_ & mask;
+ value_ |= mask;
+ return added_pages.count();
+}
+
+size_t ActiveSystemPages::Reduce(ActiveSystemPages updated_value) {
+ DCHECK_EQ(~value_ & updated_value.value_, 0);
+ const bitset_t removed_pages(value_ & ~updated_value.value_);
+ value_ = updated_value.value_;
+ return removed_pages.count();
+}
+
+size_t ActiveSystemPages::Clear() {
+ const size_t removed_pages = value_.count();
+ value_ = 0;
+ return removed_pages;
+}
+
+size_t ActiveSystemPages::Size(size_t page_size_bits) const {
+ // Make sure we don't get the full page size as argument.
+ DCHECK_LT(page_size_bits, sizeof(uintptr_t) * CHAR_BIT);
+ return value_.count() * (size_t{1} << page_size_bits);
+}
+
+} // namespace base
+} // namespace heap
diff --git a/deps/v8/src/heap/base/active-system-pages.h b/deps/v8/src/heap/base/active-system-pages.h
new file mode 100644
index 0000000000..0c30cb928f
--- /dev/null
+++ b/deps/v8/src/heap/base/active-system-pages.h
@@ -0,0 +1,51 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_BASE_ACTIVE_SYSTEM_PAGES_H_
+#define V8_HEAP_BASE_ACTIVE_SYSTEM_PAGES_H_
+
+#include <bitset>
+#include <cstdint>
+
+#include "src/base/macros.h"
+
+namespace heap {
+namespace base {
+
+// Class implements a bitset of system pages on a heap page.
+class ActiveSystemPages final {
+ public:
+ // Defines the maximum number of system pages that can be tracked in one
+ // instance.
+ static constexpr size_t kMaxPages = 64;
+
+ // Initializes the set of active pages to the system pages for the header.
+ V8_EXPORT_PRIVATE size_t Init(size_t header_size, size_t page_size_bits,
+ size_t user_page_size);
+
+ // Adds the pages for this memory range. Returns the number of freshly added
+ // pages.
+ V8_EXPORT_PRIVATE size_t Add(size_t start, size_t end, size_t page_size_bits);
+
+ // Replaces the current bitset with the given argument. The new bitset needs
+ // to be a proper subset of the current pages, which means this operation
+ // can't add pages. Returns the number of removed pages.
+ V8_EXPORT_PRIVATE size_t Reduce(ActiveSystemPages updated_value);
+
+ // Removes all pages. Returns the number of removed pages.
+ V8_EXPORT_PRIVATE size_t Clear();
+
+ // Returns the memory used with the given page size.
+ V8_EXPORT_PRIVATE size_t Size(size_t page_size_bits) const;
+
+ private:
+ using bitset_t = std::bitset<kMaxPages>;
+
+ bitset_t value_;
+};
+
+} // namespace base
+} // namespace heap
+
+#endif // V8_HEAP_BASE_ACTIVE_SYSTEM_PAGES_H_
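Editor's note: a minimal sketch, not part of the commit, of the accounting ActiveSystemPages enables, using illustrative sizes: 4 KiB system pages on a 256 KiB heap page, i.e. 64 system pages, which matches kMaxPages.

void TrackCommittedSystemPages() {
  constexpr size_t kSystemPageBits = 12;        // 4 KiB system pages.
  constexpr size_t kHeapPageSize = 256 * 1024;  // 64 system pages total.
  constexpr size_t kHeaderSize = 2 * 1024;

  heap::base::ActiveSystemPages pages;
  // Activates the system pages covering the page header; returns how many
  // pages became active.
  size_t active = pages.Init(kHeaderSize, kSystemPageBits, kHeapPageSize);

  // An allocation spanning bytes [60000, 90000) activates the system pages it
  // touches (pages 14..21 here); only freshly added pages are counted.
  active += pages.Add(60000, 90000, kSystemPageBits);

  // Memory attributable to this heap page at system-page granularity.
  const size_t committed_bytes = pages.Size(kSystemPageBits);
  USE(active, committed_bytes);

  // Dropping everything, e.g. when the heap page is released.
  const size_t released = pages.Clear();
  USE(released);
}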
diff --git a/deps/v8/src/heap/base/asm/mips64/push_registers_asm.cc b/deps/v8/src/heap/base/asm/mips64/push_registers_asm.cc
index 6befa3bcc0..47779e0736 100644
--- a/deps/v8/src/heap/base/asm/mips64/push_registers_asm.cc
+++ b/deps/v8/src/heap/base/asm/mips64/push_registers_asm.cc
@@ -10,7 +10,8 @@
//
// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
// GN toolchain (e.g. ChromeOS) and not provide them.
-asm(".set noreorder \n"
+asm(".text \n"
+ ".set noreorder \n"
".global PushAllRegistersAndIterateStack \n"
".type PushAllRegistersAndIterateStack, %function \n"
".hidden PushAllRegistersAndIterateStack \n"
diff --git a/deps/v8/src/heap/base/stack.cc b/deps/v8/src/heap/base/stack.cc
index 299d2dd1c1..7ae7e1380a 100644
--- a/deps/v8/src/heap/base/stack.cc
+++ b/deps/v8/src/heap/base/stack.cc
@@ -10,7 +10,6 @@
#include "src/base/sanitizer/asan.h"
#include "src/base/sanitizer/msan.h"
#include "src/base/sanitizer/tsan.h"
-#include "src/heap/cppgc/globals.h"
namespace heap {
namespace base {
@@ -21,7 +20,12 @@ extern "C" void PushAllRegistersAndIterateStack(const Stack*, StackVisitor*,
Stack::Stack(const void* stack_start) : stack_start_(stack_start) {}
+void Stack::SetStackStart(const void* stack_start) {
+ stack_start_ = stack_start;
+}
+
bool Stack::IsOnStack(void* slot) const {
+ DCHECK_NOT_NULL(stack_start_);
#ifdef V8_USE_ADDRESS_SANITIZER
// If the slot is part of a fake frame, then it is definitely on the stack.
if (__asan_addr_is_in_fake_stack(__asan_get_current_fake_stack(),
@@ -35,7 +39,7 @@ bool Stack::IsOnStack(void* slot) const {
#if defined(__has_feature)
#if __has_feature(safe_stack)
if (__builtin___get_unsafe_stack_top() >= slot &&
- slot > __builtin___get_unsafe_stack_ptr()) {
+ slot >= __builtin___get_unsafe_stack_ptr()) {
return true;
}
#endif // __has_feature(safe_stack)
@@ -86,7 +90,7 @@ void IterateAsanFakeFrameIfNecessary(StackVisitor* visitor,
#endif // V8_USE_ADDRESS_SANITIZER
-void IterateSafeStackIfNecessary(StackVisitor* visitor) {
+void IterateUnsafeStackIfNecessary(StackVisitor* visitor) {
#if defined(__has_feature)
#if __has_feature(safe_stack)
// Source:
@@ -146,11 +150,12 @@ void IteratePointersImpl(const Stack* stack, StackVisitor* visitor,
} // namespace
void Stack::IteratePointers(StackVisitor* visitor) const {
+ DCHECK_NOT_NULL(stack_start_);
PushAllRegistersAndIterateStack(this, visitor, &IteratePointersImpl);
// No need to deal with callee-saved registers as they will be kept alive by
// the regular conservative stack iteration.
// TODO(chromium:1056170): Add support for SIMD and/or filtering.
- IterateSafeStackIfNecessary(visitor);
+ IterateUnsafeStackIfNecessary(visitor);
}
void Stack::IteratePointersUnsafe(StackVisitor* visitor,
@@ -158,5 +163,17 @@ void Stack::IteratePointersUnsafe(StackVisitor* visitor,
IteratePointersImpl(this, visitor, reinterpret_cast<intptr_t*>(stack_end));
}
+const void* Stack::GetCurrentStackPointerForLocalVariables() {
+#if defined(__has_feature)
+#if __has_feature(safe_stack)
+ return __builtin___get_unsafe_stack_ptr();
+#else // __has_feature(safe_stack)
+ return v8::base::Stack::GetCurrentStackPosition();
+#endif // __has_feature(safe_stack)
+#else // defined(__has_feature)
+ return v8::base::Stack::GetCurrentStackPosition();
+#endif // defined(__has_feature)
+}
+
} // namespace base
} // namespace heap
diff --git a/deps/v8/src/heap/base/stack.h b/deps/v8/src/heap/base/stack.h
index d7267deee7..59411d786e 100644
--- a/deps/v8/src/heap/base/stack.h
+++ b/deps/v8/src/heap/base/stack.h
@@ -21,7 +21,10 @@ class StackVisitor {
// - SafeStack: https://releases.llvm.org/10.0.0/tools/clang/docs/SafeStack.html
class V8_EXPORT_PRIVATE Stack final {
public:
- explicit Stack(const void* stack_start);
+ explicit Stack(const void* stack_start = nullptr);
+
+ // Sets the start of the stack.
+ void SetStackStart(const void* stack_start);
// Returns true if |slot| is part of the stack and false otherwise.
bool IsOnStack(void* slot) const;
@@ -43,6 +46,12 @@ class V8_EXPORT_PRIVATE Stack final {
// Returns the start of the stack.
const void* stack_start() const { return stack_start_; }
+ // Get the current stack pointer for the stack, on which local variables are
+ // stored. In case the safe-stack is enabled (-fsanitize=safe-stack), this
+ // will return the stack pointer for the unsafe-stack. Otherwise, the function
+ // returns the stack pointer for the native stack.
+ static const void* GetCurrentStackPointerForLocalVariables();
+
private:
const void* stack_start_;
};
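Editor's note: a minimal sketch, not part of the commit, of the two-phase setup the default constructor and SetStackStart() allow. The visitor is hypothetical; it assumes StackVisitor exposes a single VisitPointer(const void*) callback and that v8::base::Stack::GetStackStart() supplies the start address.

// Hypothetical conservative visitor that only counts candidate pointers.
class CountingVisitor final : public heap::base::StackVisitor {
 public:
  void VisitPointer(const void* address) final { ++count_; }
  size_t count() const { return count_; }

 private:
  size_t count_ = 0;
};

void ScanStackLater() {
  heap::base::Stack stack;  // Stack start may be unknown at construction.
  stack.SetStackStart(v8::base::Stack::GetStackStart());  // Provided later.

  CountingVisitor visitor;
  stack.IteratePointers(&visitor);  // Now DCHECKs that a start was set.

  // With -fsanitize=safe-stack, local variables live on the unsafe stack, so
  // use the dedicated accessor instead of the native stack pointer.
  const void* sp =
      heap::base::Stack::GetCurrentStackPointerForLocalVariables();
  USE(sp);
}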
diff --git a/deps/v8/src/heap/basic-memory-chunk.h b/deps/v8/src/heap/basic-memory-chunk.h
index de91e6ea9f..98a7109f97 100644
--- a/deps/v8/src/heap/basic-memory-chunk.h
+++ b/deps/v8/src/heap/basic-memory-chunk.h
@@ -79,31 +79,27 @@ class BasicMemoryChunk {
// triggering on the same page.
COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
- // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
- // to iterate the page.
- SWEEP_TO_ITERATE = 1u << 17,
-
// |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
// enabled.
- INCREMENTAL_MARKING = 1u << 18,
- NEW_SPACE_BELOW_AGE_MARK = 1u << 19,
+ INCREMENTAL_MARKING = 1u << 17,
+ NEW_SPACE_BELOW_AGE_MARK = 1u << 18,
// The memory chunk freeing bookkeeping has been performed but the chunk has
// not yet been freed.
- UNREGISTERED = 1u << 20,
+ UNREGISTERED = 1u << 19,
// The memory chunk belongs to the read-only heap and does not participate
// in garbage collection. This is used instead of owner for identity
// checking since read-only chunks have no owner once they are detached.
- READ_ONLY_HEAP = 1u << 21,
+ READ_ONLY_HEAP = 1u << 20,
// The memory chunk is pinned in memory and can't be moved. This is likely
// because there exists a potential pointer to somewhere in the chunk which
// can't be updated.
- PINNED = 1u << 22,
+ PINNED = 1u << 21,
// This page belongs to a shared heap.
- IN_SHARED_HEAP = 1u << 23,
+ IN_SHARED_HEAP = 1u << 22,
};
using MainThreadFlags = base::Flags<Flag, uintptr_t>;
diff --git a/deps/v8/src/heap/code-object-registry.h b/deps/v8/src/heap/code-object-registry.h
index f0ae334d99..b0a2dbd4cf 100644
--- a/deps/v8/src/heap/code-object-registry.h
+++ b/deps/v8/src/heap/code-object-registry.h
@@ -5,7 +5,6 @@
#ifndef V8_HEAP_CODE_OBJECT_REGISTRY_H_
#define V8_HEAP_CODE_OBJECT_REGISTRY_H_
-#include <set>
#include <vector>
#include "src/base/macros.h"
diff --git a/deps/v8/src/heap/code-range.cc b/deps/v8/src/heap/code-range.cc
index 5c5911d676..08b3c15148 100644
--- a/deps/v8/src/heap/code-range.cc
+++ b/deps/v8/src/heap/code-range.cc
@@ -53,6 +53,11 @@ Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size,
CHECK(IsAligned(result, alignment));
return result;
}
+ // The empty memory_ranges means that GetFreeMemoryRangesWithin() API
+ // is not supported, so use the lowest address from the preferred region
+ // as a hint because it'll be at least as good as the fallback hint but
+ // with a higher chances to point to the free address space range.
+ return RoundUp(preferred_region.begin(), alignment);
}
return RoundUp(FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint),
alignment);
@@ -124,16 +129,8 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
: VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
params.base_bias_size = RoundUp(reserved_area, allocate_page_size);
params.page_size = MemoryChunk::kPageSize;
- // V8_EXTERNAL_CODE_SPACE imposes additional alignment requirement for the
- // base address, so make sure the hint calculation function takes that into
- // account. Otherwise the allocated reservation might be outside of the
- // preferred region (see Isolate::GetShortBuiltinsCallRegion()).
- const size_t hint_alignment =
- V8_EXTERNAL_CODE_SPACE_BOOL
- ? RoundUp(params.base_alignment, allocate_page_size)
- : allocate_page_size;
params.requested_start_hint =
- GetCodeRangeAddressHint()->GetAddressHint(requested, hint_alignment);
+ GetCodeRangeAddressHint()->GetAddressHint(requested, allocate_page_size);
if (!VirtualMemoryCage::InitReservation(params)) return false;
@@ -175,7 +172,10 @@ uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
size_t embedded_blob_code_size) {
base::MutexGuard guard(&remap_embedded_builtins_mutex_);
- const base::AddressRegion& code_region = reservation()->region();
+ // Remap embedded builtins into the end of the address range controlled by
+ // the BoundedPageAllocator.
+ const base::AddressRegion code_region(page_allocator()->begin(),
+ page_allocator()->size());
CHECK_NE(code_region.begin(), kNullAddress);
CHECK(!code_region.is_empty());
diff --git a/deps/v8/src/heap/concurrent-allocator-inl.h b/deps/v8/src/heap/concurrent-allocator-inl.h
index b6ef858e12..a76d5db050 100644
--- a/deps/v8/src/heap/concurrent-allocator-inl.h
+++ b/deps/v8/src/heap/concurrent-allocator-inl.h
@@ -24,7 +24,7 @@ AllocationResult ConcurrentAllocator::AllocateRaw(int object_size,
DCHECK(!FLAG_enable_third_party_heap);
// TODO(dinfuehr): Add support for allocation observers
#ifdef DEBUG
- local_heap_->VerifyCurrent();
+ if (local_heap_) local_heap_->VerifyCurrent();
#endif
if (object_size > kMaxLabObjectSize) {
@@ -37,11 +37,9 @@ AllocationResult ConcurrentAllocator::AllocateRaw(int object_size,
AllocationResult ConcurrentAllocator::AllocateInLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
- if (allocation.IsRetry()) {
- return AllocateInLabSlow(object_size, alignment, origin);
- } else {
- return allocation;
- }
+ return allocation.IsFailure()
+ ? AllocateInLabSlow(object_size, alignment, origin)
+ : allocation;
}
} // namespace internal
diff --git a/deps/v8/src/heap/concurrent-allocator.cc b/deps/v8/src/heap/concurrent-allocator.cc
index bfdfaea7fe..b4dfddbb4e 100644
--- a/deps/v8/src/heap/concurrent-allocator.cc
+++ b/deps/v8/src/heap/concurrent-allocator.cc
@@ -37,7 +37,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
AllocationResult result = local_heap.AllocateRaw(
kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
AllocationAlignment::kTaggedAligned);
- if (!result.IsRetry()) {
+ if (!result.IsFailure()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kSmallObjectSize,
ClearFreedMemoryMode::kDontClearFreedMemory);
@@ -48,7 +48,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
result = local_heap.AllocateRaw(kMediumObjectSize, AllocationType::kOld,
AllocationOrigin::kRuntime,
AllocationAlignment::kTaggedAligned);
- if (!result.IsRetry()) {
+ if (!result.IsFailure()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kMediumObjectSize,
ClearFreedMemoryMode::kDontClearFreedMemory);
@@ -59,7 +59,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
result = local_heap.AllocateRaw(kLargeObjectSize, AllocationType::kOld,
AllocationOrigin::kRuntime,
AllocationAlignment::kTaggedAligned);
- if (!result.IsRetry()) {
+ if (!result.IsFailure()) {
heap->CreateFillerObjectAtBackground(
result.ToAddress(), kLargeObjectSize,
ClearFreedMemoryMode::kDontClearFreedMemory);
@@ -122,11 +122,11 @@ void ConcurrentAllocator::UnmarkLinearAllocationArea() {
AllocationResult ConcurrentAllocator::AllocateInLabSlow(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
if (!EnsureLab(origin)) {
- return AllocationResult::Retry(space_->identity());
+ return AllocationResult::Failure();
}
AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
- DCHECK(!allocation.IsRetry());
+ DCHECK(!allocation.IsFailure());
return allocation;
}
@@ -145,7 +145,7 @@ bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
HeapObject object = HeapObject::FromAddress(result->first);
LocalAllocationBuffer saved_lab = std::move(lab_);
lab_ = LocalAllocationBuffer::FromResult(
- local_heap_->heap(), AllocationResult(object), result->second);
+ space_->heap(), AllocationResult::FromObject(object), result->second);
DCHECK(lab_.IsValid());
if (!lab_.TryMerge(&saved_lab)) {
saved_lab.CloseAndMakeIterable();
@@ -157,7 +157,7 @@ AllocationResult ConcurrentAllocator::AllocateOutsideLab(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
auto result = space_->RawRefillLabBackground(local_heap_, object_size,
object_size, alignment, origin);
- if (!result) return AllocationResult::Retry(space_->identity());
+ if (!result) return AllocationResult::Failure();
HeapObject object = HeapObject::FromAddress(result->first);
@@ -166,7 +166,7 @@ AllocationResult ConcurrentAllocator::AllocateOutsideLab(
object_size);
}
- return AllocationResult(object);
+ return AllocationResult::FromObject(object);
}
bool ConcurrentAllocator::IsBlackAllocationEnabled() const {
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index f806c4eca6..1863eb5a22 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -116,6 +116,10 @@ class ConcurrentMarkingVisitor final
return VisitJSObjectSubclassFast(map, object);
}
+ int VisitJSExternalObject(Map map, JSExternalObject object) {
+ return VisitJSObjectSubclass(map, object);
+ }
+
#if V8_ENABLE_WEBASSEMBLY
int VisitWasmInstanceObject(Map map, WasmInstanceObject object) {
return VisitJSObjectSubclass(map, object);
@@ -318,15 +322,17 @@ class ConcurrentMarkingVisitor final
}
void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target) {
+ if (!MarkCompactCollector::ShouldRecordRelocSlot(host, rinfo, target))
+ return;
+
MarkCompactCollector::RecordRelocSlotInfo info =
- MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
- if (info.should_record) {
- MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
- if (!data.typed_slots) {
- data.typed_slots.reset(new TypedSlots());
- }
- data.typed_slots->Insert(info.slot_type, info.offset);
+ MarkCompactCollector::ProcessRelocInfo(host, rinfo, target);
+
+ MemoryChunkData& data = (*memory_chunk_data_)[info.memory_chunk];
+ if (!data.typed_slots) {
+ data.typed_slots.reset(new TypedSlots());
}
+ data.typed_slots->Insert(info.slot_type, info.offset);
}
void SynchronizePageAccess(HeapObject heap_object) {
@@ -451,7 +457,11 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
int kObjectsUntilInterrupCheck = 1000;
uint8_t task_id = delegate->GetTaskId() + 1;
TaskState* task_state = &task_state_[task_id];
- MarkingWorklists::Local local_marking_worklists(marking_worklists_);
+ auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
+ MarkingWorklists::Local local_marking_worklists(
+ marking_worklists_, cpp_heap
+ ? cpp_heap->CreateCppMarkingState()
+ : MarkingWorklists::Local::kNoCppMarkingState);
WeakObjects::Local local_weak_objects(weak_objects_);
ConcurrentMarkingVisitor visitor(
task_id, &local_marking_worklists, &local_weak_objects, heap_,
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.cc b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
index 15737881ef..7c6d7fdda6 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -22,6 +22,7 @@
#include "src/handles/global-handles.h"
#include "src/handles/handles.h"
#include "src/heap/base/stack.h"
+#include "src/heap/cppgc-js/cpp-marking-state.h"
#include "src/heap/cppgc-js/cpp-snapshot.h"
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
#include "src/heap/cppgc-js/unified-heap-marking-verifier.h"
@@ -40,6 +41,7 @@
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/sweeper.h"
#include "src/heap/cppgc/unmarker.h"
+#include "src/heap/embedder-tracing-inl.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/marking-worklist.h"
@@ -49,6 +51,62 @@
namespace v8 {
+namespace {
+
+class V8ToCppGCReferencesVisitor final
+ : public v8::EmbedderHeapTracer::TracedGlobalHandleVisitor {
+ public:
+ V8ToCppGCReferencesVisitor(
+ cppgc::internal::MutatorMarkingState& marking_state,
+ v8::internal::Isolate* isolate,
+ const v8::WrapperDescriptor& wrapper_descriptor)
+ : marking_state_(marking_state),
+ isolate_(isolate),
+ wrapper_descriptor_(wrapper_descriptor) {}
+
+ void VisitTracedGlobalHandle(const v8::TracedGlobal<v8::Value>&) final {
+ UNREACHABLE();
+ }
+
+ void VisitTracedReference(const v8::TracedReference<v8::Value>& value) final {
+ VisitHandle(value, value.WrapperClassId());
+ }
+
+ private:
+ void VisitHandle(const v8::TracedReference<v8::Value>& value,
+ uint16_t class_id) {
+ DCHECK(!value.IsEmpty());
+
+ const internal::JSObject js_object =
+ *reinterpret_cast<const internal::JSObject* const&>(value);
+ if (!js_object.ptr() || !js_object.MayHaveEmbedderFields()) return;
+
+ internal::LocalEmbedderHeapTracer::WrapperInfo info;
+ if (!internal::LocalEmbedderHeapTracer::ExtractWrappableInfo(
+ isolate_, js_object, wrapper_descriptor_, &info))
+ return;
+
+ marking_state_.MarkAndPush(
+ cppgc::internal::HeapObjectHeader::FromObject(info.second));
+ }
+
+ cppgc::internal::MutatorMarkingState& marking_state_;
+ v8::internal::Isolate* isolate_;
+ const v8::WrapperDescriptor& wrapper_descriptor_;
+};
+
+void TraceV8ToCppGCReferences(
+ v8::internal::Isolate* isolate,
+ cppgc::internal::MutatorMarkingState& marking_state,
+ const v8::WrapperDescriptor& wrapper_descriptor) {
+ DCHECK(isolate);
+ V8ToCppGCReferencesVisitor forwarding_visitor(marking_state, isolate,
+ wrapper_descriptor);
+ isolate->global_handles()->IterateTracedNodes(&forwarding_visitor);
+}
+
+} // namespace
+
// static
constexpr uint16_t WrapperDescriptor::kUnknownEmbedderId;
@@ -88,19 +146,16 @@ void CppHeap::EnableDetachedGarbageCollectionsForTesting() {
}
void CppHeap::CollectGarbageForTesting(cppgc::EmbedderStackState stack_state) {
- return internal::CppHeap::From(this)->CollectGarbageForTesting(stack_state);
-}
-
-void JSHeapConsistency::DijkstraMarkingBarrierSlow(
- cppgc::HeapHandle& heap_handle, const TracedReferenceBase& ref) {
- auto& heap_base = cppgc::internal::HeapBase::From(heap_handle);
- static_cast<JSVisitor*>(&heap_base.marker()->Visitor())->Trace(ref);
+ return internal::CppHeap::From(this)->CollectGarbageForTesting(
+ cppgc::internal::GarbageCollector::Config::CollectionType::kMajor,
+ stack_state);
}
-void JSHeapConsistency::CheckWrapper(v8::Local<v8::Object>& wrapper,
- int wrapper_index, const void* wrappable) {
- CHECK_EQ(wrappable,
- wrapper->GetAlignedPointerFromInternalField(wrapper_index));
+void CppHeap::CollectGarbageInYoungGenerationForTesting(
+ cppgc::EmbedderStackState stack_state) {
+ return internal::CppHeap::From(this)->CollectGarbageForTesting(
+ cppgc::internal::GarbageCollector::Config::CollectionType::kMinor,
+ stack_state);
}
namespace internal {
@@ -176,17 +231,34 @@ UnifiedHeapConcurrentMarker::CreateConcurrentMarkingVisitor(
heap(), marking_state, unified_heap_marking_state_);
}
+void FatalOutOfMemoryHandlerImpl(const std::string& reason,
+ const SourceLocation&, HeapBase* heap) {
+ FatalProcessOutOfMemory(static_cast<v8::internal::CppHeap*>(heap)->isolate(),
+ reason.c_str());
+}
+
+} // namespace
+
class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
public:
- UnifiedHeapMarker(Key, Heap* v8_heap, cppgc::internal::HeapBase& cpp_heap,
+ UnifiedHeapMarker(Heap* v8_heap, cppgc::internal::HeapBase& cpp_heap,
cppgc::Platform* platform, MarkingConfig config);
~UnifiedHeapMarker() final = default;
void AddObject(void*);
+ cppgc::internal::MarkingWorklists& GetMarkingWorklists() {
+ return marking_worklists_;
+ }
+
+ cppgc::internal::MutatorMarkingState& GetMutatorMarkingState() {
+ return static_cast<cppgc::internal::MutatorMarkingState&>(
+ marking_visitor_->marking_state_);
+ }
+
protected:
- cppgc::Visitor& visitor() final { return marking_visitor_; }
+ cppgc::Visitor& visitor() final { return *marking_visitor_; }
cppgc::internal::ConservativeTracingVisitor& conservative_visitor() final {
return conservative_marking_visitor_;
}
@@ -196,20 +268,25 @@ class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
private:
UnifiedHeapMarkingState unified_heap_marking_state_;
- MutatorUnifiedHeapMarkingVisitor marking_visitor_;
+ std::unique_ptr<MutatorUnifiedHeapMarkingVisitor> marking_visitor_;
cppgc::internal::ConservativeMarkingVisitor conservative_marking_visitor_;
};
-UnifiedHeapMarker::UnifiedHeapMarker(Key key, Heap* v8_heap,
+UnifiedHeapMarker::UnifiedHeapMarker(Heap* v8_heap,
cppgc::internal::HeapBase& heap,
cppgc::Platform* platform,
MarkingConfig config)
- : cppgc::internal::MarkerBase(key, heap, platform, config),
+ : cppgc::internal::MarkerBase(heap, platform, config),
unified_heap_marking_state_(v8_heap),
- marking_visitor_(heap, mutator_marking_state_,
- unified_heap_marking_state_),
+ marking_visitor_(
+ config.collection_type == cppgc::internal::GarbageCollector::Config::
+ CollectionType::kMajor
+ ? std::make_unique<MutatorUnifiedHeapMarkingVisitor>(
+ heap, mutator_marking_state_, unified_heap_marking_state_)
+ : std::make_unique<MutatorMinorGCMarkingVisitor>(
+ heap, mutator_marking_state_, unified_heap_marking_state_)),
conservative_marking_visitor_(heap, mutator_marking_state_,
- marking_visitor_) {
+ *marking_visitor_) {
concurrent_marker_ = std::make_unique<UnifiedHeapConcurrentMarker>(
heap_, marking_worklists_, schedule_, platform_,
unified_heap_marking_state_);
@@ -220,18 +297,11 @@ void UnifiedHeapMarker::AddObject(void* object) {
cppgc::internal::HeapObjectHeader::FromObject(object));
}
-void FatalOutOfMemoryHandlerImpl(const std::string& reason,
- const SourceLocation&, HeapBase* heap) {
- FatalProcessOutOfMemory(static_cast<v8::internal::CppHeap*>(heap)->isolate(),
- reason.c_str());
-}
-
-} // namespace
-
void CppHeap::MetricRecorderAdapter::AddMainThreadEvent(
const FullCycle& cppgc_event) {
+ DCHECK(!last_full_gc_event_.has_value());
last_full_gc_event_ = cppgc_event;
- GetIsolate()->heap()->tracer()->NotifyGCCompleted();
+ GetIsolate()->heap()->tracer()->NotifyCppGCCompleted();
}
void CppHeap::MetricRecorderAdapter::AddMainThreadEvent(
@@ -300,12 +370,23 @@ bool CppHeap::MetricRecorderAdapter::MetricsReportPending() const {
const base::Optional<cppgc::internal::MetricRecorder::FullCycle>
CppHeap::MetricRecorderAdapter::ExtractLastFullGcEvent() {
- return std::move(last_full_gc_event_);
+ auto res = std::move(last_full_gc_event_);
+ last_full_gc_event_.reset();
+ return res;
}
const base::Optional<cppgc::internal::MetricRecorder::MainThreadIncrementalMark>
CppHeap::MetricRecorderAdapter::ExtractLastIncrementalMarkEvent() {
- return std::move(last_incremental_mark_event_);
+ auto res = std::move(last_incremental_mark_event_);
+ last_incremental_mark_event_.reset();
+ return res;
+}
+
+void CppHeap::MetricRecorderAdapter::ClearCachedEvents() {
+ incremental_mark_batched_events_.events.clear();
+ incremental_sweep_batched_events_.events.clear();
+ last_incremental_mark_event_.reset();
+ last_full_gc_event_.reset();
}
Isolate* CppHeap::MetricRecorderAdapter::GetIsolate() const {
@@ -331,8 +412,10 @@ CppHeap::CppHeap(
std::make_shared<CppgcPlatformAdapter>(platform), custom_spaces,
cppgc::internal::HeapBase::StackSupport::
kSupportsConservativeStackScan,
- cppgc::internal::HeapBase::MarkingType::kIncrementalAndConcurrent,
- cppgc::internal::HeapBase::SweepingType::kIncrementalAndConcurrent),
+ FLAG_single_threaded_gc ? MarkingType::kIncremental
+ : MarkingType::kIncrementalAndConcurrent,
+ FLAG_single_threaded_gc ? SweepingType::kIncremental
+ : SweepingType::kIncrementalAndConcurrent),
wrapper_descriptor_(wrapper_descriptor) {
CHECK_NE(WrapperDescriptor::kUnknownEmbedderId,
wrapper_descriptor_.embedder_id_for_garbage_collected);
@@ -397,17 +480,6 @@ void CppHeap::DetachIsolate() {
no_gc_scope_++;
}
-void CppHeap::RegisterV8References(
- const std::vector<std::pair<void*, void*> >& embedder_fields) {
- DCHECK(marker_);
- for (auto& tuple : embedder_fields) {
- // First field points to type.
- // Second field points to object.
- static_cast<UnifiedHeapMarker*>(marker_.get())->AddObject(tuple.second);
- }
- marking_done_ = false;
-}
-
namespace {
bool IsMemoryReducingGC(CppHeap::GarbageCollectionFlags flags) {
@@ -423,38 +495,62 @@ bool ShouldReduceMemory(CppHeap::GarbageCollectionFlags flags) {
}
} // namespace
-void CppHeap::TracePrologue(GarbageCollectionFlags gc_flags) {
+
+CppHeap::MarkingType CppHeap::SelectMarkingType() const {
+ if (IsForceGC(current_gc_flags_) && !force_incremental_marking_for_testing_)
+ return MarkingType::kAtomic;
+
+ return marking_support();
+}
+
+CppHeap::SweepingType CppHeap::SelectSweepingType() const {
+ if (IsForceGC(current_gc_flags_)) return SweepingType::kAtomic;
+
+ return sweeping_support();
+}
+
+void CppHeap::InitializeTracing(
+ cppgc::internal::GarbageCollector::Config::CollectionType collection_type,
+ GarbageCollectionFlags gc_flags) {
CHECK(!sweeper_.IsSweepingInProgress());
+ // Check that previous cycle metrics have been reported.
+ DCHECK_IMPLIES(GetMetricRecorder(),
+ !GetMetricRecorder()->MetricsReportPending());
+
+ DCHECK(!collection_type_);
+ collection_type_ = collection_type;
+
#if defined(CPPGC_YOUNG_GENERATION)
- cppgc::internal::SequentialUnmarker unmarker(raw_heap());
+ if (*collection_type_ ==
+ cppgc::internal::GarbageCollector::Config::CollectionType::kMajor)
+ cppgc::internal::SequentialUnmarker unmarker(raw_heap());
#endif // defined(CPPGC_YOUNG_GENERATION)
current_gc_flags_ = gc_flags;
const UnifiedHeapMarker::MarkingConfig marking_config{
- UnifiedHeapMarker::MarkingConfig::CollectionType::kMajor,
- cppgc::Heap::StackState::kNoHeapPointers,
- (IsForceGC(current_gc_flags_) && !force_incremental_marking_for_testing_)
- ? UnifiedHeapMarker::MarkingConfig::MarkingType::kAtomic
- : UnifiedHeapMarker::MarkingConfig::MarkingType::
- kIncrementalAndConcurrent,
+ *collection_type_, cppgc::Heap::StackState::kNoHeapPointers,
+ SelectMarkingType(),
IsForceGC(current_gc_flags_)
? UnifiedHeapMarker::MarkingConfig::IsForcedGC::kForced
: UnifiedHeapMarker::MarkingConfig::IsForcedGC::kNotForced};
- DCHECK_IMPLIES(!isolate_, (cppgc::Heap::MarkingType::kAtomic ==
- marking_config.marking_type) ||
- force_incremental_marking_for_testing_);
+ DCHECK_IMPLIES(!isolate_,
+ (MarkingType::kAtomic == marking_config.marking_type) ||
+ force_incremental_marking_for_testing_);
if (ShouldReduceMemory(current_gc_flags_)) {
// Only enable compaction when in a memory reduction garbage collection as
// it may significantly increase the final garbage collection pause.
compactor_.InitializeIfShouldCompact(marking_config.marking_type,
marking_config.stack_state);
}
- marker_ =
- cppgc::internal::MarkerFactory::CreateAndStartMarking<UnifiedHeapMarker>(
- isolate_ ? isolate_->heap() : nullptr, AsBase(), platform_.get(),
- marking_config);
+ marker_ = std::make_unique<UnifiedHeapMarker>(
+ isolate_ ? isolate()->heap() : nullptr, AsBase(), platform_.get(),
+ marking_config);
+}
+
+void CppHeap::StartTracing() {
+ marker_->StartMarking();
marking_done_ = false;
}
@@ -483,12 +579,17 @@ bool CppHeap::IsTracingDone() { return marking_done_; }
void CppHeap::EnterFinalPause(cppgc::EmbedderStackState stack_state) {
CHECK(!in_disallow_gc_scope());
in_atomic_pause_ = true;
- if (override_stack_state_) {
- stack_state = *override_stack_state_;
- }
marker_->EnterAtomicPause(stack_state);
- compactor_.CancelIfShouldNotCompact(cppgc::Heap::MarkingType::kAtomic,
- stack_state);
+ if (isolate_ &&
+ *collection_type_ ==
+ cppgc::internal::GarbageCollector::Config::CollectionType::kMinor) {
+ // Visit V8 -> cppgc references.
+ TraceV8ToCppGCReferences(isolate_,
+ static_cast<UnifiedHeapMarker*>(marker_.get())
+ ->GetMutatorMarkingState(),
+ wrapper_descriptor_);
+ }
+ compactor_.CancelIfShouldNotCompact(MarkingType::kAtomic, stack_state);
}
void CppHeap::TraceEpilogue() {
@@ -511,8 +612,7 @@ void CppHeap::TraceEpilogue() {
buffered_allocated_bytes_ = 0;
const size_t bytes_allocated_in_prefinalizers = ExecutePreFinalizers();
#if CPPGC_VERIFY_HEAP
- UnifiedHeapMarkingVerifier verifier(
- *this, cppgc::internal::Heap::Config::CollectionType::kMajor);
+ UnifiedHeapMarkingVerifier verifier(*this, *collection_type_);
verifier.Run(
stack_state_of_prev_gc(), stack_end_of_current_gc(),
stats_collector()->marked_bytes() + bytes_allocated_in_prefinalizers);
@@ -528,27 +628,43 @@ void CppHeap::TraceEpilogue() {
cppgc::internal::Sweeper::SweepingConfig::CompactableSpaceHandling
compactable_space_handling = compactor_.CompactSpacesIfEnabled();
const cppgc::internal::Sweeper::SweepingConfig sweeping_config{
- // In case the GC was forced, also finalize sweeping right away.
- IsForceGC(current_gc_flags_)
- ? cppgc::internal::Sweeper::SweepingConfig::SweepingType::kAtomic
- : cppgc::internal::Sweeper::SweepingConfig::SweepingType::
- kIncrementalAndConcurrent,
- compactable_space_handling,
+ SelectSweepingType(), compactable_space_handling,
ShouldReduceMemory(current_gc_flags_)
? cppgc::internal::Sweeper::SweepingConfig::FreeMemoryHandling::
kDiscardWherePossible
: cppgc::internal::Sweeper::SweepingConfig::FreeMemoryHandling::
kDoNotDiscard};
- DCHECK_IMPLIES(
- !isolate_,
- cppgc::internal::Sweeper::SweepingConfig::SweepingType::kAtomic ==
- sweeping_config.sweeping_type);
+ DCHECK_IMPLIES(!isolate_,
+ SweepingType::kAtomic == sweeping_config.sweeping_type);
sweeper().Start(sweeping_config);
}
in_atomic_pause_ = false;
+ collection_type_.reset();
sweeper().NotifyDoneIfNeeded();
}
+void CppHeap::RunMinorGC() {
+#if defined(CPPGC_YOUNG_GENERATION)
+ if (in_no_gc_scope()) return;
+ // Minor GC does not support nesting in full GCs.
+ if (IsMarking()) return;
+ // Finish sweeping in case it is still running.
+ sweeper().FinishIfRunning();
+
+ SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
+
+ // Perform an atomic GC, with starting incremental/concurrent marking and
+ // immediately finalizing the garbage collection.
+ InitializeTracing(
+ cppgc::internal::GarbageCollector::Config::CollectionType::kMinor,
+ GarbageCollectionFlagValues::kForced);
+ StartTracing();
+ EnterFinalPause(cppgc::EmbedderStackState::kMayContainHeapPointers);
+ AdvanceTracing(std::numeric_limits<double>::infinity());
+ TraceEpilogue();
+#endif // defined(CPPGC_YOUNG_GENERATION)
+}
+
void CppHeap::AllocatedObjectSizeIncreased(size_t bytes) {
buffered_allocated_bytes_ += static_cast<int64_t>(bytes);
ReportBufferedAllocationSizeIfPossible();
@@ -584,6 +700,7 @@ void CppHeap::ReportBufferedAllocationSizeIfPossible() {
}
void CppHeap::CollectGarbageForTesting(
+ cppgc::internal::GarbageCollector::Config::CollectionType collection_type,
cppgc::internal::GarbageCollector::Config::StackState stack_state) {
if (in_no_gc_scope()) return;
@@ -599,7 +716,10 @@ void CppHeap::CollectGarbageForTesting(
} else {
// Perform an atomic GC, with starting incremental/concurrent marking and
// immediately finalizing the garbage collection.
- if (!IsMarking()) TracePrologue(GarbageCollectionFlagValues::kForced);
+ if (!IsMarking()) {
+ InitializeTracing(collection_type, GarbageCollectionFlagValues::kForced);
+ StartTracing();
+ }
EnterFinalPause(stack_state);
AdvanceTracing(std::numeric_limits<double>::infinity());
TraceEpilogue();
@@ -620,7 +740,10 @@ void CppHeap::StartIncrementalGarbageCollectionForTesting() {
DCHECK_NULL(isolate_);
if (IsMarking()) return;
force_incremental_marking_for_testing_ = true;
- TracePrologue(GarbageCollectionFlagValues::kForced);
+ InitializeTracing(
+ cppgc::internal::GarbageCollector::Config::CollectionType::kMajor,
+ GarbageCollectionFlagValues::kForced);
+ StartTracing();
force_incremental_marking_for_testing_ = false;
}
@@ -630,7 +753,9 @@ void CppHeap::FinalizeIncrementalGarbageCollectionForTesting(
DCHECK_NULL(isolate_);
DCHECK(IsMarking());
if (IsMarking()) {
- CollectGarbageForTesting(stack_state);
+ CollectGarbageForTesting(
+ cppgc::internal::GarbageCollector::Config::CollectionType::kMajor,
+ stack_state);
}
sweeper_.FinishIfRunning();
}
@@ -719,5 +844,24 @@ CppHeap::MetricRecorderAdapter* CppHeap::GetMetricRecorder() const {
void CppHeap::FinishSweepingIfRunning() { sweeper_.FinishIfRunning(); }
+void CppHeap::FinishSweepingIfOutOfWork() { sweeper_.FinishIfOutOfWork(); }
+
+std::unique_ptr<CppMarkingState> CppHeap::CreateCppMarkingState() {
+ DCHECK(IsMarking());
+ return std::make_unique<CppMarkingState>(
+ isolate(), wrapper_descriptor_,
+ std::make_unique<cppgc::internal::MarkingStateBase>(
+ AsBase(),
+ static_cast<UnifiedHeapMarker*>(marker())->GetMarkingWorklists()));
+}
+
+std::unique_ptr<CppMarkingState>
+CppHeap::CreateCppMarkingStateForMutatorThread() {
+ DCHECK(IsMarking());
+ return std::make_unique<CppMarkingState>(
+ isolate(), wrapper_descriptor_,
+ static_cast<UnifiedHeapMarker*>(marker())->GetMutatorMarkingState());
+}
+
} // namespace internal
} // namespace v8
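Editor's note: a sketch, not part of the commit, condensing the sequence that CollectGarbageForTesting() and RunMinorGC() above now follow once TracePrologue() is split into InitializeTracing() and StartTracing(); member names are as inside CppHeap.

// Inside CppHeap (sketch): a forced, fully atomic major collection.
// if (!IsMarking()) {
//   InitializeTracing(
//       cppgc::internal::GarbageCollector::Config::CollectionType::kMajor,
//       GarbageCollectionFlagValues::kForced);  // sets collection_type_, marker_
//   StartTracing();                             // marker_->StartMarking()
// }
// EnterFinalPause(stack_state);  // minor GCs also trace V8 -> cppgc here
// AdvanceTracing(std::numeric_limits<double>::infinity());
// TraceEpilogue();               // verifies, starts sweeping, resets collection_type_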
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.h b/deps/v8/src/heap/cppgc-js/cpp-heap.h
index 3f9e8d9ec7..70958b2b6d 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.h
@@ -15,6 +15,7 @@ static_assert(
#include "include/v8-metrics.h"
#include "src/base/flags.h"
#include "src/base/macros.h"
+#include "src/base/optional.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/logging/metrics.h"
@@ -25,6 +26,8 @@ class Isolate;
namespace internal {
+class CppMarkingState;
+
// A C++ heap implementation used with V8 to implement unified heap.
class V8_EXPORT_PRIVATE CppHeap final
: public cppgc::internal::HeapBase,
@@ -62,6 +65,8 @@ class V8_EXPORT_PRIVATE CppHeap final
cppgc::internal::MetricRecorder::MainThreadIncrementalMark>
ExtractLastIncrementalMarkEvent();
+ void ClearCachedEvents();
+
private:
Isolate* GetIsolate() const;
@@ -105,6 +110,7 @@ class V8_EXPORT_PRIVATE CppHeap final
void EnableDetachedGarbageCollectionsForTesting();
void CollectGarbageForTesting(
+ cppgc::internal::GarbageCollector::Config::CollectionType,
cppgc::internal::GarbageCollector::Config::StackState);
void CollectCustomSpaceStatisticsAtLastGC(
@@ -112,15 +118,19 @@ class V8_EXPORT_PRIVATE CppHeap final
std::unique_ptr<CustomSpaceStatisticsReceiver>);
void FinishSweepingIfRunning();
+ void FinishSweepingIfOutOfWork();
- void RegisterV8References(
- const std::vector<std::pair<void*, void*>>& embedder_fields);
- void TracePrologue(GarbageCollectionFlags);
+ void InitializeTracing(
+ cppgc::internal::GarbageCollector::Config::CollectionType,
+ GarbageCollectionFlags);
+ void StartTracing();
bool AdvanceTracing(double max_duration);
bool IsTracingDone();
void TraceEpilogue();
void EnterFinalPause(cppgc::EmbedderStackState stack_state);
+ void RunMinorGC();
+
// StatsCollector::AllocationObserver interface.
void AllocatedObjectSizeIncreased(size_t) final;
void AllocatedObjectSizeDecreased(size_t) final;
@@ -134,6 +144,9 @@ class V8_EXPORT_PRIVATE CppHeap final
Isolate* isolate() const { return isolate_; }
+ std::unique_ptr<CppMarkingState> CreateCppMarkingState();
+ std::unique_ptr<CppMarkingState> CreateCppMarkingStateForMutatorThread();
+
private:
void FinalizeIncrementalGarbageCollectionIfNeeded(
cppgc::Heap::StackState) final {
@@ -147,8 +160,14 @@ class V8_EXPORT_PRIVATE CppHeap final
void FinalizeIncrementalGarbageCollectionForTesting(
cppgc::EmbedderStackState) final;
+ MarkingType SelectMarkingType() const;
+ SweepingType SelectSweepingType() const;
+
Isolate* isolate_ = nullptr;
bool marking_done_ = false;
+ // |collection_type_| is initialized when marking is in progress.
+ base::Optional<cppgc::internal::GarbageCollector::Config::CollectionType>
+ collection_type_;
GarbageCollectionFlags current_gc_flags_;
// Buffered allocated bytes. Reporting allocated bytes to V8 can trigger a GC
diff --git a/deps/v8/src/heap/cppgc-js/cpp-marking-state-inl.h b/deps/v8/src/heap/cppgc-js/cpp-marking-state-inl.h
new file mode 100644
index 0000000000..23294b4dca
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/cpp-marking-state-inl.h
@@ -0,0 +1,47 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_JS_CPP_MARKING_STATE_INL_H_
+#define V8_HEAP_CPPGC_JS_CPP_MARKING_STATE_INL_H_
+
+#include "src/heap/cppgc-js/cpp-marking-state.h"
+#include "src/heap/embedder-tracing-inl.h"
+#include "src/objects/embedder-data-slot.h"
+#include "src/objects/js-objects.h"
+
+namespace v8 {
+namespace internal {
+
+bool CppMarkingState::ExtractEmbedderDataSnapshot(
+ Map map, JSObject object, EmbedderDataSnapshot& snapshot) {
+ if (JSObject::GetEmbedderFieldCount(map) < 2) return false;
+
+ EmbedderDataSlot::PopulateEmbedderDataSnapshot(
+ map, object, wrapper_descriptor_.wrappable_type_index, snapshot.first);
+ EmbedderDataSlot::PopulateEmbedderDataSnapshot(
+ map, object, wrapper_descriptor_.wrappable_instance_index,
+ snapshot.second);
+ return true;
+}
+
+void CppMarkingState::MarkAndPush(const EmbedderDataSnapshot& snapshot) {
+ const EmbedderDataSlot type_slot(snapshot.first);
+ const EmbedderDataSlot instance_slot(snapshot.second);
+ MarkAndPush(type_slot, instance_slot);
+}
+
+void CppMarkingState::MarkAndPush(const EmbedderDataSlot type_slot,
+ const EmbedderDataSlot instance_slot) {
+ LocalEmbedderHeapTracer::WrapperInfo info;
+ if (LocalEmbedderHeapTracer::ExtractWrappableInfo(
+ isolate_, wrapper_descriptor_, type_slot, instance_slot, &info)) {
+ marking_state_.MarkAndPush(
+ cppgc::internal::HeapObjectHeader::FromObject(info.second));
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CPPGC_JS_CPP_MARKING_STATE_INL_H_
diff --git a/deps/v8/src/heap/cppgc-js/cpp-marking-state.h b/deps/v8/src/heap/cppgc-js/cpp-marking-state.h
new file mode 100644
index 0000000000..ad8ef3b680
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/cpp-marking-state.h
@@ -0,0 +1,67 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_JS_CPP_MARKING_STATE_H_
+#define V8_HEAP_CPPGC_JS_CPP_MARKING_STATE_H_
+
+#include <memory>
+
+#include "src/heap/cppgc-js/cpp-heap.h"
+#include "src/heap/cppgc/marking-state.h"
+#include "src/heap/cppgc/marking-worklists.h"
+#include "src/objects/embedder-data-slot.h"
+
+namespace v8 {
+namespace internal {
+
+class JSObject;
+class EmbedderDataSlot;
+
+class CppMarkingState {
+ public:
+ using EmbedderDataSnapshot =
+ std::pair<EmbedderDataSlot::EmbedderDataSlotSnapshot,
+ EmbedderDataSlot::EmbedderDataSlotSnapshot>;
+
+ CppMarkingState(Isolate* isolate, const WrapperDescriptor& wrapper_descriptor,
+ cppgc::internal::MarkingStateBase& main_thread_marking_state)
+ : isolate_(isolate),
+ wrapper_descriptor_(wrapper_descriptor),
+ owned_marking_state_(nullptr),
+ marking_state_(main_thread_marking_state) {}
+
+ CppMarkingState(Isolate* isolate, const WrapperDescriptor& wrapper_descriptor,
+ std::unique_ptr<cppgc::internal::MarkingStateBase>
+ concurrent_marking_state)
+ : isolate_(isolate),
+ wrapper_descriptor_(wrapper_descriptor),
+ owned_marking_state_(std::move(concurrent_marking_state)),
+ marking_state_(*owned_marking_state_) {}
+ CppMarkingState(const CppMarkingState&) = delete;
+ CppMarkingState& operator=(const CppMarkingState&) = delete;
+
+ void Publish() { marking_state_.Publish(); }
+
+ inline bool ExtractEmbedderDataSnapshot(Map, JSObject, EmbedderDataSnapshot&);
+
+ inline void MarkAndPush(const EmbedderDataSnapshot&);
+ inline void MarkAndPush(const EmbedderDataSlot type_slot,
+ const EmbedderDataSlot instance_slot);
+
+ bool IsLocalEmpty() {
+ return marking_state_.marking_worklist().IsLocalEmpty();
+ }
+
+ private:
+ Isolate* const isolate_;
+ const WrapperDescriptor& wrapper_descriptor_;
+
+ std::unique_ptr<cppgc::internal::MarkingStateBase> owned_marking_state_;
+ cppgc::internal::MarkingStateBase& marking_state_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CPPGC_JS_CPP_MARKING_STATE_H_
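The two constructors above distinguish a state that merely references the mutator thread's marking state from one that owns a dedicated concurrent marking state. A minimal sketch of how the CppHeap factories declared earlier in this patch hand these out; the surrounding function and its name are illustrative only:

// Illustration only; `cpp_heap` is assumed to be the CppHeap attached to the
// isolate.
void MarkWrappablesExample(CppHeap& cpp_heap) {
  // Mutator thread: wraps the marker's own MutatorMarkingState by reference,
  // so no extra ownership is involved.
  std::unique_ptr<CppMarkingState> main_state =
      cpp_heap.CreateCppMarkingStateForMutatorThread();

  // Concurrent thread: owns its marking state and must publish its local
  // worklist segments once the task segment is done.
  std::unique_ptr<CppMarkingState> concurrent_state =
      cpp_heap.CreateCppMarkingState();
  // ... MarkAndPush(type_slot, instance_slot) for visited wrappers ...
  concurrent_state->Publish();
}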
diff --git a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
index e1065376ea..69ab62f086 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
@@ -358,7 +358,8 @@ void* ExtractEmbedderDataBackref(Isolate* isolate,
if (!v8_value->IsObject()) return nullptr;
Handle<Object> v8_object = Utils::OpenHandle(*v8_value);
- if (!v8_object->IsJSObject() || !JSObject::cast(*v8_object).IsApiWrapper())
+ if (!v8_object->IsJSObject() ||
+ !JSObject::cast(*v8_object).MayHaveEmbedderFields())
return nullptr;
JSObject js_object = JSObject::cast(*v8_object);
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
index 09564055dc..f884b1d9fe 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
@@ -13,7 +13,7 @@ namespace v8 {
namespace internal {
UnifiedHeapMarkingVisitorBase::UnifiedHeapMarkingVisitorBase(
- HeapBase& heap, cppgc::internal::MarkingStateBase& marking_state,
+ HeapBase& heap, cppgc::internal::BasicMarkingState& marking_state,
UnifiedHeapMarkingState& unified_heap_marking_state)
: JSVisitor(cppgc::internal::VisitorFactory::CreateKey()),
marking_state_(marking_state),
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
index 721dbe5d98..abff33cd5a 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.h
@@ -17,7 +17,7 @@ class SourceLocation;
namespace internal {
class ConcurrentMarkingState;
-class MarkingStateBase;
+class BasicMarkingState;
class MutatorMarkingState;
} // namespace internal
} // namespace cppgc
@@ -31,9 +31,11 @@ using cppgc::WeakCallback;
using cppgc::internal::HeapBase;
using cppgc::internal::MutatorMarkingState;
+class UnifiedHeapMarker;
+
class V8_EXPORT_PRIVATE UnifiedHeapMarkingVisitorBase : public JSVisitor {
public:
- UnifiedHeapMarkingVisitorBase(HeapBase&, cppgc::internal::MarkingStateBase&,
+ UnifiedHeapMarkingVisitorBase(HeapBase&, cppgc::internal::BasicMarkingState&,
UnifiedHeapMarkingState&);
~UnifiedHeapMarkingVisitorBase() override = default;
@@ -49,13 +51,15 @@ class V8_EXPORT_PRIVATE UnifiedHeapMarkingVisitorBase : public JSVisitor {
void HandleMovableReference(const void**) final;
// JS handling.
- void Visit(const TracedReferenceBase& ref) final;
+ void Visit(const TracedReferenceBase& ref) override;
- cppgc::internal::MarkingStateBase& marking_state_;
+ cppgc::internal::BasicMarkingState& marking_state_;
UnifiedHeapMarkingState& unified_heap_marking_state_;
+
+ friend class UnifiedHeapMarker;
};
-class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor final
+class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor
: public UnifiedHeapMarkingVisitorBase {
public:
MutatorUnifiedHeapMarkingVisitor(HeapBase&, MutatorMarkingState&,
@@ -68,6 +72,18 @@ class V8_EXPORT_PRIVATE MutatorUnifiedHeapMarkingVisitor final
const SourceLocation&) final;
};
+class V8_EXPORT_PRIVATE MutatorMinorGCMarkingVisitor final
+ : public MutatorUnifiedHeapMarkingVisitor {
+ public:
+ using MutatorUnifiedHeapMarkingVisitor::MutatorUnifiedHeapMarkingVisitor;
+ ~MutatorMinorGCMarkingVisitor() override = default;
+
+ protected:
+ // Override and make the function empty, since we don't want to trace V8
+ // references during cppgc's minor GC.
+ void Visit(const TracedReferenceBase&) final {}
+};
+
class V8_EXPORT_PRIVATE ConcurrentUnifiedHeapMarkingVisitor final
: public UnifiedHeapMarkingVisitorBase {
public:
diff --git a/deps/v8/src/heap/cppgc/default-platform.cc b/deps/v8/src/heap/cppgc/default-platform.cc
index 46884d42df..1899557134 100644
--- a/deps/v8/src/heap/cppgc/default-platform.cc
+++ b/deps/v8/src/heap/cppgc/default-platform.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <include/cppgc/default-platform.h>
+#include "include/cppgc/default-platform.h"
namespace cppgc {
diff --git a/deps/v8/src/heap/cppgc/explicit-management.cc b/deps/v8/src/heap/cppgc/explicit-management.cc
index ccc5840af4..568ff2a5ce 100644
--- a/deps/v8/src/heap/cppgc/explicit-management.cc
+++ b/deps/v8/src/heap/cppgc/explicit-management.cc
@@ -4,6 +4,7 @@
#include "include/cppgc/explicit-management.h"
+#include <algorithm>
#include <tuple>
#include "src/heap/cppgc/heap-base.h"
@@ -26,7 +27,8 @@ bool InGC(HeapHandle& heap_handle) {
} // namespace
-void FreeUnreferencedObject(HeapHandle& heap_handle, void* object) {
+void ExplicitManagementImpl::FreeUnreferencedObject(HeapHandle& heap_handle,
+ void* object) {
if (InGC(heap_handle)) {
return;
}
@@ -34,16 +36,21 @@ void FreeUnreferencedObject(HeapHandle& heap_handle, void* object) {
auto& header = HeapObjectHeader::FromObject(object);
header.Finalize();
+ size_t object_size = 0;
+ USE(object_size);
+
// `object` is guaranteed to be of type GarbageCollected, so getting the
// BasePage is okay for regular and large objects.
BasePage* base_page = BasePage::FromPayload(object);
if (base_page->is_large()) { // Large object.
+ object_size = LargePage::From(base_page)->ObjectSize();
base_page->space().RemovePage(base_page);
base_page->heap().stats_collector()->NotifyExplicitFree(
LargePage::From(base_page)->PayloadSize());
LargePage::Destroy(LargePage::From(base_page));
} else { // Regular object.
const size_t header_size = header.AllocatedSize();
+ object_size = header.ObjectSize();
auto* normal_page = NormalPage::From(base_page);
auto& normal_space = *static_cast<NormalPageSpace*>(&base_page->space());
auto& lab = normal_space.linear_allocation_buffer();
@@ -59,6 +66,13 @@ void FreeUnreferencedObject(HeapHandle& heap_handle, void* object) {
// list entry.
}
}
+#if defined(CPPGC_YOUNG_GENERATION)
+ auto& heap_base = HeapBase::From(heap_handle);
+ heap_base.remembered_set().InvalidateRememberedSlotsInRange(
+ object, reinterpret_cast<uint8_t*>(object) + object_size);
+ // If this object was registered as remembered, remove it.
+ heap_base.remembered_set().InvalidateRememberedSourceObject(header);
+#endif // defined(CPPGC_YOUNG_GENERATION)
}
namespace {
@@ -98,17 +112,19 @@ bool Shrink(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
lab.Set(free_start, lab.size() + size_delta);
SetMemoryInaccessible(lab.start(), size_delta);
header.SetAllocatedSize(new_size);
- return true;
- }
- // Heuristic: Only return memory to the free list if the block is larger than
- // the smallest size class.
- if (size_delta >= ObjectAllocator::kSmallestSpaceSize) {
+ } else if (size_delta >= ObjectAllocator::kSmallestSpaceSize) {
+ // Heuristic: Only return memory to the free list if the block is larger
+ // than the smallest size class.
SetMemoryInaccessible(free_start, size_delta);
base_page.heap().stats_collector()->NotifyExplicitFree(size_delta);
normal_space.free_list().Add({free_start, size_delta});
NormalPage::From(&base_page)->object_start_bitmap().SetBit(free_start);
header.SetAllocatedSize(new_size);
}
+#if defined(CPPGC_YOUNG_GENERATION)
+ base_page.heap().remembered_set().InvalidateRememberedSlotsInRange(
+ free_start, free_start + size_delta);
+#endif // defined(CPPGC_YOUNG_GENERATION)
// Return success in any case, as we want to avoid that embedders start
// copying memory because of small deltas.
return true;
@@ -116,7 +132,7 @@ bool Shrink(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
} // namespace
-bool Resize(void* object, size_t new_object_size) {
+bool ExplicitManagementImpl::Resize(void* object, size_t new_object_size) {
// `object` is guaranteed to be of type GarbageCollected, so getting the
// BasePage is okay for regular and large objects.
BasePage* base_page = BasePage::FromPayload(object);
diff --git a/deps/v8/src/heap/cppgc/garbage-collector.h b/deps/v8/src/heap/cppgc/garbage-collector.h
index b8e52452ee..a49a7a1bad 100644
--- a/deps/v8/src/heap/cppgc/garbage-collector.h
+++ b/deps/v8/src/heap/cppgc/garbage-collector.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_CPPGC_GARBAGE_COLLECTOR_H_
#define V8_HEAP_CPPGC_GARBAGE_COLLECTOR_H_
+#include "include/cppgc/common.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/sweeper.h"
@@ -55,6 +56,11 @@ class GarbageCollector {
MarkingType::kAtomic, SweepingType::kAtomic};
}
+ static constexpr Config MinorConservativeAtomicConfig() {
+ return {CollectionType::kMinor, StackState::kMayContainHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
CollectionType collection_type = CollectionType::kMajor;
StackState stack_state = StackState::kMayContainHeapPointers;
MarkingType marking_type = MarkingType::kAtomic;
@@ -70,6 +76,9 @@ class GarbageCollector {
// The current epoch that the GC maintains. The epoch is increased on every
// GC invocation.
virtual size_t epoch() const = 0;
+
+ // Returns a non-null state if the stack state is overridden.
+ virtual const EmbedderStackState* override_stack_state() const = 0;
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/gc-invoker.cc b/deps/v8/src/heap/cppgc/gc-invoker.cc
index 9537f0c2a4..1bddad7a7e 100644
--- a/deps/v8/src/heap/cppgc/gc-invoker.cc
+++ b/deps/v8/src/heap/cppgc/gc-invoker.cc
@@ -6,6 +6,7 @@
#include <memory>
+#include "include/cppgc/common.h"
#include "include/cppgc/platform.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/task-handle.h"
@@ -24,6 +25,9 @@ class GCInvoker::GCInvokerImpl final : public GarbageCollector {
void CollectGarbage(GarbageCollector::Config) final;
void StartIncrementalGarbageCollection(GarbageCollector::Config) final;
size_t epoch() const final { return collector_->epoch(); }
+ const EmbedderStackState* override_stack_state() const final {
+ return collector_->override_stack_state();
+ }
private:
class GCTask final : public cppgc::Task {
@@ -48,6 +52,8 @@ class GCInvoker::GCInvokerImpl final : public GarbageCollector {
private:
void Run() final {
+ CHECK_NULL(collector_->override_stack_state());
+
if (handle_.IsCanceled() || (collector_->epoch() != saved_epoch_)) return;
collector_->CollectGarbage(config_);
@@ -94,6 +100,8 @@ void GCInvoker::GCInvokerImpl::CollectGarbage(GarbageCollector::Config config) {
// Force a precise GC since it will run in a non-nestable task.
config.stack_state =
GarbageCollector::Config::StackState::kNoHeapPointers;
+ DCHECK_NE(cppgc::Heap::StackSupport::kSupportsConservativeStackScan,
+ stack_support_);
gc_task_handle_ = GCTask::Post(
collector_, platform_->GetForegroundTaskRunner().get(), config);
}
@@ -137,5 +145,9 @@ void GCInvoker::StartIncrementalGarbageCollection(
size_t GCInvoker::epoch() const { return impl_->epoch(); }
+const EmbedderStackState* GCInvoker::override_stack_state() const {
+ return impl_->override_stack_state();
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/gc-invoker.h b/deps/v8/src/heap/cppgc/gc-invoker.h
index fa5e7e5435..ceebca139c 100644
--- a/deps/v8/src/heap/cppgc/gc-invoker.h
+++ b/deps/v8/src/heap/cppgc/gc-invoker.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_CPPGC_GC_INVOKER_H_
#define V8_HEAP_CPPGC_GC_INVOKER_H_
+#include "include/cppgc/common.h"
#include "include/cppgc/heap.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/garbage-collector.h"
@@ -36,6 +37,7 @@ class V8_EXPORT_PRIVATE GCInvoker final : public GarbageCollector {
void CollectGarbage(GarbageCollector::Config) final;
void StartIncrementalGarbageCollection(GarbageCollector::Config) final;
size_t epoch() const final;
+ const EmbedderStackState* override_stack_state() const final;
private:
class GCInvokerImpl;
diff --git a/deps/v8/src/heap/cppgc/globals.h b/deps/v8/src/heap/cppgc/globals.h
index bf1e215c22..e04df872b2 100644
--- a/deps/v8/src/heap/cppgc/globals.h
+++ b/deps/v8/src/heap/cppgc/globals.h
@@ -43,9 +43,30 @@ constexpr size_t kPageSize = 1 << kPageSizeLog2;
constexpr size_t kPageOffsetMask = kPageSize - 1;
constexpr size_t kPageBaseMask = ~kPageOffsetMask;
+#if defined(V8_TARGET_ARCH_ARM64) && defined(V8_OS_MACOS)
+// No guard pages on ARM64 macOS. This target has 16 kiB pages, meaning that
+// the guard pages do not protect anything, since there is no inaccessible
+// region surrounding the allocation.
+//
+// However, with a 4k guard page size (as below), we avoid putting any data
+// inside the "guard pages" region. Effectively, this wastes 2 * 4kiB of memory
+// for each 128kiB page, since this is memory we pay for (since accounting is at
+// the OS page level), but never use.
+//
+// The layout of pages is broadly:
+// | guard page | header | payload | guard page |
+// <--- 4k ---> <--- 4k --->
+// <------------------ 128k -------------------->
+//
+// Since this is aligned on an OS page boundary (16k), the guard pages are part
+// of the first and last OS page, respectively. So they are really private dirty
+// memory which we never use.
+constexpr size_t kGuardPageSize = 0;
+#else
// Guard pages are always put into memory. Whether they are actually protected
// depends on the allocator provided to the garbage collector.
constexpr size_t kGuardPageSize = 4096;
+#endif
constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
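To make the rationale above concrete: on ARM64 macOS the OS page size is 16 KiB, so a 4 KiB guard region at either end of a 128 KiB heap page lies inside the first or last OS page and can never be protected on its own; the guard space was pure overhead there. A small standalone sketch of the commit check this constant feeds (the helper name is made up; the real check is the SupportsCommittingGuardPages() change in page-memory.h further down):

#include <cstddef>

// Guard pages are only usable if their size is a non-zero multiple of the
// allocator's commit page size (mirrors SupportsCommittingGuardPages()).
constexpr bool CanCommitGuardPages(std::size_t guard_page_size,
                                   std::size_t commit_page_size) {
  return guard_page_size != 0 && guard_page_size % commit_page_size == 0;
}

static_assert(!CanCommitGuardPages(0, 16384),     // ARM64 macOS after this CL
              "no guard pages on 16 KiB-page targets");
static_assert(!CanCommitGuardPages(4096, 16384),  // old value never worked there
              "4 KiB guards cannot be committed with 16 KiB OS pages");
static_assert(CanCommitGuardPages(4096, 4096),    // default configuration
              "4 KiB guards work with 4 KiB OS pages");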
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index a5c89b6218..14b0d2ad19 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -82,6 +82,9 @@ HeapBase::HeapBase(
weak_persistent_region_(*oom_handler_.get()),
strong_cross_thread_persistent_region_(*oom_handler_.get()),
weak_cross_thread_persistent_region_(*oom_handler_.get()),
+#if defined(CPPGC_YOUNG_GENERATION)
+ remembered_set_(*this),
+#endif // defined(CPPGC_YOUNG_GENERATION)
stack_support_(stack_support),
marking_support_(marking_support),
sweeping_support_(sweeping_support) {
@@ -136,7 +139,7 @@ void HeapBase::ResetRememberedSet() {
};
DCHECK(AllLABsAreEmpty(raw_heap()).value());
caged_heap().local_data().age_table.Reset(&caged_heap().allocator());
- remembered_slots().clear();
+ remembered_set_.Reset();
}
#endif // defined(CPPGC_YOUNG_GENERATION)
diff --git a/deps/v8/src/heap/cppgc/heap-base.h b/deps/v8/src/heap/cppgc/heap-base.h
index 041f4cf3bd..0c6fe757f8 100644
--- a/deps/v8/src/heap/cppgc/heap-base.h
+++ b/deps/v8/src/heap/cppgc/heap-base.h
@@ -15,6 +15,7 @@
#include "src/base/macros.h"
#include "src/heap/cppgc/compactor.h"
#include "src/heap/cppgc/garbage-collector.h"
+#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/metric-recorder.h"
#include "src/heap/cppgc/object-allocator.h"
@@ -29,6 +30,10 @@
#include "src/heap/cppgc/caged-heap.h"
#endif
+#if defined(CPPGC_YOUNG_GENERATION)
+#include "src/heap/cppgc/remembered-set.h"
+#endif
+
namespace v8 {
namespace base {
class LsanPageAllocator;
@@ -162,7 +167,7 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
}
#if defined(CPPGC_YOUNG_GENERATION)
- std::set<void*>& remembered_slots() { return remembered_slots_; }
+ OldToNewRememberedSet& remembered_set() { return remembered_set_; }
#endif // defined(CPPGC_YOUNG_GENERATION)
size_t ObjectPayloadSize() const;
@@ -207,6 +212,7 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
int GetCreationThreadId() const { return creation_thread_id_; }
MarkingType marking_support() const { return marking_support_; }
+ SweepingType sweeping_support() const { return sweeping_support_; }
protected:
// Used by the incremental scheduler to finalize a GC if supported.
@@ -259,8 +265,8 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
ProcessHeapStatisticsUpdater::AllocationObserverImpl
allocation_observer_for_PROCESS_HEAP_STATISTICS_;
#if defined(CPPGC_YOUNG_GENERATION)
- std::set<void*> remembered_slots_;
-#endif
+ OldToNewRememberedSet remembered_set_;
+#endif // defined(CPPGC_YOUNG_GENERATION)
size_t no_gc_scope_ = 0;
size_t disallow_gc_scope_ = 0;
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.h b/deps/v8/src/heap/cppgc/heap-object-header.h
index e5a428a5a9..a6efb8defd 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.h
+++ b/deps/v8/src/heap/cppgc/heap-object-header.h
@@ -125,18 +125,17 @@ class HeapObjectHeader {
using GCInfoIndexField = UnusedField1::Next<GCInfoIndex, 14>;
// Used in |encoded_low_|.
using MarkBitField = v8::base::BitField16<bool, 0, 1>;
- using SizeField = void; // Use EncodeSize/DecodeSize instead.
+ using SizeField =
+ MarkBitField::Next<size_t, 15>; // Use EncodeSize/DecodeSize instead.
static constexpr size_t DecodeSize(uint16_t encoded) {
// Essentially, gets optimized to << 1.
- using SizeFieldImpl = MarkBitField::Next<size_t, 15>;
- return SizeFieldImpl::decode(encoded) * kAllocationGranularity;
+ return SizeField::decode(encoded) * kAllocationGranularity;
}
static constexpr uint16_t EncodeSize(size_t size) {
// Essentially, gets optimized to >> 1.
- using SizeFieldImpl = MarkBitField::Next<size_t, 15>;
- return SizeFieldImpl::encode(size / kAllocationGranularity);
+ return SizeField::encode(size / kAllocationGranularity);
}
V8_EXPORT_PRIVATE void CheckApiConstants();
@@ -230,8 +229,16 @@ size_t HeapObjectHeader::AllocatedSize() const {
}
void HeapObjectHeader::SetAllocatedSize(size_t size) {
+#if !defined(CPPGC_YOUNG_GENERATION)
+ // With sticky bits, marked objects correspond to old objects.
+ // TODO(bikineev:1029379): Consider disallowing old/marked objects to be
+ // resized.
DCHECK(!IsMarked());
- encoded_low_ = EncodeSize(size);
+#endif
+ // The object may be marked (i.e. old, in case young generation is enabled).
+ // Make sure to not overwrite the mark bit.
+ encoded_low_ &= ~SizeField::encode(SizeField::kMax);
+ encoded_low_ |= EncodeSize(size);
}
template <AccessMode mode>
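The key point of the SetAllocatedSize() change is that encoded_low_ packs the mark bit (bit 0) and the size (bits 1..15, in allocation-granularity units) into one 16-bit word, and resizing must not wipe a set mark bit because, with CPPGC_YOUNG_GENERATION, "marked" doubles as "old". A standalone sketch of the same bit manipulation, with the granularity value assumed for illustration:

#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr std::size_t kAllocationGranularity = 8;  // assumed for this sketch
constexpr std::uint16_t kMarkBit = 1u << 0;        // MarkBitField
constexpr std::uint16_t kSizeMask = 0xFFFEu;       // SizeField: bits 1..15

constexpr std::uint16_t EncodeSize(std::size_t size) {
  return static_cast<std::uint16_t>((size / kAllocationGranularity) << 1);
}

// Mirrors the new SetAllocatedSize(): clear only the size bits so that a set
// mark bit (an "old" object under CPPGC_YOUNG_GENERATION) survives resizing.
void SetAllocatedSize(std::uint16_t& encoded_low, std::size_t size) {
  encoded_low = static_cast<std::uint16_t>((encoded_low & ~kSizeMask) |
                                           EncodeSize(size));
}

int main() {
  std::uint16_t encoded_low = kMarkBit | EncodeSize(64);  // marked, 64 bytes
  SetAllocatedSize(encoded_low, 32);                      // shrink in place
  assert(encoded_low & kMarkBit);                    // old/mark bit preserved
  assert((encoded_low & kSizeMask) == EncodeSize(32));
}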
diff --git a/deps/v8/src/heap/cppgc/heap-space.cc b/deps/v8/src/heap/cppgc/heap-space.cc
index 9a78b44433..96e57d233f 100644
--- a/deps/v8/src/heap/cppgc/heap-space.cc
+++ b/deps/v8/src/heap/cppgc/heap-space.cc
@@ -20,6 +20,8 @@ BaseSpace::BaseSpace(RawHeap* heap, size_t index, PageType type,
USE(is_compactable_);
}
+BaseSpace::~BaseSpace() = default;
+
void BaseSpace::AddPage(BasePage* page) {
v8::base::LockGuard<v8::base::Mutex> lock(&pages_mutex_);
DCHECK_EQ(pages_.cend(), std::find(pages_.cbegin(), pages_.cend(), page));
diff --git a/deps/v8/src/heap/cppgc/heap-space.h b/deps/v8/src/heap/cppgc/heap-space.h
index 18fe7ba225..39be232079 100644
--- a/deps/v8/src/heap/cppgc/heap-space.h
+++ b/deps/v8/src/heap/cppgc/heap-space.h
@@ -28,6 +28,7 @@ class V8_EXPORT_PRIVATE BaseSpace {
BaseSpace(const BaseSpace&) = delete;
BaseSpace& operator=(const BaseSpace&) = delete;
+ virtual ~BaseSpace();
iterator begin() { return pages_.begin(); }
const_iterator begin() const { return pages_.begin(); }
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index 26500a9ca8..beaa089206 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -159,8 +159,8 @@ void Heap::StartGarbageCollection(Config config) {
const Marker::MarkingConfig marking_config{
config.collection_type, config.stack_state, config.marking_type,
config.is_forced_gc};
- marker_ = MarkerFactory::CreateAndStartMarking<Marker>(
- AsBase(), platform_.get(), marking_config);
+ marker_ = std::make_unique<Marker>(AsBase(), platform_.get(), marking_config);
+ marker_->StartMarking();
}
void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
@@ -168,9 +168,6 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
DCHECK(!in_no_gc_scope());
CHECK(!in_disallow_gc_scope());
config_.stack_state = stack_state;
- if (override_stack_state_) {
- config_.stack_state = *override_stack_state_;
- }
SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
in_atomic_pause_ = true;
{
diff --git a/deps/v8/src/heap/cppgc/heap.h b/deps/v8/src/heap/cppgc/heap.h
index c3504073bc..e47d203327 100644
--- a/deps/v8/src/heap/cppgc/heap.h
+++ b/deps/v8/src/heap/cppgc/heap.h
@@ -37,6 +37,9 @@ class V8_EXPORT_PRIVATE Heap final : public HeapBase,
void FinalizeIncrementalGarbageCollectionIfRunning(Config);
size_t epoch() const final { return epoch_; }
+ const EmbedderStackState* override_stack_state() const final {
+ return HeapBase::override_stack_state();
+ }
void DisableHeapGrowingForTesting();
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index e792c4c844..fa8732fde7 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -60,33 +60,11 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
return false;
}
-// Visit remembered set that was recorded in the generational barrier.
-void VisitRememberedSlots(HeapBase& heap,
- MutatorMarkingState& mutator_marking_state) {
-#if defined(CPPGC_YOUNG_GENERATION)
- StatsCollector::EnabledScope stats_scope(
- heap.stats_collector(), StatsCollector::kMarkVisitRememberedSets);
- for (void* slot : heap.remembered_slots()) {
- auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
- ->ObjectHeaderFromInnerAddress(slot);
- if (slot_header.IsYoung()) continue;
- // The design of young generation requires collections to be executed at the
- // top level (with the guarantee that no objects are currently being in
- // construction). This can be ensured by running young GCs from safe points
- // or by reintroducing nested allocation scopes that avoid finalization.
- DCHECK(!slot_header.template IsInConstruction<AccessMode::kNonAtomic>());
-
- void* value = *reinterpret_cast<void**>(slot);
- mutator_marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
- }
-#endif
-}
-
static constexpr size_t kDefaultDeadlineCheckInterval = 150u;
template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
typename WorklistLocal, typename Callback>
-bool DrainWorklistWithBytesAndTimeDeadline(MarkingStateBase& marking_state,
+bool DrainWorklistWithBytesAndTimeDeadline(BasicMarkingState& marking_state,
size_t marked_bytes_deadline,
v8::base::TimeTicks time_deadline,
WorklistLocal& worklist_local,
@@ -153,7 +131,7 @@ void MarkerBase::IncrementalMarkingTask::Run() {
}
}
-MarkerBase::MarkerBase(Key, HeapBase& heap, cppgc::Platform* platform,
+MarkerBase::MarkerBase(HeapBase& heap, cppgc::Platform* platform,
MarkingConfig config)
: heap_(heap),
config_(config),
@@ -248,6 +226,13 @@ void MarkerBase::StartMarking() {
incremental_marking_allocation_observer_.get());
}
}
+void MarkerBase::HandleNotFullyConstructedObjects() {
+ if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
+ mutator_marking_state_.FlushNotFullyConstructedObjects();
+ } else {
+ MarkNotFullyConstructedObjects();
+ }
+}
void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
StatsCollector::EnabledScope top_stats_scope(heap().stats_collector(),
@@ -271,12 +256,7 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
{
// VisitRoots also resets the LABs.
VisitRoots(config_.stack_state);
- if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
- mutator_marking_state_.FlushNotFullyConstructedObjects();
- DCHECK(marking_worklists_.not_fully_constructed_worklist()->IsEmpty());
- } else {
- MarkNotFullyConstructedObjects();
- }
+ HandleNotFullyConstructedObjects();
}
if (heap().marking_support() ==
MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
@@ -339,12 +319,32 @@ void MarkerBase::ProcessWeakness() {
heap().GetWeakCrossThreadPersistentRegion().Trace(&visitor());
// Call weak callbacks on objects that may now be pointing to dead objects.
- MarkingWorklists::WeakCallbackItem item;
LivenessBroker broker = LivenessBrokerFactory::Create();
+#if defined(CPPGC_YOUNG_GENERATION)
+ auto& remembered_set = heap().remembered_set();
+ if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
+ // Custom callbacks assume that untraced pointers point to not yet freed
+ // objects. They must make sure that upon callback completion no
+ // UntracedMember points to a freed object. This may not hold true if a
+ // custom callback for an old object operates with a reference to a young
+ // object that was freed on a minor collection cycle. To maintain the
+ // invariant that UntracedMembers always point to valid objects, execute
+ // custom callbacks for old objects on each minor collection cycle.
+ remembered_set.ExecuteCustomCallbacks(broker);
+ } else {
+ // For major GCs, just release all the remembered weak callbacks.
+ remembered_set.ReleaseCustomCallbacks();
+ }
+#endif // defined(CPPGC_YOUNG_GENERATION)
+
+ MarkingWorklists::WeakCallbackItem item;
MarkingWorklists::WeakCallbackWorklist::Local& local =
mutator_marking_state_.weak_callback_worklist();
while (local.Pop(&item)) {
item.callback(broker, item.parameter);
+#if defined(CPPGC_YOUNG_GENERATION)
+ heap().remembered_set().AddWeakCallback(item);
+#endif // defined(CPPGC_YOUNG_GENERATION)
}
// Weak callbacks should not add any new objects for marking.
@@ -372,9 +372,13 @@ void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
heap().stats_collector(), StatsCollector::kMarkVisitStack);
heap().stack()->IteratePointers(&stack_visitor());
}
+#if defined(CPPGC_YOUNG_GENERATION)
if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
- VisitRememberedSlots(heap(), mutator_marking_state_);
+ StatsCollector::EnabledScope stats_scope(
+ heap().stats_collector(), StatsCollector::kMarkVisitRememberedSets);
+ heap().remembered_set().Visit(visitor(), mutator_marking_state_);
}
+#endif // defined(CPPGC_YOUNG_GENERATION)
}
bool MarkerBase::VisitCrossThreadPersistentsIfNeeded() {
@@ -434,6 +438,10 @@ bool MarkerBase::CancelConcurrentMarkingIfNeeded() {
concurrent_marker_->Cancel();
concurrent_marking_active_ = false;
+ // Concurrent markers may have pushed some "leftover" in-construction objects
+ // after flushing in EnterAtomicPause.
+ HandleNotFullyConstructedObjects();
+ DCHECK(marking_worklists_.not_fully_constructed_worklist()->IsEmpty());
return true;
}
@@ -618,9 +626,8 @@ void MarkerBase::WaitForConcurrentMarkingForTesting() {
concurrent_marker_->JoinForTesting();
}
-Marker::Marker(Key key, HeapBase& heap, cppgc::Platform* platform,
- MarkingConfig config)
- : MarkerBase(key, heap, platform, config),
+Marker::Marker(HeapBase& heap, cppgc::Platform* platform, MarkingConfig config)
+ : MarkerBase(heap, platform, config),
marking_visitor_(heap, mutator_marking_state_),
conservative_marking_visitor_(heap, mutator_marking_state_,
marking_visitor_) {
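With MarkerFactory removed, constructing a marker and starting marking are now two explicit steps, as the heap.cc hunk earlier in this patch shows. A compressed sketch of the resulting lifecycle; the variables are assumed to be in scope and the later phases are listed as comments only, following the call sequence documented in marker.h:

// `heap_base`, `platform` and `marking_config` are assumed to be in scope.
auto marker =
    std::make_unique<Marker>(heap_base, platform, marking_config);
marker->StartMarking();  // previously triggered implicitly by MarkerFactory
// ... incremental steps via AdvanceMarkingWithLimits(), then at the pause:
// marker->EnterAtomicPause(stack_state);
// marker->AdvanceMarkingWithLimits();
// marker->LeaveAtomicPause();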
diff --git a/deps/v8/src/heap/cppgc/marker.h b/deps/v8/src/heap/cppgc/marker.h
index 1f76583177..d990dcaed0 100644
--- a/deps/v8/src/heap/cppgc/marker.h
+++ b/deps/v8/src/heap/cppgc/marker.h
@@ -24,12 +24,10 @@ namespace cppgc {
namespace internal {
class HeapBase;
-class MarkerFactory;
// Marking algorithm. Example for a valid call sequence creating the marking
// phase:
-// 1. StartMarking() [Called implicitly when creating a Marker using
-// MarkerFactory]
+// 1. StartMarking()
// 2. AdvanceMarkingWithLimits() [Optional, depending on environment.]
// 3. EnterAtomicPause()
// 4. AdvanceMarkingWithLimits()
@@ -87,6 +85,10 @@ class V8_EXPORT_PRIVATE MarkerBase {
// objects to be marked and merely updates marking states if needed.
void LeaveAtomicPause();
+ // Initialize marking according to the given config. This method will
+ // trigger incremental/concurrent marking if needed.
+ void StartMarking();
+
// Combines:
// - EnterAtomicPause()
// - AdvanceMarkingWithLimits()
@@ -141,17 +143,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
static constexpr v8::base::TimeDelta kMaximumIncrementalStepDuration =
v8::base::TimeDelta::FromMilliseconds(2);
- class Key {
- private:
- Key() = default;
- friend class MarkerFactory;
- };
-
- MarkerBase(Key, HeapBase&, cppgc::Platform*, MarkingConfig);
-
- // Initialize marking according to the given config. This method will
- // trigger incremental/concurrent marking if needed.
- void StartMarking();
+ MarkerBase(HeapBase&, cppgc::Platform*, MarkingConfig);
virtual cppgc::Visitor& visitor() = 0;
virtual ConservativeTracingVisitor& conservative_visitor() = 0;
@@ -173,6 +165,8 @@ class V8_EXPORT_PRIVATE MarkerBase {
bool CancelConcurrentMarkingIfNeeded();
+ void HandleNotFullyConstructedObjects();
+
HeapBase& heap_;
MarkingConfig config_ = MarkingConfig::Default();
@@ -193,27 +187,11 @@ class V8_EXPORT_PRIVATE MarkerBase {
bool main_marking_disabled_for_testing_{false};
bool visited_cross_thread_persistents_in_atomic_pause_{false};
-
- friend class MarkerFactory;
-};
-
-class V8_EXPORT_PRIVATE MarkerFactory {
- public:
- template <typename T, typename... Args>
- static std::unique_ptr<T> CreateAndStartMarking(Args&&... args) {
- static_assert(std::is_base_of<MarkerBase, T>::value,
- "MarkerFactory can only create subclasses of MarkerBase");
- std::unique_ptr<T> marker =
- std::make_unique<T>(MarkerBase::Key(), std::forward<Args>(args)...);
- marker->StartMarking();
- return marker;
- }
};
class V8_EXPORT_PRIVATE Marker final : public MarkerBase {
public:
- Marker(Key, HeapBase&, cppgc::Platform*,
- MarkingConfig = MarkingConfig::Default());
+ Marker(HeapBase&, cppgc::Platform*, MarkingConfig = MarkingConfig::Default());
protected:
cppgc::Visitor& visitor() final { return marking_visitor_; }
diff --git a/deps/v8/src/heap/cppgc/marking-state.h b/deps/v8/src/heap/cppgc/marking-state.h
index 513c781b96..b550d4e354 100644
--- a/deps/v8/src/heap/cppgc/marking-state.h
+++ b/deps/v8/src/heap/cppgc/marking-state.h
@@ -23,8 +23,7 @@ namespace internal {
// C++ marking implementation.
class MarkingStateBase {
public:
- inline MarkingStateBase(HeapBase& heap, MarkingWorklists&,
- CompactionWorklists*);
+ inline MarkingStateBase(HeapBase&, MarkingWorklists&);
MarkingStateBase(const MarkingStateBase&) = delete;
MarkingStateBase& operator=(const MarkingStateBase&) = delete;
@@ -34,6 +33,86 @@ class MarkingStateBase {
inline void PushMarked(HeapObjectHeader&, TraceDescriptor desc);
+ void Publish() { marking_worklist_.Publish(); }
+
+ MarkingWorklists::MarkingWorklist::Local& marking_worklist() {
+ return marking_worklist_;
+ }
+ MarkingWorklists::NotFullyConstructedWorklist&
+ not_fully_constructed_worklist() {
+ return not_fully_constructed_worklist_;
+ }
+
+ protected:
+ inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
+
+ inline bool MarkNoPush(HeapObjectHeader&);
+
+ HeapBase& heap_;
+
+ MarkingWorklists::MarkingWorklist::Local marking_worklist_;
+ MarkingWorklists::NotFullyConstructedWorklist&
+ not_fully_constructed_worklist_;
+};
+
+MarkingStateBase::MarkingStateBase(HeapBase& heap,
+ MarkingWorklists& marking_worklists)
+ : heap_(heap),
+ marking_worklist_(marking_worklists.marking_worklist()),
+ not_fully_constructed_worklist_(
+ *marking_worklists.not_fully_constructed_worklist()) {}
+
+void MarkingStateBase::MarkAndPush(const void* object, TraceDescriptor desc) {
+ DCHECK_NOT_NULL(object);
+ MarkAndPush(
+ HeapObjectHeader::FromObject(const_cast<void*>(desc.base_object_payload)),
+ desc);
+}
+
+void MarkingStateBase::MarkAndPush(HeapObjectHeader& header,
+ TraceDescriptor desc) {
+ DCHECK_NOT_NULL(desc.callback);
+
+ if (header.IsInConstruction<AccessMode::kAtomic>()) {
+ not_fully_constructed_worklist_.Push<AccessMode::kAtomic>(&header);
+ } else if (MarkNoPush(header)) {
+ PushMarked(header, desc);
+ }
+}
+
+bool MarkingStateBase::MarkNoPush(HeapObjectHeader& header) {
+ // A GC should only mark the objects that belong in its heap.
+ DCHECK_EQ(&heap_, &BasePage::FromPayload(&header)->heap());
+ // Never mark free space objects. This would e.g. hint to marking a promptly
+ // freed backing store.
+ DCHECK(!header.IsFree<AccessMode::kAtomic>());
+ return header.TryMarkAtomic();
+}
+
+void MarkingStateBase::MarkAndPush(HeapObjectHeader& header) {
+ MarkAndPush(
+ header,
+ {header.ObjectStart(),
+ GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
+}
+
+void MarkingStateBase::PushMarked(HeapObjectHeader& header,
+ TraceDescriptor desc) {
+ DCHECK(header.IsMarked<AccessMode::kAtomic>());
+ DCHECK(!header.IsInConstruction<AccessMode::kAtomic>());
+ DCHECK_NOT_NULL(desc.callback);
+
+ marking_worklist_.Push(desc);
+}
+
+class BasicMarkingState : public MarkingStateBase {
+ public:
+ inline BasicMarkingState(HeapBase& heap, MarkingWorklists&,
+ CompactionWorklists*);
+
+ BasicMarkingState(const BasicMarkingState&) = delete;
+ BasicMarkingState& operator=(const BasicMarkingState&) = delete;
+
inline void RegisterWeakReferenceIfNeeded(const void*, TraceDescriptor,
WeakCallback, const void*);
inline void RegisterWeakCallback(WeakCallback, const void*);
@@ -58,7 +137,7 @@ class MarkingStateBase {
size_t marked_bytes() const { return marked_bytes_; }
void Publish() {
- marking_worklist_.Publish();
+ MarkingStateBase::Publish();
previously_not_fully_constructed_worklist_.Publish();
weak_callback_worklist_.Publish();
write_barrier_worklist_.Publish();
@@ -68,13 +147,6 @@ class MarkingStateBase {
if (IsCompactionEnabled()) movable_slots_worklist_->Publish();
}
- MarkingWorklists::MarkingWorklist::Local& marking_worklist() {
- return marking_worklist_;
- }
- MarkingWorklists::NotFullyConstructedWorklist&
- not_fully_constructed_worklist() {
- return not_fully_constructed_worklist_;
- }
MarkingWorklists::PreviouslyNotFullyConstructedWorklist::Local&
previously_not_fully_constructed_worklist() {
return previously_not_fully_constructed_worklist_;
@@ -121,21 +193,12 @@ class MarkingStateBase {
void set_in_atomic_pause() { in_atomic_pause_ = true; }
protected:
- inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
-
- inline bool MarkNoPush(HeapObjectHeader&);
-
inline void RegisterWeakContainer(HeapObjectHeader&);
inline bool IsCompactionEnabled() const {
return movable_slots_worklist_.get();
}
- HeapBase& heap_;
-
- MarkingWorklists::MarkingWorklist::Local marking_worklist_;
- MarkingWorklists::NotFullyConstructedWorklist&
- not_fully_constructed_worklist_;
MarkingWorklists::PreviouslyNotFullyConstructedWorklist::Local
previously_not_fully_constructed_worklist_;
MarkingWorklists::WeakCallbackWorklist::Local weak_callback_worklist_;
@@ -160,13 +223,10 @@ class MarkingStateBase {
bool in_atomic_pause_ = false;
};
-MarkingStateBase::MarkingStateBase(HeapBase& heap,
- MarkingWorklists& marking_worklists,
- CompactionWorklists* compaction_worklists)
- : heap_(heap),
- marking_worklist_(marking_worklists.marking_worklist()),
- not_fully_constructed_worklist_(
- *marking_worklists.not_fully_constructed_worklist()),
+BasicMarkingState::BasicMarkingState(HeapBase& heap,
+ MarkingWorklists& marking_worklists,
+ CompactionWorklists* compaction_worklists)
+ : MarkingStateBase(heap, marking_worklists),
previously_not_fully_constructed_worklist_(
marking_worklists.previously_not_fully_constructed_worklist()),
weak_callback_worklist_(marking_worklists.weak_callback_worklist()),
@@ -187,53 +247,9 @@ MarkingStateBase::MarkingStateBase(HeapBase& heap,
}
}
-void MarkingStateBase::MarkAndPush(const void* object, TraceDescriptor desc) {
- DCHECK_NOT_NULL(object);
- MarkAndPush(
- HeapObjectHeader::FromObject(const_cast<void*>(desc.base_object_payload)),
- desc);
-}
-
-void MarkingStateBase::MarkAndPush(HeapObjectHeader& header,
- TraceDescriptor desc) {
- DCHECK_NOT_NULL(desc.callback);
-
- if (header.IsInConstruction<AccessMode::kAtomic>()) {
- not_fully_constructed_worklist_.Push<AccessMode::kAtomic>(&header);
- } else if (MarkNoPush(header)) {
- PushMarked(header, desc);
- }
-}
-
-bool MarkingStateBase::MarkNoPush(HeapObjectHeader& header) {
- // A GC should only mark the objects that belong in its heap.
- DCHECK_EQ(&heap_, &BasePage::FromPayload(&header)->heap());
- // Never mark free space objects. This would e.g. hint to marking a promptly
- // freed backing store.
- DCHECK(!header.IsFree<AccessMode::kAtomic>());
- return header.TryMarkAtomic();
-}
-
-void MarkingStateBase::MarkAndPush(HeapObjectHeader& header) {
- MarkAndPush(
- header,
- {header.ObjectStart(),
- GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
-}
-
-void MarkingStateBase::PushMarked(HeapObjectHeader& header,
- TraceDescriptor desc) {
- DCHECK(header.IsMarked<AccessMode::kAtomic>());
- DCHECK(!header.IsInConstruction<AccessMode::kAtomic>());
- DCHECK_NOT_NULL(desc.callback);
-
- marking_worklist_.Push(desc);
-}
-
-void MarkingStateBase::RegisterWeakReferenceIfNeeded(const void* object,
- TraceDescriptor desc,
- WeakCallback weak_callback,
- const void* parameter) {
+void BasicMarkingState::RegisterWeakReferenceIfNeeded(
+ const void* object, TraceDescriptor desc, WeakCallback weak_callback,
+ const void* parameter) {
// Filter out already marked values. The write barrier for WeakMember
// ensures that any newly set value after this point is kept alive and does
// not require the callback.
@@ -245,20 +261,20 @@ void MarkingStateBase::RegisterWeakReferenceIfNeeded(const void* object,
RegisterWeakCallback(weak_callback, parameter);
}
-void MarkingStateBase::RegisterWeakCallback(WeakCallback callback,
- const void* object) {
+void BasicMarkingState::RegisterWeakCallback(WeakCallback callback,
+ const void* object) {
DCHECK_NOT_NULL(callback);
weak_callback_worklist_.Push({callback, object});
}
-void MarkingStateBase::RegisterWeakContainer(HeapObjectHeader& header) {
+void BasicMarkingState::RegisterWeakContainer(HeapObjectHeader& header) {
weak_containers_worklist_.Push<AccessMode::kAtomic>(&header);
}
-void MarkingStateBase::ProcessWeakContainer(const void* object,
- TraceDescriptor desc,
- WeakCallback callback,
- const void* data) {
+void BasicMarkingState::ProcessWeakContainer(const void* object,
+ TraceDescriptor desc,
+ WeakCallback callback,
+ const void* data) {
DCHECK_NOT_NULL(object);
HeapObjectHeader& header =
@@ -291,9 +307,9 @@ void MarkingStateBase::ProcessWeakContainer(const void* object,
}
}
-void MarkingStateBase::ProcessEphemeron(const void* key, const void* value,
- TraceDescriptor value_desc,
- Visitor& visitor) {
+void BasicMarkingState::ProcessEphemeron(const void* key, const void* value,
+ TraceDescriptor value_desc,
+ Visitor& visitor) {
// ProcessEphemeron is not expected to find new ephemerons recursively, which
// would break the main marking loop.
DCHECK(!in_ephemeron_processing_);
@@ -325,7 +341,7 @@ void MarkingStateBase::ProcessEphemeron(const void* key, const void* value,
in_ephemeron_processing_ = false;
}
-void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
+void BasicMarkingState::AccountMarkedBytes(const HeapObjectHeader& header) {
AccountMarkedBytes(
header.IsLargeObject<AccessMode::kAtomic>()
? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
@@ -333,18 +349,18 @@ void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
: header.AllocatedSize<AccessMode::kAtomic>());
}
-void MarkingStateBase::AccountMarkedBytes(size_t marked_bytes) {
+void BasicMarkingState::AccountMarkedBytes(size_t marked_bytes) {
marked_bytes_ += marked_bytes;
}
-class MutatorMarkingState : public MarkingStateBase {
+class MutatorMarkingState : public BasicMarkingState {
public:
MutatorMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists,
CompactionWorklists* compaction_worklists)
- : MarkingStateBase(heap, marking_worklists, compaction_worklists) {}
+ : BasicMarkingState(heap, marking_worklists, compaction_worklists) {}
inline bool MarkNoPush(HeapObjectHeader& header) {
- return MutatorMarkingState::MarkingStateBase::MarkNoPush(header);
+ return MutatorMarkingState::BasicMarkingState::MarkNoPush(header);
}
inline void ReTraceMarkedWeakContainer(cppgc::Visitor&, HeapObjectHeader&);
@@ -440,11 +456,11 @@ void MutatorMarkingState::RecentlyRetracedWeakContainers::Insert(
recently_retraced_cache_[last_used_index_] = header;
}
-class ConcurrentMarkingState : public MarkingStateBase {
+class ConcurrentMarkingState : public BasicMarkingState {
public:
ConcurrentMarkingState(HeapBase& heap, MarkingWorklists& marking_worklists,
CompactionWorklists* compaction_worklists)
- : MarkingStateBase(heap, marking_worklists, compaction_worklists) {}
+ : BasicMarkingState(heap, marking_worklists, compaction_worklists) {}
~ConcurrentMarkingState() { DCHECK_EQ(last_marked_bytes_, marked_bytes_); }
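For orientation, the net effect of the refactoring in this file is a split of the former MarkingStateBase: a slim base keeps only the marking worklist and the not-fully-constructed worklist, while everything else moves to the new BasicMarkingState. An outline of the resulting hierarchy (members abbreviated; not code from the patch):

class MarkingStateBase {        // marking worklist, in-construction worklist,
                                // MarkAndPush()/MarkNoPush()/Publish()
};
class BasicMarkingState : public MarkingStateBase {
                                // weak references/callbacks, weak containers,
                                // ephemerons, write-barrier worklist,
                                // compaction slots, marked-bytes accounting
};
class MutatorMarkingState : public BasicMarkingState { /* main thread */ };
class ConcurrentMarkingState : public BasicMarkingState { /* concurrent */ };

The slim base is what the new cppgc-js CppMarkingState added earlier in this patch holds, so it stays independent of the heavier worklists.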
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.cc b/deps/v8/src/heap/cppgc/marking-visitor.cc
index f2dff286cd..1dff652bd0 100644
--- a/deps/v8/src/heap/cppgc/marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc/marking-visitor.cc
@@ -12,7 +12,7 @@ namespace cppgc {
namespace internal {
MarkingVisitorBase::MarkingVisitorBase(HeapBase& heap,
- MarkingStateBase& marking_state)
+ BasicMarkingState& marking_state)
: marking_state_(marking_state) {}
void MarkingVisitorBase::Visit(const void* object, TraceDescriptor desc) {
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.h b/deps/v8/src/heap/cppgc/marking-visitor.h
index 4692b32025..302c0d262b 100644
--- a/deps/v8/src/heap/cppgc/marking-visitor.h
+++ b/deps/v8/src/heap/cppgc/marking-visitor.h
@@ -16,13 +16,13 @@ namespace internal {
class HeapBase;
class HeapObjectHeader;
class Marker;
-class MarkingStateBase;
+class BasicMarkingState;
class MutatorMarkingState;
class ConcurrentMarkingState;
class V8_EXPORT_PRIVATE MarkingVisitorBase : public VisitorBase {
public:
- MarkingVisitorBase(HeapBase&, MarkingStateBase&);
+ MarkingVisitorBase(HeapBase&, BasicMarkingState&);
~MarkingVisitorBase() override = default;
protected:
@@ -35,7 +35,7 @@ class V8_EXPORT_PRIVATE MarkingVisitorBase : public VisitorBase {
void RegisterWeakCallback(WeakCallback, const void*) final;
void HandleMovableReference(const void**) final;
- MarkingStateBase& marking_state_;
+ BasicMarkingState& marking_state_;
};
class V8_EXPORT_PRIVATE MutatorMarkingVisitor : public MarkingVisitorBase {
diff --git a/deps/v8/src/heap/cppgc/page-memory.h b/deps/v8/src/heap/cppgc/page-memory.h
index e5b73318f7..5006b9121b 100644
--- a/deps/v8/src/heap/cppgc/page-memory.h
+++ b/deps/v8/src/heap/cppgc/page-memory.h
@@ -242,7 +242,8 @@ class V8_EXPORT_PRIVATE PageBackend final {
// Returns true if the provided allocator supports committing at the required
// granularity.
inline bool SupportsCommittingGuardPages(PageAllocator& allocator) {
- return kGuardPageSize % allocator.CommitPageSize() == 0;
+ return kGuardPageSize != 0 &&
+ kGuardPageSize % allocator.CommitPageSize() == 0;
}
Address NormalPageMemoryRegion::Lookup(ConstAddress address) const {
diff --git a/deps/v8/src/heap/cppgc/platform.cc b/deps/v8/src/heap/cppgc/platform.cc
index fd769ae469..ba5d2a18d0 100644
--- a/deps/v8/src/heap/cppgc/platform.cc
+++ b/deps/v8/src/heap/cppgc/platform.cc
@@ -16,7 +16,13 @@
namespace cppgc {
namespace internal {
-void Abort() { v8::base::OS::Abort(); }
+void Fatal(const std::string& reason, const SourceLocation& loc) {
+#ifdef DEBUG
+ V8_Fatal(loc.FileName(), static_cast<int>(loc.Line()), "%s", reason.c_str());
+#else // !DEBUG
+ V8_Fatal("%s", reason.c_str());
+#endif // !DEBUG
+}
void FatalOutOfMemoryHandler::operator()(const std::string& reason,
const SourceLocation& loc) const {
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
index 1a4c60e3a2..5cf6435390 100644
--- a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
@@ -16,17 +16,14 @@
namespace cppgc {
namespace internal {
-// static
-void PreFinalizerRegistrationDispatcher::RegisterPrefinalizer(
- PreFinalizer pre_finalizer) {
- BasePage::FromPayload(pre_finalizer.object)
- ->heap()
- .prefinalizer_handler()
- ->RegisterPrefinalizer(pre_finalizer);
+PrefinalizerRegistration::PrefinalizerRegistration(void* object,
+ Callback callback) {
+ auto* page = BasePage::FromPayload(object);
+ DCHECK(!page->space().is_compactable());
+ page->heap().prefinalizer_handler()->RegisterPrefinalizer({object, callback});
}
-bool PreFinalizerRegistrationDispatcher::PreFinalizer::operator==(
- const PreFinalizer& other) const {
+bool PreFinalizer::operator==(const PreFinalizer& other) const {
return (object == other.object) && (callback == other.callback);
}
@@ -36,7 +33,7 @@ PreFinalizerHandler::PreFinalizerHandler(HeapBase& heap)
#ifdef DEBUG
,
creation_thread_id_(v8::base::OS::GetCurrentThreadId())
-#endif
+#endif // DEBUG
{
}
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.h b/deps/v8/src/heap/cppgc/prefinalizer-handler.h
index bc17c99b18..e3850174db 100644
--- a/deps/v8/src/heap/cppgc/prefinalizer-handler.h
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_CPPGC_PREFINALIZER_HANDLER_H_
#define V8_HEAP_CPPGC_PREFINALIZER_HANDLER_H_
+#include <utility>
#include <vector>
#include "include/cppgc/prefinalizer.h"
@@ -14,11 +15,17 @@ namespace internal {
class HeapBase;
+struct PreFinalizer final {
+ using Callback = PrefinalizerRegistration::Callback;
+
+ void* object;
+ Callback callback;
+
+ bool operator==(const PreFinalizer& other) const;
+};
+
class PreFinalizerHandler final {
public:
- using PreFinalizer =
- cppgc::internal::PreFinalizerRegistrationDispatcher::PreFinalizer;
-
explicit PreFinalizerHandler(HeapBase& heap);
void RegisterPrefinalizer(PreFinalizer pre_finalizer);
diff --git a/deps/v8/src/heap/cppgc/remembered-set.cc b/deps/v8/src/heap/cppgc/remembered-set.cc
new file mode 100644
index 0000000000..8843219745
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/remembered-set.cc
@@ -0,0 +1,135 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/remembered-set.h"
+
+#include <algorithm>
+
+#include "include/cppgc/visitor.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/marking-state.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+// Visits the remembered set that was recorded in the generational barrier.
+void VisitRememberedSlots(std::set<void*> slots, const HeapBase& heap,
+ MutatorMarkingState& mutator_marking_state) {
+ for (void* slot : slots) {
+ // Slot must always point to a valid, not freed object.
+ auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
+ ->ObjectHeaderFromInnerAddress(slot);
+ // The age checking in the generational barrier is imprecise, since a card
+ // may have mixed young/old objects. Check here precisely if the object is
+ // old.
+ if (slot_header.IsYoung()) continue;
+ // The design of young generation requires collections to be executed at the
+ // top level (with the guarantee that no objects are currently in
+ // construction). This can be ensured by running young GCs from safe points
+ // or by reintroducing nested allocation scopes that avoid finalization.
+ DCHECK(!slot_header.template IsInConstruction<AccessMode::kNonAtomic>());
+
+ void* value = *reinterpret_cast<void**>(slot);
+ // Slot could be updated to nullptr or kSentinelPointer by the mutator.
+ if (value == kSentinelPointer || value == nullptr) continue;
+
+#if DEBUG
+ // Check that the slot can not point to a freed object.
+ HeapObjectHeader& header =
+ BasePage::FromPayload(value)->ObjectHeaderFromInnerAddress(value);
+ DCHECK(!header.IsFree());
+#endif
+
+ mutator_marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
+ }
+}
+
+// Visits source objects that were recorded in the generational barrier for
+// slots.
+void VisitRememberedSourceObjects(
+ std::set<HeapObjectHeader*> remembered_source_objects, Visitor& visitor) {
+ for (HeapObjectHeader* source_hoh : remembered_source_objects) {
+ DCHECK(source_hoh);
+ // The age checking in the generational barrier is imprecise, since a card
+ // may have mixed young/old objects. Check here precisely if the object is
+ // old.
+ if (source_hoh->IsYoung()) continue;
+ // The design of young generation requires collections to be executed at the
+ // top level (with the guarantee that no objects are currently in
+ // construction). This can be ensured by running young GCs from safe points
+ // or by reintroducing nested allocation scopes that avoid finalization.
+ DCHECK(!source_hoh->template IsInConstruction<AccessMode::kNonAtomic>());
+
+ const TraceCallback trace_callback =
+ GlobalGCInfoTable::GCInfoFromIndex(source_hoh->GetGCInfoIndex()).trace;
+
+ // Process eagerly to avoid reaccounting.
+ trace_callback(&visitor, source_hoh->ObjectStart());
+ }
+}
+
+} // namespace
+
+void OldToNewRememberedSet::AddSlot(void* slot) {
+ remembered_slots_.insert(slot);
+}
+
+void OldToNewRememberedSet::AddSourceObject(HeapObjectHeader& hoh) {
+ remembered_source_objects_.insert(&hoh);
+}
+
+void OldToNewRememberedSet::AddWeakCallback(WeakCallbackItem item) {
+ // TODO(1029379): WeakCallbacks are also executed for weak collections.
+ // Consider splitting weak callbacks into custom weak callbacks and ones for
+ // collections.
+ remembered_weak_callbacks_.insert(item);
+}
+
+void OldToNewRememberedSet::InvalidateRememberedSlotsInRange(void* begin,
+ void* end) {
+ // TODO(1029379): The two binary searches can be optimized with a custom algorithm.
+ auto from = remembered_slots_.lower_bound(begin),
+ to = remembered_slots_.lower_bound(end);
+ remembered_slots_.erase(from, to);
+#if defined(ENABLE_SLOW_DCHECKS)
+ // Check that no remembered slots are referring to the freed area.
+ DCHECK(std::none_of(remembered_slots_.begin(), remembered_slots_.end(),
+ [begin, end](void* slot) {
+ void* value = *reinterpret_cast<void**>(slot);
+ return begin <= value && value < end;
+ }));
+#endif // defined(ENABLE_SLOW_DCHECKS)
+}
+
+void OldToNewRememberedSet::InvalidateRememberedSourceObject(
+ HeapObjectHeader& header) {
+ remembered_source_objects_.erase(&header);
+}
+
+void OldToNewRememberedSet::Visit(Visitor& visitor,
+ MutatorMarkingState& marking_state) {
+ VisitRememberedSlots(remembered_slots_, heap_, marking_state);
+ VisitRememberedSourceObjects(remembered_source_objects_, visitor);
+}
+
+void OldToNewRememberedSet::ExecuteCustomCallbacks(LivenessBroker broker) {
+ for (const auto& callback : remembered_weak_callbacks_) {
+ callback.callback(broker, callback.parameter);
+ }
+}
+
+void OldToNewRememberedSet::ReleaseCustomCallbacks() {
+ remembered_weak_callbacks_.clear();
+}
+
+void OldToNewRememberedSet::Reset() {
+ remembered_slots_.clear();
+ remembered_source_objects_.clear();
+}
+
+} // namespace internal
+} // namespace cppgc
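InvalidateRememberedSlotsInRange() relies on std::set's address ordering: two lower_bound() lookups bracket exactly the recorded slots that lie inside the freed or shrunk region, and a single erase(first, last) drops them. A standalone example of the same pattern, using a plain std::set<void*> just like remembered_slots_:

#include <cassert>
#include <set>

void InvalidateRange(std::set<void*>& slots, void* begin, void* end) {
  // [lower_bound(begin), lower_bound(end)) is precisely the set of recorded
  // slots whose address falls inside [begin, end).
  slots.erase(slots.lower_bound(begin), slots.lower_bound(end));
}

int main() {
  char object[64];
  std::set<void*> slots = {&object[0], &object[16], &object[63]};
  InvalidateRange(slots, &object[0], &object[0] + sizeof(object));
  assert(slots.empty());  // all slots inside the freed object are gone
}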
diff --git a/deps/v8/src/heap/cppgc/remembered-set.h b/deps/v8/src/heap/cppgc/remembered-set.h
new file mode 100644
index 0000000000..eb8de6da8e
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/remembered-set.h
@@ -0,0 +1,68 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_REMEMBERED_SET_H_
+#define V8_HEAP_CPPGC_REMEMBERED_SET_H_
+
+#include <set>
+
+#include "src/base/macros.h"
+#include "src/heap/cppgc/marking-worklists.h"
+
+namespace cppgc {
+
+class Visitor;
+class LivenessBroker;
+
+namespace internal {
+
+class HeapBase;
+class HeapObjectHeader;
+class MutatorMarkingState;
+
+class V8_EXPORT_PRIVATE OldToNewRememberedSet final {
+ public:
+ using WeakCallbackItem = MarkingWorklists::WeakCallbackItem;
+
+ explicit OldToNewRememberedSet(const HeapBase& heap)
+ : heap_(heap), remembered_weak_callbacks_(compare_parameter) {}
+
+ OldToNewRememberedSet(const OldToNewRememberedSet&) = delete;
+ OldToNewRememberedSet& operator=(const OldToNewRememberedSet&) = delete;
+
+ void AddSlot(void* slot);
+ void AddSourceObject(HeapObjectHeader& source_hoh);
+ void AddWeakCallback(WeakCallbackItem);
+
+ void InvalidateRememberedSlotsInRange(void* begin, void* end);
+ void InvalidateRememberedSourceObject(HeapObjectHeader& source_hoh);
+
+ void Visit(Visitor&, MutatorMarkingState&);
+
+ void ExecuteCustomCallbacks(LivenessBroker);
+ void ReleaseCustomCallbacks();
+
+ void Reset();
+
+ private:
+ friend class MinorGCTest;
+
+ static constexpr struct {
+ bool operator()(const WeakCallbackItem& lhs,
+ const WeakCallbackItem& rhs) const {
+ return lhs.parameter < rhs.parameter;
+ }
+ } compare_parameter{};
+
+ const HeapBase& heap_;
+ std::set<void*> remembered_slots_;
+ std::set<HeapObjectHeader*> remembered_source_objects_;
+ std::set<WeakCallbackItem, decltype(compare_parameter)>
+ remembered_weak_callbacks_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_REMEMBERED_SET_H_
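remembered_weak_callbacks_ is keyed by the callback's parameter through the stateless compare_parameter object, so re-registering a weak callback for the same object on a later cycle does not create duplicates. A standalone sketch of that comparator pattern, with illustrative names rather than V8's:

#include <cassert>
#include <set>

struct CallbackItem {
  void (*callback)(int);
  const void* parameter;
};

static constexpr struct {
  bool operator()(const CallbackItem& lhs, const CallbackItem& rhs) const {
    return lhs.parameter < rhs.parameter;  // dedupe by parameter only
  }
} compare_parameter{};

int main() {
  std::set<CallbackItem, decltype(compare_parameter)> items(compare_parameter);
  int a = 0;
  items.insert({nullptr, &a});
  items.insert({nullptr, &a});  // same parameter, treated as a duplicate
  assert(items.size() == 1);
}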
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index b063b26f04..0aa12a614a 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -737,8 +737,6 @@ class Sweeper::SweeperImpl final {
if (config.sweeping_type == SweepingConfig::SweepingType::kAtomic) {
Finish();
} else {
- DCHECK_EQ(SweepingConfig::SweepingType::kIncrementalAndConcurrent,
- config.sweeping_type);
ScheduleIncrementalSweeping();
ScheduleConcurrentSweeping();
}
@@ -811,10 +809,25 @@ class Sweeper::SweeperImpl final {
NotifyDone();
}
+ void FinishIfOutOfWork() {
+ if (is_in_progress_ && !is_sweeping_on_mutator_thread_ &&
+ concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid() &&
+ !concurrent_sweeper_handle_->IsActive()) {
+ // At this point we know that the concurrent sweeping task has run
+ // out of work: all pages are swept. The main thread still needs to finish
+ // sweeping though.
+ DCHECK(std::all_of(space_states_.begin(), space_states_.end(),
+ [](const SpaceState& state) {
+ return state.unswept_pages.IsEmpty();
+ }));
+ FinishIfRunning();
+ }
+ }
+
void Finish() {
DCHECK(is_in_progress_);
- MutatorThreadSweepingScope sweeping_in_progresss(*this);
+ MutatorThreadSweepingScope sweeping_in_progress(*this);
// First, call finalizers on the mutator thread.
SweepFinalizer finalizer(platform_, config_.free_memory_handling);
@@ -953,6 +966,10 @@ class Sweeper::SweeperImpl final {
void ScheduleConcurrentSweeping() {
DCHECK(platform_);
+ if (config_.sweeping_type !=
+ SweepingConfig::SweepingType::kIncrementalAndConcurrent)
+ return;
+
concurrent_sweeper_handle_ =
platform_->PostJob(cppgc::TaskPriority::kUserVisible,
std::make_unique<ConcurrentSweepTask>(
@@ -999,6 +1016,7 @@ void Sweeper::Start(SweepingConfig config) {
impl_->Start(config, heap_.platform());
}
void Sweeper::FinishIfRunning() { impl_->FinishIfRunning(); }
+void Sweeper::FinishIfOutOfWork() { impl_->FinishIfOutOfWork(); }
void Sweeper::WaitForConcurrentSweepingForTesting() {
impl_->WaitForConcurrentSweepingForTesting();
}
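FinishIfOutOfWork() gives the embedder a cheap way to finalize sweeping as soon as the concurrent job reports it has no active work left. How it gets called is outside this hunk; a hypothetical periodic task wiring, with names and scheduling that are assumptions rather than part of the patch, could look like this:

// Hypothetical wiring: a foreground task posted periodically while sweeping
// is running, finalizing it once the concurrent sweeping job goes idle.
class FinishSweepingIfOutOfWorkTask final : public cppgc::Task {
 public:
  explicit FinishSweepingIfOutOfWorkTask(Sweeper& sweeper)
      : sweeper_(sweeper) {}

  void Run() override {
    // Cheap no-op unless sweeping is in progress, the mutator thread is not
    // already sweeping, and the concurrent sweeper has run out of pages.
    sweeper_.FinishIfOutOfWork();
  }

 private:
  Sweeper& sweeper_;
};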
diff --git a/deps/v8/src/heap/cppgc/sweeper.h b/deps/v8/src/heap/cppgc/sweeper.h
index 2254453d7a..845dfbbfc1 100644
--- a/deps/v8/src/heap/cppgc/sweeper.h
+++ b/deps/v8/src/heap/cppgc/sweeper.h
@@ -48,6 +48,7 @@ class V8_EXPORT_PRIVATE Sweeper final {
// Sweeper::Start assumes the heap holds no linear allocation buffers.
void Start(SweepingConfig);
void FinishIfRunning();
+ void FinishIfOutOfWork();
void NotifyDoneIfNeeded();
// SweepForAllocationIfRunning sweeps the given |space| until a slot that can
// fit an allocation of size |size| is found. Returns true if a slot was
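
FinishIfRunning() forces sweeping to complete on the mutator thread, while the new FinishIfOutOfWork() entry point only finalizes once the concurrent job has drained its work. An illustrative guard with the same shape, using stand-in types (JobHandleLike is hypothetical, not v8::JobHandle):

#include <memory>

struct JobHandleLike {
  bool valid = true;    // Job was posted and not yet joined.
  bool active = false;  // Worker tasks are still running or scheduled.
};

struct SweeperSketch {
  bool in_progress = true;
  bool sweeping_on_mutator_thread = false;
  std::unique_ptr<JobHandleLike> concurrent_job;

  void Finish() { in_progress = false; }  // Mutator-side finalization only.

  // Finish only when a concurrent job exists but has gone idle, i.e. every
  // page was already swept off-thread.
  void FinishIfOutOfWork() {
    if (in_progress && !sweeping_on_mutator_thread && concurrent_job &&
        concurrent_job->valid && !concurrent_job->active) {
      Finish();
    }
  }
};

int main() {
  SweeperSketch sweeper;
  sweeper.concurrent_job = std::make_unique<JobHandleLike>();
  sweeper.FinishIfOutOfWork();  // Idle job: the mutator finalizes.
  return sweeper.in_progress ? 1 : 0;
}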
diff --git a/deps/v8/src/heap/cppgc/testing.cc b/deps/v8/src/heap/cppgc/testing.cc
index 0c81d7003b..38e96abf78 100644
--- a/deps/v8/src/heap/cppgc/testing.cc
+++ b/deps/v8/src/heap/cppgc/testing.cc
@@ -54,5 +54,13 @@ void StandaloneTestingHeap::ForceCompactionForNextGarbageCollection() {
.EnableForNextGCForTesting();
}
+bool IsHeapObjectOld(void* object) {
+#if defined(CPPGC_YOUNG_GENERATION)
+ return internal::HeapObjectHeader::FromObject(object).IsMarked();
+#else
+ return true;
+#endif
+}
+
} // namespace testing
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/write-barrier.cc b/deps/v8/src/heap/cppgc/write-barrier.cc
index 007abe3005..c533c353c3 100644
--- a/deps/v8/src/heap/cppgc/write-barrier.cc
+++ b/deps/v8/src/heap/cppgc/write-barrier.cc
@@ -129,6 +129,7 @@ void WriteBarrier::GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
const AgeTable& age_table,
const void* slot,
uintptr_t value_offset) {
+ DCHECK(slot);
// A write during atomic pause (e.g. pre-finalizer) may trigger the slow path
// of the barrier. This is a result of the order of bailouts where not marking
// results in applying the generational barrier.
@@ -136,8 +137,23 @@ void WriteBarrier::GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
if (value_offset > 0 && age_table[value_offset] == AgeTable::Age::kOld)
return;
+
// Record slot.
- local_data.heap_base.remembered_slots().insert(const_cast<void*>(slot));
+ local_data.heap_base.remembered_set().AddSlot((const_cast<void*>(slot)));
+}
+
+// static
+void WriteBarrier::GenerationalBarrierForSourceObjectSlow(
+ const CagedHeapLocalData& local_data, const void* inner_pointer) {
+ DCHECK(inner_pointer);
+
+ auto& object_header =
+ BasePage::FromInnerAddress(&local_data.heap_base, inner_pointer)
+ ->ObjectHeaderFromInnerAddress(inner_pointer);
+
+ // Record the source object.
+ local_data.heap_base.remembered_set().AddSourceObject(
+ const_cast<HeapObjectHeader&>(object_header));
}
#endif // CPPGC_YOUNG_GENERATION
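
GenerationalBarrierSlow above bails out when the written value already lives in the old generation and otherwise records the slot in the new remembered set, so the next minor GC treats that old-to-new pointer as a root. A much-simplified model of that decision (AgeTable and OldToNewRememberedSet are replaced by stand-ins):

#include <set>

enum class Age { kOld, kYoung };

struct RememberedSetSketch {
  std::set<void*> slots;
  void AddSlot(void* slot) { slots.insert(slot); }
};

void GenerationalBarrierSketch(RememberedSetSketch& remembered_set, void* slot,
                               Age value_age) {
  // Pointers to old objects never need to be revisited by a minor GC.
  if (value_age == Age::kOld) return;
  // Record the slot so the minor GC scans it as a root into the young
  // generation.
  remembered_set.AddSlot(slot);
}

int main() {
  RememberedSetSketch remembered_set;
  int old_object_field = 0;  // Pretend this slot lives in an old object.
  GenerationalBarrierSketch(remembered_set, &old_object_field, Age::kYoung);
  GenerationalBarrierSketch(remembered_set, &old_object_field, Age::kOld);
  return remembered_set.slots.size() == 1u ? 0 : 1;
}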
diff --git a/deps/v8/src/heap/embedder-tracing-inl.h b/deps/v8/src/heap/embedder-tracing-inl.h
new file mode 100644
index 0000000000..9a1c201f41
--- /dev/null
+++ b/deps/v8/src/heap/embedder-tracing-inl.h
@@ -0,0 +1,46 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef V8_HEAP_EMBEDDER_TRACING_INL_H_
+#define V8_HEAP_EMBEDDER_TRACING_INL_H_
+
+#include "src/heap/embedder-tracing.h"
+#include "src/objects/embedder-data-slot.h"
+#include "src/objects/js-objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+bool LocalEmbedderHeapTracer::ExtractWrappableInfo(
+ Isolate* isolate, JSObject js_object,
+ const WrapperDescriptor& wrapper_descriptor, WrapperInfo* info) {
+ DCHECK(js_object.MayHaveEmbedderFields());
+ if (js_object.GetEmbedderFieldCount() < 2) return false;
+
+ return ExtractWrappableInfo(
+ isolate, wrapper_descriptor,
+ EmbedderDataSlot(js_object, wrapper_descriptor.wrappable_type_index),
+ EmbedderDataSlot(js_object, wrapper_descriptor.wrappable_instance_index),
+ info);
+}
+
+// static
+bool LocalEmbedderHeapTracer::ExtractWrappableInfo(
+ Isolate* isolate, const WrapperDescriptor& wrapper_descriptor,
+ const EmbedderDataSlot& type_slot, const EmbedderDataSlot& instance_slot,
+ WrapperInfo* info) {
+ if (type_slot.ToAlignedPointer(isolate, &info->first) && info->first &&
+ instance_slot.ToAlignedPointer(isolate, &info->second) && info->second) {
+ return (wrapper_descriptor.embedder_id_for_garbage_collected ==
+ WrapperDescriptor::kUnknownEmbedderId) ||
+ (*static_cast<uint16_t*>(info->first) ==
+ wrapper_descriptor.embedder_id_for_garbage_collected);
+ }
+ return false;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_EMBEDDER_TRACING_INL_H_
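
The inlined ExtractWrappableInfo checks that both embedder fields hold non-null aligned pointers and, when an embedder id is configured, that the first 16 bits behind the type-info pointer match it. A sketch of that check with plain pointers instead of EmbedderDataSlot (kUnknownEmbedderIdSketch is an assumed sentinel, not necessarily the real WrapperDescriptor constant):

#include <cstdint>
#include <utility>

using WrapperInfoSketch = std::pair<void*, void*>;  // {type info, instance}

constexpr uint16_t kUnknownEmbedderIdSketch = UINT16_MAX;

bool ExtractWrappableInfoSketch(void* type_field, void* instance_field,
                                uint16_t embedder_id_for_garbage_collected,
                                WrapperInfoSketch* info) {
  if (type_field == nullptr || instance_field == nullptr) return false;
  info->first = type_field;
  info->second = instance_field;
  // Either no embedder id filter is configured, or the id stored in the
  // first 16 bits of the type info must match.
  return embedder_id_for_garbage_collected == kUnknownEmbedderIdSketch ||
         *static_cast<uint16_t*>(type_field) ==
             embedder_id_for_garbage_collected;
}

int main() {
  uint16_t type_info[] = {0x1234};  // Leading 16 bits hold the embedder id.
  int instance = 0;
  WrapperInfoSketch info;
  return ExtractWrappableInfoSketch(type_info, &instance, 0x1234, &info) ? 0
                                                                         : 1;
}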
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index 08738af3f0..a61b89c5dc 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -6,10 +6,11 @@
#include "include/v8-cppgc.h"
#include "src/base/logging.h"
+#include "src/common/allow-deprecated.h"
#include "src/handles/global-handles.h"
+#include "src/heap/embedder-tracing-inl.h"
#include "src/heap/gc-tracer.h"
-#include "src/objects/embedder-data-slot.h"
-#include "src/objects/js-objects-inl.h"
+#include "src/heap/marking-worklist-inl.h"
namespace v8 {
namespace internal {
@@ -41,13 +42,21 @@ CppHeap::GarbageCollectionFlags ConvertTraceFlags(
}
} // namespace
+void LocalEmbedderHeapTracer::PrepareForTrace(
+ EmbedderHeapTracer::TraceFlags flags) {
+ if (cpp_heap_)
+ cpp_heap()->InitializeTracing(
+ cppgc::internal::GarbageCollector::Config::CollectionType::kMajor,
+ ConvertTraceFlags(flags));
+}
+
void LocalEmbedderHeapTracer::TracePrologue(
EmbedderHeapTracer::TraceFlags flags) {
if (!InUse()) return;
embedder_worklist_empty_ = false;
if (cpp_heap_)
- cpp_heap()->TracePrologue(ConvertTraceFlags(flags));
+ cpp_heap()->StartTracing();
else
remote_tracer_->TracePrologue(flags);
}
@@ -104,51 +113,17 @@ bool LocalEmbedderHeapTracer::IsRemoteTracingDone() {
: remote_tracer_->IsTracingDone());
}
-void LocalEmbedderHeapTracer::SetEmbedderStackStateForNextFinalization(
- EmbedderHeapTracer::EmbedderStackState stack_state) {
- if (!InUse()) return;
-
- embedder_stack_state_ = stack_state;
- if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state)
- NotifyEmptyEmbedderStack();
-}
-
-namespace {
-
-bool ExtractWrappableInfo(Isolate* isolate, JSObject js_object,
- const WrapperDescriptor& wrapper_descriptor,
- LocalEmbedderHeapTracer::WrapperInfo* info) {
- DCHECK(js_object.IsApiWrapper());
- if (js_object.GetEmbedderFieldCount() < 2) return false;
-
- if (EmbedderDataSlot(js_object, wrapper_descriptor.wrappable_type_index)
- .ToAlignedPointerSafe(isolate, &info->first) &&
- info->first &&
- EmbedderDataSlot(js_object, wrapper_descriptor.wrappable_instance_index)
- .ToAlignedPointerSafe(isolate, &info->second) &&
- info->second) {
- return (wrapper_descriptor.embedder_id_for_garbage_collected ==
- WrapperDescriptor::kUnknownEmbedderId) ||
- (*static_cast<uint16_t*>(info->first) ==
- wrapper_descriptor.embedder_id_for_garbage_collected);
- }
- return false;
-}
-
-} // namespace
-
LocalEmbedderHeapTracer::ProcessingScope::ProcessingScope(
LocalEmbedderHeapTracer* tracer)
- : tracer_(tracer), wrapper_descriptor_(tracer->wrapper_descriptor()) {
+ : tracer_(tracer), wrapper_descriptor_(tracer->wrapper_descriptor_) {
+ DCHECK(!tracer_->cpp_heap_);
wrapper_cache_.reserve(kWrapperCacheSize);
}
LocalEmbedderHeapTracer::ProcessingScope::~ProcessingScope() {
+ DCHECK(!tracer_->cpp_heap_);
if (!wrapper_cache_.empty()) {
- if (tracer_->cpp_heap_)
- tracer_->cpp_heap()->RegisterV8References(std::move(wrapper_cache_));
- else
- tracer_->remote_tracer_->RegisterV8References(std::move(wrapper_cache_));
+ tracer_->remote_tracer_->RegisterV8References(std::move(wrapper_cache_));
}
}
@@ -164,7 +139,7 @@ LocalEmbedderHeapTracer::ExtractWrapperInfo(Isolate* isolate,
void LocalEmbedderHeapTracer::ProcessingScope::TracePossibleWrapper(
JSObject js_object) {
- DCHECK(js_object.IsApiWrapper());
+ DCHECK(js_object.MayHaveEmbedderFields());
WrapperInfo info;
if (ExtractWrappableInfo(tracer_->isolate_, js_object, wrapper_descriptor_,
&info)) {
@@ -174,11 +149,9 @@ void LocalEmbedderHeapTracer::ProcessingScope::TracePossibleWrapper(
}
void LocalEmbedderHeapTracer::ProcessingScope::FlushWrapperCacheIfFull() {
+ DCHECK(!tracer_->cpp_heap_);
if (wrapper_cache_.size() == wrapper_cache_.capacity()) {
- if (tracer_->cpp_heap_)
- tracer_->cpp_heap()->RegisterV8References(std::move(wrapper_cache_));
- else
- tracer_->remote_tracer_->RegisterV8References(std::move(wrapper_cache_));
+ tracer_->remote_tracer_->RegisterV8References(std::move(wrapper_cache_));
wrapper_cache_.clear();
wrapper_cache_.reserve(kWrapperCacheSize);
}
@@ -213,15 +186,37 @@ void LocalEmbedderHeapTracer::NotifyEmptyEmbedderStack() {
isolate_->global_handles()->NotifyEmptyEmbedderStack();
}
+void LocalEmbedderHeapTracer::EmbedderWriteBarrier(Heap* heap,
+ JSObject js_object) {
+ DCHECK(InUse());
+ DCHECK(js_object.MayHaveEmbedderFields());
+ if (cpp_heap_) {
+ DCHECK_NOT_NULL(heap->mark_compact_collector());
+ const EmbedderDataSlot type_slot(js_object,
+ wrapper_descriptor_.wrappable_type_index);
+ const EmbedderDataSlot instance_slot(
+ js_object, wrapper_descriptor_.wrappable_instance_index);
+ heap->mark_compact_collector()
+ ->local_marking_worklists()
+ ->cpp_marking_state()
+ ->MarkAndPush(type_slot, instance_slot);
+ return;
+ }
+ LocalEmbedderHeapTracer::ProcessingScope scope(this);
+ scope.TracePossibleWrapper(js_object);
+}
+
bool DefaultEmbedderRootsHandler::IsRoot(
const v8::TracedReference<v8::Value>& handle) {
return !tracer_ || tracer_->IsRootForNonTracingGC(handle);
}
+START_ALLOW_USE_DEPRECATED()
bool DefaultEmbedderRootsHandler::IsRoot(
const v8::TracedGlobal<v8::Value>& handle) {
return !tracer_ || tracer_->IsRootForNonTracingGC(handle);
}
+END_ALLOW_USE_DEPRECATED()
void DefaultEmbedderRootsHandler::ResetRoot(
const v8::TracedReference<v8::Value>& handle) {
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 6b08488aa6..72b1fd90e3 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -5,9 +5,12 @@
#ifndef V8_HEAP_EMBEDDER_TRACING_H_
#define V8_HEAP_EMBEDDER_TRACING_H_
+#include <atomic>
+
#include "include/v8-cppgc.h"
#include "include/v8-embedder-heap.h"
#include "include/v8-traced-handle.h"
+#include "src/common/allow-deprecated.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
@@ -23,7 +26,11 @@ class V8_EXPORT_PRIVATE DefaultEmbedderRootsHandler final
: public EmbedderRootsHandler {
public:
bool IsRoot(const v8::TracedReference<v8::Value>& handle) final;
+
+ START_ALLOW_USE_DEPRECATED()
bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) final;
+ END_ALLOW_USE_DEPRECATED()
+
void ResetRoot(const v8::TracedReference<v8::Value>& handle) final;
void SetTracer(EmbedderHeapTracer* tracer) { tracer_ = tracer; }
@@ -74,6 +81,13 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
WrapperCache wrapper_cache_;
};
+ static V8_INLINE bool ExtractWrappableInfo(Isolate*, JSObject,
+ const WrapperDescriptor&,
+ WrapperInfo*);
+ static V8_INLINE bool ExtractWrappableInfo(
+ Isolate*, const WrapperDescriptor&, const EmbedderDataSlot& type_slot,
+ const EmbedderDataSlot& instance_slot, WrapperInfo*);
+
explicit LocalEmbedderHeapTracer(Isolate* isolate) : isolate_(isolate) {}
~LocalEmbedderHeapTracer() {
@@ -91,6 +105,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
void SetRemoteTracer(EmbedderHeapTracer* tracer);
void SetCppHeap(CppHeap* cpp_heap);
+ void PrepareForTrace(EmbedderHeapTracer::TraceFlags flags);
void TracePrologue(EmbedderHeapTracer::TraceFlags flags);
void TraceEpilogue();
void EnterFinalPause();
@@ -102,15 +117,12 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
(IsRemoteTracingDone() && embedder_worklist_empty_);
}
- void SetEmbedderStackStateForNextFinalization(
- EmbedderHeapTracer::EmbedderStackState stack_state);
-
void SetEmbedderWorklistEmpty(bool is_empty) {
embedder_worklist_empty_ = is_empty;
}
void IncreaseAllocatedSize(size_t bytes) {
- remote_stats_.used_size += bytes;
+ remote_stats_.used_size.fetch_add(bytes, std::memory_order_relaxed);
remote_stats_.allocated_size += bytes;
if (remote_stats_.allocated_size >
remote_stats_.allocated_size_limit_for_check) {
@@ -121,13 +133,15 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
}
void DecreaseAllocatedSize(size_t bytes) {
- DCHECK_GE(remote_stats_.used_size, bytes);
- remote_stats_.used_size -= bytes;
+ DCHECK_GE(remote_stats_.used_size.load(std::memory_order_relaxed), bytes);
+ remote_stats_.used_size.fetch_sub(bytes, std::memory_order_relaxed);
}
void StartIncrementalMarkingIfNeeded();
- size_t used_size() const { return remote_stats_.used_size; }
+ size_t used_size() const {
+ return remote_stats_.used_size.load(std::memory_order_relaxed);
+ }
size_t allocated_size() const { return remote_stats_.allocated_size; }
WrapperInfo ExtractWrapperInfo(Isolate* isolate, JSObject js_object);
@@ -149,6 +163,8 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
return embedder_stack_state_;
}
+ void EmbedderWriteBarrier(Heap*, JSObject);
+
private:
static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
@@ -194,7 +210,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
// Used size of objects in bytes reported by the embedder. Updated via
// TraceSummary at the end of tracing and incrementally when the GC is not
// in progress.
- size_t used_size = 0;
+ std::atomic<size_t> used_size{0};
// Total bytes allocated by the embedder. Monotonically
// increasing value. Used to approximate allocation rate.
size_t allocated_size = 0;
@@ -211,26 +227,6 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
friend class EmbedderStackStateScope;
};
-class V8_EXPORT_PRIVATE V8_NODISCARD EmbedderStackStateScope final {
- public:
- EmbedderStackStateScope(LocalEmbedderHeapTracer* local_tracer,
- EmbedderHeapTracer::EmbedderStackState stack_state)
- : local_tracer_(local_tracer),
- old_stack_state_(local_tracer_->embedder_stack_state_) {
- local_tracer_->embedder_stack_state_ = stack_state;
- if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state)
- local_tracer_->NotifyEmptyEmbedderStack();
- }
-
- ~EmbedderStackStateScope() {
- local_tracer_->embedder_stack_state_ = old_stack_state_;
- }
-
- private:
- LocalEmbedderHeapTracer* const local_tracer_;
- const EmbedderHeapTracer::EmbedderStackState old_stack_state_;
-};
-
} // namespace internal
} // namespace v8
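
The header change makes used_size an std::atomic<size_t> updated with relaxed ordering, because it may now be decreased off the main thread, while allocated_size remains a plain main-thread-only counter. A minimal stand-in showing the same split (not the real LocalEmbedderHeapTracer):

#include <atomic>
#include <cstddef>

class RemoteStatsSketch {
 public:
  void IncreaseAllocatedSize(size_t bytes) {
    used_size_.fetch_add(bytes, std::memory_order_relaxed);
    allocated_size_ += bytes;  // Only ever touched on the main thread.
  }

  void DecreaseAllocatedSize(size_t bytes) {
    // May be called concurrently, hence the atomic.
    used_size_.fetch_sub(bytes, std::memory_order_relaxed);
  }

  size_t used_size() const {
    return used_size_.load(std::memory_order_relaxed);
  }

  size_t allocated_size() const { return allocated_size_; }

 private:
  std::atomic<size_t> used_size_{0};
  size_t allocated_size_ = 0;
};

Relaxed ordering is sufficient here because the counter is a statistic read for heuristics; it does not publish any other memory writes.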
diff --git a/deps/v8/src/heap/local-allocator-inl.h b/deps/v8/src/heap/evacuation-allocator-inl.h
index 3d769906a6..1afb240cad 100644
--- a/deps/v8/src/heap/local-allocator-inl.h
+++ b/deps/v8/src/heap/evacuation-allocator-inl.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_LOCAL_ALLOCATOR_INL_H_
-#define V8_HEAP_LOCAL_ALLOCATOR_INL_H_
-
-#include "src/heap/local-allocator.h"
+#ifndef V8_HEAP_EVACUATION_ALLOCATOR_INL_H_
+#define V8_HEAP_EVACUATION_ALLOCATOR_INL_H_
+#include "src/common/globals.h"
+#include "src/heap/evacuation-allocator.h"
#include "src/heap/spaces-inl.h"
namespace v8 {
@@ -22,6 +22,9 @@ AllocationResult EvacuationAllocator::Allocate(AllocationSpace space,
case OLD_SPACE:
return compaction_spaces_.Get(OLD_SPACE)->AllocateRaw(object_size,
alignment, origin);
+ case MAP_SPACE:
+ return compaction_spaces_.Get(MAP_SPACE)->AllocateRaw(object_size,
+ alignment, origin);
case CODE_SPACE:
return compaction_spaces_.Get(CODE_SPACE)
->AllocateRaw(object_size, alignment, origin);
@@ -39,6 +42,9 @@ void EvacuationAllocator::FreeLast(AllocationSpace space, HeapObject object,
case OLD_SPACE:
FreeLastInOldSpace(object, object_size);
return;
+ case MAP_SPACE:
+ FreeLastInMapSpace(object, object_size);
+ return;
default:
// Only new and old space supported.
UNREACHABLE();
@@ -64,19 +70,29 @@ void EvacuationAllocator::FreeLastInOldSpace(HeapObject object,
}
}
+void EvacuationAllocator::FreeLastInMapSpace(HeapObject object,
+ int object_size) {
+ if (!compaction_spaces_.Get(MAP_SPACE)->TryFreeLast(object.address(),
+ object_size)) {
+ // We couldn't free the last object so we have to write a proper filler.
+ heap_->CreateFillerObjectAt(object.address(), object_size,
+ ClearRecordedSlots::kNo);
+ }
+}
+
AllocationResult EvacuationAllocator::AllocateInLAB(
int object_size, AllocationAlignment alignment) {
AllocationResult allocation;
if (!new_space_lab_.IsValid() && !NewLocalAllocationBuffer()) {
- return AllocationResult::Retry(OLD_SPACE);
+ return AllocationResult::Failure();
}
allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
- if (allocation.IsRetry()) {
+ if (allocation.IsFailure()) {
if (!NewLocalAllocationBuffer()) {
- return AllocationResult::Retry(OLD_SPACE);
+ return AllocationResult::Failure();
} else {
allocation = new_space_lab_.AllocateRawAligned(object_size, alignment);
- CHECK(!allocation.IsRetry());
+ CHECK(!allocation.IsFailure());
}
}
return allocation;
@@ -86,7 +102,7 @@ bool EvacuationAllocator::NewLocalAllocationBuffer() {
if (lab_allocation_will_fail_) return false;
AllocationResult result =
new_space_->AllocateRawSynchronized(kLabSize, kTaggedAligned);
- if (result.IsRetry()) {
+ if (result.IsFailure()) {
lab_allocation_will_fail_ = true;
return false;
}
@@ -110,4 +126,4 @@ AllocationResult EvacuationAllocator::AllocateInNewSpace(
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_LOCAL_ALLOCATOR_INL_H_
+#endif // V8_HEAP_EVACUATION_ALLOCATOR_INL_H_
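
AllocateInLAB above now reports allocation failure via AllocationResult::Failure() instead of the old Retry(OLD_SPACE), but keeps the same retry shape: try the current buffer, refill it once, and only then give up. A rough sketch of that shape, with optional<> standing in for AllocationResult and LabSketch for LocalAllocationBuffer:

#include <cstddef>
#include <optional>

struct LabSketch {
  size_t remaining = 0;
  std::optional<size_t> Allocate(size_t size) {
    if (size > remaining) return std::nullopt;  // "Failure".
    remaining -= size;
    return size;
  }
};

std::optional<size_t> AllocateInLabSketch(LabSketch& lab, size_t size,
                                          bool (*refill)(LabSketch&)) {
  auto result = lab.Allocate(size);
  if (!result) {
    if (!refill(lab)) return std::nullopt;  // Propagate failure to the caller.
    result = lab.Allocate(size);  // Must succeed in a freshly refilled LAB.
  }
  return result;
}

int main() {
  LabSketch lab;  // Starts empty, forcing one refill.
  auto result = AllocateInLabSketch(lab, 64, [](LabSketch& l) {
    l.remaining = 1024;  // Pretend the space handed out a fresh buffer.
    return true;
  });
  return result ? 0 : 1;
}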
diff --git a/deps/v8/src/heap/local-allocator.h b/deps/v8/src/heap/evacuation-allocator.h
index 2b6841ecd6..6dbeab1b29 100644
--- a/deps/v8/src/heap/local-allocator.h
+++ b/deps/v8/src/heap/evacuation-allocator.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_LOCAL_ALLOCATOR_H_
-#define V8_HEAP_LOCAL_ALLOCATOR_H_
+#ifndef V8_HEAP_EVACUATION_ALLOCATOR_H_
+#define V8_HEAP_EVACUATION_ALLOCATOR_H_
#include "src/common/globals.h"
#include "src/heap/heap.h"
@@ -35,6 +35,11 @@ class EvacuationAllocator {
heap_->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
heap_->code_space()->MergeCompactionSpace(
compaction_spaces_.Get(CODE_SPACE));
+ if (heap_->map_space()) {
+ heap_->map_space()->MergeCompactionSpace(
+ compaction_spaces_.Get(MAP_SPACE));
+ }
+
// Give back remaining LAB space if this EvacuationAllocator's new space LAB
// sits right next to new space allocation top.
const LinearAllocationArea info = new_space_lab_.CloseAndMakeIterable();
@@ -56,6 +61,7 @@ class EvacuationAllocator {
AllocationAlignment alignment);
inline void FreeLastInNewSpace(HeapObject object, int object_size);
inline void FreeLastInOldSpace(HeapObject object, int object_size);
+ inline void FreeLastInMapSpace(HeapObject object, int object_size);
Heap* const heap_;
NewSpace* const new_space_;
@@ -67,4 +73,4 @@ class EvacuationAllocator {
} // namespace internal
} // namespace v8
-#endif // V8_HEAP_LOCAL_ALLOCATOR_H_
+#endif // V8_HEAP_EVACUATION_ALLOCATOR_H_
diff --git a/deps/v8/src/heap/factory-base.cc b/deps/v8/src/heap/factory-base.cc
index ff1056ee57..5c31a72186 100644
--- a/deps/v8/src/heap/factory-base.cc
+++ b/deps/v8/src/heap/factory-base.cc
@@ -16,6 +16,7 @@
#include "src/heap/read-only-heap.h"
#include "src/logging/local-logger.h"
#include "src/logging/log.h"
+#include "src/objects/instance-type.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/oddball.h"
@@ -46,6 +47,8 @@ template V8_EXPORT_PRIVATE Handle<HeapNumber>
FactoryBase<Factory>::NewHeapNumber<AllocationType::kOld>();
template V8_EXPORT_PRIVATE Handle<HeapNumber>
FactoryBase<Factory>::NewHeapNumber<AllocationType::kReadOnly>();
+template V8_EXPORT_PRIVATE Handle<HeapNumber>
+FactoryBase<Factory>::NewHeapNumber<AllocationType::kSharedOld>();
template V8_EXPORT_PRIVATE Handle<HeapNumber>
FactoryBase<LocalFactory>::NewHeapNumber<AllocationType::kOld>();
@@ -81,10 +84,12 @@ Handle<CodeDataContainer> FactoryBase<Impl>::NewCodeDataContainer(
SKIP_WRITE_BARRIER);
data_container.set_kind_specific_flags(flags, kRelaxedStore);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- Isolate* isolate_for_heap_sandbox = impl()->isolate_for_heap_sandbox();
- data_container.AllocateExternalPointerEntries(isolate_for_heap_sandbox);
+ data_container.set_code_cage_base(impl()->isolate()->code_cage_base(),
+ kRelaxedStore);
+ Isolate* isolate_for_sandbox = impl()->isolate_for_sandbox();
+ data_container.AllocateExternalPointerEntries(isolate_for_sandbox);
data_container.set_raw_code(Smi::zero(), SKIP_WRITE_BARRIER);
- data_container.set_code_entry_point(isolate_for_heap_sandbox, kNullAddress);
+ data_container.set_code_entry_point(isolate_for_sandbox, kNullAddress);
}
data_container.clear_padding();
return handle(data_container, isolate());
@@ -249,9 +254,6 @@ Handle<Script> FactoryBase<Impl>::NewScriptWithId(
DCHECK(source->IsString() || source->IsUndefined());
// Create and initialize script object.
ReadOnlyRoots roots = read_only_roots();
-#ifdef V8_SCRIPTORMODULE_LEGACY_LIFETIME
- Handle<ArrayList> list = NewArrayList(0);
-#endif
Handle<Script> script = handle(
NewStructInternal<Script>(SCRIPT_TYPE, AllocationType::kOld), isolate());
{
@@ -273,7 +275,7 @@ Handle<Script> FactoryBase<Impl>::NewScriptWithId(
raw.set_flags(0);
raw.set_host_defined_options(roots.empty_fixed_array(), SKIP_WRITE_BARRIER);
#ifdef V8_SCRIPTORMODULE_LEGACY_LIFETIME
- raw.set_script_or_modules(*list);
+ raw.set_script_or_modules(roots.empty_array_list());
#endif
}
@@ -286,12 +288,18 @@ Handle<Script> FactoryBase<Impl>::NewScriptWithId(
}
template <typename Impl>
-Handle<ArrayList> FactoryBase<Impl>::NewArrayList(int size) {
- Handle<FixedArray> fixed_array = NewFixedArray(size + ArrayList::kFirstIndex);
- fixed_array->set_map_no_write_barrier(read_only_roots().array_list_map());
- Handle<ArrayList> result = Handle<ArrayList>::cast(fixed_array);
- result->SetLength(0);
- return result;
+Handle<ArrayList> FactoryBase<Impl>::NewArrayList(int size,
+ AllocationType allocation) {
+ if (size == 0) return impl()->empty_array_list();
+ Handle<FixedArray> fixed_array =
+ NewFixedArray(size + ArrayList::kFirstIndex, allocation);
+ {
+ DisallowGarbageCollection no_gc;
+ FixedArray raw = *fixed_array;
+ raw.set_map_no_write_barrier(read_only_roots().array_list_map());
+ ArrayList::cast(raw).SetLength(0);
+ }
+ return Handle<ArrayList>::cast(fixed_array);
}
template <typename Impl>
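
NewArrayList in the factory-base.cc hunk above gains a size == 0 fast path that returns the canonical empty_array_list instead of allocating. A generic sketch of that pattern (plain standard-library types, not the real Handle/ArrayList machinery):

#include <memory>
#include <vector>

struct ListSketch {
  std::vector<int> elements;
};

std::shared_ptr<ListSketch> NewListSketch(int capacity) {
  // All zero-capacity requests share one immutable empty instance.
  static const std::shared_ptr<ListSketch> kEmpty =
      std::make_shared<ListSketch>();
  if (capacity == 0) return kEmpty;
  auto list = std::make_shared<ListSketch>();
  list->elements.reserve(capacity);
  return list;
}

The shared empty instance must never be mutated, which matches how the patch hands out empty_array_list from the read-only roots.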
diff --git a/deps/v8/src/heap/factory-base.h b/deps/v8/src/heap/factory-base.h
index c3aa816d0b..2a8eae50c9 100644
--- a/deps/v8/src/heap/factory-base.h
+++ b/deps/v8/src/heap/factory-base.h
@@ -157,7 +157,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<Script> NewScriptWithId(Handle<PrimitiveHeapObject> source,
int script_id);
- Handle<ArrayList> NewArrayList(int size);
+ Handle<ArrayList> NewArrayList(
+ int size, AllocationType allocation = AllocationType::kYoung);
Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
FunctionLiteral* literal, Handle<Script> script, bool is_toplevel);
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index 696f5355e5..c022f12450 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -76,6 +76,10 @@ ReadOnlyRoots Factory::read_only_roots() const {
return ReadOnlyRoots(isolate());
}
+HeapAllocator* Factory::allocator() const {
+ return isolate()->heap()->allocator();
+}
+
Factory::CodeBuilder& Factory::CodeBuilder::set_interpreter_data(
Handle<HeapObject> interpreter_data) {
// This DCHECK requires this function to be in -inl.h.
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 9e05c52472..d41521cdba 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -21,6 +21,7 @@
#include "src/execution/isolate-inl.h"
#include "src/execution/protectors-inl.h"
#include "src/heap/basic-memory-chunk.h"
+#include "src/heap/heap-allocator-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
@@ -38,6 +39,7 @@
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
+#include "src/objects/call-site-info-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/embedder-data-array-inl.h"
@@ -59,7 +61,6 @@
#include "src/objects/promise-inl.h"
#include "src/objects/property-descriptor-object-inl.h"
#include "src/objects/scope-info.h"
-#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/string-set-inl.h"
#include "src/objects/struct-inl.h"
#include "src/objects/synthetic-module-inl.h"
@@ -127,6 +128,9 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
0, read_only_data_container_ ? AllocationType::kReadOnly
: AllocationType::kOld);
}
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ data_container->initialize_flags(kind_, builtin_);
+ }
data_container->set_kind_specific_flags(kind_specific_flags_,
kRelaxedStore);
}
@@ -227,7 +231,7 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
raw_code.clear_padding();
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- raw_code.set_main_cage_base(isolate_->cage_base());
+ raw_code.set_main_cage_base(isolate_->cage_base(), kRelaxedStore);
data_container->SetCodeAndEntryPoint(isolate_, raw_code);
}
#ifdef VERIFY_HEAP
@@ -264,16 +268,17 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
MaybeHandle<Code> Factory::CodeBuilder::AllocateCode(
bool retry_allocation_or_fail) {
Heap* heap = isolate_->heap();
+ HeapAllocator* allocator = heap->allocator();
HeapObject result;
AllocationType allocation_type = V8_EXTERNAL_CODE_SPACE_BOOL || is_executable_
? AllocationType::kCode
: AllocationType::kReadOnly;
const int object_size = Code::SizeFor(code_desc_.body_size());
if (retry_allocation_or_fail) {
- result = heap->AllocateRawWith<Heap::kRetryOrFail>(
+ result = allocator->AllocateRawWith<HeapAllocator::kRetryOrFail>(
object_size, allocation_type, AllocationOrigin::kRuntime);
} else {
- result = heap->AllocateRawWith<Heap::kLightRetry>(
+ result = allocator->AllocateRawWith<HeapAllocator::kLightRetry>(
object_size, allocation_type, AllocationOrigin::kRuntime);
// Return an empty handle if we cannot allocate the code object.
if (result.is_null()) return MaybeHandle<Code>();
@@ -327,7 +332,7 @@ Handle<Code> Factory::CodeBuilder::Build() {
HeapObject Factory::AllocateRaw(int size, AllocationType allocation,
AllocationAlignment alignment) {
- return isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
+ return allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
size, allocation, AllocationOrigin::kRuntime, alignment);
}
@@ -340,8 +345,8 @@ HeapObject Factory::AllocateRawWithAllocationSite(
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
size += AllocationMemento::kSize;
}
- HeapObject result =
- isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
+ HeapObject result = allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
+ size, allocation);
WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung
? SKIP_WRITE_BARRIER
: UPDATE_WRITE_BARRIER;
@@ -368,8 +373,8 @@ void Factory::InitializeAllocationMemento(AllocationMemento memento,
HeapObject Factory::New(Handle<Map> map, AllocationType allocation) {
DCHECK(map->instance_type() != MAP_TYPE);
int size = map->instance_size();
- HeapObject result =
- isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
+ HeapObject result = allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
+ size, allocation);
// New space objects are allocated white.
WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung
? SKIP_WRITE_BARRIER
@@ -383,7 +388,7 @@ Handle<HeapObject> Factory::NewFillerObject(int size,
AllocationType allocation,
AllocationOrigin origin) {
Heap* heap = isolate()->heap();
- HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>(
+ HeapObject result = allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
size, allocation, origin, alignment);
heap->CreateFillerObjectAt(result.address(), size, ClearRecordedSlots::kNo);
return Handle<HeapObject>(result, isolate());
@@ -441,10 +446,11 @@ Handle<Oddball> Factory::NewBasicBlockCountersMarker() {
Oddball::kBasicBlockCountersMarker);
}
-Handle<PropertyArray> Factory::NewPropertyArray(int length) {
+Handle<PropertyArray> Factory::NewPropertyArray(int length,
+ AllocationType allocation) {
DCHECK_LE(0, length);
if (length == 0) return empty_property_array();
- HeapObject result = AllocateRawFixedArray(length, AllocationType::kYoung);
+ HeapObject result = AllocateRawFixedArray(length, allocation);
DisallowGarbageCollection no_gc;
result.set_map_after_allocation(*property_array_map(), SKIP_WRITE_BARRIER);
PropertyArray array = PropertyArray::cast(result);
@@ -520,13 +526,9 @@ Handle<EmbedderDataArray> Factory::NewEmbedderDataArray(int length) {
array.set_length(length);
if (length > 0) {
- ObjectSlot start(array.slots_start());
- ObjectSlot end(array.slots_end());
- size_t slot_count = end - start;
- MemsetTagged(start, *undefined_value(), slot_count);
for (int i = 0; i < length; i++) {
- // TODO(v8:10391, saelo): Handle external pointers in EmbedderDataSlot
- EmbedderDataSlot(array, i).AllocateExternalPointerEntry(isolate());
+ // TODO(v8): consider initializing embedder data arrays with Smi::zero().
+ EmbedderDataSlot(array, i).Initialize(*undefined_value());
}
}
return handle(array, isolate());
@@ -846,9 +848,8 @@ Handle<String> Factory::AllocateInternalizedStringImpl(T t, int chars,
String result = String::cast(AllocateRawWithImmortalMap(
size,
RefineAllocationTypeForInPlaceInternalizableString(
- isolate()->heap()->CanAllocateInReadOnlySpace()
- ? AllocationType::kReadOnly
- : AllocationType::kOld,
+ CanAllocateInReadOnlySpace() ? AllocationType::kReadOnly
+ : AllocationType::kOld,
map),
map));
DisallowGarbageCollection no_gc;
@@ -873,10 +874,6 @@ Handle<String> Factory::NewInternalizedStringImpl(Handle<String> string,
return AllocateInternalizedStringImpl<false>(string, chars, hash_field);
}
-namespace {
-
-} // namespace
-
StringTransitionStrategy Factory::ComputeInternalizationStrategyForString(
Handle<String> string, MaybeHandle<Map>* internalized_map) {
// Do not internalize young strings in-place: This allows us to ignore both
@@ -1064,6 +1061,7 @@ MaybeHandle<String> Factory::NewExternalStringFromOneByte(
external_string.set_length(static_cast<int>(length));
external_string.set_raw_hash_field(String::kEmptyHashField);
external_string.SetResource(isolate(), resource);
+
isolate()->heap()->RegisterExternalString(external_string);
return Handle<String>(external_string, isolate());
@@ -1086,7 +1084,9 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
string.set_length(static_cast<int>(length));
string.set_raw_hash_field(String::kEmptyHashField);
string.SetResource(isolate(), resource);
+
isolate()->heap()->RegisterExternalString(string);
+
return Handle<ExternalTwoByteString>(string, isolate());
}
@@ -1113,9 +1113,9 @@ Symbol Factory::NewSymbolInternal(AllocationType allocation) {
Symbol::kSize, allocation, read_only_roots().symbol_map()));
DisallowGarbageCollection no_gc;
// Generate a random hash value.
- int hash = isolate()->GenerateIdentityHash(Name::kHashBitMask);
- symbol.set_raw_hash_field(Name::kIsNotIntegerIndexMask |
- (hash << Name::kHashShift));
+ int hash = isolate()->GenerateIdentityHash(Name::HashBits::kMax);
+ symbol.set_raw_hash_field(
+ Name::CreateHashFieldValue(hash, Name::HashFieldType::kHash));
symbol.set_description(read_only_roots().undefined_value(),
SKIP_WRITE_BARRIER);
symbol.set_flags(0);
@@ -1151,8 +1151,8 @@ Context Factory::NewContextInternal(Handle<Map> map, int size,
DCHECK_LE(Context::MIN_CONTEXT_SLOTS, variadic_part_length);
DCHECK_LE(Context::SizeFor(variadic_part_length), size);
- HeapObject result =
- isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
+ HeapObject result = allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
+ size, allocation);
result.set_map_after_allocation(*map);
DisallowGarbageCollection no_gc;
Context context = Context::cast(result);
@@ -1208,7 +1208,9 @@ Handle<ScriptContextTable> Factory::NewScriptContextTable() {
Handle<ScriptContextTable> context_table = Handle<ScriptContextTable>::cast(
NewFixedArrayWithMap(read_only_roots().script_context_table_map_handle(),
ScriptContextTable::kMinLength));
+ Handle<NameToIndexHashTable> names = NameToIndexHashTable::New(isolate(), 16);
context_table->set_used(0, kReleaseStore);
+ context_table->set_names_to_context_index(*names);
return context_table;
}
@@ -1364,6 +1366,19 @@ Handle<AccessorInfo> Factory::NewAccessorInfo() {
return handle(info, isolate());
}
+Handle<ErrorStackData> Factory::NewErrorStackData(
+ Handle<Object> call_site_infos_or_formatted_stack,
+ Handle<Object> limit_or_stack_frame_infos) {
+ ErrorStackData error_stack_data = NewStructInternal<ErrorStackData>(
+ ERROR_STACK_DATA_TYPE, AllocationType::kYoung);
+ DisallowGarbageCollection no_gc;
+ error_stack_data.set_call_site_infos_or_formatted_stack(
+ *call_site_infos_or_formatted_stack, SKIP_WRITE_BARRIER);
+ error_stack_data.set_limit_or_stack_frame_infos(*limit_or_stack_frame_infos,
+ SKIP_WRITE_BARRIER);
+ return handle(error_stack_data, isolate());
+}
+
void Factory::AddToScriptList(Handle<Script> script) {
Handle<WeakArrayList> scripts = script_list();
scripts = WeakArrayList::Append(isolate(), scripts,
@@ -1471,7 +1486,6 @@ Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(
// The supertypes list is constant after initialization, so we pretenure
// that too. The subtypes list, however, is expected to grow (and hence be
// replaced), so we don't pretenure it.
- Handle<ArrayList> subtypes = ArrayList::New(isolate(), 0);
Handle<FixedArray> supertypes;
if (opt_parent.is_null()) {
supertypes = NewFixedArray(wasm::kMinimumSupertypeArraySize);
@@ -1500,14 +1514,14 @@ Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(
result.AllocateExternalPointerEntries(isolate());
result.set_foreign_address(isolate(), type_address);
result.set_supertypes(*supertypes);
- result.set_subtypes(*subtypes);
+ result.set_subtypes(ReadOnlyRoots(isolate()).empty_array_list());
result.set_instance_size(instance_size_bytes);
result.set_instance(*instance);
return handle(result, isolate());
}
Handle<WasmApiFunctionRef> Factory::NewWasmApiFunctionRef(
- Handle<JSReceiver> callable) {
+ Handle<JSReceiver> callable, Handle<HeapObject> suspender) {
Map map = *wasm_api_function_ref_map();
auto result = WasmApiFunctionRef::cast(AllocateRawWithImmortalMap(
map.instance_size(), AllocationType::kOld, map));
@@ -1519,6 +1533,11 @@ Handle<WasmApiFunctionRef> Factory::NewWasmApiFunctionRef(
} else {
result.set_callable(*undefined_value());
}
+ if (!suspender.is_null()) {
+ result.set_suspender(*suspender);
+ } else {
+ result.set_suspender(*undefined_value());
+ }
return handle(result, isolate());
}
@@ -1532,7 +1551,7 @@ Handle<WasmInternalFunction> Factory::NewWasmInternalFunction(
result.set_foreign_address(isolate(), opt_call_target);
result.set_ref(*ref);
// Default values, will be overwritten by the caller.
- result.set_code(isolate()->builtins()->code(Builtin::kAbort));
+ result.set_code(*BUILTIN_CODE(isolate(), Abort));
result.set_external(*undefined_value());
return handle(result, isolate());
}
@@ -1540,8 +1559,8 @@ Handle<WasmInternalFunction> Factory::NewWasmInternalFunction(
Handle<WasmJSFunctionData> Factory::NewWasmJSFunctionData(
Address opt_call_target, Handle<JSReceiver> callable, int return_count,
int parameter_count, Handle<PodArray<wasm::ValueType>> serialized_sig,
- Handle<Code> wrapper_code, Handle<Map> rtt) {
- Handle<WasmApiFunctionRef> ref = NewWasmApiFunctionRef(callable);
+ Handle<CodeT> wrapper_code, Handle<Map> rtt, Handle<HeapObject> suspender) {
+ Handle<WasmApiFunctionRef> ref = NewWasmApiFunctionRef(callable, suspender);
Handle<WasmInternalFunction> internal =
NewWasmInternalFunction(opt_call_target, ref, rtt);
Map map = *wasm_js_function_data_map();
@@ -1557,8 +1576,19 @@ Handle<WasmJSFunctionData> Factory::NewWasmJSFunctionData(
return handle(result, isolate());
}
+Handle<WasmOnFulfilledData> Factory::NewWasmOnFulfilledData(
+ Handle<WasmSuspenderObject> suspender) {
+ Map map = *wasm_onfulfilled_data_map();
+ WasmOnFulfilledData result =
+ WasmOnFulfilledData::cast(AllocateRawWithImmortalMap(
+ map.instance_size(), AllocationType::kOld, map));
+ DisallowGarbageCollection no_gc;
+ result.set_suspender(*suspender);
+ return handle(result, isolate());
+}
+
Handle<WasmExportedFunctionData> Factory::NewWasmExportedFunctionData(
- Handle<Code> export_wrapper, Handle<WasmInstanceObject> instance,
+ Handle<CodeT> export_wrapper, Handle<WasmInstanceObject> instance,
Address call_target, Handle<Object> ref, int func_index,
Address sig_address, int wrapper_budget, Handle<Map> rtt) {
Handle<Foreign> sig_foreign = NewForeign(sig_address);
@@ -1576,17 +1606,23 @@ Handle<WasmExportedFunctionData> Factory::NewWasmExportedFunctionData(
result.set_function_index(func_index);
result.set_signature(*sig_foreign);
result.set_wrapper_budget(wrapper_budget);
- result.set_c_wrapper_code(ToCodeT(*BUILTIN_CODE(isolate(), Illegal)),
- SKIP_WRITE_BARRIER);
+ // We can't skip the write barrier when V8_EXTERNAL_CODE_SPACE is enabled
+ // because in this case the CodeT (CodeDataContainer) objects are not
+ // immovable.
+ result.set_c_wrapper_code(
+ *BUILTIN_CODE(isolate(), Illegal),
+ V8_EXTERNAL_CODE_SPACE_BOOL ? UPDATE_WRITE_BARRIER : SKIP_WRITE_BARRIER);
result.set_packed_args_size(0);
+ result.set_suspender(*undefined_value());
return handle(result, isolate());
}
Handle<WasmCapiFunctionData> Factory::NewWasmCapiFunctionData(
Address call_target, Handle<Foreign> embedder_data,
- Handle<Code> wrapper_code, Handle<Map> rtt,
+ Handle<CodeT> wrapper_code, Handle<Map> rtt,
Handle<PodArray<wasm::ValueType>> serialized_sig) {
- Handle<WasmApiFunctionRef> ref = NewWasmApiFunctionRef(Handle<JSReceiver>());
+ Handle<WasmApiFunctionRef> ref =
+ NewWasmApiFunctionRef(Handle<JSReceiver>(), Handle<HeapObject>());
Handle<WasmInternalFunction> internal =
NewWasmInternalFunction(call_target, ref, rtt);
Map map = *wasm_capi_function_data_map();
@@ -1601,12 +1637,13 @@ Handle<WasmCapiFunctionData> Factory::NewWasmCapiFunctionData(
return handle(result, isolate());
}
-Handle<WasmArray> Factory::NewWasmArray(
+Handle<WasmArray> Factory::NewWasmArrayFromElements(
const wasm::ArrayType* type, const std::vector<wasm::WasmValue>& elements,
Handle<Map> map) {
uint32_t length = static_cast<uint32_t>(elements.size());
HeapObject raw =
AllocateRaw(WasmArray::SizeFor(*map, length), AllocationType::kYoung);
+ DisallowGarbageCollection no_gc;
raw.set_map_after_allocation(*map);
WasmArray result = WasmArray::cast(raw);
result.set_raw_properties_or_hash(*empty_fixed_array(), kRelaxedStore);
@@ -1627,6 +1664,27 @@ Handle<WasmArray> Factory::NewWasmArray(
return handle(result, isolate());
}
+Handle<WasmArray> Factory::NewWasmArrayFromMemory(uint32_t length,
+ Handle<Map> map,
+ Address source) {
+ wasm::ValueType element_type = reinterpret_cast<wasm::ArrayType*>(
+ map->wasm_type_info().foreign_address())
+ ->element_type();
+ DCHECK(element_type.is_numeric());
+ HeapObject raw =
+ AllocateRaw(WasmArray::SizeFor(*map, length), AllocationType::kYoung);
+ DisallowGarbageCollection no_gc;
+ raw.set_map_after_allocation(*map);
+ WasmArray result = WasmArray::cast(raw);
+ result.set_raw_properties_or_hash(*empty_fixed_array(), kRelaxedStore);
+ result.set_length(length);
+ MemCopy(reinterpret_cast<void*>(result.ElementAddress(0)),
+ reinterpret_cast<void*>(source),
+ length * element_type.element_size_bytes());
+
+ return handle(result, isolate());
+}
+
Handle<WasmStruct> Factory::NewWasmStruct(const wasm::StructType* type,
wasm::WasmValue* args,
Handle<Map> map) {
@@ -1659,6 +1717,11 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWasmJSFunction(
return NewSharedFunctionInfo(name, data, Builtin::kNoBuiltinId);
}
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWasmOnFulfilled(
+ Handle<WasmOnFulfilledData> data) {
+ return NewSharedFunctionInfo({}, data, Builtin::kNoBuiltinId);
+}
+
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWasmCapiFunction(
Handle<WasmCapiFunctionData> data) {
return NewSharedFunctionInfo(MaybeHandle<String>(), data,
@@ -1782,21 +1845,34 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
IsTerminalElementsKind(elements_kind));
DCHECK(allocation_type == AllocationType::kMap ||
allocation_type == AllocationType::kSharedMap);
- HeapObject result = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
+ HeapObject result = allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
Map::kSize, allocation_type);
DisallowGarbageCollection no_gc;
- result.set_map_after_allocation(*meta_map(), SKIP_WRITE_BARRIER);
+ Heap* roots = allocation_type == AllocationType::kMap
+ ? isolate()->heap()
+ : isolate()->shared_isolate()->heap();
+ result.set_map_after_allocation(ReadOnlyRoots(roots).meta_map(),
+ SKIP_WRITE_BARRIER);
return handle(InitializeMap(Map::cast(result), type, instance_size,
- elements_kind, inobject_properties),
+ elements_kind, inobject_properties, roots),
isolate());
}
Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
- ElementsKind elements_kind,
- int inobject_properties) {
+ ElementsKind elements_kind, int inobject_properties,
+ Heap* roots) {
DisallowGarbageCollection no_gc;
+ map.set_bit_field(0);
+ map.set_bit_field2(Map::Bits2::NewTargetIsBaseBit::encode(true));
+ int bit_field3 =
+ Map::Bits3::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
+ Map::Bits3::OwnsDescriptorsBit::encode(true) |
+ Map::Bits3::ConstructionCounterBits::encode(Map::kNoSlackTracking) |
+ Map::Bits3::IsExtensibleBit::encode(true);
+ map.set_bit_field3(bit_field3);
map.set_instance_type(type);
- HeapObject raw_null_value = *null_value();
+ ReadOnlyRoots ro_roots(roots);
+ HeapObject raw_null_value = ro_roots.null_value();
map.set_prototype(raw_null_value, SKIP_WRITE_BARRIER);
map.set_constructor_or_back_pointer(raw_null_value, SKIP_WRITE_BARRIER);
map.set_instance_size(instance_size);
@@ -1805,30 +1881,21 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
map.SetInObjectPropertiesStartInWords(instance_size / kTaggedSize -
inobject_properties);
DCHECK_EQ(map.GetInObjectProperties(), inobject_properties);
- map.set_prototype_validity_cell(*invalid_prototype_validity_cell());
+ map.set_prototype_validity_cell(roots->invalid_prototype_validity_cell());
} else {
DCHECK_EQ(inobject_properties, 0);
map.set_inobject_properties_start_or_constructor_function_index(0);
map.set_prototype_validity_cell(Smi::FromInt(Map::kPrototypeChainValid),
SKIP_WRITE_BARRIER);
}
- map.set_dependent_code(
- DependentCode::empty_dependent_code(ReadOnlyRoots(isolate())),
- SKIP_WRITE_BARRIER);
+ map.set_dependent_code(DependentCode::empty_dependent_code(ro_roots),
+ SKIP_WRITE_BARRIER);
map.set_raw_transitions(MaybeObject::FromSmi(Smi::zero()),
SKIP_WRITE_BARRIER);
map.SetInObjectUnusedPropertyFields(inobject_properties);
- map.SetInstanceDescriptors(isolate(), *empty_descriptor_array(), 0);
+ map.SetInstanceDescriptors(isolate(), ro_roots.empty_descriptor_array(), 0);
// Must be called only after |instance_type| and |instance_size| are set.
map.set_visitor_id(Map::GetVisitorId(map));
- map.set_bit_field(0);
- map.set_bit_field2(Map::Bits2::NewTargetIsBaseBit::encode(true));
- int bit_field3 =
- Map::Bits3::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
- Map::Bits3::OwnsDescriptorsBit::encode(true) |
- Map::Bits3::ConstructionCounterBits::encode(Map::kNoSlackTracking) |
- Map::Bits3::IsExtensibleBit::encode(true);
- map.set_bit_field3(bit_field3);
DCHECK(!map.is_in_retained_map_list());
map.clear_padding();
map.set_elements_kind(elements_kind);
@@ -1871,8 +1938,9 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
adjusted_object_size += AllocationMemento::kSize;
}
- HeapObject raw_clone = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
- adjusted_object_size, AllocationType::kYoung);
+ HeapObject raw_clone =
+ allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
+ adjusted_object_size, AllocationType::kYoung);
DCHECK(Heap::InYoungGeneration(raw_clone) || FLAG_single_generation);
@@ -1942,10 +2010,9 @@ void initialize_length<PropertyArray>(PropertyArray array, int length) {
array.initialize_length(length);
}
-inline void ZeroEmbedderFields(i::JSObject obj) {
- int count = obj.GetEmbedderFieldCount();
- for (int i = 0; i < count; i++) {
- obj.SetEmbedderField(i, Smi::zero());
+inline void InitEmbedderFields(i::JSObject obj, i::Object initial_value) {
+ for (int i = 0; i < obj.GetEmbedderFieldCount(); i++) {
+ EmbedderDataSlot(obj, i).Initialize(initial_value);
}
}
@@ -2109,7 +2176,7 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
}
Handle<HeapNumber> Factory::NewHeapNumberForCodeAssembler(double value) {
- return isolate()->heap()->CanAllocateInReadOnlySpace()
+ return CanAllocateInReadOnlySpace()
? NewHeapNumber<AllocationType::kReadOnly>(value)
: NewHeapNumber<AllocationType::kOld>(value);
}
@@ -2136,7 +2203,7 @@ Handle<JSObject> Factory::NewError(Handle<JSFunction> constructor,
Handle<Object> no_caller;
return ErrorUtils::Construct(isolate(), constructor, constructor, message,
undefined_value(), SKIP_NONE, no_caller,
- ErrorUtils::StackTraceCollection::kDetailed)
+ ErrorUtils::StackTraceCollection::kEnabled)
.ToHandleChecked();
}
@@ -2205,9 +2272,10 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
}
Handle<JSObject> Factory::NewExternal(void* value) {
- Handle<Foreign> foreign = NewForeign(reinterpret_cast<Address>(value));
- Handle<JSObject> external = NewJSObjectFromMap(external_map());
- external->SetEmbedderField(0, *foreign);
+ auto external =
+ Handle<JSExternalObject>::cast(NewJSObjectFromMap(external_map()));
+ external->AllocateExternalPointerEntries(isolate());
+ external->set_value(isolate(), value);
return external;
}
@@ -2246,10 +2314,9 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Code raw_result = *result;
const bool set_is_off_heap_trampoline = true;
- const int stack_slots =
- raw_code.has_safepoint_info() ? raw_code.stack_slots() : 0;
raw_result.initialize_flags(raw_code.kind(), raw_code.is_turbofanned(),
- stack_slots, set_is_off_heap_trampoline);
+ raw_code.stack_slots(),
+ set_is_off_heap_trampoline);
raw_result.set_builtin_id(raw_code.builtin_id());
raw_result.set_handler_table_offset(raw_code.handler_table_offset());
raw_result.set_constant_pool_offset(raw_code.constant_pool_offset());
@@ -2273,10 +2340,14 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
#endif
raw_result.set_relocation_info(canonical_reloc_info);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ CodeDataContainer code_data_container =
+ raw_result.code_data_container(kAcquireLoad);
// Updating flags (in particular is_off_heap_trampoline one) might change
// the value of the instruction start, so update it here.
- raw_result.code_data_container(kAcquireLoad)
- .UpdateCodeEntryPoint(isolate(), raw_result);
+ code_data_container.UpdateCodeEntryPoint(isolate(), raw_result);
+ // Also update flag values cached on the code data container.
+ code_data_container.initialize_flags(raw_code.kind(),
+ raw_code.builtin_id());
}
}
@@ -2293,8 +2364,9 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
{
int obj_size = code->Size();
CodePageCollectionMemoryModificationScope code_allocation(heap);
- HeapObject result = heap->AllocateRawWith<Heap::kRetryOrFail>(
- obj_size, AllocationType::kCode, AllocationOrigin::kRuntime);
+ HeapObject result =
+ allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
+ obj_size, AllocationType::kCode, AllocationOrigin::kRuntime);
// Copy code object.
Address old_addr = code->address();
@@ -2315,6 +2387,7 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
#endif
}
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ data_container->initialize_flags(code->kind(), code->builtin_id());
data_container->SetCodeAndEntryPoint(isolate(), *new_code);
}
@@ -2649,6 +2722,27 @@ Handle<JSModuleNamespace> Factory::NewJSModuleNamespace() {
return module_namespace;
}
+Handle<JSWrappedFunction> Factory::NewJSWrappedFunction(
+ Handle<NativeContext> creation_context, Handle<Object> target) {
+ DCHECK(target->IsCallable());
+ Handle<Map> map(
+ Map::cast(creation_context->get(Context::WRAPPED_FUNCTION_MAP_INDEX)),
+ isolate());
+ // 2. Let wrapped be ! MakeBasicObject(internalSlotsList).
+ // 3. Set wrapped.[[Prototype]] to
+ // callerRealm.[[Intrinsics]].[[%Function.prototype%]].
+ // 4. Set wrapped.[[Call]] as described in 2.1.
+ Handle<JSWrappedFunction> wrapped = Handle<JSWrappedFunction>::cast(
+ isolate()->factory()->NewJSObjectFromMap(map));
+ // 5. Set wrapped.[[WrappedTargetFunction]] to Target.
+ wrapped->set_wrapped_target_function(JSReceiver::cast(*target));
+ // 6. Set wrapped.[[Realm]] to callerRealm.
+ wrapped->set_context(*creation_context);
+ // TODO(v8:11989): https://github.com/tc39/proposal-shadowrealm/pull/348
+
+ return wrapped;
+}
+
Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
Handle<JSFunction> function) {
DCHECK(IsResumableFunction(function->shared().kind()));
@@ -2675,7 +2769,6 @@ Handle<SourceTextModule> Factory::NewSourceTextModule(
Handle<FixedArray> requested_modules =
requested_modules_length > 0 ? NewFixedArray(requested_modules_length)
: empty_fixed_array();
- Handle<ArrayList> async_parent_modules = ArrayList::New(isolate(), 0);
ReadOnlyRoots roots(isolate());
SourceTextModule module = SourceTextModule::cast(
@@ -2699,7 +2792,7 @@ Handle<SourceTextModule> Factory::NewSourceTextModule(
module.set_async(IsAsyncModule(sfi->kind()));
module.set_async_evaluating_ordinal(SourceTextModule::kNotAsyncEvaluated);
module.set_cycle_root(roots.the_hole_value(), SKIP_WRITE_BARRIER);
- module.set_async_parent_modules(*async_parent_modules);
+ module.set_async_parent_modules(roots.empty_array_list());
module.set_pending_async_dependencies(0);
return handle(module, isolate());
}
@@ -2867,7 +2960,8 @@ Handle<JSArrayBufferView> Factory::NewJSArrayBufferView(
raw.set_byte_offset(byte_offset);
raw.set_byte_length(byte_length);
raw.set_bit_field(0);
- ZeroEmbedderFields(raw);
+ // TODO(v8) remove once embedder data slots are always zero-initialized.
+ InitEmbedderFields(raw, Smi::zero());
DCHECK_EQ(raw.GetEmbedderFieldCount(),
v8::ArrayBufferView::kEmbedderFieldCount);
return array_buffer_view;
@@ -3171,6 +3265,7 @@ Handle<Object> Factory::NumberToStringCacheGet(Object number, int hash) {
Handle<String> Factory::NumberToString(Handle<Object> number,
NumberCacheMode mode) {
+ SLOW_DCHECK(number->IsNumber());
if (number->IsSmi()) return SmiToString(Smi::cast(*number), mode);
double double_value = Handle<HeapNumber>::cast(number)->value();
@@ -3333,12 +3428,12 @@ Handle<BreakPoint> Factory::NewBreakPoint(int id, Handle<String> condition) {
return handle(new_break_point, isolate());
}
-Handle<StackFrameInfo> Factory::NewStackFrameInfo(
+Handle<CallSiteInfo> Factory::NewCallSiteInfo(
Handle<Object> receiver_or_instance, Handle<Object> function,
Handle<HeapObject> code_object, int code_offset_or_source_position,
int flags, Handle<FixedArray> parameters) {
- auto info = NewStructInternal<StackFrameInfo>(STACK_FRAME_INFO_TYPE,
- AllocationType::kYoung);
+ auto info = NewStructInternal<CallSiteInfo>(CALL_SITE_INFO_TYPE,
+ AllocationType::kYoung);
DisallowGarbageCollection no_gc;
info.set_receiver_or_instance(*receiver_or_instance, SKIP_WRITE_BARRIER);
info.set_function(*function, SKIP_WRITE_BARRIER);
@@ -3349,6 +3444,22 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
return handle(info, isolate());
}
+Handle<StackFrameInfo> Factory::NewStackFrameInfo(
+ Handle<HeapObject> shared_or_script, int bytecode_offset_or_source_position,
+ Handle<String> function_name, bool is_constructor) {
+ DCHECK_GE(bytecode_offset_or_source_position, 0);
+ StackFrameInfo info = NewStructInternal<StackFrameInfo>(
+ STACK_FRAME_INFO_TYPE, AllocationType::kYoung);
+ DisallowGarbageCollection no_gc;
+ info.set_flags(0);
+ info.set_shared_or_script(*shared_or_script, SKIP_WRITE_BARRIER);
+ info.set_bytecode_offset_or_source_position(
+ bytecode_offset_or_source_position);
+ info.set_function_name(*function_name, SKIP_WRITE_BARRIER);
+ info.set_is_constructor(is_constructor);
+ return handle(info, isolate());
+}
+
Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
int length) {
bool strict_mode_callee = is_strict(callee->shared().language_mode()) ||
@@ -3591,14 +3702,16 @@ Handle<Map> Factory::CreateSloppyFunctionMap(
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
int field_index = 0;
- STATIC_ASSERT(JSFunctionOrBoundFunction::kLengthDescriptorIndex == 0);
+ STATIC_ASSERT(
+ JSFunctionOrBoundFunctionOrWrappedFunction::kLengthDescriptorIndex == 0);
{ // Add length accessor.
Descriptor d = Descriptor::AccessorConstant(
length_string(), function_length_accessor(), roc_attribs);
map->AppendDescriptor(isolate(), &d);
}
- STATIC_ASSERT(JSFunctionOrBoundFunction::kNameDescriptorIndex == 1);
+ STATIC_ASSERT(
+ JSFunctionOrBoundFunctionOrWrappedFunction::kNameDescriptorIndex == 1);
if (IsFunctionModeWithName(function_mode)) {
// Add name field.
Handle<Name> name = isolate()->factory()->name_string();
@@ -3763,7 +3876,8 @@ Handle<JSPromise> Factory::NewJSPromiseWithoutHook() {
JSPromise raw = *promise;
raw.set_reactions_or_result(Smi::zero(), SKIP_WRITE_BARRIER);
raw.set_flags(0);
- ZeroEmbedderFields(*promise);
+ // TODO(v8) remove once embedder data slots are always zero-initialized.
+ InitEmbedderFields(*promise, Smi::zero());
DCHECK_EQ(raw.GetEmbedderFieldCount(), v8::Promise::kEmbedderFieldCount);
return promise;
}
@@ -3789,7 +3903,7 @@ Handle<CallHandlerInfo> Factory::NewCallHandlerInfo(bool has_no_side_effect) {
}
bool Factory::CanAllocateInReadOnlySpace() {
- return isolate()->heap()->CanAllocateInReadOnlySpace();
+ return allocator()->CanAllocateInReadOnlySpace();
}
bool Factory::EmptyStringRootIsInitialized() {
@@ -3824,7 +3938,7 @@ Handle<JSFunction> Factory::JSFunctionBuilder::Build() {
if (code->kind() == CodeKind::BASELINE) {
IsCompiledScope is_compiled_scope(sfi_->is_compiled_scope(isolate_));
- JSFunction::EnsureFeedbackVector(result, &is_compiled_scope);
+ JSFunction::EnsureFeedbackVector(isolate_, result, &is_compiled_scope);
}
Compiler::PostInstantiation(result);
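
The NewWasmExportedFunctionData hunk explains that the write barrier can only be skipped when the stored CodeT object is immovable, which is no longer the case once V8_EXTERNAL_CODE_SPACE is enabled. A tiny sketch of that decision, with stand-in names for the build flag and barrier mode:

enum class WriteBarrierModeSketch { kSkip, kUpdate };

constexpr bool kExternalCodeSpaceEnabledSketch = true;  // Assumed build setting.

constexpr WriteBarrierModeSketch BarrierForBuiltinCodeStore() {
  // With an external code space, CodeT (CodeDataContainer) objects are
  // ordinary movable heap objects, so the store must emit the barrier.
  return kExternalCodeSpaceEnabledSketch ? WriteBarrierModeSketch::kUpdate
                                         : WriteBarrierModeSketch::kSkip;
}

static_assert(BarrierForBuiltinCodeStore() == WriteBarrierModeSketch::kUpdate,
              "movable CodeT objects require the write barrier");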
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index a5dd9ce5a9..0387482010 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -36,6 +36,7 @@ class BreakPointInfo;
class CallableTask;
class CallbackTask;
class CallHandlerInfo;
+class CallSiteInfo;
class Expression;
class EmbedderDataArray;
class ArrayBoilerplateDescription;
@@ -110,11 +111,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
public:
inline ReadOnlyRoots read_only_roots() const;
- template <typename T>
- Handle<T> MakeHandle(T obj) {
- return handle(obj, isolate());
- }
-
Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number, const char* type_of,
byte kind);
@@ -127,7 +123,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<Oddball> NewBasicBlockCountersMarker();
// Allocates a property array initialized with undefined values.
- Handle<PropertyArray> NewPropertyArray(int length);
+ Handle<PropertyArray> NewPropertyArray(
+ int length, AllocationType allocation = AllocationType::kYoung);
// Tries allocating a fixed array initialized with undefined values.
// In case of an allocation failure (OOM) an empty handle is returned.
// The caller has to manually signal an
@@ -390,17 +387,25 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<AccessorInfo> NewAccessorInfo();
+ Handle<ErrorStackData> NewErrorStackData(
+ Handle<Object> call_site_infos_or_formatted_stack,
+ Handle<Object> limit_or_stack_frame_infos);
+
Handle<Script> CloneScript(Handle<Script> script);
Handle<BreakPointInfo> NewBreakPointInfo(int source_position);
Handle<BreakPoint> NewBreakPoint(int id, Handle<String> condition);
- Handle<StackFrameInfo> NewStackFrameInfo(Handle<Object> receiver_or_instance,
- Handle<Object> function,
- Handle<HeapObject> code_object,
- int code_offset_or_source_position,
- int flags,
- Handle<FixedArray> parameters);
+ Handle<CallSiteInfo> NewCallSiteInfo(Handle<Object> receiver_or_instance,
+ Handle<Object> function,
+ Handle<HeapObject> code_object,
+ int code_offset_or_source_position,
+ int flags,
+ Handle<FixedArray> parameters);
+ Handle<StackFrameInfo> NewStackFrameInfo(
+ Handle<HeapObject> shared_or_script,
+ int bytecode_offset_or_source_position, Handle<String> function_name,
+ bool is_constructor);
// Allocate various microtasks.
Handle<CallableTask> NewCallableTask(Handle<JSReceiver> callable,
@@ -436,10 +441,12 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
int inobject_properties = 0,
AllocationType allocation_type = AllocationType::kMap);
- // Initializes the fields of a newly created Map. Exposed for tests and
- // heap setup; other code should just call NewMap which takes care of it.
+ // Initializes the fields of a newly created Map using roots from the
+ // passed-in Heap. Exposed for tests and heap setup; other code should just
+ // call NewMap which takes care of it.
Map InitializeMap(Map map, InstanceType type, int instance_size,
- ElementsKind elements_kind, int inobject_properties);
+ ElementsKind elements_kind, int inobject_properties,
+ Heap* roots);
// Allocate a block of memory of the given AllocationType (filled with a
// filler). Used as a fall-back for generated code when the space is full.
@@ -579,6 +586,9 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<JSModuleNamespace> NewJSModuleNamespace();
+ Handle<JSWrappedFunction> NewJSWrappedFunction(
+ Handle<NativeContext> creation_context, Handle<Object> target);
+
#if V8_ENABLE_WEBASSEMBLY
Handle<WasmTypeInfo> NewWasmTypeInfo(Address type_address,
Handle<Map> opt_parent,
@@ -589,29 +599,37 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<Map> rtt);
Handle<WasmCapiFunctionData> NewWasmCapiFunctionData(
Address call_target, Handle<Foreign> embedder_data,
- Handle<Code> wrapper_code, Handle<Map> rtt,
+ Handle<CodeT> wrapper_code, Handle<Map> rtt,
Handle<PodArray<wasm::ValueType>> serialized_sig);
Handle<WasmExportedFunctionData> NewWasmExportedFunctionData(
- Handle<Code> export_wrapper, Handle<WasmInstanceObject> instance,
+ Handle<CodeT> export_wrapper, Handle<WasmInstanceObject> instance,
Address call_target, Handle<Object> ref, int func_index,
Address sig_address, int wrapper_budget, Handle<Map> rtt);
- Handle<WasmApiFunctionRef> NewWasmApiFunctionRef(Handle<JSReceiver> callable);
+ Handle<WasmApiFunctionRef> NewWasmApiFunctionRef(
+ Handle<JSReceiver> callable, Handle<HeapObject> suspender);
// {opt_call_target} is kNullAddress for JavaScript functions, and
// non-null for exported Wasm functions.
Handle<WasmJSFunctionData> NewWasmJSFunctionData(
Address opt_call_target, Handle<JSReceiver> callable, int return_count,
int parameter_count, Handle<PodArray<wasm::ValueType>> serialized_sig,
- Handle<Code> wrapper_code, Handle<Map> rtt);
+ Handle<CodeT> wrapper_code, Handle<Map> rtt,
+ Handle<HeapObject> suspender);
+ Handle<WasmOnFulfilledData> NewWasmOnFulfilledData(
+ Handle<WasmSuspenderObject> suspender);
Handle<WasmStruct> NewWasmStruct(const wasm::StructType* type,
wasm::WasmValue* args, Handle<Map> map);
- Handle<WasmArray> NewWasmArray(const wasm::ArrayType* type,
- const std::vector<wasm::WasmValue>& elements,
- Handle<Map> map);
+ Handle<WasmArray> NewWasmArrayFromElements(
+ const wasm::ArrayType* type, const std::vector<wasm::WasmValue>& elements,
+ Handle<Map> map);
+ Handle<WasmArray> NewWasmArrayFromMemory(uint32_t length, Handle<Map> map,
+ Address source);
Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmExportedFunction(
Handle<String> name, Handle<WasmExportedFunctionData> data);
Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmJSFunction(
Handle<String> name, Handle<WasmJSFunctionData> data);
+ Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmOnFulfilled(
+ Handle<WasmOnFulfilledData> data);
Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmCapiFunction(
Handle<WasmCapiFunctionData> data);
#endif // V8_ENABLE_WEBASSEMBLY
@@ -1003,15 +1021,17 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
}
// This is the real Isolate that will be used for allocating and accessing
- // external pointer entries when V8_HEAP_SANDBOX is enabled.
- Isolate* isolate_for_heap_sandbox() const {
-#ifdef V8_HEAP_SANDBOX
+ // external pointer entries when V8_SANDBOXED_EXTERNAL_POINTERS is enabled.
+ Isolate* isolate_for_sandbox() const {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
return isolate();
#else
return nullptr;
-#endif // V8_HEAP_SANDBOX
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
}
+ V8_INLINE HeapAllocator* allocator() const;
+
bool CanAllocateInReadOnlySpace();
bool EmptyStringRootIsInitialized();
AllocationType AllocationTypeForInPlaceInternalizableString();
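The factory.h hunk above gives NewPropertyArray an AllocationType parameter that defaults to AllocationType::kYoung. A hypothetical call site, assuming the usual V8-internal context with an Isolate* in scope; the fragment is illustrative only and not part of this diff:

  // Allocate a 3-slot property array directly in old space instead of the
  // default young generation.
  Handle<PropertyArray> properties =
      isolate->factory()->NewPropertyArray(3, AllocationType::kOld);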
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 655930859a..9dbaa9717d 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -58,13 +58,13 @@ double GCTracer::MonotonicallyIncreasingTimeInMs() {
}
}
-CollectionEpoch GCTracer::CurrentEpoch(Scope::ScopeId scope_id) {
- if (Scope::NeedsYoungEpoch(scope_id)) {
- return heap_->epoch_young();
- } else {
- return heap_->epoch_full();
- }
+namespace {
+std::atomic<CollectionEpoch> global_epoch{0};
+
+CollectionEpoch next_epoch() {
+ return global_epoch.fetch_add(1, std::memory_order_relaxed) + 1;
}
+} // namespace
GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope, ThreadKind thread_kind)
: tracer_(tracer), scope_(scope), thread_kind_(thread_kind) {
@@ -72,7 +72,9 @@ GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope, ThreadKind thread_kind)
#ifdef V8_RUNTIME_CALL_STATS
if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
if (thread_kind_ == ThreadKind::kMain) {
- DCHECK_EQ(tracer_->heap_->isolate()->thread_id(), ThreadId::Current());
+#if DEBUG
+ AssertMainThread();
+#endif // DEBUG
runtime_stats_ =
tracer_->heap_->isolate()->counters()->runtime_call_stats();
runtime_stats_->Enter(&timer_, GCTracer::RCSCounterFromScope(scope));
@@ -89,7 +91,10 @@ GCTracer::Scope::~Scope() {
double duration_ms = tracer_->MonotonicallyIncreasingTimeInMs() - start_time_;
if (thread_kind_ == ThreadKind::kMain) {
- DCHECK_EQ(tracer_->heap_->isolate()->thread_id(), ThreadId::Current());
+#if DEBUG
+ AssertMainThread();
+#endif // DEBUG
+
tracer_->AddScopeSample(scope_, duration_ms);
if (scope_ == ScopeId::MC_INCREMENTAL ||
scope_ == ScopeId::MC_INCREMENTAL_START ||
@@ -110,6 +115,19 @@ GCTracer::Scope::~Scope() {
#endif // defined(V8_RUNTIME_CALL_STATS)
}
+#if DEBUG
+void GCTracer::Scope::AssertMainThread() {
+ Isolate* isolate = tracer_->heap_->isolate();
+ Isolate* shared_isolate = isolate->shared_isolate();
+ ThreadId thread_id = ThreadId::Current();
+
+  // Either run on the isolate's main thread or on the current main thread of
+  // the shared isolate during shared GCs.
+ DCHECK(isolate->thread_id() == thread_id ||
+ (shared_isolate && shared_isolate->thread_id() == thread_id));
+}
+#endif // DEBUG
+
const char* GCTracer::Scope::Name(ScopeId id) {
#define CASE(scope) \
case Scope::scope: \
@@ -137,9 +155,11 @@ bool GCTracer::Scope::NeedsYoungEpoch(ScopeId id) {
UNREACHABLE();
}
-GCTracer::Event::Event(Type type, GarbageCollectionReason gc_reason,
+GCTracer::Event::Event(Type type, State state,
+ GarbageCollectionReason gc_reason,
const char* collector_reason)
: type(type),
+ state(state),
gc_reason(gc_reason),
collector_reason(collector_reason),
start_time(0.0),
@@ -175,9 +195,47 @@ const char* GCTracer::Event::TypeName(bool short_name) const {
return "Unknown Event Type";
}
+GCTracer::RecordGCPhasesInfo::RecordGCPhasesInfo(Heap* heap,
+ GarbageCollector collector) {
+ Counters* counters = heap->isolate()->counters();
+ const bool in_background = heap->isolate()->IsIsolateInBackground();
+ if (Heap::IsYoungGenerationCollector(collector)) {
+ mode = Mode::Scavenger;
+ type_timer = type_priority_timer = nullptr;
+ } else {
+ DCHECK_EQ(GarbageCollector::MARK_COMPACTOR, collector);
+ if (heap->incremental_marking()->IsStopped()) {
+ mode = Mode::None;
+ type_timer = counters->gc_compactor();
+ type_priority_timer = in_background ? counters->gc_compactor_background()
+ : counters->gc_compactor_foreground();
+ } else if (heap->ShouldReduceMemory()) {
+ mode = Mode::None;
+ type_timer = counters->gc_finalize_reduce_memory();
+ type_priority_timer =
+ in_background ? counters->gc_finalize_reduce_memory_background()
+ : counters->gc_finalize_reduce_memory_foreground();
+ } else {
+ if (heap->incremental_marking()->IsMarking() &&
+ heap->incremental_marking()
+ ->local_marking_worklists()
+ ->IsPerContextMode()) {
+ mode = Mode::None;
+ type_timer = counters->gc_finalize_measure_memory();
+ } else {
+ mode = Mode::Finalize;
+ type_timer = counters->gc_finalize();
+ }
+ type_priority_timer = in_background ? counters->gc_finalize_background()
+ : counters->gc_finalize_foreground();
+ }
+ }
+}
+
GCTracer::GCTracer(Heap* heap)
: heap_(heap),
- current_(Event::START, GarbageCollectionReason::kUnknown, nullptr),
+ current_(Event::START, Event::State::NOT_RUNNING,
+ GarbageCollectionReason::kUnknown, nullptr),
previous_(current_),
incremental_marking_bytes_(0),
incremental_marking_duration_(0.0),
@@ -210,9 +268,14 @@ GCTracer::GCTracer(Heap* heap)
}
void GCTracer::ResetForTesting() {
- current_ = Event(Event::START, GarbageCollectionReason::kTesting, nullptr);
+ current_ = Event(Event::START, Event::State::NOT_RUNNING,
+ GarbageCollectionReason::kTesting, nullptr);
current_.end_time = MonotonicallyIncreasingTimeInMs();
previous_ = current_;
+ start_of_observable_pause_ = 0.0;
+ notified_sweeping_completed_ = false;
+ notified_cppgc_completed_ = false;
+ young_gc_while_full_gc_ = false;
ResetIncrementalMarkingCounters();
allocation_time_ms_ = 0.0;
new_space_allocation_counter_bytes_ = 0.0;
@@ -243,66 +306,101 @@ void GCTracer::ResetForTesting() {
void GCTracer::NotifyYoungGenerationHandling(
YoungGenerationHandling young_generation_handling) {
- DCHECK(current_.type == Event::SCAVENGER || start_counter_ > 1);
+ DCHECK_GE(1, start_counter_);
+ DCHECK_EQ(Event::SCAVENGER, current_.type);
heap_->isolate()->counters()->young_generation_handling()->AddSample(
static_cast<int>(young_generation_handling));
}
-void GCTracer::Start(GarbageCollector collector,
- GarbageCollectionReason gc_reason,
- const char* collector_reason) {
+void GCTracer::StartObservablePause() {
+ DCHECK_EQ(0, start_counter_);
start_counter_++;
- if (start_counter_ != 1) return;
- previous_ = current_;
+ DCHECK(!IsInObservablePause());
+ start_of_observable_pause_ = MonotonicallyIncreasingTimeInMs();
+}
+
+void GCTracer::UpdateCurrentEvent(GarbageCollectionReason gc_reason,
+ const char* collector_reason) {
+ // For incremental marking, the event has already been created and we just
+ // need to update a few fields.
+ DCHECK_EQ(Event::INCREMENTAL_MARK_COMPACTOR, current_.type);
+ DCHECK_EQ(Event::State::ATOMIC, current_.state);
+ DCHECK(IsInObservablePause());
+ current_.gc_reason = gc_reason;
+ current_.collector_reason = collector_reason;
+  // TODO(chromium:1154636): The start_time of the current event currently
+  // contains the start time of the observable pause. This should be
+  // reconsidered.
+ current_.start_time = start_of_observable_pause_;
+ current_.reduce_memory = heap_->ShouldReduceMemory();
+}
+
+void GCTracer::StartCycle(GarbageCollector collector,
+ GarbageCollectionReason gc_reason,
+ const char* collector_reason, MarkingType marking) {
+ // We cannot start a new cycle while there's another one in its atomic pause.
+ DCHECK_NE(Event::State::ATOMIC, current_.state);
+ // We cannot start a new cycle while a young generation GC cycle has
+ // already interrupted a full GC cycle.
+ DCHECK(!young_gc_while_full_gc_);
+ young_gc_while_full_gc_ = current_.state != Event::State::NOT_RUNNING;
+
+ DCHECK_IMPLIES(young_gc_while_full_gc_,
+ Heap::IsYoungGenerationCollector(collector) &&
+ !Event::IsYoungGenerationEvent(current_.type));
+
+ Event::Type type;
switch (collector) {
case GarbageCollector::SCAVENGER:
- current_ = Event(Event::SCAVENGER, gc_reason, collector_reason);
+ type = Event::SCAVENGER;
break;
case GarbageCollector::MINOR_MARK_COMPACTOR:
- current_ =
- Event(Event::MINOR_MARK_COMPACTOR, gc_reason, collector_reason);
+ type = Event::MINOR_MARK_COMPACTOR;
break;
case GarbageCollector::MARK_COMPACTOR:
- if (heap_->incremental_marking()->WasActivated()) {
- current_ = Event(Event::INCREMENTAL_MARK_COMPACTOR, gc_reason,
- collector_reason);
- } else {
- current_ = Event(Event::MARK_COMPACTOR, gc_reason, collector_reason);
- }
+ type = marking == MarkingType::kIncremental
+ ? Event::INCREMENTAL_MARK_COMPACTOR
+ : Event::MARK_COMPACTOR;
break;
}
- current_.reduce_memory = heap_->ShouldReduceMemory();
- current_.start_time = MonotonicallyIncreasingTimeInMs();
- current_.start_object_size = 0;
- current_.start_memory_size = 0;
- current_.start_holes_size = 0;
- current_.young_object_size = 0;
-
- current_.incremental_marking_bytes = 0;
- current_.incremental_marking_duration = 0;
+ DCHECK_IMPLIES(!young_gc_while_full_gc_,
+ current_.state == Event::State::NOT_RUNNING);
+ DCHECK_EQ(Event::State::NOT_RUNNING, previous_.state);
- for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
- current_.scopes[i] = 0;
+ previous_ = current_;
+ current_ = Event(type, Event::State::MARKING, gc_reason, collector_reason);
+
+ switch (marking) {
+ case MarkingType::kAtomic:
+ DCHECK(IsInObservablePause());
+      // TODO(chromium:1154636): The start_time of the current event currently
+      // contains the start time of the observable pause. This should be
+      // reconsidered.
+ current_.start_time = start_of_observable_pause_;
+ current_.reduce_memory = heap_->ShouldReduceMemory();
+ break;
+ case MarkingType::kIncremental:
+ // The current event will be updated later.
+ DCHECK(!Heap::IsYoungGenerationCollector(collector));
+ DCHECK(!IsInObservablePause());
+ break;
}
- Counters* counters = heap_->isolate()->counters();
-
if (Heap::IsYoungGenerationCollector(collector)) {
- counters->scavenge_reason()->AddSample(static_cast<int>(gc_reason));
+ epoch_young_ = next_epoch();
} else {
- counters->mark_compact_reason()->AddSample(static_cast<int>(gc_reason));
-
- if (FLAG_trace_gc_freelists) {
- PrintIsolate(heap_->isolate(),
- "FreeLists statistics before collection:\n");
- heap_->PrintFreeListsStats();
- }
+ epoch_full_ = next_epoch();
}
}
+void GCTracer::StartAtomicPause() {
+ DCHECK_EQ(Event::State::MARKING, current_.state);
+ current_.state = Event::State::ATOMIC;
+}
+
void GCTracer::StartInSafepoint() {
SampleAllocation(current_.start_time, heap_->NewSpaceAllocationCounter(),
heap_->OldGenerationAllocationCounter(),
@@ -332,27 +430,22 @@ void GCTracer::StopInSafepoint() {
current_.survived_young_object_size = heap_->SurvivedYoungObjectSize();
}
-void GCTracer::Stop(GarbageCollector collector) {
+void GCTracer::StopObservablePause() {
start_counter_--;
- if (start_counter_ != 0) {
- if (FLAG_trace_gc_verbose) {
- heap_->isolate()->PrintWithTimestamp(
- "[Finished reentrant %s during %s.]\n",
- Heap::CollectorName(collector), current_.TypeName(false));
- }
- return;
- }
+ DCHECK_EQ(0, start_counter_);
- DCHECK_LE(0, start_counter_);
- DCHECK((collector == GarbageCollector::SCAVENGER &&
- current_.type == Event::SCAVENGER) ||
- (collector == GarbageCollector::MINOR_MARK_COMPACTOR &&
- current_.type == Event::MINOR_MARK_COMPACTOR) ||
- (collector == GarbageCollector::MARK_COMPACTOR &&
- (current_.type == Event::MARK_COMPACTOR ||
- current_.type == Event::INCREMENTAL_MARK_COMPACTOR)));
+ DCHECK(IsInObservablePause());
+ start_of_observable_pause_ = 0.0;
+  // TODO(chromium:1154636): The end_time of the current event currently
+  // contains the end time of the observable pause. This should be
+  // reconsidered.
current_.end_time = MonotonicallyIncreasingTimeInMs();
+}
+
+void GCTracer::UpdateStatistics(GarbageCollector collector) {
+ const bool is_young = Heap::IsYoungGenerationCollector(collector);
+ DCHECK(IsConsistentWithCollector(collector));
AddAllocation(current_.end_time);
@@ -361,61 +454,44 @@ void GCTracer::Stop(GarbageCollector collector) {
static_cast<int64_t>(duration * base::Time::kMicrosecondsPerMillisecond);
auto* long_task_stats = heap_->isolate()->GetCurrentLongTaskStats();
- switch (current_.type) {
- case Event::SCAVENGER:
- case Event::MINOR_MARK_COMPACTOR:
- recorded_minor_gcs_total_.Push(
- MakeBytesAndDuration(current_.young_object_size, duration));
- recorded_minor_gcs_survived_.Push(
- MakeBytesAndDuration(current_.survived_young_object_size, duration));
- FetchBackgroundMinorGCCounters();
- long_task_stats->gc_young_wall_clock_duration_us += duration_us;
- break;
- case Event::INCREMENTAL_MARK_COMPACTOR:
+ if (is_young) {
+ recorded_minor_gcs_total_.Push(
+ MakeBytesAndDuration(current_.young_object_size, duration));
+ recorded_minor_gcs_survived_.Push(
+ MakeBytesAndDuration(current_.survived_young_object_size, duration));
+ FetchBackgroundMinorGCCounters();
+ long_task_stats->gc_young_wall_clock_duration_us += duration_us;
+ } else {
+ if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
current_.incremental_marking_bytes = incremental_marking_bytes_;
current_.incremental_marking_duration = incremental_marking_duration_;
for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
current_.incremental_marking_scopes[i] = incremental_marking_scopes_[i];
current_.scopes[i] = incremental_marking_scopes_[i].duration;
}
-
- RecordMutatorUtilization(
- current_.end_time, duration + current_.incremental_marking_duration);
RecordIncrementalMarkingSpeed(current_.incremental_marking_bytes,
current_.incremental_marking_duration);
recorded_incremental_mark_compacts_.Push(
MakeBytesAndDuration(current_.end_object_size, duration));
- RecordGCSumCounters(duration);
- ResetIncrementalMarkingCounters();
- combined_mark_compact_speed_cache_ = 0.0;
- FetchBackgroundMarkCompactCounters();
- long_task_stats->gc_full_atomic_wall_clock_duration_us += duration_us;
- break;
- case Event::MARK_COMPACTOR:
+ } else {
DCHECK_EQ(0u, current_.incremental_marking_bytes);
DCHECK_EQ(0, current_.incremental_marking_duration);
- RecordMutatorUtilization(
- current_.end_time, duration + current_.incremental_marking_duration);
recorded_mark_compacts_.Push(
MakeBytesAndDuration(current_.end_object_size, duration));
- RecordGCSumCounters(duration);
- ResetIncrementalMarkingCounters();
- combined_mark_compact_speed_cache_ = 0.0;
- FetchBackgroundMarkCompactCounters();
- long_task_stats->gc_full_atomic_wall_clock_duration_us += duration_us;
- break;
- case Event::START:
- UNREACHABLE();
+ }
+ RecordMutatorUtilization(current_.end_time,
+ duration + current_.incremental_marking_duration);
+ RecordGCSumCounters();
+ ResetIncrementalMarkingCounters();
+ combined_mark_compact_speed_cache_ = 0.0;
+ FetchBackgroundMarkCompactCounters();
+ long_task_stats->gc_full_atomic_wall_clock_duration_us += duration_us;
}
FetchBackgroundGeneralCounters();
heap_->UpdateTotalGCTime(duration);
- if (current_.type == Event::SCAVENGER ||
- current_.type == Event::MINOR_MARK_COMPACTOR) {
- ReportYoungCycleToRecorder();
- if (FLAG_trace_gc_ignore_scavenger) return;
- }
+ if (FLAG_trace_gc_ignore_scavenger && is_young) return;
if (FLAG_trace_gc_nvp) {
PrintNVP();
@@ -438,7 +514,65 @@ void GCTracer::Stop(GarbageCollector collector) {
}
}
+void GCTracer::StopAtomicPause() {
+ DCHECK_EQ(Event::State::ATOMIC, current_.state);
+ current_.state = Event::State::SWEEPING;
+}
+
+void GCTracer::StopCycle(GarbageCollector collector) {
+ DCHECK_EQ(Event::State::SWEEPING, current_.state);
+ current_.state = Event::State::NOT_RUNNING;
+
+ DCHECK(IsConsistentWithCollector(collector));
+
+ Counters* counters = heap_->isolate()->counters();
+ GarbageCollectionReason gc_reason = current_.gc_reason;
+
+ if (Heap::IsYoungGenerationCollector(collector)) {
+ ReportYoungCycleToRecorder();
+
+ // If a young generation GC interrupted an unfinished full GC cycle, restore
+ // the event corresponding to the full GC cycle.
+ if (young_gc_while_full_gc_) {
+ std::swap(current_, previous_);
+ young_gc_while_full_gc_ = false;
+ }
+ } else {
+ ReportFullCycleToRecorder();
+
+ counters->mark_compact_reason()->AddSample(static_cast<int>(gc_reason));
+
+ if (FLAG_trace_gc_freelists) {
+ PrintIsolate(heap_->isolate(),
+ "FreeLists statistics before collection:\n");
+ heap_->PrintFreeListsStats();
+ }
+ }
+}
+
+void GCTracer::StopCycleIfNeeded() {
+ if (current_.state != Event::State::SWEEPING) return;
+ if (!notified_sweeping_completed_) return;
+ if (heap_->cpp_heap() && !notified_cppgc_completed_) return;
+ StopCycle(GarbageCollector::MARK_COMPACTOR);
+ notified_sweeping_completed_ = false;
+ notified_cppgc_completed_ = false;
+}
+
void GCTracer::NotifySweepingCompleted() {
+#ifdef VERIFY_HEAP
+ // If heap verification is enabled, sweeping finalization can also be
+ // triggered from inside a full GC cycle's atomic pause.
+ DCHECK((current_.type == Event::MARK_COMPACTOR ||
+ current_.type == Event::INCREMENTAL_MARK_COMPACTOR) &&
+ (current_.state == Event::State::SWEEPING ||
+ (FLAG_verify_heap && current_.state == Event::State::ATOMIC)));
+#else
+ DCHECK(IsSweepingInProgress());
+#endif
+
+ // Stop a full GC cycle only when both v8 and cppgc (if available) GCs have
+ // finished sweeping. This method is invoked by v8.
if (FLAG_trace_gc_freelists) {
PrintIsolate(heap_->isolate(),
"FreeLists statistics after sweeping completed:\n");
@@ -450,8 +584,21 @@ void GCTracer::NotifySweepingCompleted() {
heap_->code_space()->PrintAllocationsOrigins();
heap_->map_space()->PrintAllocationsOrigins();
}
- metrics_report_pending_ = true;
- NotifyGCCompleted();
+ DCHECK(!notified_sweeping_completed_);
+ notified_sweeping_completed_ = true;
+ StopCycleIfNeeded();
+}
+
+void GCTracer::NotifyCppGCCompleted() {
+ // Stop a full GC cycle only when both v8 and cppgc (if available) GCs have
+ // finished sweeping. This method is invoked by cppgc.
+ DCHECK(heap_->cpp_heap());
+ DCHECK(CppHeap::From(heap_->cpp_heap())
+ ->GetMetricRecorder()
+ ->MetricsReportPending());
+ DCHECK(!notified_cppgc_completed_);
+ notified_cppgc_completed_ = true;
+ StopCycleIfNeeded();
}
void GCTracer::SampleAllocation(double current_ms,
@@ -510,7 +657,6 @@ void GCTracer::AddCompactionEvent(double duration,
MakeBytesAndDuration(live_bytes_compacted, duration));
}
-
void GCTracer::AddSurvivalRatio(double promotion_ratio) {
recorded_survival_ratios_.Push(promotion_ratio);
}
@@ -564,7 +710,7 @@ void GCTracer::Print() const {
"[%d:%p] "
"%8.0f ms: "
"%s%s%s %.1f (%.1f) -> %.1f (%.1f) MB, "
- "%.1f / %.1f ms %s (average mu = %.3f, current mu = %.3f) %s %s\n",
+ "%.1f / %.1f ms %s (average mu = %.3f, current mu = %.3f) %s; %s\n",
base::OS::GetCurrentProcessId(),
reinterpret_cast<void*>(heap_->isolate()),
heap_->isolate()->time_millis_since_init(),
@@ -581,7 +727,6 @@ void GCTracer::Print() const {
current_.collector_reason != nullptr ? current_.collector_reason : "");
}
-
void GCTracer::PrintNVP() const {
double duration = current_.end_time - current_.start_time;
double spent_in_mutator = current_.start_time - previous_.end_time;
@@ -1172,9 +1317,6 @@ void GCTracer::FetchBackgroundMarkCompactCounters() {
void GCTracer::FetchBackgroundMinorGCCounters() {
FetchBackgroundCounters(Scope::FIRST_MINOR_GC_BACKGROUND_SCOPE,
Scope::LAST_MINOR_GC_BACKGROUND_SCOPE);
- heap_->isolate()->counters()->background_scavenger()->AddSample(
- static_cast<int>(
- current_.scopes[Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL]));
}
void GCTracer::FetchBackgroundGeneralCounters() {
@@ -1196,9 +1338,9 @@ void GCTracer::AddScopeSampleBackground(Scope::ScopeId scope, double duration) {
counter.total_duration_ms += duration;
}
-void GCTracer::RecordGCPhasesHistograms(TimedHistogram* gc_timer) {
+void GCTracer::RecordGCPhasesHistograms(RecordGCPhasesInfo::Mode mode) {
Counters* counters = heap_->isolate()->counters();
- if (gc_timer == counters->gc_finalize()) {
+ if (mode == RecordGCPhasesInfo::Mode::Finalize) {
DCHECK_EQ(Scope::FIRST_TOP_MC_SCOPE, Scope::MC_CLEAR);
counters->gc_finalize_clear()->AddSample(
static_cast<int>(current_.scopes[Scope::MC_CLEAR]));
@@ -1247,7 +1389,7 @@ void GCTracer::RecordGCPhasesHistograms(TimedHistogram* gc_timer) {
}
DCHECK_EQ(Scope::LAST_TOP_MC_SCOPE, Scope::MC_SWEEP);
- } else if (gc_timer == counters->gc_scavenger()) {
+ } else if (mode == RecordGCPhasesInfo::Mode::Scavenger) {
counters->gc_scavenger_scavenge_main()->AddSample(
static_cast<int>(current_.scopes[Scope::SCAVENGER_SCAVENGE_PARALLEL]));
counters->gc_scavenger_scavenge_roots()->AddSample(
@@ -1255,20 +1397,23 @@ void GCTracer::RecordGCPhasesHistograms(TimedHistogram* gc_timer) {
}
}
-void GCTracer::RecordGCSumCounters(double atomic_pause_duration) {
+void GCTracer::RecordGCSumCounters() {
base::MutexGuard guard(&background_counter_mutex_);
- const double overall_duration =
+ const double atomic_pause_duration = current_.scopes[Scope::MARK_COMPACTOR];
+ const double incremental_marking =
current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
.duration +
current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
.duration +
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_SWEEPING]
- .duration +
incremental_marking_duration_ +
current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE]
- .duration +
- atomic_pause_duration;
+ .duration;
+ const double incremental_sweeping =
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_SWEEPING]
+ .duration;
+ const double overall_duration =
+ atomic_pause_duration + incremental_marking + incremental_sweeping;
const double background_duration =
background_counter_[Scope::MC_BACKGROUND_EVACUATE_COPY]
.total_duration_ms +
@@ -1276,23 +1421,12 @@ void GCTracer::RecordGCSumCounters(double atomic_pause_duration) {
.total_duration_ms +
background_counter_[Scope::MC_BACKGROUND_MARKING].total_duration_ms +
background_counter_[Scope::MC_BACKGROUND_SWEEPING].total_duration_ms;
-
- const double marking_duration =
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
- .duration +
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
- .duration +
- incremental_marking_duration_ +
- current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE]
- .duration +
- current_.scopes[Scope::MC_MARK];
+ const double atomic_marking_duration =
+ current_.scopes[Scope::MC_PROLOGUE] + current_.scopes[Scope::MC_MARK];
+ const double marking_duration = atomic_marking_duration + incremental_marking;
const double marking_background_duration =
background_counter_[Scope::MC_BACKGROUND_MARKING].total_duration_ms;
- // UMA.
- heap_->isolate()->counters()->gc_mark_compactor()->AddSample(
- static_cast<int>(overall_duration));
-
// Emit trace event counters.
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"V8.GCMarkCompactorSummary", TRACE_EVENT_SCOPE_THREAD,
@@ -1304,22 +1438,6 @@ void GCTracer::RecordGCSumCounters(double atomic_pause_duration) {
"background_duration", marking_background_duration);
}
-void GCTracer::NotifyGCCompleted() {
- // Report full GC cycle metric to recorder only when both v8 and cppgc (if
- // available) GCs have finished. This method is invoked by both v8 and cppgc.
- if (!metrics_report_pending_) {
- // V8 sweeping is not done yet.
- return;
- }
- const auto* cpp_heap = heap_->cpp_heap();
- if (cpp_heap &&
- !CppHeap::From(cpp_heap)->GetMetricRecorder()->MetricsReportPending()) {
- // Cppgc sweeping is not done yet.
- return;
- }
- ReportFullCycleToRecorder();
-}
-
namespace {
void CopyTimeMetrics(
@@ -1330,6 +1448,9 @@ void CopyTimeMetrics(
metrics.mark_wall_clock_duration_in_us = cppgc_metrics.mark_duration_us;
DCHECK_NE(-1, cppgc_metrics.sweep_duration_us);
metrics.sweep_wall_clock_duration_in_us = cppgc_metrics.sweep_duration_us;
+ metrics.total_wall_clock_duration_in_us =
+ metrics.mark_wall_clock_duration_in_us +
+ metrics.sweep_wall_clock_duration_in_us;
}
void CopyTimeMetrics(
@@ -1343,6 +1464,11 @@ void CopyTimeMetrics(
metrics.sweep_wall_clock_duration_in_us = cppgc_metrics.sweep_duration_us;
DCHECK_NE(-1, cppgc_metrics.weak_duration_us);
metrics.weak_wall_clock_duration_in_us = cppgc_metrics.weak_duration_us;
+ metrics.total_wall_clock_duration_in_us =
+ metrics.compact_wall_clock_duration_in_us +
+ metrics.mark_wall_clock_duration_in_us +
+ metrics.sweep_wall_clock_duration_in_us +
+ metrics.weak_wall_clock_duration_in_us;
}
void CopySizeMetrics(
@@ -1373,27 +1499,43 @@ void FlushBatchedIncrementalEvents(
DCHECK(!batched_events.events.empty());
isolate->metrics_recorder()->AddMainThreadEvent(std::move(batched_events),
GetContextId(isolate));
+ batched_events = {};
}
} // namespace
void GCTracer::ReportFullCycleToRecorder() {
+ DCHECK(!Event::IsYoungGenerationEvent(current_.type));
+ DCHECK_EQ(Event::State::NOT_RUNNING, current_.state);
+ auto* cpp_heap = v8::internal::CppHeap::From(heap_->cpp_heap());
+ DCHECK_IMPLIES(cpp_heap,
+ cpp_heap->GetMetricRecorder()->MetricsReportPending());
const std::shared_ptr<metrics::Recorder>& recorder =
heap_->isolate()->metrics_recorder();
DCHECK_NOT_NULL(recorder);
- if (!recorder->HasEmbedderRecorder()) return;
+ if (!recorder->HasEmbedderRecorder()) {
+ incremental_mark_batched_events_ = {};
+ if (cpp_heap) {
+ cpp_heap->GetMetricRecorder()->ClearCachedEvents();
+ }
+ return;
+ }
if (!incremental_mark_batched_events_.events.empty()) {
FlushBatchedIncrementalEvents(incremental_mark_batched_events_,
heap_->isolate());
}
+
v8::metrics::GarbageCollectionFullCycle event;
- if (heap_->cpp_heap()) {
- auto* cpp_heap = v8::internal::CppHeap::From(heap_->cpp_heap());
+ event.reason = static_cast<int>(current_.gc_reason);
+
+ // Managed C++ heap statistics:
+ if (cpp_heap) {
cpp_heap->GetMetricRecorder()->FlushBatchedIncrementalEvents();
const base::Optional<cppgc::internal::MetricRecorder::FullCycle>
optional_cppgc_event =
cpp_heap->GetMetricRecorder()->ExtractLastFullGcEvent();
DCHECK(optional_cppgc_event.has_value());
+ DCHECK(!cpp_heap->GetMetricRecorder()->MetricsReportPending());
const cppgc::internal::MetricRecorder::FullCycle& cppgc_event =
optional_cppgc_event.value();
CopyTimeMetrics(event.total_cpp, cppgc_event.total);
@@ -1414,12 +1556,92 @@ void GCTracer::ReportFullCycleToRecorder() {
event.main_thread_efficiency_cpp_in_bytes_per_us =
cppgc_event.main_thread_efficiency_in_bytes_per_us;
}
- // TODO(chromium:1154636): Populate v8 metrics.
+
+ // Unified heap statistics:
+ const double atomic_pause_duration = current_.scopes[Scope::MARK_COMPACTOR];
+ const double incremental_marking =
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_LAYOUT_CHANGE]
+ .duration +
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
+ .duration +
+ current_.incremental_marking_duration +
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE]
+ .duration;
+ const double incremental_sweeping =
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_SWEEPING]
+ .duration;
+ const double overall_duration =
+ atomic_pause_duration + incremental_marking + incremental_sweeping;
+ const double marking_background_duration =
+ current_.scopes[Scope::MC_BACKGROUND_MARKING];
+ const double sweeping_background_duration =
+ current_.scopes[Scope::MC_BACKGROUND_SWEEPING];
+ const double compact_background_duration =
+ current_.scopes[Scope::MC_BACKGROUND_EVACUATE_COPY] +
+ current_.scopes[Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS];
+ const double background_duration = marking_background_duration +
+ sweeping_background_duration +
+ compact_background_duration;
+ const double atomic_marking_duration =
+ current_.scopes[Scope::MC_PROLOGUE] + current_.scopes[Scope::MC_MARK];
+ const double marking_duration = atomic_marking_duration + incremental_marking;
+ const double weak_duration = current_.scopes[Scope::MC_CLEAR];
+ const double compact_duration = current_.scopes[Scope::MC_EVACUATE] +
+ current_.scopes[Scope::MC_FINISH] +
+ current_.scopes[Scope::MC_EPILOGUE];
+ const double atomic_sweeping_duration = current_.scopes[Scope::MC_SWEEP];
+ const double sweeping_duration =
+ atomic_sweeping_duration + incremental_sweeping;
+
+ event.main_thread_atomic.total_wall_clock_duration_in_us =
+ static_cast<int64_t>(atomic_pause_duration *
+ base::Time::kMicrosecondsPerMillisecond);
+ event.main_thread.total_wall_clock_duration_in_us = static_cast<int64_t>(
+ overall_duration * base::Time::kMicrosecondsPerMillisecond);
+ event.total.total_wall_clock_duration_in_us =
+ static_cast<int64_t>((overall_duration + background_duration) *
+ base::Time::kMicrosecondsPerMillisecond);
+ event.main_thread_atomic.mark_wall_clock_duration_in_us =
+ static_cast<int64_t>(atomic_marking_duration *
+ base::Time::kMicrosecondsPerMillisecond);
+ event.main_thread.mark_wall_clock_duration_in_us = static_cast<int64_t>(
+ marking_duration * base::Time::kMicrosecondsPerMillisecond);
+ event.total.mark_wall_clock_duration_in_us =
+ static_cast<int64_t>((marking_duration + marking_background_duration) *
+ base::Time::kMicrosecondsPerMillisecond);
+ event.main_thread_atomic.weak_wall_clock_duration_in_us =
+ event.main_thread.weak_wall_clock_duration_in_us =
+ event.total.weak_wall_clock_duration_in_us = static_cast<int64_t>(
+ weak_duration * base::Time::kMicrosecondsPerMillisecond);
+ event.main_thread_atomic.compact_wall_clock_duration_in_us =
+ event.main_thread.compact_wall_clock_duration_in_us =
+ static_cast<int64_t>(compact_duration *
+ base::Time::kMicrosecondsPerMillisecond);
+ event.total.compact_wall_clock_duration_in_us =
+ static_cast<int64_t>((compact_duration + compact_background_duration) *
+ base::Time::kMicrosecondsPerMillisecond);
+ event.main_thread_atomic.sweep_wall_clock_duration_in_us =
+ static_cast<int64_t>(atomic_sweeping_duration *
+ base::Time::kMicrosecondsPerMillisecond);
+ event.main_thread.sweep_wall_clock_duration_in_us = static_cast<int64_t>(
+ sweeping_duration * base::Time::kMicrosecondsPerMillisecond);
+ event.total.sweep_wall_clock_duration_in_us =
+ static_cast<int64_t>((sweeping_duration + sweeping_background_duration) *
+ base::Time::kMicrosecondsPerMillisecond);
+
+ // TODO(chromium:1154636): Populate the following:
+ // - event.main_thread_incremental
+ // - event.objects
+ // - event.memory
+ // - event.collection_rate_in_percent
+ // - event.efficiency_in_bytes_per_us
+ // - event.main_thread_efficiency_in_bytes_per_us
+
recorder->AddMainThreadEvent(event, GetContextId(heap_->isolate()));
- metrics_report_pending_ = false;
}
void GCTracer::ReportIncrementalMarkingStepToRecorder() {
+ DCHECK_EQ(Event::Type::INCREMENTAL_MARK_COMPACTOR, current_.type);
static constexpr int kMaxBatchedEvents =
CppHeap::MetricRecorderAdapter::kMaxBatchedEvents;
const std::shared_ptr<metrics::Recorder>& recorder =
@@ -1447,21 +1669,33 @@ void GCTracer::ReportIncrementalMarkingStepToRecorder() {
}
void GCTracer::ReportYoungCycleToRecorder() {
+ DCHECK(Event::IsYoungGenerationEvent(current_.type));
+ DCHECK_EQ(Event::State::NOT_RUNNING, current_.state);
const std::shared_ptr<metrics::Recorder>& recorder =
heap_->isolate()->metrics_recorder();
DCHECK_NOT_NULL(recorder);
if (!recorder->HasEmbedderRecorder()) return;
v8::metrics::GarbageCollectionYoungCycle event;
+ // Reason:
+ event.reason = static_cast<int>(current_.gc_reason);
// Total:
const double total_wall_clock_duration_in_us =
(current_.scopes[Scope::SCAVENGER] +
- current_.scopes[Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL]) *
+ current_.scopes[Scope::MINOR_MARK_COMPACTOR] +
+ current_.scopes[Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL] +
+ current_.scopes[Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY] +
+ current_.scopes[Scope::MINOR_MC_BACKGROUND_MARKING] +
+ current_.scopes[Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS]) *
base::Time::kMicrosecondsPerMillisecond;
+ // TODO(chromium:1154636): Consider adding BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP
+ // (both for the case of the scavenger and the minor mark-compactor), and
+ // BACKGROUND_UNMAPPER (for the case of the minor mark-compactor).
event.total_wall_clock_duration_in_us =
static_cast<int64_t>(total_wall_clock_duration_in_us);
// MainThread:
const double main_thread_wall_clock_duration_in_us =
- current_.scopes[Scope::SCAVENGER] *
+ (current_.scopes[Scope::SCAVENGER] +
+ current_.scopes[Scope::MINOR_MARK_COMPACTOR]) *
base::Time::kMicrosecondsPerMillisecond;
event.main_thread_wall_clock_duration_in_us =
static_cast<int64_t>(main_thread_wall_clock_duration_in_us);
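The epoch bookkeeping introduced near the top of this file (global_epoch/next_epoch) amounts to a process-wide relaxed atomic counter that hands out strictly increasing ids. A standalone reduction of that scheme, compilable on its own; global_epoch and next_epoch mirror the hunk, while main() is added here purely for illustration:

#include <atomic>
#include <cstdint>
#include <iostream>

using CollectionEpoch = uint32_t;

namespace {
std::atomic<CollectionEpoch> global_epoch{0};

CollectionEpoch next_epoch() {
  // fetch_add returns the value before the increment, so add 1 to obtain the
  // freshly assigned epoch. Relaxed ordering suffices because the epoch is
  // only used as an identifier, not for synchronization.
  return global_epoch.fetch_add(1, std::memory_order_relaxed) + 1;
}
}  // namespace

int main() {
  std::cout << next_epoch() << " " << next_epoch() << " " << next_epoch()
            << "\n";  // prints "1 2 3"
}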
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 2c9b7b01ec..c008b1da06 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -51,6 +51,8 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
GCTracer::Scope::Name(GCTracer::Scope::ScopeId(scope_id)), \
"epoch", tracer->CurrentEpoch(scope_id))
+using CollectionEpoch = uint32_t;
+
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
class V8_EXPORT_PRIVATE GCTracer {
@@ -112,6 +114,10 @@ class V8_EXPORT_PRIVATE GCTracer {
static bool NeedsYoungEpoch(ScopeId id);
private:
+#if DEBUG
+ void AssertMainThread();
+#endif // DEBUG
+
GCTracer* tracer_;
ScopeId scope_;
ThreadKind thread_kind_;
@@ -133,15 +139,33 @@ class V8_EXPORT_PRIVATE GCTracer {
START = 4
};
- Event(Type type, GarbageCollectionReason gc_reason,
+#ifdef DEBUG
+ // Returns true if the event corresponds to a young generation GC.
+ static constexpr bool IsYoungGenerationEvent(Type type) {
+ DCHECK_NE(START, type);
+ return type == SCAVENGER || type == MINOR_MARK_COMPACTOR;
+ }
+#endif
+
+ // The state diagram for a GC cycle:
+ // (NOT_RUNNING) -----(StartCycle)----->
+ // MARKING --(StartAtomicPause)-->
+ // ATOMIC ---(StopAtomicPause)-->
+ // SWEEPING ------(StopCycle)-----> NOT_RUNNING
+ enum class State { NOT_RUNNING, MARKING, ATOMIC, SWEEPING };
+
+ Event(Type type, State state, GarbageCollectionReason gc_reason,
const char* collector_reason);
// Returns a string describing the event type.
const char* TypeName(bool short_name) const;
- // Type of event
+ // Type of the event.
Type type;
+ // State of the cycle corresponding to the event.
+ State state;
+
GarbageCollectionReason gc_reason;
const char* collector_reason;
@@ -195,6 +219,24 @@ class V8_EXPORT_PRIVATE GCTracer {
incremental_marking_scopes[Scope::NUMBER_OF_INCREMENTAL_SCOPES];
};
+ class RecordGCPhasesInfo {
+ public:
+ RecordGCPhasesInfo(Heap* heap, GarbageCollector collector);
+
+ enum class Mode { None, Scavenger, Finalize };
+
+ Mode mode;
+
+ // The timer used for a given GC type:
+ // - GCScavenger: young generation GC
+ // - GCCompactor: full GC
+ // - GCFinalizeMC: finalization of incremental full GC
+ // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
+ // memory reduction.
+ TimedHistogram* type_timer;
+ TimedHistogram* type_priority_timer;
+ };
+
static const int kThroughputTimeFrameMs = 5000;
static constexpr double kConservativeSpeedInBytesPerMillisecond = 128 * KB;
@@ -207,22 +249,64 @@ class V8_EXPORT_PRIVATE GCTracer {
explicit GCTracer(Heap* heap);
- // Start collecting data.
- void Start(GarbageCollector collector, GarbageCollectionReason gc_reason,
- const char* collector_reason);
- void StartInSafepoint();
+ CollectionEpoch CurrentEpoch(Scope::ScopeId id) const {
+ return Scope::NeedsYoungEpoch(id) ? epoch_young_ : epoch_full_;
+ }
+
+ // Start and stop an observable pause.
+ void StartObservablePause();
+ void StopObservablePause();
+
+ // Update the current event if it precedes the start of the observable pause.
+ void UpdateCurrentEvent(GarbageCollectionReason gc_reason,
+ const char* collector_reason);
- // Stop collecting data and print results.
- void Stop(GarbageCollector collector);
+ void UpdateStatistics(GarbageCollector collector);
+
+ enum class MarkingType { kAtomic, kIncremental };
+
+ // Start and stop a GC cycle (collecting data and reporting results).
+ void StartCycle(GarbageCollector collector, GarbageCollectionReason gc_reason,
+ const char* collector_reason, MarkingType marking);
+ void StopCycle(GarbageCollector collector);
+ void StopCycleIfNeeded();
+
+ // Start and stop a cycle's atomic pause.
+ void StartAtomicPause();
+ void StopAtomicPause();
+
+ void StartInSafepoint();
void StopInSafepoint();
void NotifySweepingCompleted();
-
- void NotifyGCCompleted();
+ void NotifyCppGCCompleted();
void NotifyYoungGenerationHandling(
YoungGenerationHandling young_generation_handling);
+#ifdef DEBUG
+ bool IsInObservablePause() const { return 0.0 < start_of_observable_pause_; }
+
+ // Checks if the current event is consistent with a collector.
+ bool IsConsistentWithCollector(GarbageCollector collector) const {
+ return (collector == GarbageCollector::SCAVENGER &&
+ current_.type == Event::SCAVENGER) ||
+ (collector == GarbageCollector::MINOR_MARK_COMPACTOR &&
+ current_.type == Event::MINOR_MARK_COMPACTOR) ||
+ (collector == GarbageCollector::MARK_COMPACTOR &&
+ (current_.type == Event::MARK_COMPACTOR ||
+ current_.type == Event::INCREMENTAL_MARK_COMPACTOR));
+ }
+
+ // Checks if the current event corresponds to a full GC cycle whose sweeping
+ // has not finalized yet.
+ bool IsSweepingInProgress() const {
+ return (current_.type == Event::MARK_COMPACTOR ||
+ current_.type == Event::INCREMENTAL_MARK_COMPACTOR) &&
+ current_.state == Event::State::SWEEPING;
+ }
+#endif
+
// Sample and accumulate bytes allocated since the last GC.
void SampleAllocation(double current_ms, size_t new_space_counter_bytes,
size_t old_generation_counter_bytes,
@@ -336,7 +420,7 @@ class V8_EXPORT_PRIVATE GCTracer {
void AddScopeSampleBackground(Scope::ScopeId scope, double duration);
- void RecordGCPhasesHistograms(TimedHistogram* gc_timer);
+ void RecordGCPhasesHistograms(RecordGCPhasesInfo::Mode mode);
void RecordEmbedderSpeed(size_t bytes, double duration);
@@ -349,8 +433,6 @@ class V8_EXPORT_PRIVATE GCTracer {
WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats();
#endif // defined(V8_RUNTIME_CALL_STATS)
- CollectionEpoch CurrentEpoch(Scope::ScopeId id);
-
private:
FRIEND_TEST(GCTracer, AverageSpeed);
FRIEND_TEST(GCTracerTest, AllocationThroughput);
@@ -367,7 +449,6 @@ class V8_EXPORT_PRIVATE GCTracer {
FRIEND_TEST(GCTracerTest, IncrementalScope);
FRIEND_TEST(GCTracerTest, IncrementalMarkingSpeed);
FRIEND_TEST(GCTracerTest, MutatorUtilization);
- FRIEND_TEST(GCTracerTest, RecordGCSumHistograms);
FRIEND_TEST(GCTracerTest, RecordMarkCompactHistograms);
FRIEND_TEST(GCTracerTest, RecordScavengerHistograms);
@@ -388,10 +469,10 @@ class V8_EXPORT_PRIVATE GCTracer {
void RecordMutatorUtilization(double mark_compactor_end_time,
double mark_compactor_duration);
- // Overall time spent in mark compact within a given GC cycle. Exact
- // accounting of events within a GC is not necessary which is why the
- // recording takes place at the end of the atomic pause.
- void RecordGCSumCounters(double atomic_pause_duration);
+ // Update counters for an entire full GC cycle. Exact accounting of events
+ // within a GC is not necessary which is why the recording takes place at the
+ // end of the atomic pause.
+ void RecordGCSumCounters();
double MonotonicallyIncreasingTimeInMs();
@@ -434,6 +515,14 @@ class V8_EXPORT_PRIVATE GCTracer {
// Previous tracer event.
Event previous_;
+ // The starting time of the observable pause or 0.0 if we're not inside it.
+ double start_of_observable_pause_ = 0.0;
+
+ // We need two epochs, since there can be scavenges during incremental
+ // marking.
+ CollectionEpoch epoch_young_ = 0;
+ CollectionEpoch epoch_full_ = 0;
+
// Size of incremental marking steps (in bytes) accumulated since the end of
// the last mark compact GC.
size_t incremental_marking_bytes_;
@@ -455,7 +544,6 @@ class V8_EXPORT_PRIVATE GCTracer {
IncrementalMarkingInfos
incremental_marking_scopes_[Scope::NUMBER_OF_INCREMENTAL_SCOPES];
-
// Timestamp and allocation counter at the last sampled allocation event.
double allocation_time_ms_;
size_t new_space_allocation_counter_bytes_;
@@ -489,7 +577,15 @@ class V8_EXPORT_PRIVATE GCTracer {
base::RingBuffer<BytesAndDuration> recorded_embedder_generation_allocations_;
base::RingBuffer<double> recorded_survival_ratios_;
- bool metrics_report_pending_ = false;
+ // A full GC cycle stops only when both v8 and cppgc (if available) GCs have
+ // finished sweeping.
+ bool notified_sweeping_completed_ = false;
+ bool notified_cppgc_completed_ = false;
+
+ // When a full GC cycle is interrupted by a young generation GC cycle, the
+ // |previous_| event is used as temporary storage for the |current_| event
+ // that corresponded to the full GC cycle, and this field is set to true.
+ bool young_gc_while_full_gc_ = false;
v8::metrics::GarbageCollectionFullMainThreadBatchedIncrementalMark
incremental_mark_batched_events_;
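The Event::State comment in this header describes a cycle as NOT_RUNNING -> MARKING -> ATOMIC -> SWEEPING -> NOT_RUNNING, driven by StartCycle, StartAtomicPause, StopAtomicPause, and StopCycle. A minimal standalone model of that transition order; it deliberately ignores the young-GC-while-full-GC interruption and the sweeping notifications, and is a sketch rather than GCTracer itself:

#include <cassert>

enum class State { NOT_RUNNING, MARKING, ATOMIC, SWEEPING };

// Each method asserts the expected source state and advances to the next one.
class CycleModel {
 public:
  void StartCycle()       { assert(state_ == State::NOT_RUNNING); state_ = State::MARKING; }
  void StartAtomicPause() { assert(state_ == State::MARKING);     state_ = State::ATOMIC; }
  void StopAtomicPause()  { assert(state_ == State::ATOMIC);      state_ = State::SWEEPING; }
  void StopCycle()        { assert(state_ == State::SWEEPING);    state_ = State::NOT_RUNNING; }

 private:
  State state_ = State::NOT_RUNNING;
};

int main() {
  CycleModel gc;
  gc.StartCycle();        // marking starts (incremental or atomic)
  gc.StartAtomicPause();  // observable pause
  gc.StopAtomicPause();   // sweeping may continue concurrently
  gc.StopCycle();         // both v8 and cppgc sweeping have finished
}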
diff --git a/deps/v8/src/heap/heap-allocator-inl.h b/deps/v8/src/heap/heap-allocator-inl.h
new file mode 100644
index 0000000000..043f4c629b
--- /dev/null
+++ b/deps/v8/src/heap/heap-allocator-inl.h
@@ -0,0 +1,250 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_ALLOCATOR_INL_H_
+#define V8_HEAP_HEAP_ALLOCATOR_INL_H_
+
+#include "src/base/logging.h"
+#include "src/common/globals.h"
+#include "src/heap/concurrent-allocator-inl.h"
+#include "src/heap/heap-allocator.h"
+#include "src/heap/large-spaces.h"
+#include "src/heap/new-spaces.h"
+#include "src/heap/paged-spaces.h"
+#include "src/heap/read-only-spaces.h"
+#include "src/heap/third-party/heap-api.h"
+
+namespace v8 {
+namespace internal {
+
+PagedSpace* HeapAllocator::code_space() const {
+ return static_cast<PagedSpace*>(spaces_[CODE_SPACE]);
+}
+
+CodeLargeObjectSpace* HeapAllocator::code_lo_space() const {
+ return static_cast<CodeLargeObjectSpace*>(spaces_[CODE_LO_SPACE]);
+}
+
+OldLargeObjectSpace* HeapAllocator::lo_space() const {
+ return static_cast<OldLargeObjectSpace*>(spaces_[LO_SPACE]);
+}
+
+PagedSpace* HeapAllocator::map_space() const {
+ return static_cast<PagedSpace*>(spaces_[MAP_SPACE]);
+}
+
+NewSpace* HeapAllocator::new_space() const {
+ return static_cast<NewSpace*>(spaces_[NEW_SPACE]);
+}
+
+NewLargeObjectSpace* HeapAllocator::new_lo_space() const {
+ return static_cast<NewLargeObjectSpace*>(spaces_[NEW_LO_SPACE]);
+}
+
+PagedSpace* HeapAllocator::old_space() const {
+ return static_cast<PagedSpace*>(spaces_[OLD_SPACE]);
+}
+
+ReadOnlySpace* HeapAllocator::read_only_space() const {
+ return read_only_space_;
+}
+
+bool HeapAllocator::CanAllocateInReadOnlySpace() const {
+ return read_only_space()->writable();
+}
+
+template <AllocationType type>
+V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult HeapAllocator::AllocateRaw(
+ int size_in_bytes, AllocationOrigin origin, AllocationAlignment alignment) {
+ DCHECK_EQ(heap_->gc_state(), Heap::NOT_IN_GC);
+ DCHECK(AllowHandleAllocation::IsAllowed());
+ DCHECK(AllowHeapAllocation::IsAllowed());
+
+ if (FLAG_single_generation && type == AllocationType::kYoung) {
+ return AllocateRaw(size_in_bytes, AllocationType::kOld, origin, alignment);
+ }
+
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
+ if (!heap_->always_allocate() && allocation_timeout_-- <= 0) {
+ return AllocationResult::Failure();
+ }
+ }
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
+
+#ifdef DEBUG
+ IncrementObjectCounters();
+#endif // DEBUG
+
+ if (heap_->CanSafepoint()) {
+ heap_->main_thread_local_heap()->Safepoint();
+ }
+
+ const size_t large_object_threshold = heap_->MaxRegularHeapObjectSize(type);
+ const bool large_object =
+ static_cast<size_t>(size_in_bytes) > large_object_threshold;
+
+ HeapObject object;
+ AllocationResult allocation;
+
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ allocation = heap_->tp_heap_->Allocate(size_in_bytes, type, alignment);
+ } else {
+ if (V8_UNLIKELY(large_object)) {
+ allocation =
+ AllocateRawLargeInternal(size_in_bytes, type, origin, alignment);
+ } else {
+ switch (type) {
+ case AllocationType::kYoung:
+ allocation =
+ new_space()->AllocateRaw(size_in_bytes, alignment, origin);
+ break;
+ case AllocationType::kOld:
+ allocation =
+ old_space()->AllocateRaw(size_in_bytes, alignment, origin);
+ break;
+ case AllocationType::kCode:
+ DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
+ DCHECK(AllowCodeAllocation::IsAllowed());
+ allocation = code_space()->AllocateRawUnaligned(size_in_bytes);
+ break;
+ case AllocationType::kMap:
+ DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
+ allocation = map_space()->AllocateRawUnaligned(size_in_bytes);
+ break;
+ case AllocationType::kReadOnly:
+ DCHECK(read_only_space()->writable());
+ DCHECK_EQ(AllocationOrigin::kRuntime, origin);
+ allocation = read_only_space()->AllocateRaw(size_in_bytes, alignment);
+ break;
+ case AllocationType::kSharedMap:
+ allocation = shared_map_allocator_->AllocateRaw(size_in_bytes,
+ alignment, origin);
+ break;
+ case AllocationType::kSharedOld:
+ allocation = shared_old_allocator_->AllocateRaw(size_in_bytes,
+ alignment, origin);
+ break;
+ }
+ }
+ }
+
+ if (allocation.To(&object)) {
+ if (AllocationType::kCode == type && !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ // Unprotect the memory chunk of the object if it was not unprotected
+ // already.
+ heap_->UnprotectAndRegisterMemoryChunk(
+ object, UnprotectMemoryOrigin::kMainThread);
+ heap_->ZapCodeObject(object.address(), size_in_bytes);
+ if (!large_object) {
+ MemoryChunk::FromHeapObject(object)
+ ->GetCodeObjectRegistry()
+ ->RegisterNewlyAllocatedCodeObject(object.address());
+ }
+ }
+
+#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+ if (AllocationType::kReadOnly != type) {
+ DCHECK_TAG_ALIGNED(object.address());
+ Page::FromHeapObject(object)->object_start_bitmap()->SetBit(
+ object.address());
+ }
+#endif // V8_ENABLE_CONSERVATIVE_STACK_SCANNING
+
+ for (auto& tracker : heap_->allocation_trackers_) {
+ tracker->AllocationEvent(object.address(), size_in_bytes);
+ }
+ }
+
+ return allocation;
+}
+
+AllocationResult HeapAllocator::AllocateRaw(int size_in_bytes,
+ AllocationType type,
+ AllocationOrigin origin,
+ AllocationAlignment alignment) {
+ switch (type) {
+ case AllocationType::kYoung:
+ return AllocateRaw<AllocationType::kYoung>(size_in_bytes, origin,
+ alignment);
+ case AllocationType::kOld:
+ return AllocateRaw<AllocationType::kOld>(size_in_bytes, origin,
+ alignment);
+ case AllocationType::kCode:
+ return AllocateRaw<AllocationType::kCode>(size_in_bytes, origin,
+ alignment);
+ case AllocationType::kMap:
+ return AllocateRaw<AllocationType::kMap>(size_in_bytes, origin,
+ alignment);
+ case AllocationType::kReadOnly:
+ return AllocateRaw<AllocationType::kReadOnly>(size_in_bytes, origin,
+ alignment);
+ case AllocationType::kSharedMap:
+ return AllocateRaw<AllocationType::kSharedMap>(size_in_bytes, origin,
+ alignment);
+ case AllocationType::kSharedOld:
+ return AllocateRaw<AllocationType::kSharedOld>(size_in_bytes, origin,
+ alignment);
+ }
+ UNREACHABLE();
+}
+
+AllocationResult HeapAllocator::AllocateRawData(int size_in_bytes,
+ AllocationType type,
+ AllocationOrigin origin,
+ AllocationAlignment alignment) {
+ switch (type) {
+ case AllocationType::kYoung:
+ return AllocateRaw<AllocationType::kYoung>(size_in_bytes, origin,
+ alignment);
+ case AllocationType::kOld:
+ return AllocateRaw<AllocationType::kOld>(size_in_bytes, origin,
+ alignment);
+ case AllocationType::kCode:
+ case AllocationType::kMap:
+ case AllocationType::kReadOnly:
+ case AllocationType::kSharedMap:
+ case AllocationType::kSharedOld:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+}
+
+template <HeapAllocator::AllocationRetryMode mode>
+V8_WARN_UNUSED_RESULT V8_INLINE HeapObject HeapAllocator::AllocateRawWith(
+ int size, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment) {
+ AllocationResult result;
+ HeapObject object;
+ if (allocation == AllocationType::kYoung) {
+ result = AllocateRaw<AllocationType::kYoung>(size, origin, alignment);
+ if (result.To(&object)) {
+ return object;
+ }
+ } else if (allocation == AllocationType::kOld) {
+ result = AllocateRaw<AllocationType::kOld>(size, origin, alignment);
+ if (result.To(&object)) {
+ return object;
+ }
+ }
+ switch (mode) {
+ case kLightRetry:
+ result = AllocateRawWithLightRetrySlowPath(size, allocation, origin,
+ alignment);
+ break;
+ case kRetryOrFail:
+ result = AllocateRawWithRetryOrFailSlowPath(size, allocation, origin,
+ alignment);
+ break;
+ }
+ if (result.To(&object)) {
+ return object;
+ }
+ return HeapObject();
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_HEAP_ALLOCATOR_INL_H_
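AllocateRawWith above wraps AllocateRaw in one of two retry policies: kLightRetry may still return an empty HeapObject, while kRetryOrFail keeps escalating (its slow path ends in FatalProcessOutOfMemory), so a returned object is always valid. A hypothetical call site, assuming V8-internal code with a HeapAllocator* named allocator and an int size_in_bytes; it is not taken from this diff:

  // Light retry: the allocation can still fail, so the result must be checked.
  HeapObject maybe_object =
      allocator->AllocateRawWith<HeapAllocator::kLightRetry>(
          size_in_bytes, AllocationType::kYoung);
  if (maybe_object.is_null()) {
    // No object was allocated; bail out or fall back.
  }

  // Retry-or-fail: repeated failure aborts the process, so no check is needed.
  HeapObject object = allocator->AllocateRawWith<HeapAllocator::kRetryOrFail>(
      size_in_bytes, AllocationType::kOld);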
diff --git a/deps/v8/src/heap/heap-allocator.cc b/deps/v8/src/heap/heap-allocator.cc
new file mode 100644
index 0000000000..580f56c9e0
--- /dev/null
+++ b/deps/v8/src/heap/heap-allocator.cc
@@ -0,0 +1,163 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/heap-allocator.h"
+
+#include "src/base/logging.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
+#include "src/heap/heap-allocator-inl.h"
+#include "src/heap/heap-inl.h"
+#include "src/logging/counters.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+HeapAllocator::HeapAllocator(Heap* heap) : heap_(heap) {}
+
+void HeapAllocator::Setup() {
+ for (int i = FIRST_SPACE; i <= LAST_SPACE; ++i) {
+ spaces_[i] = heap_->space(i);
+ }
+ shared_old_allocator_ = heap_->shared_old_allocator_.get();
+ shared_map_allocator_ = heap_->shared_map_allocator_.get();
+}
+
+void HeapAllocator::SetReadOnlySpace(ReadOnlySpace* read_only_space) {
+ read_only_space_ = read_only_space;
+}
+
+AllocationResult HeapAllocator::AllocateRawLargeInternal(
+ int size_in_bytes, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment) {
+ DCHECK_GT(size_in_bytes, heap_->MaxRegularHeapObjectSize(allocation));
+ switch (allocation) {
+ case AllocationType::kYoung:
+ return new_lo_space()->AllocateRaw(size_in_bytes);
+ case AllocationType::kOld:
+ return lo_space()->AllocateRaw(size_in_bytes);
+ case AllocationType::kCode:
+ return code_lo_space()->AllocateRaw(size_in_bytes);
+ case AllocationType::kMap:
+ case AllocationType::kReadOnly:
+ case AllocationType::kSharedMap:
+ case AllocationType::kSharedOld:
+ UNREACHABLE();
+ }
+}
+
+namespace {
+
+constexpr AllocationSpace AllocationTypeToGCSpace(AllocationType type) {
+ switch (type) {
+ case AllocationType::kYoung:
+ return NEW_SPACE;
+ case AllocationType::kOld:
+ case AllocationType::kCode:
+ case AllocationType::kMap:
+ // OLD_SPACE indicates full GC.
+ return OLD_SPACE;
+ case AllocationType::kReadOnly:
+ case AllocationType::kSharedMap:
+ case AllocationType::kSharedOld:
+ UNREACHABLE();
+ }
+}
+
+} // namespace
+
+AllocationResult HeapAllocator::AllocateRawWithLightRetrySlowPath(
+ int size, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment) {
+ AllocationResult result = AllocateRaw(size, allocation, origin, alignment);
+ if (!result.IsFailure()) {
+ return result;
+ }
+
+ // Two GCs before returning failure.
+ for (int i = 0; i < 2; i++) {
+ if (IsSharedAllocationType(allocation)) {
+ heap_->CollectSharedGarbage(GarbageCollectionReason::kAllocationFailure);
+ } else {
+ heap_->CollectGarbage(AllocationTypeToGCSpace(allocation),
+ GarbageCollectionReason::kAllocationFailure);
+ }
+ result = AllocateRaw(size, allocation, origin, alignment);
+ if (!result.IsFailure()) {
+ return result;
+ }
+ }
+ return result;
+}
+
+AllocationResult HeapAllocator::AllocateRawWithRetryOrFailSlowPath(
+ int size, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment) {
+ AllocationResult result =
+ AllocateRawWithLightRetrySlowPath(size, allocation, origin, alignment);
+ if (!result.IsFailure()) return result;
+
+ heap_->isolate()->counters()->gc_last_resort_from_handles()->Increment();
+ if (IsSharedAllocationType(allocation)) {
+ heap_->CollectSharedGarbage(GarbageCollectionReason::kLastResort);
+
+ // We need always_allocate() to be true both on the client- and
+ // server-isolate. It is used in both code paths.
+ AlwaysAllocateScope shared_scope(
+ heap_->isolate()->shared_isolate()->heap());
+ AlwaysAllocateScope client_scope(heap_);
+ result = AllocateRaw(size, allocation, origin, alignment);
+ } else {
+ heap_->CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
+
+ AlwaysAllocateScope scope(heap_);
+ result = AllocateRaw(size, allocation, origin, alignment);
+ }
+
+ if (!result.IsFailure()) {
+ return result;
+ }
+
+ v8::internal::V8::FatalProcessOutOfMemory(heap_->isolate(),
+ "CALL_AND_RETRY_LAST", true);
+}
+
+#ifdef DEBUG
+
+void HeapAllocator::IncrementObjectCounters() {
+ heap_->isolate()->counters()->objs_since_last_full()->Increment();
+ heap_->isolate()->counters()->objs_since_last_young()->Increment();
+}
+
+#endif // DEBUG
+
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+
+void HeapAllocator::SetAllocationTimeout(int allocation_timeout) {
+ allocation_timeout_ = allocation_timeout;
+}
+
+void HeapAllocator::UpdateAllocationTimeout() {
+ if (FLAG_random_gc_interval > 0) {
+ const int new_timeout = allocation_timeout_ <= 0
+ ? heap_->isolate()->fuzzer_rng()->NextInt(
+ FLAG_random_gc_interval + 1)
+ : allocation_timeout_;
+ // Reset the allocation timeout, but make sure to allow at least a few
+ // allocations after a collection. The reason for this is that we have a lot
+ // of allocation sequences and we assume that a garbage collection will
+ // allow the subsequent allocation attempts to go through.
+ constexpr int kFewAllocationsHeadroom = 6;
+ allocation_timeout_ = std::max(kFewAllocationsHeadroom, new_timeout);
+ } else if (FLAG_gc_interval >= 0) {
+ allocation_timeout_ = FLAG_gc_interval;
+ }
+}
+
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
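The headroom rule above can be summarized with a small self-contained sketch; the numbers are arbitrary and the fuzzer RNG is replaced by a plain parameter:

#include <algorithm>
#include <cassert>

// Mirrors the --random-gc-interval branch of UpdateAllocationTimeout().
int NextTimeout(int current_timeout, int random_sample) {
  constexpr int kFewAllocationsHeadroom = 6;
  const int new_timeout =
      current_timeout <= 0 ? random_sample : current_timeout;
  return std::max(kFewAllocationsHeadroom, new_timeout);
}

int main() {
  // An expired timeout with a small random sample still leaves 6 allocations.
  assert(NextTimeout(0, 3) == 6);
  // A timeout that is still counting down is left unchanged.
  assert(NextTimeout(42, 3) == 42);
  return 0;
}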
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/heap-allocator.h b/deps/v8/src/heap/heap-allocator.h
new file mode 100644
index 0000000000..9de82295f2
--- /dev/null
+++ b/deps/v8/src/heap/heap-allocator.h
@@ -0,0 +1,119 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_ALLOCATOR_H_
+#define V8_HEAP_HEAP_ALLOCATOR_H_
+
+#include "include/v8config.h"
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+#include "src/heap/allocation-result.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeLargeObjectSpace;
+class ConcurrentAllocator;
+class Heap;
+class NewSpace;
+class NewLargeObjectSpace;
+class OldLargeObjectSpace;
+class PagedSpace;
+class ReadOnlySpace;
+class Space;
+
+// Allocator for the main thread. All exposed functions internally call the
+// right bottleneck.
+class V8_EXPORT_PRIVATE HeapAllocator final {
+ public:
+ explicit HeapAllocator(Heap*);
+
+ void Setup();
+ void SetReadOnlySpace(ReadOnlySpace*);
+
+ // Supports all `AllocationType` types.
+ //
+ // Returns a failed result on an unsuccessful allocation attempt.
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateRaw(int size_in_bytes, AllocationType allocation,
+ AllocationOrigin origin = AllocationOrigin::kRuntime,
+ AllocationAlignment alignment = kTaggedAligned);
+
+ // Supports all `AllocationType` types. Use when type is statically known.
+ //
+ // Returns a failed result on an unsuccessful allocation attempt.
+ template <AllocationType type>
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRaw(
+ int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime,
+ AllocationAlignment alignment = kTaggedAligned);
+
+ // Supports only `AllocationType::kYoung` and `AllocationType::kOld`.
+ //
+ // Returns a failed result on an unsuccessful allocation attempt.
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateRawData(int size_in_bytes, AllocationType allocation,
+ AllocationOrigin origin = AllocationOrigin::kRuntime,
+ AllocationAlignment alignment = kTaggedAligned);
+
+ enum AllocationRetryMode { kLightRetry, kRetryOrFail };
+
+ // Supports all `AllocationType` types and allows specifying retry handling.
+ template <AllocationRetryMode mode>
+ V8_WARN_UNUSED_RESULT V8_INLINE HeapObject
+ AllocateRawWith(int size, AllocationType allocation,
+ AllocationOrigin origin = AllocationOrigin::kRuntime,
+ AllocationAlignment alignment = kTaggedAligned);
+
+ V8_INLINE bool CanAllocateInReadOnlySpace() const;
+
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ void UpdateAllocationTimeout();
+ void SetAllocationTimeout(int allocation_timeout);
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
+
+ private:
+ V8_INLINE PagedSpace* code_space() const;
+ V8_INLINE CodeLargeObjectSpace* code_lo_space() const;
+ V8_INLINE PagedSpace* map_space() const;
+ V8_INLINE NewSpace* new_space() const;
+ V8_INLINE NewLargeObjectSpace* new_lo_space() const;
+ V8_INLINE OldLargeObjectSpace* lo_space() const;
+ V8_INLINE PagedSpace* old_space() const;
+ V8_INLINE ReadOnlySpace* read_only_space() const;
+
+ V8_WARN_UNUSED_RESULT AllocationResult AllocateRawLargeInternal(
+ int size_in_bytes, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment);
+
+ V8_WARN_UNUSED_RESULT AllocationResult AllocateRawWithRetryOrFailSlowPath(
+ int size, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment);
+
+ V8_WARN_UNUSED_RESULT AllocationResult AllocateRawWithLightRetrySlowPath(
+ int size, AllocationType allocation, AllocationOrigin origin,
+ AllocationAlignment alignment);
+
+#ifdef DEBUG
+ void IncrementObjectCounters();
+#endif // DEBUG
+
+ Heap* const heap_;
+ Space* spaces_[LAST_SPACE + 1];
+ ReadOnlySpace* read_only_space_;
+
+ ConcurrentAllocator* shared_old_allocator_;
+ ConcurrentAllocator* shared_map_allocator_;
+
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ // If the --gc-interval flag is set to a positive value, this variable
+ // holds the number of allocations remaining until the next failure and
+ // garbage collection.
+ int allocation_timeout_ = 0;
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_HEAP_ALLOCATOR_H_
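A hedged sketch of how the interface declared above is typically driven; AllocateOldOrDie is a hypothetical helper written only for this illustration, and in-tree callers normally go through Heap::AllocateRaw, which forwards to this class:

#include "src/heap/heap-allocator-inl.h"  // Inline fast paths for AllocateRaw.

namespace v8 {
namespace internal {

// Illustrative only: prefer the fallible fast path, fall back to the
// crash-on-OOM retry ladder. `allocator` must already be fully set up.
HeapObject AllocateOldOrDie(HeapAllocator* allocator, int size_in_bytes) {
  HeapObject object;
  AllocationResult result =
      allocator->AllocateRaw(size_in_bytes, AllocationType::kOld);
  if (result.To(&object)) return object;
  // Retry ladder: up to two GCs, then a last-resort GC, then a fatal OOM.
  return allocator->AllocateRawWith<HeapAllocator::kRetryOrFail>(
      size_in_bytes, AllocationType::kOld);
}

}  // namespace internal
}  // namespace v8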
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 68abf816b0..d14ba247ca 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -20,6 +20,8 @@
#include "src/execution/isolate.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/concurrent-allocator-inl.h"
+#include "src/heap/concurrent-allocator.h"
+#include "src/heap/heap-allocator-inl.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
#include "src/heap/large-spaces.h"
@@ -66,26 +68,6 @@ T ForwardingAddress(T heap_obj) {
}
}
-AllocationSpace AllocationResult::RetrySpace() {
- DCHECK(IsRetry());
- return static_cast<AllocationSpace>(Smi::ToInt(object_));
-}
-
-HeapObject AllocationResult::ToObjectChecked() {
- CHECK(!IsRetry());
- return HeapObject::cast(object_);
-}
-
-HeapObject AllocationResult::ToObject() {
- DCHECK(!IsRetry());
- return HeapObject::cast(object_);
-}
-
-Address AllocationResult::ToAddress() {
- DCHECK(!IsRetry());
- return HeapObject::cast(object_).address();
-}
-
// static
base::EnumSet<CodeFlushMode> Heap::GetCodeFlushMode(Isolate* isolate) {
if (isolate->disable_bytecode_flushing()) {
@@ -120,6 +102,16 @@ int64_t Heap::update_external_memory(int64_t delta) {
return external_memory_.Update(delta);
}
+PagedSpace* Heap::space_for_maps() {
+ return V8_LIKELY(map_space_) ? static_cast<PagedSpace*>(map_space_)
+ : static_cast<PagedSpace*>(old_space_);
+}
+
+ConcurrentAllocator* Heap::concurrent_allocator_for_maps() {
+ return V8_LIKELY(shared_map_allocator_) ? shared_map_allocator_.get()
+ : shared_old_allocator_.get();
+}
+
RootsTable& Heap::roots_table() { return isolate()->roots_table(); }
#define ROOT_ACCESSOR(Type, name, CamelName) \
@@ -191,6 +183,10 @@ inline const base::AddressRegion& Heap::code_region() {
#endif
}
+Address Heap::code_range_base() {
+ return code_range_ ? code_range_->base() : kNullAddress;
+}
+
int Heap::MaxRegularHeapObjectSize(AllocationType allocation) {
if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
(allocation == AllocationType::kCode)) {
@@ -204,208 +200,18 @@ int Heap::MaxRegularHeapObjectSize(AllocationType allocation) {
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
AllocationOrigin origin,
AllocationAlignment alignment) {
- DCHECK(AllowHandleAllocation::IsAllowed());
- DCHECK(AllowHeapAllocation::IsAllowed());
- DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap,
- alignment == AllocationAlignment::kTaggedAligned);
- DCHECK_EQ(gc_state(), NOT_IN_GC);
-#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
- if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
- if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
- AllocationSpace space = FLAG_single_generation ? OLD_SPACE : NEW_SPACE;
- return AllocationResult::Retry(space);
- }
- }
-#endif
-#ifdef DEBUG
- IncrementObjectCounters();
-#endif
-
- if (CanSafepoint()) {
- main_thread_local_heap()->Safepoint();
- }
-
- size_t large_object_threshold = MaxRegularHeapObjectSize(type);
- bool large_object =
- static_cast<size_t>(size_in_bytes) > large_object_threshold;
-
- HeapObject object;
- AllocationResult allocation;
-
- if (FLAG_single_generation && type == AllocationType::kYoung) {
- type = AllocationType::kOld;
- }
-
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- allocation = tp_heap_->Allocate(size_in_bytes, type, alignment);
- } else {
- if (AllocationType::kYoung == type) {
- if (large_object) {
- if (FLAG_young_generation_large_objects) {
- allocation = new_lo_space_->AllocateRaw(size_in_bytes);
- } else {
- // If young generation large objects are disabled, we have to tenure
- // the allocation and violate the given allocation type. This could be
- // dangerous. We may want to remove
- // FLAG_young_generation_large_objects and avoid patching.
- allocation = lo_space_->AllocateRaw(size_in_bytes);
- }
- } else {
- allocation = new_space_->AllocateRaw(size_in_bytes, alignment, origin);
- }
- } else if (AllocationType::kOld == type) {
- if (large_object) {
- allocation = lo_space_->AllocateRaw(size_in_bytes);
- } else {
- allocation = old_space_->AllocateRaw(size_in_bytes, alignment, origin);
- }
- } else if (AllocationType::kCode == type) {
- DCHECK(AllowCodeAllocation::IsAllowed());
- if (large_object) {
- allocation = code_lo_space_->AllocateRaw(size_in_bytes);
- } else {
- allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
- }
- } else if (AllocationType::kMap == type) {
- allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
- } else if (AllocationType::kReadOnly == type) {
- DCHECK(!large_object);
- DCHECK(CanAllocateInReadOnlySpace());
- DCHECK_EQ(AllocationOrigin::kRuntime, origin);
- allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
- } else if (AllocationType::kSharedOld == type) {
- allocation =
- shared_old_allocator_->AllocateRaw(size_in_bytes, alignment, origin);
- } else if (AllocationType::kSharedMap == type) {
- allocation =
- shared_map_allocator_->AllocateRaw(size_in_bytes, alignment, origin);
- } else {
- UNREACHABLE();
- }
- }
-
- if (allocation.To(&object)) {
- if (AllocationType::kCode == type && !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- // Unprotect the memory chunk of the object if it was not unprotected
- // already.
- UnprotectAndRegisterMemoryChunk(object,
- UnprotectMemoryOrigin::kMainThread);
- ZapCodeObject(object.address(), size_in_bytes);
- if (!large_object) {
- MemoryChunk::FromHeapObject(object)
- ->GetCodeObjectRegistry()
- ->RegisterNewlyAllocatedCodeObject(object.address());
- }
- }
-
-#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
- if (AllocationType::kReadOnly != type) {
- DCHECK_TAG_ALIGNED(object.address());
- Page::FromHeapObject(object)->object_start_bitmap()->SetBit(
- object.address());
- }
-#endif
-
- OnAllocationEvent(object, size_in_bytes);
- }
-
- return allocation;
-}
-
-template <Heap::AllocationRetryMode mode>
-HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
- AllocationOrigin origin,
- AllocationAlignment alignment) {
- DCHECK(AllowHandleAllocation::IsAllowed());
- DCHECK(AllowHeapAllocation::IsAllowed());
- DCHECK_EQ(gc_state(), NOT_IN_GC);
- Heap* heap = isolate()->heap();
- if (allocation == AllocationType::kYoung &&
- alignment == AllocationAlignment::kTaggedAligned &&
- size <= MaxRegularHeapObjectSize(allocation) &&
- V8_LIKELY(!FLAG_single_generation && FLAG_inline_new &&
- FLAG_gc_interval == -1)) {
- Address* top = heap->NewSpaceAllocationTopAddress();
- Address* limit = heap->NewSpaceAllocationLimitAddress();
- if (*limit - *top >= static_cast<unsigned>(size)) {
- DCHECK(IsAligned(size, kTaggedSize));
- HeapObject obj = HeapObject::FromAddress(*top);
- *top += size;
- heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size);
- return obj;
- }
- }
- switch (mode) {
- case kLightRetry:
- return AllocateRawWithLightRetrySlowPath(size, allocation, origin,
- alignment);
- case kRetryOrFail:
- return AllocateRawWithRetryOrFailSlowPath(size, allocation, origin,
- alignment);
- }
- UNREACHABLE();
+ return heap_allocator_.AllocateRaw(size_in_bytes, type, origin, alignment);
}
Address Heap::AllocateRawOrFail(int size, AllocationType allocation,
AllocationOrigin origin,
AllocationAlignment alignment) {
- return AllocateRawWith<kRetryOrFail>(size, allocation, origin, alignment)
+ return heap_allocator_
+ .AllocateRawWith<HeapAllocator::kRetryOrFail>(size, allocation, origin,
+ alignment)
.address();
}
-void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
- for (auto& tracker : allocation_trackers_) {
- tracker->AllocationEvent(object.address(), size_in_bytes);
- }
-
- if (FLAG_verify_predictable) {
- ++allocations_count_;
- // Advance synthetic time by making a time request.
- MonotonicallyIncreasingTimeInMs();
-
- UpdateAllocationsHash(object);
- UpdateAllocationsHash(size_in_bytes);
-
- if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
- PrintAllocationsHash();
- }
- } else if (FLAG_fuzzer_gc_analysis) {
- ++allocations_count_;
- } else if (FLAG_trace_allocation_stack_interval > 0) {
- ++allocations_count_;
- if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
- isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
- }
- }
-}
-
-bool Heap::CanAllocateInReadOnlySpace() {
- return read_only_space()->writable();
-}
-
-void Heap::UpdateAllocationsHash(HeapObject object) {
- Address object_address = object.address();
- MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
- AllocationSpace allocation_space = memory_chunk->owner_identity();
-
- STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
- uint32_t value =
- static_cast<uint32_t>(object_address - memory_chunk->address()) |
- (static_cast<uint32_t>(allocation_space) << kPageSizeBits);
-
- UpdateAllocationsHash(value);
-}
-
-void Heap::UpdateAllocationsHash(uint32_t value) {
- uint16_t c1 = static_cast<uint16_t>(value);
- uint16_t c2 = static_cast<uint16_t>(value >> 16);
- raw_allocations_hash_ =
- StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
- raw_allocations_hash_ =
- StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
-}
-
void Heap::RegisterExternalString(String string) {
DCHECK(string.IsExternalString());
DCHECK(!string.IsThinString());
diff --git a/deps/v8/src/heap/heap-layout-tracer.cc b/deps/v8/src/heap/heap-layout-tracer.cc
index 53ac5726a7..0e984ce761 100644
--- a/deps/v8/src/heap/heap-layout-tracer.cc
+++ b/deps/v8/src/heap/heap-layout-tracer.cc
@@ -20,7 +20,8 @@ void HeapLayoutTracer::GCProloguePrintHeapLayout(v8::Isolate* isolate,
v8::GCCallbackFlags flags,
void* data) {
Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
- PrintF("Before GC:%d,", heap->gc_count());
+ // gc_count_ is incremented only after this callback runs, so add 1 manually.
+ PrintF("Before GC:%d,", heap->gc_count() + 1);
PrintF("collector_name:%s\n", Heap::CollectorName(gc_type));
PrintHeapLayout(std::cout, heap);
}
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index a1b03256af..4d48679dfa 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -45,10 +45,10 @@ struct MemoryChunk {
static constexpr uintptr_t kFlagsOffset = kSizetSize;
static constexpr uintptr_t kHeapOffset = kSizetSize + kUIntptrSize;
static constexpr uintptr_t kIsExecutableBit = uintptr_t{1} << 0;
- static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
+ static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 17;
static constexpr uintptr_t kFromPageBit = uintptr_t{1} << 3;
static constexpr uintptr_t kToPageBit = uintptr_t{1} << 4;
- static constexpr uintptr_t kReadOnlySpaceBit = uintptr_t{1} << 21;
+ static constexpr uintptr_t kReadOnlySpaceBit = uintptr_t{1} << 20;
V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
HeapObject object) {
@@ -283,6 +283,19 @@ void WriteBarrier::MarkingFromInternalFields(JSObject host) {
MarkingSlowFromInternalFields(*heap, host);
}
+#ifdef ENABLE_SLOW_DCHECKS
+// static
+template <typename T>
+bool WriteBarrier::IsRequired(HeapObject host, T value) {
+ if (BasicMemoryChunk::FromHeapObject(host)->InYoungGeneration()) return false;
+ if (value.IsSmi()) return false;
+ if (value.IsCleared()) return false;
+ HeapObject target = value.GetHeapObject();
+ if (ReadOnlyHeap::Contains(target)) return false;
+ return !IsImmortalImmovableHeapObject(target);
+}
+#endif
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap-write-barrier.cc b/deps/v8/src/heap/heap-write-barrier.cc
index dce052f00e..461af50f98 100644
--- a/deps/v8/src/heap/heap-write-barrier.cc
+++ b/deps/v8/src/heap/heap-write-barrier.cc
@@ -7,6 +7,7 @@
#include "src/heap/embedder-tracing.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/marking-barrier.h"
+#include "src/objects/code-inl.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/js-objects.h"
#include "src/objects/maybe-object.h"
@@ -49,13 +50,10 @@ void WriteBarrier::MarkingSlowFromGlobalHandle(Heap* heap, HeapObject value) {
// static
void WriteBarrier::MarkingSlowFromInternalFields(Heap* heap, JSObject host) {
- // We are not checking the mark bits of host here as (a) there's no
- // synchronization with the marker and (b) we are writing into a live object
- // (independent of the mark bits).
- if (!heap->local_embedder_heap_tracer()->InUse()) return;
- LocalEmbedderHeapTracer::ProcessingScope scope(
- heap->local_embedder_heap_tracer());
- scope.TracePossibleWrapper(host);
+ auto* local_embedder_heap_tracer = heap->local_embedder_heap_tracer();
+ if (!local_embedder_heap_tracer->InUse()) return;
+
+ local_embedder_heap_tracer->EmbedderWriteBarrier(heap, host);
}
void WriteBarrier::MarkingSlow(Heap* heap, Code host, RelocInfo* reloc_info,
@@ -99,5 +97,21 @@ int WriteBarrier::MarkingFromCode(Address raw_host, Address raw_slot) {
return 0;
}
+#ifdef ENABLE_SLOW_DCHECKS
+bool WriteBarrier::IsImmortalImmovableHeapObject(HeapObject object) {
+ BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromHeapObject(object);
+ // All objects in readonly space are immortal and immovable.
+ if (basic_chunk->InReadOnlySpace()) return true;
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ // There are also objects in "regular" spaces which are immortal and
+ // immovable. Objects on a page that can get compacted are movable and can be
+ // filtered out.
+ if (!chunk->IsFlagSet(MemoryChunk::NEVER_EVACUATE)) return false;
+ // Now we know the object is immovable, check whether it is also immortal.
+ // Builtins are roots and therefore always kept alive by the GC.
+ return object.IsCode() && Code::cast(object).is_builtin();
+}
+#endif
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap-write-barrier.h b/deps/v8/src/heap/heap-write-barrier.h
index b221fae2ed..9e2cf8652a 100644
--- a/deps/v8/src/heap/heap-write-barrier.h
+++ b/deps/v8/src/heap/heap-write-barrier.h
@@ -65,6 +65,12 @@ class V8_EXPORT_PRIVATE WriteBarrier {
static MarkingBarrier* CurrentMarkingBarrier(Heap* heap);
+#ifdef ENABLE_SLOW_DCHECKS
+ template <typename T>
+ static inline bool IsRequired(HeapObject host, T value);
+ static bool IsImmortalImmovableHeapObject(HeapObject object);
+#endif
+
private:
static inline base::Optional<Heap*> GetHeapIfMarking(HeapObject object);
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 5f80f2fd4f..81255e531c 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -30,7 +30,6 @@
#include "src/execution/embedder-state.h"
#include "src/execution/isolate-utils-inl.h"
#include "src/execution/microtask-queue.h"
-#include "src/execution/runtime-profiler.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/global-handles-inl.h"
@@ -88,6 +87,7 @@
#include "src/objects/feedback-vector.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/instance-type.h"
#include "src/objects/maybe-object.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-atomic-inl.h"
@@ -114,14 +114,6 @@
namespace v8 {
namespace internal {
-namespace {
-std::atomic<CollectionEpoch> global_epoch{0};
-
-CollectionEpoch next_epoch() {
- return global_epoch.fetch_add(1, std::memory_order_relaxed) + 1;
-}
-} // namespace
-
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
Isolate* Heap::GetIsolateFromWritableObject(HeapObject object) {
return reinterpret_cast<Isolate*>(
@@ -212,6 +204,7 @@ class ScavengeTaskObserver : public AllocationObserver {
Heap::Heap()
: isolate_(isolate()),
+ heap_allocator_(this),
memory_pressure_level_(MemoryPressureLevel::kNone),
global_pretenuring_feedback_(kInitialFeedbackCapacity),
safepoint_(std::make_unique<IsolateSafepoint>(this)),
@@ -488,6 +481,8 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
return GarbageCollector::MARK_COMPACTOR;
}
+ DCHECK(!FLAG_single_generation);
+ DCHECK(!FLAG_gc_global);
// Default
*reason = nullptr;
return YoungGenerationCollector();
@@ -540,12 +535,14 @@ void Heap::PrintShortHeapStatistics() {
", committed: %6zu KB\n",
code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
code_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_,
- "Map space, used: %6zu KB"
- ", available: %6zu KB"
- ", committed: %6zu KB\n",
- map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
- map_space_->CommittedMemory() / KB);
+ if (map_space()) {
+ PrintIsolate(isolate_,
+ "Map space, used: %6zu KB"
+ ", available: %6zu KB"
+ ", committed: %6zu KB\n",
+ map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
+ map_space_->CommittedMemory() / KB);
+ }
PrintIsolate(isolate_,
"Large object space, used: %6zu KB"
", available: %6zu KB"
@@ -723,6 +720,106 @@ void Heap::ReportStatisticsAfterGC() {
}
}
+class Heap::AllocationTrackerForDebugging final
+ : public HeapObjectAllocationTracker {
+ public:
+ static bool IsNeeded() {
+ return FLAG_verify_predictable || FLAG_fuzzer_gc_analysis ||
+ (FLAG_trace_allocation_stack_interval > 0);
+ }
+
+ explicit AllocationTrackerForDebugging(Heap* heap) : heap_(heap) {
+ CHECK(IsNeeded());
+ heap_->AddHeapObjectAllocationTracker(this);
+ }
+
+ ~AllocationTrackerForDebugging() final {
+ heap_->RemoveHeapObjectAllocationTracker(this);
+ if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
+ PrintAllocationsHash();
+ }
+ }
+
+ void AllocationEvent(Address addr, int size) final {
+ if (FLAG_verify_predictable) {
+ allocations_count_.fetch_add(1, std::memory_order_relaxed);
+ // Advance synthetic time by making a time request.
+ heap_->MonotonicallyIncreasingTimeInMs();
+
+ UpdateAllocationsHash(HeapObject::FromAddress(addr));
+ UpdateAllocationsHash(size);
+
+ if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
+ PrintAllocationsHash();
+ }
+ } else if (FLAG_fuzzer_gc_analysis) {
+ allocations_count_.fetch_add(1, std::memory_order_relaxed);
+ } else if (FLAG_trace_allocation_stack_interval > 0) {
+ allocations_count_.fetch_add(1, std::memory_order_relaxed);
+ if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
+ heap_->isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
+ }
+ }
+ }
+
+ void MoveEvent(Address source, Address target, int size) final {
+ if (FLAG_verify_predictable) {
+ allocations_count_.fetch_add(1, std::memory_order_relaxed);
+ // Advance synthetic time by making a time request.
+ heap_->MonotonicallyIncreasingTimeInMs();
+
+ UpdateAllocationsHash(HeapObject::FromAddress(source));
+ UpdateAllocationsHash(HeapObject::FromAddress(target));
+ UpdateAllocationsHash(size);
+
+ if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
+ PrintAllocationsHash();
+ }
+ } else if (FLAG_fuzzer_gc_analysis) {
+ allocations_count_.fetch_add(1, std::memory_order_relaxed);
+ }
+ }
+
+ void UpdateObjectSizeEvent(Address, int) final {}
+
+ private:
+ void UpdateAllocationsHash(HeapObject object) {
+ Address object_address = object.address();
+ MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
+ AllocationSpace allocation_space = memory_chunk->owner_identity();
+
+ STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
+ uint32_t value =
+ static_cast<uint32_t>(object_address - memory_chunk->address()) |
+ (static_cast<uint32_t>(allocation_space) << kPageSizeBits);
+
+ UpdateAllocationsHash(value);
+ }
+
+ void UpdateAllocationsHash(uint32_t value) {
+ const uint16_t c1 = static_cast<uint16_t>(value);
+ const uint16_t c2 = static_cast<uint16_t>(value >> 16);
+ raw_allocations_hash_ =
+ StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
+ raw_allocations_hash_ =
+ StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
+ }
+
+ void PrintAllocationsHash() {
+ uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
+ PrintF("\n### Allocations = %zu, hash = 0x%08x\n",
+ allocations_count_.load(std::memory_order_relaxed), hash);
+ }
+
+ Heap* const heap_;
+ // Count of all allocations performed through C++ bottlenecks. This needs to
+ // be atomic because the GC moves objects in parallel, and moves count as
+ // allocations.
+ std::atomic<size_t> allocations_count_{0};
+ // Running hash over allocations performed.
+ uint32_t raw_allocations_hash_ = 0;
+};
+
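For orientation, a sketch of the packing used by UpdateAllocationsHash above; MixCharacter is a stand-in mixer, whereas V8 feeds both 16-bit halves through StringHasher::AddCharacterCore, and all concrete values below are arbitrary:

#include <cstdint>
#include <cstdio>

// Stand-in mixer (Jenkins-style step); illustration only.
uint32_t MixCharacter(uint32_t hash, uint16_t c) {
  hash += c;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

// Packs the page offset and the space id into one 32-bit value, as the
// tracker does, then folds both 16-bit halves into the running hash.
uint32_t UpdateHash(uint32_t running_hash, uint32_t page_offset,
                    uint32_t space_id, uint32_t page_size_bits) {
  const uint32_t value = page_offset | (space_id << page_size_bits);
  running_hash = MixCharacter(running_hash, static_cast<uint16_t>(value));
  running_hash = MixCharacter(running_hash, static_cast<uint16_t>(value >> 16));
  return running_hash;
}

int main() {
  uint32_t hash = 0;
  hash = UpdateHash(hash, /*page_offset=*/0x140, /*space_id=*/2,
                    /*page_size_bits=*/18);
  std::printf("running hash: 0x%08x\n", hash);
}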
void Heap::AddHeapObjectAllocationTracker(
HeapObjectAllocationTracker* tracker) {
if (allocation_trackers_.empty() && FLAG_inline_new) {
@@ -913,9 +1010,28 @@ void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
-void Heap::GarbageCollectionPrologue() {
+void Heap::GarbageCollectionPrologue(
+ GarbageCollectionReason gc_reason,
+ const v8::GCCallbackFlags gc_callback_flags) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);
+ is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
+ current_gc_flags_ & kForcedGC ||
+ force_gc_on_next_allocation_;
+ is_current_gc_for_heap_profiler_ =
+ gc_reason == GarbageCollectionReason::kHeapProfiler;
+ if (force_gc_on_next_allocation_) force_gc_on_next_allocation_ = false;
+
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+ heap_allocator_.UpdateAllocationTimeout();
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
+
+ // There may be an allocation memento behind objects in new space. Upon
+ // evacuation of a non-full new space (or if we are on the last page) there
+ // may be uninitialized memory behind top. We fill the remainder of the page
+ // with a filler.
+ if (new_space()) new_space()->MakeLinearAllocationAreaIterable();
+
// Reset GC statistics.
promoted_objects_size_ = 0;
previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
@@ -1289,7 +1405,11 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
+
+ if (map_space()) {
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
+ }
+
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
@@ -1358,8 +1478,10 @@ void Heap::GarbageCollectionEpilogue(GarbageCollector collector) {
static_cast<int>(CommittedMemory() / KB));
isolate_->counters()->heap_sample_total_used()->AddSample(
static_cast<int>(SizeOfObjects() / KB));
- isolate_->counters()->heap_sample_map_space_committed()->AddSample(
- static_cast<int>(map_space()->CommittedMemory() / KB));
+ if (map_space()) {
+ isolate_->counters()->heap_sample_map_space_committed()->AddSample(
+ static_cast<int>(map_space()->CommittedMemory() / KB));
+ }
isolate_->counters()->heap_sample_code_space_committed()->AddSample(
static_cast<int>(code_space()->CommittedMemory() / KB));
@@ -1388,7 +1510,7 @@ class V8_NODISCARD GCCallbacksScope {
};
void Heap::HandleGCRequest() {
- if (FLAG_stress_scavenge > 0 && stress_scavenge_observer_->HasRequestedGC()) {
+ if (IsStressingScavenge() && stress_scavenge_observer_->HasRequestedGC()) {
CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
stress_scavenge_observer_->RequestedGCDone();
} else if (HighMemoryPressure()) {
@@ -1417,51 +1539,6 @@ void Heap::ScheduleScavengeTaskIfNeeded() {
scavenge_job_->ScheduleTaskIfNeeded(this);
}
-TimedHistogram* Heap::GCTypePriorityTimer(GarbageCollector collector) {
- if (IsYoungGenerationCollector(collector)) {
- if (isolate_->IsIsolateInBackground()) {
- return isolate_->counters()->gc_scavenger_background();
- }
- return isolate_->counters()->gc_scavenger_foreground();
- } else {
- if (!incremental_marking()->IsStopped()) {
- if (ShouldReduceMemory()) {
- if (isolate_->IsIsolateInBackground()) {
- return isolate_->counters()->gc_finalize_reduce_memory_background();
- }
- return isolate_->counters()->gc_finalize_reduce_memory_foreground();
- } else {
- if (isolate_->IsIsolateInBackground()) {
- return isolate_->counters()->gc_finalize_background();
- }
- return isolate_->counters()->gc_finalize_foreground();
- }
- } else {
- if (isolate_->IsIsolateInBackground()) {
- return isolate_->counters()->gc_compactor_background();
- }
- return isolate_->counters()->gc_compactor_foreground();
- }
- }
-}
-
-TimedHistogram* Heap::GCTypeTimer(GarbageCollector collector) {
- if (IsYoungGenerationCollector(collector)) {
- return isolate_->counters()->gc_scavenger();
- }
- if (incremental_marking()->IsStopped()) {
- return isolate_->counters()->gc_compactor();
- }
- if (ShouldReduceMemory()) {
- return isolate_->counters()->gc_finalize_reduce_memory();
- }
- if (incremental_marking()->IsMarking() &&
- incremental_marking()->local_marking_worklists()->IsPerContextMode()) {
- return isolate_->counters()->gc_finalize_measure_memory();
- }
- return isolate_->counters()->gc_finalize();
-}
-
void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// Since we are ignoring the return value, the exact choice of space does
@@ -1662,6 +1739,19 @@ Heap::DevToolsTraceEventScope::~DevToolsTraceEventScope() {
heap_->SizeOfObjects());
}
+static GCType GetGCTypeFromGarbageCollector(GarbageCollector collector) {
+ switch (collector) {
+ case GarbageCollector::MARK_COMPACTOR:
+ return kGCTypeMarkSweepCompact;
+ case GarbageCollector::SCAVENGER:
+ return kGCTypeScavenge;
+ case GarbageCollector::MINOR_MARK_COMPACTOR:
+ return kGCTypeMinorMarkCompact;
+ default:
+ UNREACHABLE();
+ }
+}
+
bool Heap::CollectGarbage(AllocationSpace space,
GarbageCollectionReason gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
@@ -1673,62 +1763,64 @@ bool Heap::CollectGarbage(AllocationSpace space,
CHECK(always_allocate());
FatalProcessOutOfMemory("GC during deserialization");
}
- const char* collector_reason = nullptr;
- GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
- is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
- current_gc_flags_ & kForcedGC ||
- force_gc_on_next_allocation_;
- is_current_gc_for_heap_profiler_ =
- gc_reason == GarbageCollectionReason::kHeapProfiler;
- if (force_gc_on_next_allocation_) force_gc_on_next_allocation_ = false;
- DevToolsTraceEventScope devtools_trace_event_scope(
- this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
- GarbageCollectionReasonToString(gc_reason));
+ // CollectGarbage consists of three parts:
+ // 1. The prologue part which may execute callbacks. These callbacks may
+ // allocate and trigger another garbage collection.
+ // 2. The main garbage collection phase.
+ // 3. The epilogue part which may execute callbacks. These callbacks may
+ // allocate and trigger another garbage collection.
+
+ // Part 1: Invoke all callbacks which should happen before the actual garbage
+ // collection is triggered. Note that these callbacks may trigger another
+ // garbage collection since they may allocate.
- // Filter on-stack reference below this method.
- isolate()
- ->global_handles()
- ->CleanupOnStackReferencesBelowCurrentStackPosition();
+ DCHECK(AllowGarbageCollection::IsAllowed());
// Ensure that all pending phantom callbacks are invoked.
isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
- // The VM is in the GC state until exiting this function.
- VMState<GC> state(isolate());
-
-#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
- // Reset the allocation timeout, but make sure to allow at least a few
- // allocations after a collection. The reason for this is that we have a lot
- // of allocation sequences and we assume that a garbage collection will allow
- // the subsequent allocation attempts to go through.
- if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
- allocation_timeout_ =
- std::max(6, NextAllocationTimeout(allocation_timeout_));
- }
-#endif
-
- // There may be an allocation memento behind objects in new space. Upon
- // evacuation of a non-full new space (or if we are on the last page) there
- // may be uninitialized memory behind top. We fill the remainder of the page
- // with a filler.
- if (new_space()) new_space()->MakeLinearAllocationAreaIterable();
+ const char* collector_reason = nullptr;
+ GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
+ GCType gc_type = GetGCTypeFromGarbageCollector(collector);
- if (IsYoungGenerationCollector(collector) &&
- !incremental_marking()->IsStopped()) {
- if (FLAG_trace_incremental_marking) {
- isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Scavenge during marking.\n");
+ {
+ GCCallbacksScope scope(this);
+ // Temporarily override any embedder stack state, as callbacks may create
+ // their own state on the stack and recursively trigger GC.
+ EmbedderStackStateScope embedder_scope(
+ this, EmbedderStackStateScope::kExplicitInvocation,
+ EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
+ if (scope.CheckReenter()) {
+ AllowGarbageCollection allow_gc;
+ AllowJavascriptExecution allow_js(isolate());
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
+ VMState<EXTERNAL> callback_state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
}
}
- size_t freed_global_handles = 0;
-
- size_t committed_memory_before = 0;
+ // Part 2: The main garbage collection phase.
+ DisallowGarbageCollection no_gc_during_gc;
- if (collector == GarbageCollector::MARK_COMPACTOR) {
- committed_memory_before = CommittedOldGenerationMemory();
- if (cpp_heap()) {
+ size_t freed_global_handles = 0;
+ size_t committed_memory_before = collector == GarbageCollector::MARK_COMPACTOR
+ ? CommittedOldGenerationMemory()
+ : 0;
+ {
+ tracer()->StartObservablePause();
+ VMState<GC> state(isolate());
+ DevToolsTraceEventScope devtools_trace_event_scope(
+ this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
+ GarbageCollectionReasonToString(gc_reason));
+
+ // Filter on-stack reference below this method.
+ isolate()
+ ->global_handles()
+ ->CleanupOnStackReferencesBelowCurrentStackPosition();
+
+ if (collector == GarbageCollector::MARK_COMPACTOR && cpp_heap()) {
// CppHeap needs a stack marker at the top of all entry points to allow
// deterministic passes over the stack. E.g., a verifier that should only
// find a subset of references of the marker.
@@ -1738,69 +1830,32 @@ bool Heap::CollectGarbage(AllocationSpace space,
static_cast<v8::internal::CppHeap*>(cpp_heap())
->SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
}
- }
-
- {
- tracer()->Start(collector, gc_reason, collector_reason);
- DCHECK(AllowGarbageCollection::IsAllowed());
- DisallowGarbageCollection no_gc_during_gc;
- GarbageCollectionPrologue();
+ GarbageCollectionPrologue(gc_reason, gc_callback_flags);
{
- TimedHistogram* gc_type_timer = GCTypeTimer(collector);
- TimedHistogramScope histogram_timer_scope(gc_type_timer, isolate_);
- TRACE_EVENT0("v8", gc_type_timer->name());
-
- TimedHistogram* gc_type_priority_timer = GCTypePriorityTimer(collector);
- OptionalTimedHistogramScopeMode mode =
- isolate_->IsMemorySavingsModeActive()
- ? OptionalTimedHistogramScopeMode::DONT_TAKE_TIME
- : OptionalTimedHistogramScopeMode::TAKE_TIME;
- OptionalTimedHistogramScope histogram_timer_priority_scope(
- gc_type_priority_timer, isolate_, mode);
-
- if (!IsYoungGenerationCollector(collector)) {
- PROFILE(isolate_, CodeMovingGCEvent());
- }
-
- GCType gc_type;
-
- switch (collector) {
- case GarbageCollector::MARK_COMPACTOR:
- gc_type = kGCTypeMarkSweepCompact;
- break;
- case GarbageCollector::SCAVENGER:
- gc_type = kGCTypeScavenge;
- break;
- case GarbageCollector::MINOR_MARK_COMPACTOR:
- gc_type = kGCTypeMinorMarkCompact;
- break;
- default:
- UNREACHABLE();
+ GCTracer::RecordGCPhasesInfo record_gc_phases_info(this, collector);
+ base::Optional<TimedHistogramScope> histogram_timer_scope;
+ base::Optional<OptionalTimedHistogramScope>
+ histogram_timer_priority_scope;
+ if (record_gc_phases_info.type_timer) {
+ histogram_timer_scope.emplace(record_gc_phases_info.type_timer,
+ isolate_);
+ TRACE_EVENT0("v8", record_gc_phases_info.type_timer->name());
}
-
- {
- GCCallbacksScope scope(this);
- // Temporary override any embedder stack state as callbacks may create
- // their own state on the stack and recursively trigger GC.
- EmbedderStackStateScope embedder_scope(
- local_embedder_heap_tracer(),
- EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
- if (scope.CheckReenter()) {
- AllowGarbageCollection allow_gc;
- AllowJavascriptExecution allow_js(isolate());
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
- VMState<EXTERNAL> callback_state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
- }
+ if (record_gc_phases_info.type_priority_timer) {
+ OptionalTimedHistogramScopeMode mode =
+ isolate_->IsMemorySavingsModeActive()
+ ? OptionalTimedHistogramScopeMode::DONT_TAKE_TIME
+ : OptionalTimedHistogramScopeMode::TAKE_TIME;
+ histogram_timer_priority_scope.emplace(
+ record_gc_phases_info.type_priority_timer, isolate_, mode);
}
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
tp_heap_->CollectGarbage();
} else {
- freed_global_handles +=
- PerformGarbageCollection(collector, gc_callback_flags);
+ freed_global_handles += PerformGarbageCollection(
+ collector, gc_reason, collector_reason, gc_callback_flags);
}
// Clear flags describing the current GC now that the current GC is
// complete. Do this before GarbageCollectionEpilogue() since that could
@@ -1808,33 +1863,9 @@ bool Heap::CollectGarbage(AllocationSpace space,
is_current_gc_forced_ = false;
is_current_gc_for_heap_profiler_ = false;
- {
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
- gc_post_processing_depth_++;
- {
- AllowGarbageCollection allow_gc;
- AllowJavascriptExecution allow_js(isolate());
- freed_global_handles +=
- isolate_->global_handles()->PostGarbageCollectionProcessing(
- collector, gc_callback_flags);
- }
- gc_post_processing_depth_--;
- }
-
- {
- GCCallbacksScope scope(this);
- if (scope.CheckReenter()) {
- AllowGarbageCollection allow_gc;
- AllowJavascriptExecution allow_js(isolate());
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
- VMState<EXTERNAL> callback_state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
- }
- }
if (collector == GarbageCollector::MARK_COMPACTOR ||
collector == GarbageCollector::SCAVENGER) {
- tracer()->RecordGCPhasesHistograms(gc_type_timer);
+ tracer()->RecordGCPhasesHistograms(record_gc_phases_info.mode);
}
}
@@ -1870,7 +1901,47 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
}
- tracer()->Stop(collector);
+ tracer()->StopAtomicPause();
+ tracer()->StopObservablePause();
+ tracer()->UpdateStatistics(collector);
+ // Young generation cycles finish atomically. It is important that
+ // StopObservablePause, UpdateStatistics and StopCycle are called in this
+ // order; the latter may replace the current event with that of an
+ // interrupted full cycle.
+ if (IsYoungGenerationCollector(collector)) {
+ tracer()->StopCycle(collector);
+ } else {
+ tracer()->StopCycleIfNeeded();
+ }
+ }
+
+ // Part 3: Invoke all callbacks which should happen after the actual garbage
+ // collection is triggered. Note that these callbacks may trigger another
+ // garbage collection since they may allocate.
+
+ {
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
+ gc_post_processing_depth_++;
+ {
+ AllowGarbageCollection allow_gc;
+ AllowJavascriptExecution allow_js(isolate());
+ freed_global_handles +=
+ isolate_->global_handles()->PostGarbageCollectionProcessing(
+ collector, gc_callback_flags);
+ }
+ gc_post_processing_depth_--;
+ }
+
+ {
+ GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowGarbageCollection allow_gc;
+ AllowJavascriptExecution allow_js(isolate());
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
+ VMState<EXTERNAL> callback_state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
+ }
}
if (collector == GarbageCollector::MARK_COMPACTOR &&
@@ -1924,9 +1995,6 @@ void Heap::StartIncrementalMarking(int gc_flags,
// Sweeping needs to be completed such that markbits are all cleared before
// starting marking again.
CompleteSweepingFull();
- if (cpp_heap()) {
- CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
- }
base::Optional<SafepointScope> safepoint_scope;
@@ -1940,9 +2008,9 @@ void Heap::StartIncrementalMarking(int gc_flags,
VerifyCountersAfterSweeping();
#endif
- // Now that sweeping is completed, we can update the current epoch for the new
- // full collection.
- UpdateEpochFull();
+ // Now that sweeping is completed, we can start the next full GC cycle.
+ tracer()->StartCycle(GarbageCollector::MARK_COMPACTOR, gc_reason, nullptr,
+ GCTracer::MarkingType::kIncremental);
set_current_gc_flags(gc_flags);
current_gc_callback_flags_ = gc_callback_flags;
@@ -1951,8 +2019,13 @@ void Heap::StartIncrementalMarking(int gc_flags,
void Heap::CompleteSweepingFull() {
array_buffer_sweeper()->EnsureFinished();
- mark_compact_collector()->EnsureSweepingCompleted();
+ mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kUnifiedHeap);
+
DCHECK(!mark_compact_collector()->sweeping_in_progress());
+ DCHECK_IMPLIES(cpp_heap(),
+ !CppHeap::From(cpp_heap())->sweeper().IsSweepingInProgress());
+ DCHECK(!tracer()->IsSweepingInProgress());
}
void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
@@ -2148,23 +2221,41 @@ GCTracer::Scope::ScopeId CollectorScopeId(GarbageCollector collector) {
} // namespace
size_t Heap::PerformGarbageCollection(
- GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
+ GarbageCollector collector, GarbageCollectionReason gc_reason,
+ const char* collector_reason, const v8::GCCallbackFlags gc_callback_flags) {
DisallowJavascriptExecution no_js(isolate());
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ // We don't really perform a GC here but need this scope for the nested
+ // SafepointScope inside Verify().
+ AllowGarbageCollection allow_gc;
+ Verify();
+ }
+#endif // VERIFY_HEAP
+
if (IsYoungGenerationCollector(collector)) {
CompleteSweepingYoung(collector);
+ tracer()->StartCycle(collector, gc_reason, collector_reason,
+ GCTracer::MarkingType::kAtomic);
} else {
DCHECK_EQ(GarbageCollector::MARK_COMPACTOR, collector);
CompleteSweepingFull();
- if (cpp_heap()) {
- CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
+ // If incremental marking has been activated, the full GC cycle has already
+ // started, so don't start a new one.
+ if (!incremental_marking_->WasActivated()) {
+ tracer()->StartCycle(collector, gc_reason, collector_reason,
+ GCTracer::MarkingType::kAtomic);
}
}
- // The last GC cycle is done after completing sweeping. Start the next GC
- // cycle.
- UpdateCurrentEpoch(collector);
+ tracer()->StartAtomicPause();
+ if (!Heap::IsYoungGenerationCollector(collector) &&
+ incremental_marking_->WasActivated()) {
+ tracer()->UpdateCurrentEvent(gc_reason, collector_reason);
+ }
+ DCHECK(tracer()->IsConsistentWithCollector(collector));
TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
base::Optional<SafepointScope> safepoint_scope;
@@ -2177,15 +2268,6 @@ size_t Heap::PerformGarbageCollection(
collection_barrier_->StopTimeToCollectionTimer();
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- // We don't really perform a GC here but need this scope for the nested
- // SafepointScope inside Verify().
- AllowGarbageCollection allow_gc;
- Verify();
- }
-#endif
-
tracer()->StartInSafepoint();
GarbageCollectionPrologueInSafepoint();
@@ -2250,6 +2332,10 @@ size_t Heap::PerformGarbageCollection(
local_embedder_heap_tracer()->TraceEpilogue();
}
+ if (collector == GarbageCollector::SCAVENGER && cpp_heap()) {
+ CppHeap::From(cpp_heap())->RunMinorGC();
+ }
+
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
// We don't really perform a GC here but need this scope for the nested
@@ -2257,7 +2343,7 @@ size_t Heap::PerformGarbageCollection(
AllowGarbageCollection allow_gc;
Verify();
}
-#endif
+#endif // VERIFY_HEAP
RecomputeLimits(collector);
@@ -2288,20 +2374,36 @@ void Heap::PerformSharedGarbageCollection(Isolate* initiator,
v8::Locker locker(reinterpret_cast<v8::Isolate*>(isolate()));
v8::Isolate::Scope isolate_scope(reinterpret_cast<v8::Isolate*>(isolate()));
- const char* collector_reason = nullptr;
- GarbageCollector collector = GarbageCollector::MARK_COMPACTOR;
-
- tracer()->Start(collector, gc_reason, collector_reason);
-
+ tracer()->StartObservablePause();
+ DCHECK(!incremental_marking_->WasActivated());
DCHECK_NOT_NULL(isolate()->global_safepoint());
isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
client->heap()->FreeSharedLinearAllocationAreas();
+
+ // As long as we need to iterate the client heap to find references into the
+ // shared heap, all client heaps need to be iterable.
+ client->heap()->MakeHeapIterable();
+
+ if (FLAG_concurrent_marking) {
+ client->heap()->concurrent_marking()->Pause();
+ }
});
- PerformGarbageCollection(GarbageCollector::MARK_COMPACTOR);
+ const GarbageCollector collector = GarbageCollector::MARK_COMPACTOR;
+ PerformGarbageCollection(collector, gc_reason, nullptr);
+
+ isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
+ if (FLAG_concurrent_marking &&
+ client->heap()->incremental_marking()->IsMarking()) {
+ client->heap()->concurrent_marking()->RescheduleJobIfNeeded();
+ }
+ });
- tracer()->Stop(collector);
+ tracer()->StopAtomicPause();
+ tracer()->StopObservablePause();
+ tracer()->UpdateStatistics(collector);
+ tracer()->StopCycleIfNeeded();
}
void Heap::CompleteSweepingYoung(GarbageCollector collector) {
@@ -2318,8 +2420,15 @@ void Heap::CompleteSweepingYoung(GarbageCollector collector) {
UNREACHABLE();
}
- TRACE_GC_EPOCH(tracer(), scope_id, ThreadKind::kMain);
- array_buffer_sweeper()->EnsureFinished();
+ {
+ TRACE_GC_EPOCH(tracer(), scope_id, ThreadKind::kMain);
+ array_buffer_sweeper()->EnsureFinished();
+ }
+
+ // If sweeping is in progress and there are no sweeper tasks running, finish
+ // the sweeping here, to avoid having to pause and resume during the young
+ // generation GC.
+ mark_compact_collector()->FinishSweepingIfOutOfWork();
}
void Heap::EnsureSweepingCompleted(HeapObject object) {
@@ -2338,16 +2447,6 @@ void Heap::EnsureSweepingCompleted(HeapObject object) {
mark_compact_collector()->EnsurePageIsSwept(page);
}
-void Heap::UpdateCurrentEpoch(GarbageCollector collector) {
- if (IsYoungGenerationCollector(collector)) {
- epoch_young_ = next_epoch();
- } else if (incremental_marking()->IsStopped()) {
- epoch_full_ = next_epoch();
- }
-}
-
-void Heap::UpdateEpochFull() { epoch_full_ = next_epoch(); }
-
void Heap::RecomputeLimits(GarbageCollector collector) {
if (!((collector == GarbageCollector::MARK_COMPACTOR) ||
(HasLowYoungGenerationAllocationRate() &&
@@ -2448,9 +2547,8 @@ void Heap::MarkCompact() {
SetGCState(MARK_COMPACT);
- LOG(isolate_, ResourceEvent("markcompact", "begin"));
-
- CodeSpaceMemoryModificationScope code_modifcation(this);
+ PROFILE(isolate_, CodeMovingGCEvent());
+ CodeSpaceMemoryModificationScope code_modification(this);
// Disable soft allocation limits in the shared heap, if one exists, as
// promotions into the shared heap should always succeed.
@@ -2470,8 +2568,6 @@ void Heap::MarkCompact() {
mark_compact_collector()->CollectGarbage();
- LOG(isolate_, ResourceEvent("markcompact", "end"));
-
MarkCompactEpilogue();
if (FLAG_allocation_site_pretenuring) {
@@ -2488,13 +2584,16 @@ void Heap::MarkCompact() {
}
void Heap::MinorMarkCompact() {
-#ifdef ENABLE_MINOR_MC
DCHECK(FLAG_minor_mc);
DCHECK(new_space());
+ if (FLAG_trace_incremental_marking && !incremental_marking()->IsStopped()) {
+ isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] MinorMarkCompact during marking.\n");
+ }
+
PauseAllocationObserversScope pause_observers(this);
SetGCState(MINOR_MARK_COMPACT);
- LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
AlwaysAllocateScope always_allocate(this);
@@ -2507,13 +2606,9 @@ void Heap::MinorMarkCompact() {
incremental_marking());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
- minor_mark_compact_collector()->CollectGarbage();
+ minor_mark_compact_collector_->CollectGarbage();
- LOG(isolate_, ResourceEvent("MinorMarkCompact", "end"));
SetGCState(NOT_IN_GC);
-#else
- UNREACHABLE();
-#endif // ENABLE_MINOR_MC
}
void Heap::MarkCompactEpilogue() {
@@ -2560,9 +2655,6 @@ void Heap::EvacuateYoungGeneration() {
mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
- SetGCState(SCAVENGE);
- LOG(isolate_, ResourceEvent("scavenge", "begin"));
-
// Move pages from new->old generation.
PageRange range(new_space()->first_allocatable_address(), new_space()->top());
for (auto it = range.begin(); it != range.end();) {
@@ -2596,14 +2688,16 @@ void Heap::EvacuateYoungGeneration() {
IncrementYoungSurvivorsCounter(promoted);
IncrementPromotedObjectsSize(promoted);
IncrementSemiSpaceCopiedObjectSize(0);
-
- LOG(isolate_, ResourceEvent("scavenge", "end"));
- SetGCState(NOT_IN_GC);
}
void Heap::Scavenge() {
DCHECK_NOT_NULL(new_space());
+ if (FLAG_trace_incremental_marking && !incremental_marking()->IsStopped()) {
+ isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Scavenge during marking.\n");
+ }
+
if (fast_promotion_mode_ && CanPromoteYoungAndExpandOldGeneration(0)) {
tracer()->NotifyYoungGenerationHandling(
YoungGenerationHandling::kFastPromotionDuringScavenge);
@@ -2648,12 +2742,8 @@ void Heap::Scavenge() {
new_lo_space()->ResetPendingObject();
// Implements Cheney's copying algorithm
- LOG(isolate_, ResourceEvent("scavenge", "begin"));
-
scavenger_collector_->CollectGarbage();
- LOG(isolate_, ResourceEvent("scavenge", "end"));
-
SetGCState(NOT_IN_GC);
}
@@ -3345,22 +3435,6 @@ void Heap::OnMoveEvent(HeapObject target, HeapObject source,
PROFILE(isolate_,
NativeContextMoveEvent(source.address(), target.address()));
}
-
- if (FLAG_verify_predictable) {
- ++allocations_count_;
- // Advance synthetic time by making a time request.
- MonotonicallyIncreasingTimeInMs();
-
- UpdateAllocationsHash(source);
- UpdateAllocationsHash(target);
- UpdateAllocationsHash(size_in_bytes);
-
- if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
- PrintAllocationsHash();
- }
- } else if (FLAG_fuzzer_gc_analysis) {
- ++allocations_count_;
- }
}
FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
@@ -3551,7 +3625,8 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
}
void Heap::MakeHeapIterable() {
- mark_compact_collector()->EnsureSweepingCompleted();
+ mark_compact_collector()->EnsureSweepingCompleted(
+ MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->MakeLinearAllocationAreaIterable();
@@ -3590,7 +3665,7 @@ void Heap::FreeSharedLinearAllocationAreas() {
void Heap::FreeMainThreadSharedLinearAllocationAreas() {
if (!isolate()->shared_isolate()) return;
shared_old_allocator_->FreeLinearAllocationArea();
- shared_map_allocator_->FreeLinearAllocationArea();
+ if (shared_map_allocator_) shared_map_allocator_->FreeLinearAllocationArea();
main_thread_local_heap()->FreeSharedLinearAllocationArea();
}
@@ -3805,7 +3880,9 @@ void Heap::FinalizeIncrementalMarkingIncrementally(
NestedTimedHistogramScope incremental_marking_scope(
isolate()->counters()->gc_incremental_marking_finalize());
- TRACE_EVENT1("v8", "V8.GCIncrementalMarkingFinalize", "epoch", epoch_full());
+ TRACE_EVENT1(
+ "v8", "V8.GCIncrementalMarkingFinalize", "epoch",
+ tracer()->CurrentEpoch(GCTracer::Scope::MC_INCREMENTAL_FINALIZE));
TRACE_GC_EPOCH(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE,
ThreadKind::kMain);
@@ -3887,6 +3964,9 @@ class SlotCollectingVisitor final : public ObjectVisitor {
};
void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
+ // Object layout changes are currently not supported on background threads.
+ DCHECK_NULL(LocalHeap::Current());
+
if (!FLAG_verify_heap) return;
PtrComprCageBase cage_base(isolate());
@@ -3896,54 +3976,60 @@ void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
// If you see this check triggering for a freshly allocated object,
// use object->set_map_after_allocation() to initialize its map.
if (pending_layout_change_object_.is_null()) {
- if (object.IsJSObject(cage_base)) {
- // Without double unboxing all in-object fields of a JSObject are tagged.
- return;
- }
- if (object.IsString(cage_base) &&
- (new_map == ReadOnlyRoots(this).thin_string_map() ||
- new_map == ReadOnlyRoots(this).thin_one_byte_string_map())) {
- // When transitioning a string to ThinString,
- // Heap::NotifyObjectLayoutChange doesn't need to be invoked because only
- // tagged fields are introduced.
- return;
- }
- if (FLAG_shared_string_table && object.IsString(cage_base) &&
- InstanceTypeChecker::IsInternalizedString(new_map.instance_type())) {
- // In-place internalization does not change a string's fields.
- //
- // When sharing the string table, the setting and re-setting of maps below
- // can race when there are parallel internalization operations, causing
- // DCHECKs to fail.
- return;
- }
- // Check that the set of slots before and after the transition match.
- SlotCollectingVisitor old_visitor;
- object.IterateFast(cage_base, &old_visitor);
- MapWord old_map_word = object.map_word(cage_base, kRelaxedLoad);
- // Temporarily set the new map to iterate new slots.
- object.set_map_word(MapWord::FromMap(new_map), kRelaxedStore);
- SlotCollectingVisitor new_visitor;
- object.IterateFast(cage_base, &new_visitor);
- // Restore the old map.
- object.set_map_word(old_map_word, kRelaxedStore);
- DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
- for (int i = 0; i < new_visitor.number_of_slots(); i++) {
- DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
- }
-#ifdef V8_EXTERNAL_CODE_SPACE
- DCHECK_EQ(new_visitor.number_of_code_slots(),
- old_visitor.number_of_code_slots());
- for (int i = 0; i < new_visitor.number_of_code_slots(); i++) {
- DCHECK_EQ(new_visitor.code_slot(i), old_visitor.code_slot(i));
- }
-#endif // V8_EXTERNAL_CODE_SPACE
+ VerifySafeMapTransition(object, new_map);
} else {
DCHECK_EQ(pending_layout_change_object_, object);
pending_layout_change_object_ = HeapObject();
}
}
-#endif
+
+void Heap::VerifySafeMapTransition(HeapObject object, Map new_map) {
+ PtrComprCageBase cage_base(isolate());
+
+ if (object.IsJSObject(cage_base)) {
+ // Without double unboxing all in-object fields of a JSObject are tagged.
+ return;
+ }
+ if (object.IsString(cage_base) &&
+ (new_map == ReadOnlyRoots(this).thin_string_map() ||
+ new_map == ReadOnlyRoots(this).thin_one_byte_string_map())) {
+ // When transitioning a string to ThinString,
+ // Heap::NotifyObjectLayoutChange doesn't need to be invoked because only
+ // tagged fields are introduced.
+ return;
+ }
+ if (FLAG_shared_string_table && object.IsString(cage_base) &&
+ InstanceTypeChecker::IsInternalizedString(new_map.instance_type())) {
+ // In-place internalization does not change a string's fields.
+ //
+ // When sharing the string table, the setting and re-setting of maps below
+ // can race when there are parallel internalization operations, causing
+ // DCHECKs to fail.
+ return;
+ }
+ // Check that the set of slots before and after the transition match.
+ SlotCollectingVisitor old_visitor;
+ object.IterateFast(cage_base, &old_visitor);
+ MapWord old_map_word = object.map_word(cage_base, kRelaxedLoad);
+ // Temporarily set the new map to iterate new slots.
+ object.set_map_word(MapWord::FromMap(new_map), kRelaxedStore);
+ SlotCollectingVisitor new_visitor;
+ object.IterateFast(cage_base, &new_visitor);
+ // Restore the old map.
+ object.set_map_word(old_map_word, kRelaxedStore);
+ DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
+ for (int i = 0; i < new_visitor.number_of_slots(); i++) {
+ DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
+ }
+#ifdef V8_EXTERNAL_CODE_SPACE
+ DCHECK_EQ(new_visitor.number_of_code_slots(),
+ old_visitor.number_of_code_slots());
+ for (int i = 0; i < new_visitor.number_of_code_slots(); i++) {
+ DCHECK_EQ(new_visitor.code_slot(i), old_visitor.code_slot(i));
+ }
+#endif // V8_EXTERNAL_CODE_SPACE
+}
+#endif // VERIFY_HEAP
GCIdleTimeHeapState Heap::ComputeHeapState() {
GCIdleTimeHeapState heap_state;
@@ -4338,8 +4424,8 @@ bool Heap::Contains(HeapObject value) const {
return HasBeenSetUp() &&
((new_space_ && new_space_->ToSpaceContains(value)) ||
old_space_->Contains(value) || code_space_->Contains(value) ||
- map_space_->Contains(value) || lo_space_->Contains(value) ||
- code_lo_space_->Contains(value) ||
+ (map_space_ && map_space_->Contains(value)) ||
+ lo_space_->Contains(value) || code_lo_space_->Contains(value) ||
(new_lo_space_ && new_lo_space_->Contains(value)));
}
@@ -4358,7 +4444,7 @@ bool Heap::ContainsCode(HeapObject value) const {
bool Heap::SharedHeapContains(HeapObject value) const {
if (shared_old_space_)
return shared_old_space_->Contains(value) ||
- shared_map_space_->Contains(value);
+ (shared_map_space_ && shared_map_space_->Contains(value));
return false;
}
@@ -4366,6 +4452,7 @@ bool Heap::ShouldBeInSharedOldSpace(HeapObject value) {
if (isolate()->OwnsStringTable()) return false;
if (ReadOnlyHeap::Contains(value)) return false;
if (Heap::InYoungGeneration(value)) return false;
+ if (value.IsExternalString()) return false;
if (value.IsString()) {
return value.IsInternalizedString() ||
String::IsInPlaceInternalizable(String::cast(value));
@@ -4389,6 +4476,7 @@ bool Heap::InSpace(HeapObject value, AllocationSpace space) const {
case CODE_SPACE:
return code_space_->Contains(value);
case MAP_SPACE:
+ DCHECK(map_space_);
return map_space_->Contains(value);
case LO_SPACE:
return lo_space_->Contains(value);
@@ -4418,6 +4506,7 @@ bool Heap::InSpaceSlow(Address addr, AllocationSpace space) const {
case CODE_SPACE:
return code_space_->ContainsSlow(addr);
case MAP_SPACE:
+ DCHECK(map_space_);
return map_space_->ContainsSlow(addr);
case LO_SPACE:
return lo_space_->ContainsSlow(addr);
@@ -4479,7 +4568,9 @@ void Heap::Verify() {
if (new_space_) new_space_->Verify(isolate());
old_space_->Verify(isolate(), &visitor);
- map_space_->Verify(isolate(), &visitor);
+ if (map_space_) {
+ map_space_->Verify(isolate(), &visitor);
+ }
VerifyPointersVisitor no_dirty_regions_visitor(this);
code_space_->Verify(isolate(), &no_dirty_regions_visitor);
@@ -4488,6 +4579,10 @@ void Heap::Verify() {
code_lo_space_->Verify(isolate());
if (new_lo_space_) new_lo_space_->Verify(isolate());
isolate()->string_table()->VerifyIfOwnedBy(isolate());
+
+#if DEBUG
+ VerifyCommittedPhysicalMemory();
+#endif // DEBUG
}
void Heap::VerifyReadOnlyHeap() {
@@ -4534,25 +4629,25 @@ class SlotVerifyingVisitor : public ObjectVisitorWithCageBases {
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Object target = Code::GetCodeFromTargetAddress(rinfo->target_address());
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
- CHECK(
- InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
- (rinfo->IsInConstantPool() &&
- InTypedSet(CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address())));
+ CHECK(InTypedSet(SlotType::kCodeEntry, rinfo->pc()) ||
+ (rinfo->IsInConstantPool() &&
+ InTypedSet(SlotType::kConstPoolCodeEntry,
+ rinfo->constant_pool_entry_address())));
}
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
Object target = rinfo->target_object(cage_base());
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
- CHECK(
- InTypedSet(FULL_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
- InTypedSet(COMPRESSED_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
- InTypedSet(DATA_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
- (rinfo->IsInConstantPool() &&
- InTypedSet(COMPRESSED_OBJECT_SLOT,
- rinfo->constant_pool_entry_address())) ||
- (rinfo->IsInConstantPool() &&
- InTypedSet(FULL_OBJECT_SLOT, rinfo->constant_pool_entry_address())));
+ CHECK(InTypedSet(SlotType::kEmbeddedObjectFull, rinfo->pc()) ||
+ InTypedSet(SlotType::kEmbeddedObjectCompressed, rinfo->pc()) ||
+ InTypedSet(SlotType::kEmbeddedObjectData, rinfo->pc()) ||
+ (rinfo->IsInConstantPool() &&
+ InTypedSet(SlotType::kConstPoolEmbeddedObjectCompressed,
+ rinfo->constant_pool_entry_address())) ||
+ (rinfo->IsInConstantPool() &&
+ InTypedSet(SlotType::kConstPoolEmbeddedObjectFull,
+ rinfo->constant_pool_entry_address())));
}
}
@@ -4587,9 +4682,7 @@ class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
ObjectSlot target) override {
VisitPointer(host, target);
-#ifdef ENABLE_MINOR_MC
- if (FLAG_minor_mc) return VisitPointer(host, target);
-#endif
+ if (FLAG_minor_mc) return;
// Keys are handled separately and should never appear in this set.
CHECK(!InUntypedSet(key));
Object k = *key;
@@ -4681,7 +4774,15 @@ void Heap::VerifyCountersBeforeConcurrentSweeping() {
space->VerifyCountersBeforeConcurrentSweeping();
}
}
-#endif
+
+void Heap::VerifyCommittedPhysicalMemory() {
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
+ space->VerifyCommittedPhysicalMemory();
+ }
+}
+#endif // DEBUG
void Heap::ZapFromSpace() {
if (!new_space_ || !new_space_->IsFromSpaceCommitted()) return;
@@ -4979,10 +5080,6 @@ void Heap::IterateBuiltins(RootVisitor* v) {
++builtin) {
const char* name = Builtins::name(builtin);
v->VisitRootPointer(Root::kBuiltins, name, builtins->builtin_slot(builtin));
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- v->VisitRootPointer(Root::kBuiltins, name,
- builtins->builtin_code_data_container_slot(builtin));
- }
}
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLastTier0;
@@ -5207,8 +5304,8 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->old_space_capacity = old_space_->Capacity();
*stats->code_space_size = code_space_->SizeOfObjects();
*stats->code_space_capacity = code_space_->Capacity();
- *stats->map_space_size = map_space_->SizeOfObjects();
- *stats->map_space_capacity = map_space_->Capacity();
+ *stats->map_space_size = map_space_ ? map_space_->SizeOfObjects() : 0;
+ *stats->map_space_capacity = map_space_ ? map_space_->Capacity() : 0;
*stats->lo_space_size = lo_space_->Size();
*stats->code_lo_space_size = code_lo_space_->Size();
isolate_->global_handles()->RecordStats(stats);
@@ -5434,8 +5531,10 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
max_marking_limit_reached_ =
std::max<double>(max_marking_limit_reached_, current_percent);
}
- } else if (current_percent >= stress_marking_percentage_) {
- stress_marking_percentage_ = NextStressMarkingLimit();
+ } else if (current_percent >=
+ stress_marking_percentage_.load(std::memory_order_relaxed)) {
+ stress_marking_percentage_.store(NextStressMarkingLimit(),
+ std::memory_order_relaxed);
return IncrementalMarkingLimit::kHardLimit;
}
}
@@ -5494,95 +5593,32 @@ bool Heap::ShouldStressCompaction() const {
}
void Heap::EnableInlineAllocation() {
- if (!inline_allocation_disabled_) return;
- inline_allocation_disabled_ = false;
-
// Update inline allocation limit for new space.
if (new_space()) {
- new_space()->AdvanceAllocationObservers();
- new_space()->UpdateInlineAllocationLimit(0);
+ new_space()->EnableInlineAllocation();
+ }
+ // Update inline allocation limit for old spaces.
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
+ base::MutexGuard guard(space->mutex());
+ space->EnableInlineAllocation();
}
}
void Heap::DisableInlineAllocation() {
- if (inline_allocation_disabled_) return;
- inline_allocation_disabled_ = true;
-
// Update inline allocation limit for new space.
if (new_space()) {
- new_space()->UpdateInlineAllocationLimit(0);
+ new_space()->DisableInlineAllocation();
}
-
// Update inline allocation limit for old spaces.
PagedSpaceIterator spaces(this);
CodePageCollectionMemoryModificationScope modification_scope(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
base::MutexGuard guard(space->mutex());
- space->FreeLinearAllocationArea();
- }
-}
-
-HeapObject Heap::AllocateRawWithLightRetrySlowPath(
- int size, AllocationType allocation, AllocationOrigin origin,
- AllocationAlignment alignment) {
- HeapObject result;
- AllocationResult alloc = AllocateRaw(size, allocation, origin, alignment);
- if (alloc.To(&result)) {
- // DCHECK that the successful allocation is not "exception". The one
- // exception to this is when allocating the "exception" object itself, in
- // which case this must be an ROSpace allocation and the exception object
- // in the roots has to be unset.
- DCHECK((CanAllocateInReadOnlySpace() &&
- allocation == AllocationType::kReadOnly &&
- ReadOnlyRoots(this).unchecked_exception() == Smi::zero()) ||
- result != ReadOnlyRoots(this).exception());
- return result;
- }
- // Two GCs before panicking. In newspace will almost always succeed.
- for (int i = 0; i < 2; i++) {
- if (IsSharedAllocationType(allocation)) {
- CollectSharedGarbage(GarbageCollectionReason::kAllocationFailure);
- } else {
- CollectGarbage(alloc.RetrySpace(),
- GarbageCollectionReason::kAllocationFailure);
- }
- alloc = AllocateRaw(size, allocation, origin, alignment);
- if (alloc.To(&result)) {
- DCHECK(result != ReadOnlyRoots(this).exception());
- return result;
- }
- }
- return HeapObject();
-}
-
-HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
- int size, AllocationType allocation, AllocationOrigin origin,
- AllocationAlignment alignment) {
- AllocationResult alloc;
- HeapObject result =
- AllocateRawWithLightRetrySlowPath(size, allocation, origin, alignment);
- if (!result.is_null()) return result;
-
- isolate()->counters()->gc_last_resort_from_handles()->Increment();
- if (IsSharedAllocationType(allocation)) {
- CollectSharedGarbage(GarbageCollectionReason::kLastResort);
-
- AlwaysAllocateScope scope(isolate()->shared_isolate()->heap());
- alloc = AllocateRaw(size, allocation, origin, alignment);
- } else {
- CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
-
- AlwaysAllocateScope scope(this);
- alloc = AllocateRaw(size, allocation, origin, alignment);
- }
-
- if (alloc.To(&result)) {
- DCHECK(result != ReadOnlyRoots(this).exception());
- return result;
+ space->DisableInlineAllocation();
}
- // TODO(1181417): Fix this.
- FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
}
void Heap::SetUp(LocalHeap* main_thread_local_heap) {
@@ -5590,8 +5626,8 @@ void Heap::SetUp(LocalHeap* main_thread_local_heap) {
main_thread_local_heap_ = main_thread_local_heap;
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
- allocation_timeout_ = NextAllocationTimeout();
-#endif
+ heap_allocator_.UpdateAllocationTimeout();
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
tp_heap_ = third_party_heap::Heap::New(isolate());
@@ -5650,6 +5686,7 @@ void Heap::SetUp(LocalHeap* main_thread_local_heap) {
mark_compact_collector_.reset(new MarkCompactCollector(this));
scavenger_collector_.reset(new ScavengerCollector(this));
+ minor_mark_compact_collector_.reset(new MinorMarkCompactCollector(this));
incremental_marking_.reset(
new IncrementalMarking(this, mark_compact_collector_->weak_objects()));
@@ -5688,6 +5725,7 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
read_only_space_ == ro_heap->read_only_space());
space_[RO_SPACE] = nullptr;
read_only_space_ = ro_heap->read_only_space();
+ heap_allocator_.SetReadOnlySpace(read_only_space_);
}
void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
@@ -5698,6 +5736,7 @@ void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
}
read_only_space_ = space;
+ heap_allocator_.SetReadOnlySpace(read_only_space_);
}
class StressConcurrentAllocationObserver : public AllocationObserver {
@@ -5729,15 +5768,15 @@ void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
space_[NEW_SPACE] = new_space_ = new NewSpace(
this, memory_allocator_->data_page_allocator(), initial_semispace_size_,
max_semi_space_size_, new_allocation_info);
+ space_[NEW_LO_SPACE] = new_lo_space_ =
+ new NewLargeObjectSpace(this, NewSpaceCapacity());
}
space_[OLD_SPACE] = old_space_ = new OldSpace(this, old_allocation_info);
space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
- space_[MAP_SPACE] = map_space_ = new MapSpace(this);
- space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this);
- if (has_young_gen) {
- space_[NEW_LO_SPACE] = new_lo_space_ =
- new NewLargeObjectSpace(this, NewSpaceCapacity());
+ if (FLAG_use_map_space) {
+ space_[MAP_SPACE] = map_space_ = new MapSpace(this);
}
+ space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this);
space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
@@ -5746,11 +5785,6 @@ void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
}
tracer_.reset(new GCTracer(this));
-#ifdef ENABLE_MINOR_MC
- minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
-#else
- minor_mark_compact_collector_ = nullptr;
-#endif // ENABLE_MINOR_MC
array_buffer_sweeper_.reset(new ArrayBufferSweeper(this));
gc_idle_time_handler_.reset(new GCIdleTimeHandler());
memory_measurement_.reset(new MemoryMeasurement(isolate()));
@@ -5762,16 +5796,18 @@ void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
local_embedder_heap_tracer_.reset(new LocalEmbedderHeapTracer(isolate()));
embedder_roots_handler_ =
&local_embedder_heap_tracer()->default_embedder_roots_handler();
+ if (Heap::AllocationTrackerForDebugging::IsNeeded()) {
+ allocation_tracker_for_debugging_ =
+ std::make_unique<Heap::AllocationTrackerForDebugging>(this);
+ }
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
mark_compact_collector()->SetUp();
-#ifdef ENABLE_MINOR_MC
- if (minor_mark_compact_collector() != nullptr) {
- minor_mark_compact_collector()->SetUp();
+ if (minor_mark_compact_collector_) {
+ minor_mark_compact_collector_->SetUp();
}
-#endif // ENABLE_MINOR_MC
if (new_space()) {
scavenge_job_.reset(new ScavengeJob());
@@ -5789,7 +5825,7 @@ void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
AddAllocationObserversToAllSpaces(stress_marking_observer_,
stress_marking_observer_);
}
- if (FLAG_stress_scavenge > 0 && new_space()) {
+ if (IsStressingScavenge()) {
stress_scavenge_observer_ = new StressScavengeObserver(this);
new_space()->AddAllocationObserver(stress_scavenge_observer_);
}
@@ -5797,16 +5833,21 @@ void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
write_protect_code_memory_ = FLAG_write_protect_code_memory;
if (isolate()->shared_isolate()) {
- shared_old_space_ = isolate()->shared_isolate()->heap()->old_space();
+ Heap* shared_heap = isolate()->shared_isolate()->heap();
+
+ shared_old_space_ = shared_heap->old_space();
shared_old_allocator_.reset(
new ConcurrentAllocator(main_thread_local_heap(), shared_old_space_));
- shared_map_space_ = isolate()->shared_isolate()->heap()->map_space();
- shared_map_allocator_.reset(
- new ConcurrentAllocator(main_thread_local_heap(), shared_map_space_));
+ if (shared_heap->map_space()) {
+ shared_map_space_ = shared_heap->map_space();
+ shared_map_allocator_.reset(
+ new ConcurrentAllocator(main_thread_local_heap(), shared_map_space_));
+ }
}
main_thread_local_heap()->SetUpMainThread();
+ heap_allocator_.Setup();
}
void Heap::InitializeHashSeed() {
@@ -5822,22 +5863,9 @@ void Heap::InitializeHashSeed() {
0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
}
-int Heap::NextAllocationTimeout(int current_timeout) {
- if (FLAG_random_gc_interval > 0) {
- // If current timeout hasn't reached 0 the GC was caused by something
- // different than --stress-atomic-gc flag and we don't update the timeout.
- if (current_timeout <= 0) {
- return isolate()->fuzzer_rng()->NextInt(FLAG_random_gc_interval + 1);
- } else {
- return current_timeout;
- }
- }
- return FLAG_gc_interval;
-}
-
-void Heap::PrintAllocationsHash() {
- uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
- PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
+// static
+void Heap::InitializeOncePerProcess() {
+ MemoryAllocator::InitializeOncePerProcess();
}
void Heap::PrintMaxMarkingLimitReached() {
@@ -6015,15 +6043,11 @@ void Heap::TearDown() {
UpdateMaximumCommitted();
- if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
- PrintAllocationsHash();
- }
-
if (FLAG_fuzzer_gc_analysis) {
if (FLAG_stress_marking > 0) {
PrintMaxMarkingLimitReached();
}
- if (FLAG_stress_scavenge > 0) {
+ if (IsStressingScavenge()) {
PrintMaxNewSpaceSizeReached();
}
}
@@ -6048,7 +6072,7 @@ void Heap::TearDown() {
delete stress_marking_observer_;
stress_marking_observer_ = nullptr;
}
- if (FLAG_stress_scavenge > 0 && new_space()) {
+ if (IsStressingScavenge()) {
new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
delete stress_scavenge_observer_;
stress_scavenge_observer_ = nullptr;
@@ -6059,13 +6083,10 @@ void Heap::TearDown() {
mark_compact_collector_.reset();
}
-#ifdef ENABLE_MINOR_MC
- if (minor_mark_compact_collector_ != nullptr) {
+ if (minor_mark_compact_collector_) {
minor_mark_compact_collector_->TearDown();
- delete minor_mark_compact_collector_;
- minor_mark_compact_collector_ = nullptr;
+ minor_mark_compact_collector_.reset();
}
-#endif // ENABLE_MINOR_MC
scavenger_collector_.reset();
array_buffer_sweeper_.reset();
@@ -6073,8 +6094,8 @@ void Heap::TearDown() {
concurrent_marking_.reset();
gc_idle_time_handler_.reset();
-
memory_measurement_.reset();
+ allocation_tracker_for_debugging_.reset();
if (memory_reducer_ != nullptr) {
memory_reducer_->TearDown();
@@ -6235,7 +6256,7 @@ void Heap::CompactWeakArrayLists() {
}
void Heap::AddRetainedMap(Handle<NativeContext> context, Handle<Map> map) {
- if (map->is_in_retained_map_list()) {
+ if (map->is_in_retained_map_list() || map->InSharedWritableHeap()) {
return;
}
@@ -6386,20 +6407,9 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
}
PagedSpace* PagedSpaceIterator::Next() {
- int space = counter_++;
- switch (space) {
- case RO_SPACE:
- UNREACHABLE();
- case OLD_SPACE:
- return heap_->old_space();
- case CODE_SPACE:
- return heap_->code_space();
- case MAP_SPACE:
- return heap_->map_space();
- default:
- DCHECK_GT(space, LAST_GROWABLE_PAGED_SPACE);
- return nullptr;
- }
+ DCHECK_GE(counter_, FIRST_GROWABLE_PAGED_SPACE);
+ if (counter_ > LAST_GROWABLE_PAGED_SPACE) return nullptr;
+ return heap_->paged_space(counter_++);
}
SpaceIterator::SpaceIterator(Heap* heap)
@@ -6419,7 +6429,7 @@ bool SpaceIterator::HasNext() {
}
Space* SpaceIterator::Next() {
- DCHECK(HasNext());
+ DCHECK_LE(current_space_, LAST_MUTABLE_SPACE);
Space* space = heap_->space(current_space_++);
DCHECK_NOT_NULL(space);
return space;
@@ -6577,6 +6587,8 @@ HeapObjectIterator::HeapObjectIterator(
default:
break;
}
+ // By not calling |space_iterator_->HasNext()|, we assume that the old
+ // space is first returned and that it has been set up.
object_iterator_ = space_iterator_->Next()->GetObjectIterator(heap_);
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) heap_->tp_heap_->ResetIterator();
}
@@ -7069,6 +7081,7 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
case CODE_SPACE:
return dst == CODE_SPACE && type == CODE_TYPE;
case MAP_SPACE:
+ return dst == MAP_SPACE && type == MAP_TYPE;
case LO_SPACE:
case CODE_LO_SPACE:
case NEW_LO_SPACE:
@@ -7133,7 +7146,7 @@ Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
Builtin maybe_builtin =
OffHeapInstructionStream::TryLookupCode(isolate(), inner_pointer);
if (Builtins::IsBuiltinId(maybe_builtin)) {
- return isolate()->builtins()->code(maybe_builtin);
+ return FromCodeT(isolate()->builtins()->code(maybe_builtin));
}
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
@@ -7178,11 +7191,20 @@ Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
}
}
// TODO(1241665): Remove once the issue is solved.
+ std::shared_ptr<CodeRange> code_range = CodeRange::GetProcessWideCodeRange();
+ void* code_range_embedded_blob_code_copy =
+ code_range ? code_range->embedded_blob_code_copy() : nullptr;
+ Address flags = (isolate()->is_short_builtin_calls_enabled() ? 1 : 0) |
+ (code_range ? 2 : 0) |
+ static_cast<Address>(max_old_generation_size());
+
isolate()->PushParamsAndDie(
reinterpret_cast<void*>(inner_pointer),
const_cast<uint8_t*>(isolate()->embedded_blob_code()),
const_cast<uint8_t*>(Isolate::CurrentEmbeddedBlobCode()),
- reinterpret_cast<void*>(Isolate::CurrentEmbeddedBlobCodeSize()));
+ code_range_embedded_blob_code_copy,
+ reinterpret_cast<void*>(Isolate::CurrentEmbeddedBlobCodeSize()),
+ reinterpret_cast<void*>(flags));
UNREACHABLE();
}
@@ -7334,25 +7356,11 @@ void Heap::WriteBarrierForRange(HeapObject object, TSlot start_slot,
void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
HeapObject object) {
DCHECK(InYoungGeneration(object));
- Page* source_page = Page::FromHeapObject(host);
- RelocInfo::Mode rmode = rinfo->rmode();
- Address addr = rinfo->pc();
- SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
- if (rinfo->IsInConstantPool()) {
- addr = rinfo->constant_pool_entry_address();
- if (RelocInfo::IsCodeTargetMode(rmode)) {
- slot_type = CODE_ENTRY_SLOT;
- } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
- slot_type = COMPRESSED_OBJECT_SLOT;
- } else {
- DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
- slot_type = FULL_OBJECT_SLOT;
- }
- }
- uintptr_t offset = addr - source_page->address();
- DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
- RememberedSet<OLD_TO_NEW>::InsertTyped(source_page, slot_type,
- static_cast<uint32_t>(offset));
+ const MarkCompactCollector::RecordRelocSlotInfo info =
+ MarkCompactCollector::ProcessRelocInfo(host, rinfo, object);
+
+ RememberedSet<OLD_TO_NEW>::InsertTyped(info.memory_chunk, info.slot_type,
+ info.offset);
}
bool Heap::PageFlagsAreConsistent(HeapObject object) {
@@ -7390,12 +7398,6 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
return true;
}
-void Heap::SetEmbedderStackStateForNextFinalization(
- EmbedderHeapTracer::EmbedderStackState stack_state) {
- local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
- stack_state);
-}
-
#ifdef DEBUG
void Heap::IncrementObjectCounters() {
isolate_->counters()->objs_since_last_full()->Increment();
@@ -7403,6 +7405,10 @@ void Heap::IncrementObjectCounters() {
}
#endif // DEBUG
+bool Heap::IsStressingScavenge() {
+ return FLAG_stress_scavenge > 0 && new_space();
+}
+
// StrongRootBlocks are allocated as a block of addresses, prefixed with a
// StrongRootsEntry pointer:
//
@@ -7439,5 +7445,46 @@ void StrongRootBlockAllocator::deallocate(Address* p, size_t n) noexcept {
base::Free(block);
}
+#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
+void Heap::set_allocation_timeout(int allocation_timeout) {
+ heap_allocator_.SetAllocationTimeout(allocation_timeout);
+}
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
+
+EmbedderStackStateScope::EmbedderStackStateScope(
+ Heap* heap, Origin origin,
+ EmbedderHeapTracer::EmbedderStackState stack_state)
+ : local_tracer_(heap->local_embedder_heap_tracer()),
+ old_stack_state_(local_tracer_->embedder_stack_state_) {
+ if (origin == kImplicitThroughTask && heap->overriden_stack_state()) {
+ stack_state = *heap->overriden_stack_state();
+ }
+
+ local_tracer_->embedder_stack_state_ = stack_state;
+ if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state)
+ local_tracer_->NotifyEmptyEmbedderStack();
+}
+
+// static
+EmbedderStackStateScope EmbedderStackStateScope::ExplicitScopeForTesting(
+ LocalEmbedderHeapTracer* local_tracer,
+ EmbedderHeapTracer::EmbedderStackState stack_state) {
+ return EmbedderStackStateScope(local_tracer, stack_state);
+}
+
+EmbedderStackStateScope::EmbedderStackStateScope(
+ LocalEmbedderHeapTracer* local_tracer,
+ EmbedderHeapTracer::EmbedderStackState stack_state)
+ : local_tracer_(local_tracer),
+ old_stack_state_(local_tracer_->embedder_stack_state_) {
+ local_tracer_->embedder_stack_state_ = stack_state;
+ if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state)
+ local_tracer_->NotifyEmptyEmbedderStack();
+}
+
+EmbedderStackStateScope::~EmbedderStackStateScope() {
+ local_tracer_->embedder_stack_state_ = old_stack_state_;
+}
+
} // namespace internal
} // namespace v8
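The heap.cc hunks above turn stress_marking_percentage_ into a std::atomic<int> that is read and updated with std::memory_order_relaxed. A minimal standalone sketch of that relaxed-atomic threshold idiom, assuming a hypothetical Limiter class and NextLimit() helper (plain C++, not V8 code):

#include <atomic>
#include <cstdio>

// Illustrative stand-in for a randomized limit generator.
static int NextLimit() { return 42; }

class Limiter {
 public:
  // Returns true when the current percentage crosses the stored limit and
  // installs the next limit. Relaxed ordering suffices because the value is
  // only a heuristic threshold, not a synchronization point between threads.
  bool ReachedLimit(int current_percent) {
    if (current_percent >= limit_.load(std::memory_order_relaxed)) {
      limit_.store(NextLimit(), std::memory_order_relaxed);
      return true;
    }
    return false;
  }

 private:
  std::atomic<int> limit_{0};
};

int main() {
  Limiter limiter;
  std::printf("%d\n", limiter.ReachedLimit(10));  // 1: 10 >= initial limit 0
  std::printf("%d\n", limiter.ReachedLimit(10));  // 0: 10 < new limit 42
}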
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index ef8d912bfb..29aa5aad76 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -7,7 +7,6 @@
#include <atomic>
#include <cmath>
-#include <map>
#include <memory>
#include <unordered_map>
#include <unordered_set>
@@ -27,6 +26,8 @@
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/heap/allocation-observer.h"
+#include "src/heap/allocation-result.h"
+#include "src/heap/heap-allocator.h"
#include "src/init/heap-symbols.h"
#include "src/objects/allocation-site.h"
#include "src/objects/fixed-array.h"
@@ -126,16 +127,10 @@ enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
enum class RetainingPathOption { kDefault, kTrackEphemeronPath };
-enum class AllocationOrigin {
- kGeneratedCode = 0,
- kRuntime = 1,
- kGC = 2,
- kFirstAllocationOrigin = kGeneratedCode,
- kLastAllocationOrigin = kGC,
- kNumberOfAllocationOrigins = kLastAllocationOrigin + 1
-};
-
-enum class GarbageCollectionReason {
+// These values are persisted to logs. Entries should not be renumbered and
+// numeric values should never be reused. If you add new items here, update
+// src/tools/metrics/histograms/enums.xml in chromium.
+enum class GarbageCollectionReason : int {
kUnknown = 0,
kAllocationFailure = 1,
kAllocationLimit = 2,
@@ -162,11 +157,14 @@ enum class GarbageCollectionReason {
kGlobalAllocationLimit = 23,
kMeasureMemory = 24,
kBackgroundAllocationFailure = 25,
- // If you add new items here, then update the incremental_marking_reason,
- // mark_compact_reason, and scavenge_reason counters in counters.h.
- // Also update src/tools/metrics/histograms/enums.xml in chromium.
+
+ kLastReason = kBackgroundAllocationFailure,
};
+static_assert(kGarbageCollectionReasonMaxValue ==
+ static_cast<int>(GarbageCollectionReason::kLastReason),
+ "The value of kGarbageCollectionReasonMaxValue is inconsistent.");
+
enum class YoungGenerationHandling {
kRegularScavenge = 0,
kFastPromotionDuringScavenge = 1,
@@ -208,44 +206,6 @@ class StrongRootsEntry final {
friend class Heap;
};
-class AllocationResult {
- public:
- static inline AllocationResult Retry(AllocationSpace space) {
- return AllocationResult(space);
- }
-
- // Implicit constructor from Object.
- AllocationResult(Object object) // NOLINT
- : object_(object) {
- // AllocationResults can't return Smis, which are used to represent
- // failure and the space to retry in.
- CHECK(!object.IsSmi());
- }
-
- AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
-
- inline bool IsRetry() { return object_.IsSmi(); }
- inline HeapObject ToObjectChecked();
- inline HeapObject ToObject();
- inline Address ToAddress();
- inline AllocationSpace RetrySpace();
-
- template <typename T>
- bool To(T* obj) {
- if (IsRetry()) return false;
- *obj = T::cast(object_);
- return true;
- }
-
- private:
- explicit AllocationResult(AllocationSpace space)
- : object_(Smi::FromInt(static_cast<int>(space))) {}
-
- Object object_;
-};
-
-STATIC_ASSERT(sizeof(AllocationResult) == kSystemPointerSize);
-
#ifdef DEBUG
struct CommentStatistic {
const char* comment;
@@ -265,8 +225,6 @@ using EphemeronRememberedSet =
std::unordered_map<EphemeronHashTable, std::unordered_set<int>,
Object::Hasher>;
-using CollectionEpoch = uint32_t;
-
class Heap {
public:
// Stores ephemeron entries where the EphemeronHashTable is in old-space,
@@ -480,12 +438,8 @@ class Heap {
}
static inline GarbageCollector YoungGenerationCollector() {
-#if ENABLE_MINOR_MC
return (FLAG_minor_mc) ? GarbageCollector::MINOR_MARK_COMPACTOR
: GarbageCollector::SCAVENGER;
-#else
- return GarbageCollector::SCAVENGER;
-#endif // ENABLE_MINOR_MC
}
static inline const char* CollectorName(GarbageCollector collector) {
@@ -545,8 +499,6 @@ class Heap {
void NotifyOldGenerationExpansion(AllocationSpace space, MemoryChunk* chunk);
- void UpdateCurrentEpoch(GarbageCollector collector);
-
inline Address* NewSpaceAllocationTopAddress();
inline Address* NewSpaceAllocationLimitAddress();
inline Address* OldSpaceAllocationTopAddress();
@@ -647,9 +599,6 @@ class Heap {
void CheckHandleCount();
- // Number of "runtime allocations" done so far.
- uint32_t allocations_count() { return allocations_count_; }
-
// Print short heap statistics.
void PrintShortHeapStatistics();
@@ -800,16 +749,9 @@ class Heap {
V8_EXPORT_PRIVATE void AddRetainedMap(Handle<NativeContext> context,
Handle<Map> map);
- // This event is triggered after successful allocation of a new object made
- // by runtime. Allocations of target space for object evacuation do not
- // trigger the event. In order to track ALL allocations one must turn off
- // FLAG_inline_new.
- inline void OnAllocationEvent(HeapObject object, int size_in_bytes);
-
// This event is triggered after object is moved to a new place.
void OnMoveEvent(HeapObject target, HeapObject source, int size_in_bytes);
- inline bool CanAllocateInReadOnlySpace();
bool deserialization_complete() const { return deserialization_complete_; }
// We can only invoke Safepoint() on the main thread local heap after
@@ -866,6 +808,9 @@ class Heap {
// (Re-)Initialize hash seed from flag or RNG.
void InitializeHashSeed();
+ // Invoked once for the process from V8::Initialize.
+ static void InitializeOncePerProcess();
+
// Bootstraps the object heap with the core set of objects required to run.
// Returns whether it succeeded.
bool CreateHeapObjects();
@@ -893,6 +838,7 @@ class Heap {
OldSpace* shared_old_space() { return shared_old_space_; }
CodeSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
+ inline PagedSpace* space_for_maps();
OldLargeObjectSpace* lo_space() { return lo_space_; }
CodeLargeObjectSpace* code_lo_space() { return code_lo_space_; }
NewLargeObjectSpace* new_lo_space() { return new_lo_space_; }
@@ -912,6 +858,8 @@ class Heap {
return memory_allocator_.get();
}
+ inline ConcurrentAllocator* concurrent_allocator_for_maps();
+
inline Isolate* isolate();
MarkCompactCollector* mark_compact_collector() {
@@ -919,17 +867,22 @@ class Heap {
}
MinorMarkCompactCollector* minor_mark_compact_collector() {
- return minor_mark_compact_collector_;
+ return minor_mark_compact_collector_.get();
}
ArrayBufferSweeper* array_buffer_sweeper() {
return array_buffer_sweeper_.get();
}
+ // The potentially overreserved address space region reserved by the code
+ // range if it exists or empty region otherwise.
const base::AddressRegion& code_region();
CodeRange* code_range() { return code_range_.get(); }
+ // The base of the code range if it exists or null address.
+ inline Address code_range_base();
+
LocalHeap* main_thread_local_heap() { return main_thread_local_heap_; }
Heap* AsHeap() { return this; }
@@ -1001,9 +954,6 @@ class Heap {
// Inline allocation. ========================================================
// ===========================================================================
- // Indicates whether inline bump-pointer allocation has been disabled.
- bool inline_allocation_disabled() { return inline_allocation_disabled_; }
-
// Switch whether inline bump-pointer allocation should be used.
V8_EXPORT_PRIVATE void EnableInlineAllocation();
V8_EXPORT_PRIVATE void DisableInlineAllocation();
@@ -1165,6 +1115,9 @@ class Heap {
// - or it was communicated to GC using NotifyObjectLayoutChange.
V8_EXPORT_PRIVATE void VerifyObjectLayoutChange(HeapObject object,
Map new_map);
+ // Checks that this is a safe map transition.
+ V8_EXPORT_PRIVATE void VerifySafeMapTransition(HeapObject object,
+ Map new_map);
#endif
// ===========================================================================
@@ -1196,8 +1149,6 @@ class Heap {
EmbedderHeapTracer* GetEmbedderHeapTracer() const;
void RegisterExternallyReferencedObject(Address* location);
- V8_EXPORT_PRIVATE void SetEmbedderStackStateForNextFinalization(
- EmbedderHeapTracer::EmbedderStackState stack_state);
EmbedderHeapTracer::TraceFlags flags_for_embedder_tracer() const;
@@ -1626,23 +1577,24 @@ class Heap {
#endif
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
- void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
-#endif
+ void V8_EXPORT_PRIVATE set_allocation_timeout(int allocation_timeout);
+#endif // V8_ENABLE_ALLOCATION_TIMEOUT
#ifdef DEBUG
void VerifyCountersAfterSweeping();
void VerifyCountersBeforeConcurrentSweeping();
+ void VerifyCommittedPhysicalMemory();
void Print();
void PrintHandles();
// Report code statistics.
void ReportCodeStatistics(const char* title);
-#endif
+#endif // DEBUG
void* GetRandomMmapAddr() {
void* result = v8::internal::GetRandomMmapAddr();
#if V8_TARGET_ARCH_X64
-#if V8_OS_MACOSX
+#if V8_OS_DARWIN
// The Darwin kernel [as of macOS 10.12.5] does not clean up page
// directory entries [PDE] created from mmap or mach_vm_allocate, even
// after the region is destroyed. Using a virtual address space that is
@@ -1652,7 +1604,7 @@ class Heap {
// space. See crbug.com/700928.
uintptr_t offset = reinterpret_cast<uintptr_t>(result) & kMmapRegionMask;
result = reinterpret_cast<void*>(mmap_region_base_ + offset);
-#endif // V8_OS_MACOSX
+#endif // V8_OS_DARWIN
#endif // V8_TARGET_ARCH_X64
return result;
}
@@ -1667,16 +1619,13 @@ class Heap {
static Isolate* GetIsolateFromWritableObject(HeapObject object);
- CollectionEpoch epoch_young() { return epoch_young_; }
- CollectionEpoch epoch_full() { return epoch_full_; }
-
- void UpdateEpochFull();
-
// Ensure that we have swept all spaces in such a way that we can iterate
// over all objects.
void MakeHeapIterable();
private:
+ class AllocationTrackerForDebugging;
+
using ExternalStringTableUpdaterCallback = String (*)(Heap* heap,
FullObjectSlot pointer);
@@ -1811,7 +1760,8 @@ class Heap {
// Performs garbage collection in a safepoint.
// Returns the number of freed global handles.
size_t PerformGarbageCollection(
- GarbageCollector collector,
+ GarbageCollector collector, GarbageCollectionReason gc_reason,
+ const char* collector_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
// Performs garbage collection in the shared heap.
@@ -1887,11 +1837,6 @@ class Heap {
GCIdleTimeHeapState heap_state, double start_ms,
double deadline_in_ms);
- int NextAllocationTimeout(int current_timeout = 0);
- inline void UpdateAllocationsHash(HeapObject object);
- inline void UpdateAllocationsHash(uint32_t value);
- void PrintAllocationsHash();
-
void PrintMaxMarkingLimitReached();
void PrintMaxNewSpaceSizeReached();
@@ -1920,15 +1865,6 @@ class Heap {
void InvokeIncrementalMarkingPrologueCallbacks();
void InvokeIncrementalMarkingEpilogueCallbacks();
- // Returns the timer used for a given GC type.
- // - GCScavenger: young generation GC
- // - GCCompactor: full GC
- // - GCFinalzeMC: finalization of incremental full GC
- // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
- // memory reduction
- TimedHistogram* GCTypeTimer(GarbageCollector collector);
- TimedHistogram* GCTypePriorityTimer(GarbageCollector collector);
-
// ===========================================================================
// Pretenuring. ==============================================================
// ===========================================================================
@@ -1947,7 +1883,8 @@ class Heap {
// Code that should be run before and after each GC. Includes some
// reporting/verification activities when compiled with DEBUG set.
- void GarbageCollectionPrologue();
+ void GarbageCollectionPrologue(GarbageCollectionReason gc_reason,
+ const v8::GCCallbackFlags gc_callback_flags);
void GarbageCollectionPrologueInSafepoint();
void GarbageCollectionEpilogue(GarbageCollector collector);
void GarbageCollectionEpilogueInSafepoint(GarbageCollector collector);
@@ -2087,6 +2024,8 @@ class Heap {
// Allocation methods. =======================================================
// ===========================================================================
+ HeapAllocator* allocator() { return &heap_allocator_; }
+
// Allocates a JS Map in the heap.
V8_WARN_UNUSED_RESULT AllocationResult
AllocateMap(InstanceType instance_type, int instance_size,
@@ -2097,19 +2036,19 @@ class Heap {
// hardware and OS allow. This is the single choke-point for allocations
// performed by the runtime and should not be bypassed (to extend this to
// inlined allocations, use the Heap::DisableInlineAllocation() support).
- V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
- int size_in_bytes, AllocationType allocation,
- AllocationOrigin origin = AllocationOrigin::kRuntime,
- AllocationAlignment alignment = kTaggedAligned);
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateRaw(int size_in_bytes, AllocationType allocation,
+ AllocationOrigin origin = AllocationOrigin::kRuntime,
+ AllocationAlignment alignment = kTaggedAligned);
// This method will try to allocate objects quickly (AllocationType::kYoung)
// otherwise it falls back to a slower path indicated by the mode.
enum AllocationRetryMode { kLightRetry, kRetryOrFail };
template <AllocationRetryMode mode>
- V8_WARN_UNUSED_RESULT inline HeapObject AllocateRawWith(
- int size, AllocationType allocation,
- AllocationOrigin origin = AllocationOrigin::kRuntime,
- AllocationAlignment alignment = kTaggedAligned);
+ V8_WARN_UNUSED_RESULT V8_INLINE HeapObject
+ AllocateRawWith(int size, AllocationType allocation,
+ AllocationOrigin origin = AllocationOrigin::kRuntime,
+ AllocationAlignment alignment = kTaggedAligned);
// Call AllocateRawWith with kRetryOrFail. Matches the method in LocalHeap.
V8_WARN_UNUSED_RESULT inline Address AllocateRawOrFail(
@@ -2117,25 +2056,6 @@ class Heap {
AllocationOrigin origin = AllocationOrigin::kRuntime,
AllocationAlignment alignment = kTaggedAligned);
- // This method will try to perform an allocation of a given size of a given
- // AllocationType. If the allocation fails, a regular full garbage collection
- // is triggered and the allocation is retried. This is performed multiple
- // times. If after that retry procedure the allocation still fails nullptr is
- // returned.
- V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithLightRetrySlowPath(
- int size, AllocationType allocation, AllocationOrigin origin,
- AllocationAlignment alignment = kTaggedAligned);
-
- // This method will try to perform an allocation of a given size of a given
- // AllocationType. If the allocation fails, a regular full garbage collection
- // is triggered and the allocation is retried. This is performed multiple
- // times. If after that retry procedure the allocation still fails a "hammer"
- // garbage collection is triggered which tries to significantly reduce memory.
- // If the allocation still fails after that a fatal error is thrown.
- V8_WARN_UNUSED_RESULT HeapObject AllocateRawWithRetryOrFailSlowPath(
- int size, AllocationType allocation, AllocationOrigin origin,
- AllocationAlignment alignment = kTaggedAligned);
-
// Allocates a heap object based on the map.
V8_WARN_UNUSED_RESULT AllocationResult Allocate(Handle<Map> map,
AllocationType allocation);
@@ -2179,12 +2099,16 @@ class Heap {
return allocation_type_for_in_place_internalizable_strings_;
}
+ bool IsStressingScavenge();
+
ExternalMemoryAccounting external_memory_;
// This can be calculated directly from a pointer to the heap; however, it is
// more expedient to get at the isolate directly from within Heap methods.
Isolate* isolate_ = nullptr;
+ HeapAllocator heap_allocator_;
+
// These limits are initialized in Heap::ConfigureHeap based on the resource
// constraints and flags.
size_t code_range_size_ = 0;
@@ -2276,18 +2200,12 @@ class Heap {
// Returns the amount of external memory registered since last global gc.
V8_EXPORT_PRIVATE uint64_t AllocatedExternalMemorySinceMarkCompact();
- // How many "runtime allocations" happened.
- uint32_t allocations_count_ = 0;
-
- // Running hash over allocations performed.
- uint32_t raw_allocations_hash_ = 0;
-
// Starts marking when stress_marking_percentage_% of the marking start limit
// is reached.
- int stress_marking_percentage_ = 0;
+ std::atomic<int> stress_marking_percentage_{0};
- // Observer that causes more frequent checks for reached incremental marking
- // limit.
+ // Observer that causes more frequent checks for reached incremental
+ // marking limit.
AllocationObserver* stress_marking_observer_ = nullptr;
// Observer that can cause early scavenge start.
@@ -2321,10 +2239,6 @@ class Heap {
std::atomic<size_t> old_generation_allocation_limit_{0};
size_t global_allocation_limit_ = 0;
- // Indicates that inline bump-pointer allocation has been globally disabled
- // for all spaces. This is used to disable allocations in generated code.
- bool inline_allocation_disabled_ = false;
-
// Weak list heads, threaded through the objects.
// List heads are initialized lazily and contain the undefined_value at start.
// {native_contexts_list_} is an Address instead of an Object to allow the use
@@ -2369,7 +2283,7 @@ class Heap {
std::unique_ptr<GCTracer> tracer_;
std::unique_ptr<MarkCompactCollector> mark_compact_collector_;
- MinorMarkCompactCollector* minor_mark_compact_collector_ = nullptr;
+ std::unique_ptr<MinorMarkCompactCollector> minor_mark_compact_collector_;
std::unique_ptr<ScavengerCollector> scavenger_collector_;
std::unique_ptr<ArrayBufferSweeper> array_buffer_sweeper_;
@@ -2386,6 +2300,8 @@ class Heap {
std::unique_ptr<AllocationObserver> stress_concurrent_allocation_observer_;
std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
std::unique_ptr<MarkingBarrier> marking_barrier_;
+ std::unique_ptr<AllocationTrackerForDebugging>
+ allocation_tracker_for_debugging_;
// This object controls virtual space reserved for code on the V8 heap. This
// is only valid for 64-bit architectures where kRequiresCodeRange.
@@ -2486,13 +2402,6 @@ class Heap {
base::Mutex unprotected_memory_chunks_mutex_;
std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
-#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
- // If the --gc-interval flag is set to a positive value, this
- // variable holds the value indicating the number of allocations
- // remain until the next failure and garbage collection.
- int allocation_timeout_ = 0;
-#endif // V8_ENABLE_ALLOCATION_TIMEOUT
-
std::unordered_map<HeapObject, HeapObject, Object::Hasher> retainer_;
std::unordered_map<HeapObject, Root, Object::Hasher> retaining_root_;
// If an object is retained by an ephemeron, then the retaining key of the
@@ -2509,11 +2418,6 @@ class Heap {
std::unique_ptr<third_party_heap::Heap> tp_heap_;
- // We need two epochs, since there can be scavenges during incremental
- // marking.
- CollectionEpoch epoch_young_ = 0;
- CollectionEpoch epoch_full_ = 0;
-
// Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
friend class ArrayBufferCollector;
@@ -2522,6 +2426,7 @@ class Heap {
friend class EvacuateVisitorBase;
friend class GCCallbacksScope;
friend class GCTracer;
+ friend class HeapAllocator;
friend class HeapObjectIterator;
friend class ScavengeTaskObserver;
friend class IgnoreLocalGCRequests;
@@ -2611,6 +2516,7 @@ class V8_NODISCARD AlwaysAllocateScope {
friend class AlwaysAllocateScopeForTesting;
friend class Evacuator;
friend class Heap;
+ friend class HeapAllocator;
friend class Isolate;
explicit inline AlwaysAllocateScope(Heap* heap);
@@ -2653,9 +2559,8 @@ class V8_NODISCARD CodeSpaceMemoryModificationScope {
Heap* heap_;
};
-// The CodePageCollectionMemoryModificationScope can only be used by the main
-// thread. It will not be enabled if a CodeSpaceMemoryModificationScope is
-// already active.
+// The CodePageCollectionMemoryModificationScope can be used by any thread. It
+// will not be enabled if a CodeSpaceMemoryModificationScope is already active.
class V8_NODISCARD CodePageCollectionMemoryModificationScope {
public:
explicit inline CodePageCollectionMemoryModificationScope(Heap* heap);
@@ -2867,6 +2772,30 @@ struct StrongRootBlockAllocator::rebind {
};
};
+class V8_EXPORT_PRIVATE V8_NODISCARD EmbedderStackStateScope final {
+ public:
+ enum Origin {
+ kImplicitThroughTask,
+ kExplicitInvocation,
+ };
+
+ // Only used for testing where the Origin is always an explicit invocation.
+ static EmbedderStackStateScope ExplicitScopeForTesting(
+ LocalEmbedderHeapTracer* local_tracer,
+ EmbedderHeapTracer::EmbedderStackState stack_state);
+
+ EmbedderStackStateScope(Heap* heap, Origin origin,
+ EmbedderHeapTracer::EmbedderStackState stack_state);
+ ~EmbedderStackStateScope();
+
+ private:
+ EmbedderStackStateScope(LocalEmbedderHeapTracer* local_tracer,
+ EmbedderHeapTracer::EmbedderStackState stack_state);
+
+ LocalEmbedderHeapTracer* const local_tracer_;
+ const EmbedderHeapTracer::EmbedderStackState old_stack_state_;
+};
+
} // namespace internal
} // namespace v8
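heap.h above declares EmbedderStackStateScope, a RAII scope that overrides the tracer's embedder stack state and restores the previous value when it goes out of scope. A hedged sketch of that save-and-restore scope idiom, with Tracer and StackState as illustrative stand-ins rather than the real V8 types:

#include <cassert>

enum class StackState { kMayContainHeapPointers, kNoHeapPointers };

struct Tracer {
  StackState stack_state = StackState::kMayContainHeapPointers;
};

class StackStateScope {
 public:
  StackStateScope(Tracer* tracer, StackState state)
      : tracer_(tracer), old_state_(tracer->stack_state) {
    tracer_->stack_state = state;  // Override for the dynamic extent of the scope.
  }
  ~StackStateScope() { tracer_->stack_state = old_state_; }  // Restore.

  StackStateScope(const StackStateScope&) = delete;
  StackStateScope& operator=(const StackStateScope&) = delete;

 private:
  Tracer* const tracer_;
  const StackState old_state_;
};

int main() {
  Tracer tracer;
  {
    StackStateScope scope(&tracer, StackState::kNoHeapPointers);
    assert(tracer.stack_state == StackState::kNoHeapPointers);
  }
  assert(tracer.stack_state == StackState::kMayContainHeapPointers);
  return 0;
}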
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index 08a34991db..aad12728d7 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -56,6 +56,7 @@ void IncrementalMarkingJob::ScheduleTask(Heap* heap, TaskType task_type) {
SetTaskPending(task_type, true);
auto taskrunner =
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
+
const EmbedderHeapTracer::EmbedderStackState stack_state =
taskrunner->NonNestableTasksEnabled()
? EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers
@@ -97,8 +98,8 @@ void IncrementalMarkingJob::Task::RunInternal() {
TRACE_EVENT_CALL_STATS_SCOPED(isolate(), "v8", "V8.Task");
Heap* heap = isolate()->heap();
- EmbedderStackStateScope scope(heap->local_embedder_heap_tracer(),
- stack_state_);
+ EmbedderStackStateScope scope(
+ heap, EmbedderStackStateScope::kImplicitThroughTask, stack_state_);
if (task_type_ == TaskType::kNormal) {
heap->tracer()->RecordTimeToIncrementalMarkingTask(
heap->MonotonicallyIncreasingTimeInMs() - job_->scheduled_time_);
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index a653877f40..5d7dd4a1dd 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -6,6 +6,7 @@
#include "src/codegen/compilation-cache.h"
#include "src/execution/vm-state-inl.h"
+#include "src/handles/global-handles.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
@@ -191,8 +192,9 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
static_cast<int>(gc_reason));
NestedTimedHistogramScope incremental_marking_scope(
counters->gc_incremental_marking_start());
- TRACE_EVENT1("v8", "V8.GCIncrementalMarkingStart", "epoch",
- heap_->epoch_full());
+ TRACE_EVENT1(
+ "v8", "V8.GCIncrementalMarkingStart", "epoch",
+ heap_->tracer()->CurrentEpoch(GCTracer::Scope::MC_INCREMENTAL_START));
TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_START,
ThreadKind::kMain);
heap_->tracer()->NotifyIncrementalMarkingStart();
@@ -235,11 +237,22 @@ void IncrementalMarking::StartMarking() {
is_compacting_ = collector_->StartCompaction(
MarkCompactCollector::StartCompactionMode::kIncremental);
+
+ auto embedder_flags = heap_->flags_for_embedder_tracer();
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
+ // PrepareForTrace should be called before visitor initialization in
+ // StartMarking. It is only used with CppHeap.
+ heap_->local_embedder_heap_tracer()->PrepareForTrace(embedder_flags);
+ }
+
collector_->StartMarking();
SetState(MARKING);
MarkingBarrier::ActivateAll(heap(), is_compacting_);
+ GlobalHandles::EnableMarkingBarrier(heap()->isolate());
heap_->isolate()->compilation_cache()->MarkCompactPrologue();
@@ -261,8 +274,7 @@ void IncrementalMarking::StartMarking() {
// marking (including write barriers) is fully set up.
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
- heap_->local_embedder_heap_tracer()->TracePrologue(
- heap_->flags_for_embedder_tracer());
+ heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags);
}
heap_->InvokeIncrementalMarkingEpilogueCallbacks();
@@ -273,7 +285,7 @@ void IncrementalMarking::StartBlackAllocation() {
DCHECK(IsMarking());
black_allocation_ = true;
heap()->old_space()->MarkLinearAllocationAreaBlack();
- heap()->map_space()->MarkLinearAllocationAreaBlack();
+ if (heap()->map_space()) heap()->map_space()->MarkLinearAllocationAreaBlack();
heap()->code_space()->MarkLinearAllocationAreaBlack();
heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->MarkLinearAllocationAreaBlack();
@@ -287,7 +299,7 @@ void IncrementalMarking::StartBlackAllocation() {
void IncrementalMarking::PauseBlackAllocation() {
DCHECK(IsMarking());
heap()->old_space()->UnmarkLinearAllocationArea();
- heap()->map_space()->UnmarkLinearAllocationArea();
+ if (heap()->map_space()) heap()->map_space()->UnmarkLinearAllocationArea();
heap()->code_space()->UnmarkLinearAllocationArea();
heap()->safepoint()->IterateLocalHeaps(
[](LocalHeap* local_heap) { local_heap->UnmarkLinearAllocationArea(); });
@@ -410,7 +422,7 @@ void IncrementalMarking::FinalizeIncrementally() {
// 2) Age and retain maps embedded in optimized code.
MarkRoots();
- // Map retaining is needed for perfromance, not correctness,
+ // Map retaining is needed for performance, not correctness,
// so we can do it only once at the beginning of the finalization.
RetainMaps();
@@ -426,83 +438,83 @@ void IncrementalMarking::FinalizeIncrementally() {
}
}
-void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
+void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
if (!IsMarking()) return;
Map filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();
-#ifdef ENABLE_MINOR_MC
MinorMarkCompactCollector::MarkingState* minor_marking_state =
heap()->minor_mark_compact_collector()->marking_state();
-#endif // ENABLE_MINOR_MC
collector_->local_marking_worklists()->Publish();
MarkingBarrier::PublishAll(heap());
PtrComprCageBase cage_base(heap_->isolate());
- collector_->marking_worklists()->Update(
- [
+ collector_->marking_worklists()->Update([
#ifdef DEBUG
- // this is referred inside DCHECK.
- this,
-#endif
-#ifdef ENABLE_MINOR_MC
- minor_marking_state,
+ // this is referred inside DCHECK.
+ this,
#endif
- cage_base, filler_map](HeapObject obj, HeapObject* out) -> bool {
- DCHECK(obj.IsHeapObject());
- // Only pointers to from space have to be updated.
- if (Heap::InFromPage(obj)) {
- MapWord map_word = obj.map_word(cage_base, kRelaxedLoad);
- if (!map_word.IsForwardingAddress()) {
- // There may be objects on the marking deque that do not exist
- // anymore, e.g. left trimmed objects or objects from the root set
- // (frames). If these object are dead at scavenging time, their
- // marking deque entries will not point to forwarding addresses.
- // Hence, we can discard them.
- return false;
- }
- HeapObject dest = map_word.ToForwardingAddress();
- DCHECK_IMPLIES(marking_state()->IsWhite(obj),
- obj.IsFreeSpaceOrFiller());
- *out = dest;
- return true;
- } else if (Heap::InToPage(obj)) {
- // The object may be on a large page or on a page that was moved in
- // new space.
- DCHECK(Heap::IsLargeObject(obj) ||
- Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE));
-#ifdef ENABLE_MINOR_MC
- if (minor_marking_state->IsWhite(obj)) {
- return false;
- }
-#endif // ENABLE_MINOR_MC
- // Either a large object or an object marked by the minor
- // mark-compactor.
- *out = obj;
- return true;
- } else {
- // The object may be on a page that was moved from new to old space.
- // Only applicable during minor MC garbage collections.
- if (Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
-#ifdef ENABLE_MINOR_MC
- if (minor_marking_state->IsWhite(obj)) {
- return false;
- }
-#endif // ENABLE_MINOR_MC
- *out = obj;
- return true;
- }
- DCHECK_IMPLIES(marking_state()->IsWhite(obj),
- obj.IsFreeSpaceOrFiller(cage_base));
- // Skip one word filler objects that appear on the
- // stack when we perform in place array shift.
- if (obj.map(cage_base) != filler_map) {
- *out = obj;
- return true;
- }
+ minor_marking_state, cage_base,
+ filler_map](
+ HeapObject obj,
+ HeapObject* out) -> bool {
+ DCHECK(obj.IsHeapObject());
+ // Only pointers to from space have to be updated.
+ if (Heap::InFromPage(obj)) {
+ DCHECK_IMPLIES(FLAG_minor_mc_sweeping, minor_marking_state->IsWhite(obj));
+ MapWord map_word = obj.map_word(cage_base, kRelaxedLoad);
+ DCHECK_IMPLIES(FLAG_minor_mc_sweeping, !map_word.IsForwardingAddress());
+ if (!map_word.IsForwardingAddress()) {
+ // There may be objects on the marking deque that do not exist
+ // anymore, e.g. left trimmed objects or objects from the root set
+ // (frames). If these object are dead at scavenging time, their
+ // marking deque entries will not point to forwarding addresses.
+ // Hence, we can discard them.
+ return false;
+ }
+ HeapObject dest = map_word.ToForwardingAddress();
+ DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj.IsFreeSpaceOrFiller());
+ if (dest.InSharedHeap()) {
+ // Object got promoted into the shared heap. Drop it from the client
+ // heap marking worklist.
+ return false;
+ }
+ *out = dest;
+ return true;
+ } else if (Heap::InToPage(obj)) {
+ // The object may be on a large page or on a page that was moved in
+ // new space.
+ DCHECK(Heap::IsLargeObject(obj) || Page::FromHeapObject(obj)->IsFlagSet(
+ Page::PAGE_NEW_NEW_PROMOTION));
+ if (minor_marking_state->IsWhite(obj)) {
+ return false;
+ }
+ // Either a large object or an object marked by the minor
+ // mark-compactor.
+ *out = obj;
+ return true;
+ } else {
+ // The object may be on a page that was moved from new to old space.
+ // Only applicable during minor MC garbage collections.
+ if (!Heap::IsLargeObject(obj) &&
+ Page::FromHeapObject(obj)->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+ if (minor_marking_state->IsWhite(obj)) {
return false;
}
- });
+ *out = obj;
+ return true;
+ }
+ DCHECK_IMPLIES(marking_state()->IsWhite(obj),
+ obj.IsFreeSpaceOrFiller(cage_base));
+ // Skip one word filler objects that appear on the
+ // stack when we perform in place array shift.
+ if (obj.map(cage_base) != filler_map) {
+ *out = obj;
+ return true;
+ }
+ return false;
+ }
+ });
collector_->local_weak_objects()->Publish();
weak_objects_->UpdateAfterScavenge();
@@ -533,13 +545,15 @@ StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
LocalEmbedderHeapTracer* local_tracer = heap_->local_embedder_heap_tracer();
const double start = heap_->MonotonicallyIncreasingTimeInMs();
const double deadline = start + expected_duration_ms;
- bool empty_worklist;
- {
+ bool empty_worklist = true;
+ if (local_marking_worklists()->PublishWrapper()) {
+ DCHECK(local_marking_worklists()->IsWrapperEmpty());
+ } else {
+ // Cannot directly publish wrapper objects.
LocalEmbedderHeapTracer::ProcessingScope scope(local_tracer);
HeapObject object;
size_t cnt = 0;
- empty_worklist = true;
- while (local_marking_worklists()->PopEmbedder(&object)) {
+ while (local_marking_worklists()->PopWrapper(&object)) {
scope.TracePossibleWrapper(JSObject::cast(object));
if (++cnt == kObjectsToProcessBeforeDeadlineCheck) {
if (deadline <= heap_->MonotonicallyIncreasingTimeInMs()) {
@@ -586,7 +600,6 @@ void IncrementalMarking::Hurry() {
}
}
-
void IncrementalMarking::Stop() {
if (IsStopped()) return;
if (FLAG_trace_incremental_marking) {
@@ -601,8 +614,7 @@ void IncrementalMarking::Stop() {
std::max(0, old_generation_size_mb - old_generation_limit_mb));
}
- SpaceIterator it(heap_);
- while (it.HasNext()) {
+ for (SpaceIterator it(heap_); it.HasNext();) {
Space* space = it.Next();
if (space == heap_->new_space()) {
space->RemoveAllocationObserver(&new_generation_observer_);
@@ -629,13 +641,11 @@ void IncrementalMarking::Stop() {
background_live_bytes_.clear();
}
-
void IncrementalMarking::Finalize() {
Hurry();
Stop();
}
-
void IncrementalMarking::FinalizeMarking(CompletionAction action) {
DCHECK(!finalize_marking_completed_);
if (FLAG_trace_incremental_marking) {
@@ -784,7 +794,8 @@ StepResult IncrementalMarking::AdvanceWithDeadline(
StepOrigin step_origin) {
NestedTimedHistogramScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
- TRACE_EVENT1("v8", "V8.GCIncrementalMarking", "epoch", heap_->epoch_full());
+ TRACE_EVENT1("v8", "V8.GCIncrementalMarking", "epoch",
+ heap_->tracer()->CurrentEpoch(GCTracer::Scope::MC_INCREMENTAL));
TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL,
ThreadKind::kMain);
DCHECK(!IsStopped());
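
EmbedderStep above drains the wrapper worklist but only consults the clock every kObjectsToProcessBeforeDeadlineCheck objects, so the deadline check does not dominate the per-object cost. Below is a self-contained sketch of that batched deadline-check pattern; the names and the steady_clock time source are illustrative, not V8 API.

#include <chrono>
#include <cstddef>
#include <deque>

// Process items until either the worklist is empty or the deadline has passed,
// checking the clock only once per batch to keep the per-item overhead low.
template <typename T, typename Fn>
bool DrainWithDeadline(std::deque<T>* worklist, Fn process,
                       std::chrono::steady_clock::time_point deadline,
                       size_t batch_size = 1000) {
  size_t processed_in_batch = 0;
  while (!worklist->empty()) {
    process(worklist->front());
    worklist->pop_front();
    if (++processed_in_batch == batch_size) {
      processed_in_batch = 0;
      if (std::chrono::steady_clock::now() >= deadline) {
        return false;  // deadline hit; the worklist may be non-empty
      }
    }
  }
  return true;  // worklist fully drained
}

int main() {
  std::deque<int> work(10000, 42);
  auto deadline = std::chrono::steady_clock::now() + std::chrono::seconds(1);
  long sum = 0;
  bool empty = DrainWithDeadline(&work, [&](int v) { sum += v; }, deadline);
  return empty && sum == 420000 ? 0 : 1;
}
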
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 5ea92e6bad..4a0c196358 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -114,8 +114,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
inline bool IsMarking() const { return state() >= MARKING; }
- inline bool IsMarkingIncomplete() const { return state() == MARKING; }
-
inline bool IsComplete() const { return state() == COMPLETE; }
inline bool IsReadyToOverApproximateWeakClosure() const {
@@ -140,7 +138,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
void FinalizeIncrementally();
- void UpdateMarkingWorklistAfterScavenge();
+ void UpdateMarkingWorklistAfterYoungGenGC();
void UpdateMarkedBytesAfterScavenge(size_t dead_bytes_in_new_space);
void Hurry();
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index 7d28b750e2..d5dc4e41b5 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -5,8 +5,6 @@
#ifndef V8_HEAP_INVALIDATED_SLOTS_INL_H_
#define V8_HEAP_INVALIDATED_SLOTS_INL_H_
-#include <map>
-
#include "src/heap/invalidated-slots.h"
#include "src/heap/spaces.h"
#include "src/objects/objects-inl.h"
@@ -33,14 +31,19 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
HeapObject invalidated_object = HeapObject::FromAddress(invalidated_start_);
if (invalidated_size_ == 0) {
- DCHECK(invalidated_object.map().IsMap());
+ DCHECK(MarkCompactCollector::IsMapOrForwardedMap(invalidated_object.map()));
invalidated_size_ = invalidated_object.Size();
}
int offset = static_cast<int>(slot - invalidated_start_);
- DCHECK_GT(offset, 0);
+
+ // OLD_TO_OLD can have slots in map word unlike other remembered sets.
+ DCHECK_GE(offset, 0);
+ DCHECK_IMPLIES(remembered_set_type_ != OLD_TO_OLD, offset > 0);
+
if (offset < invalidated_size_)
- return invalidated_object.IsValidSlot(invalidated_object.map(), offset);
+ return offset == 0 ||
+ invalidated_object.IsValidSlot(invalidated_object.map(), offset);
NextInvalidatedObject();
return true;
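
IsValid above walks a sorted set of invalidated (start, size) ranges in lockstep with the ascending slot addresses, and with the OLD_TO_OLD change a slot at offset 0, i.e. the map word, is now accepted as well. Below is a stand-alone sketch of that range filter; it assumes ascending queries and omits the per-object IsValidSlot check that the real filter still performs for interior offsets.

#include <cstddef>
#include <cstdint>
#include <map>

using Address = uintptr_t;

// Sorted set of invalidated objects: start address -> size in bytes.
// A slot is rejected if it lies inside such an object, except for offset 0,
// which corresponds to the map word and stays valid.
class SlotFilter {
 public:
  explicit SlotFilter(const std::map<Address, size_t>& invalidated)
      : it_(invalidated.begin()), end_(invalidated.end()) {}

  // Must be called with monotonically increasing slot addresses.
  bool IsValid(Address slot) {
    // Skip invalidated objects that end before the slot.
    while (it_ != end_ && it_->first + it_->second <= slot) ++it_;
    if (it_ == end_ || slot < it_->first) return true;  // outside any range
    return slot == it_->first;                          // map-word slot only
  }

 private:
  std::map<Address, size_t>::const_iterator it_;
  std::map<Address, size_t>::const_iterator end_;
};

int main() {
  std::map<Address, size_t> invalidated = {{0x100, 0x40}, {0x200, 0x20}};
  SlotFilter filter(invalidated);
  bool ok = filter.IsValid(0x80)       // before any invalidated object
            && filter.IsValid(0x100)   // map-word slot at offset 0
            && !filter.IsValid(0x108)  // inside the first invalidated object
            && filter.IsValid(0x180);  // between the two objects
  return ok ? 0 : 1;
}
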
diff --git a/deps/v8/src/heap/invalidated-slots.cc b/deps/v8/src/heap/invalidated-slots.cc
index b3655aaad8..d9ad9547ad 100644
--- a/deps/v8/src/heap/invalidated-slots.cc
+++ b/deps/v8/src/heap/invalidated-slots.cc
@@ -13,15 +13,19 @@ namespace v8 {
namespace internal {
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToOld(MemoryChunk* chunk) {
- return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>());
+ return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>(),
+ OLD_TO_OLD);
}
InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToNew(MemoryChunk* chunk) {
- return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>());
+ return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>(),
+ OLD_TO_NEW);
}
InvalidatedSlotsFilter::InvalidatedSlotsFilter(
- MemoryChunk* chunk, InvalidatedSlots* invalidated_slots) {
+ MemoryChunk* chunk, InvalidatedSlots* invalidated_slots,
+ RememberedSetType remembered_set_type) {
+ USE(remembered_set_type);
invalidated_slots = invalidated_slots ? invalidated_slots : &empty_;
iterator_ = invalidated_slots->begin();
@@ -36,6 +40,7 @@ InvalidatedSlotsFilter::InvalidatedSlotsFilter(
#ifdef DEBUG
last_slot_ = chunk->area_start();
+ remembered_set_type_ = remembered_set_type;
#endif
}
diff --git a/deps/v8/src/heap/invalidated-slots.h b/deps/v8/src/heap/invalidated-slots.h
index 15be3ce44c..032d259e27 100644
--- a/deps/v8/src/heap/invalidated-slots.h
+++ b/deps/v8/src/heap/invalidated-slots.h
@@ -9,6 +9,7 @@
#include <stack>
#include "src/base/atomic-utils.h"
+#include "src/heap/memory-chunk-layout.h"
#include "src/objects/heap-object.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
@@ -33,11 +34,13 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
static InvalidatedSlotsFilter OldToOld(MemoryChunk* chunk);
static InvalidatedSlotsFilter OldToNew(MemoryChunk* chunk);
- explicit InvalidatedSlotsFilter(MemoryChunk* chunk,
- InvalidatedSlots* invalidated_slots);
inline bool IsValid(Address slot);
private:
+ explicit InvalidatedSlotsFilter(MemoryChunk* chunk,
+ InvalidatedSlots* invalidated_slots,
+ RememberedSetType remembered_set_type);
+
InvalidatedSlots::const_iterator iterator_;
InvalidatedSlots::const_iterator iterator_end_;
Address sentinel_;
@@ -47,6 +50,7 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
InvalidatedSlots empty_;
#ifdef DEBUG
Address last_slot_;
+ RememberedSetType remembered_set_type_;
#endif
private:
diff --git a/deps/v8/src/heap/large-spaces.cc b/deps/v8/src/heap/large-spaces.cc
index 7d79c5cdd4..19844ff4c8 100644
--- a/deps/v8/src/heap/large-spaces.cc
+++ b/deps/v8/src/heap/large-spaces.cc
@@ -107,7 +107,7 @@ void LargeObjectSpace::TearDown() {
DeleteEvent("LargeObjectChunk",
reinterpret_cast<void*>(page->address())));
memory_chunk_list_.Remove(page);
- heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
+ heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, page);
}
}
@@ -135,11 +135,11 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
// If so, fail the allocation.
if (!heap()->CanExpandOldGeneration(object_size) ||
!heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
- return AllocationResult::Retry(identity());
+ return AllocationResult::Failure();
}
LargePage* page = AllocateLargePage(object_size, executable);
- if (page == nullptr) return AllocationResult::Retry(identity());
+ if (page == nullptr) return AllocationResult::Failure();
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
HeapObject object = page->GetObject();
UpdatePendingObject(object);
@@ -156,7 +156,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
heap()->NotifyOldGenerationExpansion(identity(), page);
AdvanceAndInvokeAllocationObservers(object.address(),
static_cast<size_t>(object_size));
- return object;
+ return AllocationResult::FromObject(object);
}
AllocationResult OldLargeObjectSpace::AllocateRawBackground(
@@ -171,11 +171,11 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
// If so, fail the allocation.
if (!heap()->CanExpandOldGenerationBackground(local_heap, object_size) ||
!heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap)) {
- return AllocationResult::Retry(identity());
+ return AllocationResult::Failure();
}
LargePage* page = AllocateLargePage(object_size, executable);
- if (page == nullptr) return AllocationResult::Retry(identity());
+ if (page == nullptr) return AllocationResult::Failure();
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
HeapObject object = page->GetObject();
heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
@@ -189,7 +189,7 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
if (identity() == CODE_LO_SPACE) {
heap()->isolate()->AddCodeMemoryChunk(page);
}
- return object;
+ return AllocationResult::FromObject(object);
}
LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
@@ -324,8 +324,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
}
} else {
RemovePage(current, size);
- heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
- current);
+ heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, current);
}
current = next_current;
}
@@ -378,22 +377,35 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
// in map space or read-only space.
Map map = object.map(cage_base);
CHECK(map.IsMap(cage_base));
- CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
+ CHECK(ReadOnlyHeap::Contains(map) ||
+ isolate->heap()->space_for_maps()->Contains(map));
// We have only the following types in the large object space:
- if (!(object.IsAbstractCode(cage_base) || object.IsSeqString(cage_base) ||
- object.IsExternalString(cage_base) ||
- object.IsThinString(cage_base) || object.IsFixedArray(cage_base) ||
- object.IsFixedDoubleArray(cage_base) ||
- object.IsWeakFixedArray(cage_base) ||
- object.IsWeakArrayList(cage_base) ||
- object.IsPropertyArray(cage_base) || object.IsByteArray(cage_base) ||
- object.IsFeedbackVector(cage_base) || object.IsBigInt(cage_base) ||
- object.IsFreeSpace(cage_base) ||
- object.IsFeedbackMetadata(cage_base) || object.IsContext(cage_base) ||
- object.IsUncompiledDataWithoutPreparseData(cage_base) ||
- object.IsPreparseData(cage_base)) &&
- !FLAG_young_generation_large_objects) {
+ const bool is_valid_lo_space_object = //
+ object.IsAbstractCode(cage_base) || //
+ object.IsBigInt(cage_base) || //
+ object.IsByteArray(cage_base) || //
+ object.IsContext(cage_base) || //
+ object.IsExternalString(cage_base) || //
+ object.IsFeedbackMetadata(cage_base) || //
+ object.IsFeedbackVector(cage_base) || //
+ object.IsFixedArray(cage_base) || //
+ object.IsFixedDoubleArray(cage_base) || //
+ object.IsFreeSpace(cage_base) || //
+ object.IsPreparseData(cage_base) || //
+ object.IsPropertyArray(cage_base) || //
+ object.IsScopeInfo() || //
+ object.IsSeqString(cage_base) || //
+ object.IsSwissNameDictionary() || //
+ object.IsThinString(cage_base) || //
+ object.IsUncompiledDataWithoutPreparseData(cage_base) || //
+#if V8_ENABLE_WEBASSEMBLY //
+ object.IsWasmArray() || //
+#endif //
+ object.IsWeakArrayList(cage_base) || //
+ object.IsWeakFixedArray(cage_base);
+ if (!is_valid_lo_space_object) {
+ object.Print();
FATAL("Found invalid Object (instance_type=%i) in large object space.",
object.map(cage_base).instance_type());
}
@@ -434,6 +446,9 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
}
+
+ CHECK(!chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
+ CHECK(!chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
}
for (int i = 0; i < kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
@@ -472,16 +487,16 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
// Do not allocate more objects if promoting the existing object would exceed
// the old generation capacity.
if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
- return AllocationResult::Retry(identity());
+ return AllocationResult::Failure();
}
// Allocation for the first object must succeed independent from the capacity.
if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
- return AllocationResult::Retry(identity());
+ return AllocationResult::Failure();
}
LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
- if (page == nullptr) return AllocationResult::Retry(identity());
+ if (page == nullptr) return AllocationResult::Failure();
// The size of the first object may exceed the capacity.
capacity_ = std::max(capacity_, SizeOfObjects());
@@ -490,7 +505,6 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->SetFlag(MemoryChunk::TO_PAGE);
UpdatePendingObject(result);
-#ifdef ENABLE_MINOR_MC
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
heap()
@@ -498,13 +512,12 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
->non_atomic_marking_state()
->ClearLiveness(page);
}
-#endif // ENABLE_MINOR_MC
page->InitializationMemoryFence();
DCHECK(page->IsLargePage());
DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
AdvanceAndInvokeAllocationObservers(result.address(),
static_cast<size_t>(object_size));
- return result;
+ return AllocationResult::FromObject(result);
}
size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }
@@ -531,7 +544,7 @@ void NewLargeObjectSpace::FreeDeadObjects(
if (is_dead(object)) {
freed_pages = true;
RemovePage(page, size);
- heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+ heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
if (FLAG_concurrent_marking && is_marking) {
heap()->concurrent_marking()->ClearMemoryChunkData(page);
}
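
The allocation paths above replace AllocationResult::Retry(space) with an untagged AllocationResult::Failure() and wrap successes explicitly via FromObject(). Below is a small stand-in type showing the resulting call-site pattern; it is a sketch of the idea only, not V8's AllocationResult implementation.

#include <cstddef>
#include <cstdint>

using Address = uintptr_t;

// Minimal stand-in for an allocation result that is either a failure or an
// object address; success must be wrapped explicitly and checked before use.
class AllocResult {
 public:
  static AllocResult Failure() { return AllocResult(0); }
  static AllocResult FromObject(Address object) { return AllocResult(object); }

  bool IsFailure() const { return object_ == 0; }
  // Returns true and stores the object on success; false on failure.
  bool To(Address* out) const {
    if (IsFailure()) return false;
    *out = object_;
    return true;
  }

 private:
  explicit AllocResult(Address object) : object_(object) {}
  Address object_;  // 0 is used as the failure sentinel in this toy
};

// Example allocator: fails when the requested size exceeds the remaining budget.
AllocResult Allocate(size_t size, size_t* budget) {
  if (size > *budget) return AllocResult::Failure();
  *budget -= size;
  static Address next = 0x1000;
  Address result = next;
  next += size;
  return AllocResult::FromObject(result);
}

int main() {
  size_t budget = 64;
  Address obj;
  if (!Allocate(32, &budget).To(&obj)) return 1;     // succeeds
  if (!Allocate(64, &budget).IsFailure()) return 1;  // fails: over budget
  return 0;
}
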
diff --git a/deps/v8/src/heap/large-spaces.h b/deps/v8/src/heap/large-spaces.h
index 257e3943a3..65734d5b34 100644
--- a/deps/v8/src/heap/large-spaces.h
+++ b/deps/v8/src/heap/large-spaces.h
@@ -94,8 +94,8 @@ class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
virtual void AddPage(LargePage* page, size_t object_size);
virtual void RemovePage(LargePage* page, size_t object_size);
- LargePage* first_page() {
- return reinterpret_cast<LargePage*>(Space::first_page());
+ LargePage* first_page() override {
+ return reinterpret_cast<LargePage*>(memory_chunk_list_.front());
}
iterator begin() { return iterator(first_page()); }
diff --git a/deps/v8/src/heap/local-factory.cc b/deps/v8/src/heap/local-factory.cc
index d8c2ce898a..b767145e09 100644
--- a/deps/v8/src/heap/local-factory.cc
+++ b/deps/v8/src/heap/local-factory.cc
@@ -19,7 +19,12 @@
namespace v8 {
namespace internal {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+LocalFactory::LocalFactory(Isolate* isolate)
+ : roots_(isolate), isolate_for_sandbox_(isolate) {}
+#else
LocalFactory::LocalFactory(Isolate* isolate) : roots_(isolate) {}
+#endif
void LocalFactory::AddToScriptList(Handle<Script> shared) {
// TODO(leszeks): Actually add the script to the main Isolate's script list,
diff --git a/deps/v8/src/heap/local-factory.h b/deps/v8/src/heap/local-factory.h
index 8737e3bfa1..9ad22f7c35 100644
--- a/deps/v8/src/heap/local-factory.h
+++ b/deps/v8/src/heap/local-factory.h
@@ -5,9 +5,6 @@
#ifndef V8_HEAP_LOCAL_FACTORY_H_
#define V8_HEAP_LOCAL_FACTORY_H_
-#include <map>
-#include <vector>
-
#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
@@ -68,13 +65,13 @@ class V8_EXPORT_PRIVATE LocalFactory : public FactoryBase<LocalFactory> {
}
// This is the real Isolate that will be used for allocating and accessing
- // external pointer entries when V8_HEAP_SANDBOX is enabled.
- Isolate* isolate_for_heap_sandbox() {
-#ifdef V8_HEAP_SANDBOX
- return isolate_for_heap_sandbox_;
+ // external pointer entries when V8_SANDBOXED_EXTERNAL_POINTERS is enabled.
+ Isolate* isolate_for_sandbox() {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ return isolate_for_sandbox_;
#else
return nullptr;
-#endif // V8_HEAP_SANDBOX
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
}
inline bool CanAllocateInReadOnlySpace() { return false; }
@@ -86,8 +83,8 @@ class V8_EXPORT_PRIVATE LocalFactory : public FactoryBase<LocalFactory> {
// ------
ReadOnlyRoots roots_;
-#ifdef V8_HEAP_SANDBOX
- Isolate* isolate_for_heap_sandbox_;
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ Isolate* isolate_for_sandbox_;
#endif
#ifdef DEBUG
bool a_script_was_added_to_the_script_list_ = false;
diff --git a/deps/v8/src/heap/local-heap-inl.h b/deps/v8/src/heap/local-heap-inl.h
index 030e5b1932..abff7072df 100644
--- a/deps/v8/src/heap/local-heap-inl.h
+++ b/deps/v8/src/heap/local-heap-inl.h
@@ -72,7 +72,8 @@ Address LocalHeap::AllocateRawOrFail(int object_size, AllocationType type,
AllocationAlignment alignment) {
DCHECK(!FLAG_enable_third_party_heap);
AllocationResult result = AllocateRaw(object_size, type, origin, alignment);
- if (!result.IsRetry()) return result.ToObject().address();
+ HeapObject object;
+ if (result.To(&object)) return object.address();
return PerformCollectionAndAllocateAgain(object_size, type, origin,
alignment);
}
diff --git a/deps/v8/src/heap/local-heap.cc b/deps/v8/src/heap/local-heap.cc
index 0485158799..700016cade 100644
--- a/deps/v8/src/heap/local-heap.cc
+++ b/deps/v8/src/heap/local-heap.cc
@@ -398,7 +398,7 @@ Address LocalHeap::PerformCollectionAndAllocateAgain(
AllocationResult result = AllocateRaw(object_size, type, origin, alignment);
- if (!result.IsRetry()) {
+ if (!result.IsFailure()) {
allocation_failed_ = false;
main_thread_parked_ = false;
return result.ToObjectChecked().address();
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index e945c34cef..ca6a17bf7d 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -40,8 +40,6 @@ void MarkCompactCollector::MarkRootObject(Root root, HeapObject obj) {
}
}
-#ifdef ENABLE_MINOR_MC
-
void MinorMarkCompactCollector::MarkRootObject(HeapObject obj) {
if (Heap::InYoungGeneration(obj) &&
non_atomic_marking_state_.WhiteToGrey(obj)) {
@@ -49,8 +47,6 @@ void MinorMarkCompactCollector::MarkRootObject(HeapObject obj) {
}
}
-#endif
-
void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject obj) {
if (marking_state()->WhiteToGrey(obj)) {
local_marking_worklists()->Push(obj);
@@ -199,10 +195,9 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
// make sure that we skip all set bits in the black area until the
// object ends.
HeapObject black_object = HeapObject::FromAddress(addr);
- Object map_object = black_object.map(cage_base, kAcquireLoad);
- CHECK(map_object.IsMap(cage_base));
- map = Map::cast(map_object);
- DCHECK(map.IsMap(cage_base));
+ map = black_object.map(cage_base, kAcquireLoad);
+ // Map might be forwarded during GC.
+ DCHECK(MarkCompactCollector::IsMapOrForwardedMap(map));
size = black_object.SizeFromMap(map);
CHECK_LE(addr + size, chunk_->area_end());
Address end = addr + size - kTaggedSize;
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 2977b4219d..206cf936df 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -5,6 +5,7 @@
#include "src/heap/mark-compact.h"
#include <unordered_map>
+#include <unordered_set>
#include "src/base/logging.h"
#include "src/base/optional.h"
@@ -19,18 +20,21 @@
#include "src/execution/vm-state-inl.h"
#include "src/handles/global-handles.h"
#include "src/heap/array-buffer-sweeper.h"
+#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-object-registry.h"
+#include "src/heap/concurrent-allocator.h"
+#include "src/heap/evacuation-allocator-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/index-generator.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/large-spaces.h"
-#include "src/heap/local-allocator-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
+#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/object-stats.h"
@@ -48,9 +52,11 @@
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/foreign.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/instance-type.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-objects-inl.h"
#include "src/objects/maybe-object.h"
+#include "src/objects/objects.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi.h"
#include "src/objects/transitions-inl.h"
@@ -211,7 +217,7 @@ class FullMarkingVerifier : public MarkingVerifier {
VerifyMarking(heap_->new_lo_space());
VerifyMarking(heap_->old_space());
VerifyMarking(heap_->code_space());
- VerifyMarking(heap_->map_space());
+ if (heap_->map_space()) VerifyMarking(heap_->map_space());
VerifyMarking(heap_->lo_space());
VerifyMarking(heap_->code_lo_space());
}
@@ -393,7 +399,7 @@ class FullEvacuationVerifier : public EvacuationVerifier {
VerifyEvacuation(heap_->new_space());
VerifyEvacuation(heap_->old_space());
VerifyEvacuation(heap_->code_space());
- VerifyEvacuation(heap_->map_space());
+ if (heap_->map_space()) VerifyEvacuation(heap_->map_space());
}
protected:
@@ -507,6 +513,17 @@ void MarkCompactCollector::TearDown() {
sweeper()->TearDown();
}
+// static
+bool MarkCompactCollector::IsMapOrForwardedMap(Map map) {
+ MapWord map_word = map.map_word(kRelaxedLoad);
+
+ if (map_word.IsForwardingAddress()) {
+ return map_word.ToForwardingAddress().IsMap();
+ } else {
+ return map_word.ToMap().IsMap();
+ }
+}
+
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
DCHECK(!p->NeverEvacuate());
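
IsMapOrForwardedMap above tolerates a map whose own map word has already been replaced by a forwarding pointer while maps are being compacted. Below is a stand-alone sketch of a map word that is either a type pointer or a tagged forwarding pointer; the low-bit tagging used here is an assumption for illustration and is not V8's actual MapWord encoding.

#include <cstdint>

using Address = uintptr_t;

// Toy map word: either a pointer to the object's type ("map") or, during a
// moving GC, a forwarding pointer tagged in the low bit.
class MapWord {
 public:
  static MapWord FromMap(Address map) { return MapWord(map); }
  static MapWord FromForwardingAddress(Address target) {
    return MapWord(target | 1);  // assumes pointers are at least 2-byte aligned
  }

  bool IsForwardingAddress() const { return (value_ & 1) != 0; }
  Address ToForwardingAddress() const { return value_ & ~Address{1}; }
  Address ToMap() const { return value_; }

 private:
  explicit MapWord(Address value) : value_(value) {}
  Address value_;
};

// Accept both live maps and maps that have already been moved: in the latter
// case, follow the forwarding pointer before checking the target.
bool IsMapOrForwardedMap(MapWord word, bool (*is_map)(Address)) {
  if (word.IsForwardingAddress()) return is_map(word.ToForwardingAddress());
  return is_map(word.ToMap());
}

int main() {
  auto is_map = [](Address a) { return a == 0x1000 || a == 0x2000; };
  bool ok = IsMapOrForwardedMap(MapWord::FromMap(0x1000), +is_map) &&
            IsMapOrForwardedMap(MapWord::FromForwardingAddress(0x2000), +is_map);
  return ok ? 0 : 1;
}
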
@@ -543,6 +560,10 @@ bool MarkCompactCollector::StartCompaction(StartCompactionMode mode) {
CollectEvacuationCandidates(heap()->old_space());
+ if (heap()->map_space() && FLAG_compact_maps) {
+ CollectEvacuationCandidates(heap()->map_space());
+ }
+
if (FLAG_compact_code_space &&
(heap()->IsGCWithoutStack() || FLAG_compact_code_space_with_stack)) {
CollectEvacuationCandidates(heap()->code_space());
@@ -550,7 +571,7 @@ bool MarkCompactCollector::StartCompaction(StartCompactionMode mode) {
TraceFragmentation(heap()->code_space());
}
- if (FLAG_trace_fragmentation) {
+ if (FLAG_trace_fragmentation && heap()->map_space()) {
TraceFragmentation(heap()->map_space());
}
@@ -570,8 +591,11 @@ void MarkCompactCollector::StartMarking() {
}
code_flush_mode_ = Heap::GetCodeFlushMode(isolate());
marking_worklists()->CreateContextWorklists(contexts);
- local_marking_worklists_ =
- std::make_unique<MarkingWorklists::Local>(marking_worklists());
+ auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
+ local_marking_worklists_ = std::make_unique<MarkingWorklists::Local>(
+ marking_worklists(),
+ cpp_heap ? cpp_heap->CreateCppMarkingStateForMutatorThread()
+ : MarkingWorklists::Local::kNoCppMarkingState);
local_weak_objects_ = std::make_unique<WeakObjects::Local>(weak_objects());
marking_visitor_ = std::make_unique<MarkingVisitor>(
marking_state(), local_marking_worklists(), local_weak_objects_.get(),
@@ -591,10 +615,6 @@ void MarkCompactCollector::CollectGarbage() {
// update the state as they proceed.
DCHECK(state_ == PREPARE_GC);
-#ifdef ENABLE_MINOR_MC
- heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
-#endif // ENABLE_MINOR_MC
-
MarkLiveObjects();
ClearNonLiveReferences();
VerifyMarking();
@@ -643,7 +663,9 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->old_space());
VerifyMarkbitsAreClean(heap_->code_space());
- VerifyMarkbitsAreClean(heap_->map_space());
+ if (heap_->map_space()) {
+ VerifyMarkbitsAreClean(heap_->map_space());
+ }
VerifyMarkbitsAreClean(heap_->new_space());
// Read-only space should always be black since we never collect any objects
// in it or linked from it.
@@ -655,26 +677,57 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
#endif // VERIFY_HEAP
-void MarkCompactCollector::EnsureSweepingCompleted() {
- if (!sweeper()->sweeping_in_progress()) return;
+void MarkCompactCollector::FinishSweepingIfOutOfWork() {
+ if (sweeper()->sweeping_in_progress() && FLAG_concurrent_sweeping &&
+ !sweeper()->AreSweeperTasksRunning()) {
+ // At this point we know that all concurrent sweeping tasks have run
+ // out of work and quit: all pages are swept. The main thread still needs
+ // to complete sweeping though.
+ EnsureSweepingCompleted(SweepingForcedFinalizationMode::kV8Only);
+ }
+ if (heap()->cpp_heap()) {
+ // Ensure that sweeping is also completed for the C++ managed heap, if one
+ // exists and it's out of work.
+ CppHeap::From(heap()->cpp_heap())->FinishSweepingIfOutOfWork();
+ }
+}
- TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
- ThreadKind::kMain);
+void MarkCompactCollector::EnsureSweepingCompleted(
+ SweepingForcedFinalizationMode mode) {
+ if (sweeper()->sweeping_in_progress()) {
+ TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
+ ThreadKind::kMain);
- sweeper()->EnsureCompleted();
- heap()->old_space()->RefillFreeList();
- heap()->code_space()->RefillFreeList();
- heap()->map_space()->RefillFreeList();
- heap()->map_space()->SortFreeList();
+ sweeper()->EnsureCompleted();
+ heap()->old_space()->RefillFreeList();
+ heap()->code_space()->RefillFreeList();
+ if (heap()->map_space()) {
+ heap()->map_space()->RefillFreeList();
+ heap()->map_space()->SortFreeList();
+ }
- heap()->tracer()->NotifySweepingCompleted();
+ heap()->tracer()->NotifySweepingCompleted();
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap && !evacuation()) {
- FullEvacuationVerifier verifier(heap());
- verifier.Run();
- }
+ if (FLAG_verify_heap && !evacuation()) {
+ FullEvacuationVerifier verifier(heap());
+ verifier.Run();
+ }
#endif
+ }
+
+ if (mode == SweepingForcedFinalizationMode::kUnifiedHeap &&
+ heap()->cpp_heap()) {
+ // Ensure that sweeping is also completed for the C++ managed heap, if one
+ // exists.
+ CppHeap::From(heap()->cpp_heap())->FinishSweepingIfRunning();
+ DCHECK(
+ !CppHeap::From(heap()->cpp_heap())->sweeper().IsSweepingInProgress());
+ }
+
+ DCHECK_IMPLIES(mode == SweepingForcedFinalizationMode::kUnifiedHeap ||
+ !heap()->cpp_heap(),
+ !heap()->tracer()->IsSweepingInProgress());
}
void MarkCompactCollector::EnsurePageIsSwept(Page* page) {
@@ -736,7 +789,8 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
}
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
- DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
+ DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE ||
+ space->identity() == MAP_SPACE);
int number_of_pages = space->CountTotalPages();
size_t area_size = space->AreaSize();
@@ -924,8 +978,11 @@ void MarkCompactCollector::Prepare() {
if (!was_marked_incrementally_) {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
- heap_->local_embedder_heap_tracer()->TracePrologue(
- heap_->flags_for_embedder_tracer());
+ auto embedder_flags = heap_->flags_for_embedder_tracer();
+ // PrepareForTrace should be called before visitor initialization in
+ // StartMarking.
+ heap_->local_embedder_heap_tracer()->PrepareForTrace(embedder_flags);
+ heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags);
}
StartCompaction(StartCompactionMode::kAtomic);
StartMarking();
@@ -973,7 +1030,7 @@ void MarkCompactCollector::VerifyMarking() {
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
heap()->old_space()->VerifyLiveBytes();
- heap()->map_space()->VerifyLiveBytes();
+ if (heap()->map_space()) heap()->map_space()->VerifyLiveBytes();
heap()->code_space()->VerifyLiveBytes();
}
#endif
@@ -1151,7 +1208,14 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
private:
V8_INLINE void MarkObject(HeapObject host, Object object) {
if (!object.IsHeapObject()) return;
- collector_->MarkObject(host, HeapObject::cast(object));
+ HeapObject heap_object = HeapObject::cast(object);
+ // We use this visitor both in client and shared GCs. The client GC should
+ // not mark objects in the shared heap. In shared GCs we are marking each
+ // client's top stack frame, so it is actually legal to encounter references
+ // into the client heap here in a shared GC. We need to bail out in these
+ // cases as well.
+ if (collector_->is_shared_heap() != heap_object.InSharedHeap()) return;
+ collector_->MarkObject(host, heap_object);
}
MarkCompactCollector* const collector_;
@@ -1165,17 +1229,18 @@ class MarkCompactCollector::SharedHeapObjectVisitor final
collector_(collector) {}
void VisitPointer(HeapObject host, ObjectSlot p) final {
- MarkObject(host, p.load(cage_base()));
+ MarkObject(host, p, p.load(cage_base()));
}
void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
MaybeObject object = p.load(cage_base());
HeapObject heap_object;
- if (object.GetHeapObject(&heap_object)) MarkObject(host, heap_object);
+ if (object.GetHeapObject(&heap_object))
+ MarkObject(host, ObjectSlot(p), heap_object);
}
void VisitMapPointer(HeapObject host) final {
- MarkObject(host, host.map(cage_base()));
+ MarkObject(host, host.map_slot(), host.map(cage_base()));
}
void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
@@ -1183,13 +1248,13 @@ class MarkCompactCollector::SharedHeapObjectVisitor final
// The map slot should be handled in VisitMapPointer.
DCHECK_NE(host.map_slot(), p);
DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
- MarkObject(host, p.load(cage_base()));
+ MarkObject(host, p, p.load(cage_base()));
}
}
void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- MarkObject(host, slot.load(code_cage_base()));
+ MarkObject(host, ObjectSlot(slot.address()), slot.load(code_cage_base()));
}
void VisitPointers(HeapObject host, MaybeObjectSlot start,
@@ -1203,19 +1268,37 @@ class MarkCompactCollector::SharedHeapObjectVisitor final
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- MarkObject(host, target);
+ RecordRelocSlot(host, rinfo, target);
}
+
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- MarkObject(host, rinfo->target_object(cage_base()));
+ HeapObject target = rinfo->target_object(cage_base());
+ RecordRelocSlot(host, rinfo, target);
}
private:
- V8_INLINE void MarkObject(HeapObject host, Object object) {
- DCHECK(!BasicMemoryChunk::FromHeapObject(host)->InSharedHeap());
+ V8_INLINE void MarkObject(HeapObject host, ObjectSlot slot, Object object) {
+ DCHECK(!host.InSharedHeap());
if (!object.IsHeapObject()) return;
HeapObject heap_object = HeapObject::cast(object);
- if (!BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap()) return;
- collector_->MarkObject(host, heap_object);
+ if (!heap_object.InSharedHeap()) return;
+ RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::NON_ATOMIC>(
+ MemoryChunk::FromHeapObject(host), slot.address());
+ collector_->MarkRootObject(Root::kClientHeap, heap_object);
+ }
+
+ V8_INLINE void RecordRelocSlot(Code host, RelocInfo* rinfo,
+ HeapObject target) {
+ if (ShouldRecordRelocSlot(host, rinfo, target)) {
+ RecordRelocSlotInfo info = ProcessRelocInfo(host, rinfo, target);
+ RememberedSet<OLD_TO_SHARED>::InsertTyped(info.memory_chunk,
+ info.slot_type, info.offset);
+ }
+ }
+
+ V8_INLINE bool ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
+ HeapObject target) {
+ return BasicMemoryChunk::FromHeapObject(target)->InSharedHeap();
}
MarkCompactCollector* const collector_;
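
The SharedHeapObjectVisitor changes above record each client-to-shared reference in the OLD_TO_SHARED remembered set (keyed by the host's memory chunk and the slot's offset within it) in addition to marking the shared target. Below is a toy model of that recording step; the chunk size and data structures are placeholders, not V8's RememberedSet implementation.

#include <cstddef>
#include <cstdint>
#include <map>
#include <set>

using Address = uintptr_t;
constexpr Address kChunkAlignment = 256 * 1024;  // assumed chunk size for the sketch

// Toy remembered set: for each chunk (identified by its base address) the set
// of slot offsets that hold references into the shared heap.
class RememberedSet {
 public:
  void Insert(Address slot) {
    Address chunk = slot & ~(kChunkAlignment - 1);
    slots_[chunk].insert(static_cast<uint32_t>(slot - chunk));
  }
  size_t CountFor(Address chunk) const {
    auto it = slots_.find(chunk);
    return it == slots_.end() ? 0 : it->second.size();
  }

 private:
  std::map<Address, std::set<uint32_t>> slots_;
};

// Visiting a slot in a client-heap object: if the referenced object lives in
// the shared heap, remember the slot so a later shared GC can update it.
void VisitSlot(Address slot, bool target_in_shared_heap,
               RememberedSet* old_to_shared) {
  if (!target_in_shared_heap) return;
  old_to_shared->Insert(slot);
}

int main() {
  RememberedSet old_to_shared;
  Address chunk = 16 * kChunkAlignment;
  VisitSlot(chunk + 0x40, /*target_in_shared_heap=*/true, &old_to_shared);
  VisitSlot(chunk + 0x48, /*target_in_shared_heap=*/false, &old_to_shared);
  return old_to_shared.CountFor(chunk) == 1 ? 0 : 1;
}
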
@@ -1345,6 +1428,10 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
p.address());
}
+ inline void VisitMapPointer(HeapObject host) final {
+ VisitPointer(host, host.map_slot());
+ }
+
inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
DCHECK(!MapWord::IsPacked(p.Relaxed_Load(cage_base()).ptr()));
RecordMigratedSlot(host, p.load(cage_base()), p.address());
@@ -1518,10 +1605,19 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
- dst.IterateBodyFast(dst.map(cage_base), size, base->record_visitor_);
+      // The object's map may be relocated during GC; loading the old map
+      // here is fine since old and new map store the same content.
+ dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
if (V8_UNLIKELY(FLAG_minor_mc)) {
base->record_visitor_->MarkArrayBufferExtensionPromoted(dst);
}
+ } else if (dest == MAP_SPACE) {
+ DCHECK_OBJECT_SIZE(size);
+ DCHECK(IsAligned(size, kTaggedSize));
+ base->heap_->CopyBlock(dst_addr, src_addr, size);
+ if (mode != MigrationMode::kFast)
+ base->ExecuteMigrationObservers(dest, src, dst, size);
+ dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
} else if (dest == CODE_SPACE) {
DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
base->heap_->CopyBlock(dst_addr, src_addr, size);
@@ -1529,7 +1625,9 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
code.Relocate(dst_addr - src_addr);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
- dst.IterateBodyFast(dst.map(cage_base), size, base->record_visitor_);
+      // The object's map may be relocated during GC; loading the old map
+      // here is fine since old and new map store the same content.
+ dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
} else {
DCHECK_OBJECT_SIZE(size);
DCHECK(dest == NEW_SPACE);
@@ -1541,14 +1639,13 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
}
EvacuateVisitorBase(Heap* heap, EvacuationAllocator* local_allocator,
+ ConcurrentAllocator* shared_old_allocator,
RecordMigratedSlotVisitor* record_visitor)
: heap_(heap),
local_allocator_(local_allocator),
- record_visitor_(record_visitor) {
- if (FLAG_shared_string_table && heap->isolate()->shared_isolate()) {
- shared_string_table_ = true;
- shared_old_allocator_ = heap_->shared_old_allocator_.get();
- }
+ shared_old_allocator_(shared_old_allocator),
+ record_visitor_(record_visitor),
+ shared_string_table_(shared_old_allocator != nullptr) {
migration_function_ = RawMigrateObject<MigrationMode::kFast>;
}
@@ -1584,7 +1681,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
inline bool ShouldPromoteIntoSharedHeap(Map map) {
if (shared_string_table_) {
- return String::IsInPlaceInternalizable(map.instance_type());
+ return String::IsInPlaceInternalizableExcludingExternal(
+ map.instance_type());
}
return false;
}
@@ -1622,7 +1720,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
Heap* heap_;
EvacuationAllocator* local_allocator_;
- ConcurrentAllocator* shared_old_allocator_ = nullptr;
+ ConcurrentAllocator* shared_old_allocator_;
RecordMigratedSlotVisitor* record_visitor_;
std::vector<MigrationObserver*> observers_;
MigrateFunction migration_function_;
@@ -1633,10 +1731,12 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
public:
explicit EvacuateNewSpaceVisitor(
Heap* heap, EvacuationAllocator* local_allocator,
+ ConcurrentAllocator* shared_old_allocator,
RecordMigratedSlotVisitor* record_visitor,
Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
AlwaysPromoteYoung always_promote_young)
- : EvacuateVisitorBase(heap, local_allocator, record_visitor),
+ : EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
+ record_visitor),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
promoted_size_(0),
semispace_copied_size_(0),
@@ -1706,7 +1806,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
AllocationSpace space_allocated_in = NEW_SPACE;
AllocationResult allocation = local_allocator_->Allocate(
NEW_SPACE, size, AllocationOrigin::kGC, alignment);
- if (allocation.IsRetry()) {
+ if (allocation.IsFailure()) {
allocation = AllocateInOldSpace(size, alignment);
space_allocated_in = OLD_SPACE;
}
@@ -1720,7 +1820,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
AllocationAlignment alignment) {
AllocationResult allocation = local_allocator_->Allocate(
OLD_SPACE, size_in_bytes, AllocationOrigin::kGC, alignment);
- if (allocation.IsRetry()) {
+ if (allocation.IsFailure()) {
heap_->FatalProcessOutOfMemory(
"MarkCompactCollector: semi-space copy, fallback in old gen");
}
@@ -1769,7 +1869,7 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
} else if (mode == NEW_TO_OLD) {
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
PtrComprCageBase cage_base = GetPtrComprCageBase(object);
- object.IterateBodyFast(cage_base, record_visitor_);
+ object.IterateFast(cage_base, record_visitor_);
if (V8_UNLIKELY(FLAG_minor_mc)) {
record_visitor_->MarkArrayBufferExtensionPromoted(object);
}
@@ -1790,8 +1890,10 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
public:
EvacuateOldSpaceVisitor(Heap* heap, EvacuationAllocator* local_allocator,
+ ConcurrentAllocator* shared_old_allocator,
RecordMigratedSlotVisitor* record_visitor)
- : EvacuateVisitorBase(heap, local_allocator, record_visitor) {}
+ : EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
+ record_visitor) {}
inline bool Visit(HeapObject object, int size) override {
HeapObject target_object;
@@ -1807,19 +1909,41 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
public:
- explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
+ explicit EvacuateRecordOnlyVisitor(Heap* heap)
+ : heap_(heap)
+#ifdef V8_COMPRESS_POINTERS
+ ,
+ cage_base_(heap->isolate())
+#endif // V8_COMPRESS_POINTERS
+ {
+ }
+
+ // The pointer compression cage base value used for decompression of all
+ // tagged values except references to Code objects.
+ V8_INLINE PtrComprCageBase cage_base() const {
+#ifdef V8_COMPRESS_POINTERS
+ return cage_base_;
+#else
+ return PtrComprCageBase{};
+#endif // V8_COMPRESS_POINTERS
+ }
inline bool Visit(HeapObject object, int size) override {
RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector(),
&heap_->ephemeron_remembered_set_);
- DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
- PtrComprCageBase cage_base = GetPtrComprCageBase(object);
- object.IterateBodyFast(cage_base, &visitor);
+ Map map = object.map(cage_base());
+ // Instead of calling object.IterateBodyFast(cage_base(), &visitor) here
+ // we can shortcut and use the precomputed size value passed to the visitor.
+ DCHECK_EQ(object.SizeFromMap(map), size);
+ object.IterateBodyFast(map, size, &visitor);
return true;
}
private:
Heap* heap_;
+#ifdef V8_COMPRESS_POINTERS
+ const PtrComprCageBase cage_base_;
+#endif // V8_COMPRESS_POINTERS
};
bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p) {
@@ -1920,7 +2044,7 @@ bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
} while (another_ephemeron_iteration_main_thread ||
heap()->concurrent_marking()->another_ephemeron_iteration() ||
!local_marking_worklists()->IsEmpty() ||
- !local_marking_worklists()->IsEmbedderEmpty() ||
+ !local_marking_worklists()->IsWrapperEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
CHECK(local_marking_worklists()->IsEmpty());
@@ -2042,7 +2166,7 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
// is necessary.
work_to_do = !local_marking_worklists()->IsEmpty() ||
- !local_marking_worklists()->IsEmbedderEmpty() ||
+ !local_marking_worklists()->IsWrapperEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
CHECK(local_weak_objects()
->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
@@ -2063,11 +2187,14 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
void MarkCompactCollector::PerformWrapperTracing() {
if (heap_->local_embedder_heap_tracer()->InUse()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
- {
+ if (local_marking_worklists()->PublishWrapper()) {
+ DCHECK(local_marking_worklists()->IsWrapperEmpty());
+ } else {
+ // Cannot directly publish wrapper objects.
LocalEmbedderHeapTracer::ProcessingScope scope(
heap_->local_embedder_heap_tracer());
HeapObject object;
- while (local_marking_worklists()->PopEmbedder(&object)) {
+ while (local_marking_worklists()->PopWrapper(&object)) {
scope.TracePossibleWrapper(JSObject::cast(object));
}
}
@@ -2200,28 +2327,29 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor,
}
void MarkCompactCollector::RecordObjectStats() {
- if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) {
- heap()->CreateObjectStats();
- ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(),
- heap()->dead_object_stats_.get());
- collector.Collect();
- if (V8_UNLIKELY(TracingFlags::gc_stats.load(std::memory_order_relaxed) &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
- std::stringstream live, dead;
- heap()->live_object_stats_->Dump(live);
- heap()->dead_object_stats_->Dump(dead);
- TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
- "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
- "live", TRACE_STR_COPY(live.str().c_str()), "dead",
- TRACE_STR_COPY(dead.str().c_str()));
- }
- if (FLAG_trace_gc_object_stats) {
- heap()->live_object_stats_->PrintJSON("live");
- heap()->dead_object_stats_->PrintJSON("dead");
- }
- heap()->live_object_stats_->CheckpointObjectStats();
- heap()->dead_object_stats_->ClearObjectStats();
- }
+ if (V8_LIKELY(!TracingFlags::is_gc_stats_enabled())) return;
+ // Cannot run during bootstrapping due to incomplete objects.
+ if (isolate()->bootstrapper()->IsActive()) return;
+ heap()->CreateObjectStats();
+ ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(),
+ heap()->dead_object_stats_.get());
+ collector.Collect();
+ if (V8_UNLIKELY(TracingFlags::gc_stats.load(std::memory_order_relaxed) &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
+ std::stringstream live, dead;
+ heap()->live_object_stats_->Dump(live);
+ heap()->dead_object_stats_->Dump(dead);
+ TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
+ "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
+ "live", TRACE_STR_COPY(live.str().c_str()), "dead",
+ TRACE_STR_COPY(dead.str().c_str()));
+ }
+ if (FLAG_trace_gc_object_stats) {
+ heap()->live_object_stats_->PrintJSON("live");
+ heap()->dead_object_stats_->PrintJSON("dead");
+ }
+ heap()->live_object_stats_->CheckpointObjectStats();
+ heap()->dead_object_stats_->ClearObjectStats();
}
void MarkCompactCollector::MarkLiveObjects() {
@@ -2292,8 +2420,8 @@ void MarkCompactCollector::MarkLiveObjects() {
PerformWrapperTracing();
DrainMarkingWorklist();
} while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
- !local_marking_worklists()->IsEmbedderEmpty());
- DCHECK(local_marking_worklists()->IsEmbedderEmpty());
+ !local_marking_worklists()->IsWrapperEmpty());
+ DCHECK(local_marking_worklists()->IsWrapperEmpty());
DCHECK(local_marking_worklists()->IsEmpty());
}
@@ -2336,7 +2464,7 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
ProcessEphemeronMarking();
- DCHECK(local_marking_worklists()->IsEmbedderEmpty());
+ DCHECK(local_marking_worklists()->IsWrapperEmpty());
DCHECK(local_marking_worklists()->IsEmpty());
}
@@ -2350,6 +2478,7 @@ void MarkCompactCollector::MarkLiveObjects() {
}
if (was_marked_incrementally_) {
MarkingBarrier::DeactivateAll(heap());
+ GlobalHandles::DisableMarkingBarrier(heap()->isolate());
}
epoch_++;
@@ -2369,12 +2498,12 @@ void MarkCompactCollector::ClearNonLiveReferences() {
string_table->DropOldData();
string_table->IterateElements(&internalized_visitor);
string_table->NotifyElementsRemoved(internalized_visitor.PointersRemoved());
-
- ExternalStringTableCleaner external_visitor(heap());
- heap()->external_string_table_.IterateAll(&external_visitor);
- heap()->external_string_table_.CleanUpAll();
}
+ ExternalStringTableCleaner external_visitor(heap());
+ heap()->external_string_table_.IterateAll(&external_visitor);
+ heap()->external_string_table_.CleanUpAll();
+
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
// ProcessFlusheBaselineCandidates should be called after clearing bytecode
@@ -2414,6 +2543,14 @@ void MarkCompactCollector::ClearNonLiveReferences() {
MarkDependentCodeForDeoptimization();
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_SWEEP_EXTERNAL_POINTER_TABLE);
+ isolate()->external_pointer_table().Sweep(isolate());
+ }
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
+
DCHECK(weak_objects_.transition_arrays.IsEmpty());
DCHECK(weak_objects_.weak_references.IsEmpty());
DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
@@ -2449,7 +2586,7 @@ void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
Map parent = Map::cast(potential_parent);
DisallowGarbageCollection no_gc_obviously;
if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
- TransitionsAccessor(isolate(), parent, &no_gc_obviously)
+ TransitionsAccessor(isolate(), parent)
.HasSimpleTransitionTo(dead_target)) {
ClearPotentialSimpleMapTransition(parent, dead_target);
}
@@ -2809,12 +2946,16 @@ void MarkCompactCollector::ClearWeakCollections() {
if (FLAG_verify_heap) {
Object value = table.ValueAt(i);
if (value.IsHeapObject()) {
- CHECK_IMPLIES(non_atomic_marking_state()->IsBlackOrGrey(key),
- non_atomic_marking_state()->IsBlackOrGrey(
- HeapObject::cast(value)));
+ HeapObject heap_object = HeapObject::cast(value);
+ CHECK_IMPLIES(
+ (!is_shared_heap_ && key.InSharedHeap()) ||
+ non_atomic_marking_state()->IsBlackOrGrey(key),
+ (!is_shared_heap_ && heap_object.InSharedHeap()) ||
+ non_atomic_marking_state()->IsBlackOrGrey(heap_object));
}
}
#endif
+ if (!is_shared_heap_ && key.InSharedHeap()) continue;
if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
table.RemoveEntry(i);
}
@@ -2927,53 +3068,76 @@ bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
}
+// static
+bool MarkCompactCollector::ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
+ HeapObject target) {
+ MemoryChunk* source_chunk = MemoryChunk::FromHeapObject(host);
+ BasicMemoryChunk* target_chunk = BasicMemoryChunk::FromHeapObject(target);
+ return target_chunk->IsEvacuationCandidate() &&
+ !source_chunk->ShouldSkipEvacuationSlotRecording();
+}
+
+// static
MarkCompactCollector::RecordRelocSlotInfo
-MarkCompactCollector::PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
- HeapObject target) {
+MarkCompactCollector::ProcessRelocInfo(Code host, RelocInfo* rinfo,
+ HeapObject target) {
+ DCHECK_EQ(host, rinfo->host());
+
RecordRelocSlotInfo result;
- result.should_record = false;
- Page* target_page = Page::FromHeapObject(target);
- Page* source_page = Page::FromHeapObject(host);
- if (target_page->IsEvacuationCandidate() &&
- (rinfo->host().is_null() ||
- !source_page->ShouldSkipEvacuationSlotRecording())) {
- RelocInfo::Mode rmode = rinfo->rmode();
- Address addr = rinfo->pc();
- SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
- if (rinfo->IsInConstantPool()) {
- addr = rinfo->constant_pool_entry_address();
- if (RelocInfo::IsCodeTargetMode(rmode)) {
- slot_type = CODE_ENTRY_SLOT;
- } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
- slot_type = COMPRESSED_OBJECT_SLOT;
- } else {
- DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
- slot_type = FULL_OBJECT_SLOT;
- }
+ const RelocInfo::Mode rmode = rinfo->rmode();
+ Address addr;
+ SlotType slot_type;
+
+ if (rinfo->IsInConstantPool()) {
+ addr = rinfo->constant_pool_entry_address();
+
+ if (RelocInfo::IsCodeTargetMode(rmode)) {
+ slot_type = SlotType::kConstPoolCodeEntry;
+ } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
+ slot_type = SlotType::kConstPoolEmbeddedObjectCompressed;
+ } else {
+ DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
+ slot_type = SlotType::kConstPoolEmbeddedObjectFull;
+ }
+ } else {
+ addr = rinfo->pc();
+
+ if (RelocInfo::IsCodeTargetMode(rmode)) {
+ slot_type = SlotType::kCodeEntry;
+ } else if (RelocInfo::IsFullEmbeddedObject(rmode)) {
+ slot_type = SlotType::kEmbeddedObjectFull;
+ } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
+ slot_type = SlotType::kEmbeddedObjectCompressed;
+ } else {
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
+ slot_type = SlotType::kEmbeddedObjectData;
}
- uintptr_t offset = addr - source_page->address();
- DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
- result.should_record = true;
- result.memory_chunk = source_page;
- result.slot_type = slot_type;
- result.offset = static_cast<uint32_t>(offset);
}
+
+ MemoryChunk* const source_chunk = MemoryChunk::FromHeapObject(host);
+ const uintptr_t offset = addr - source_chunk->address();
+ DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
+ result.memory_chunk = source_chunk;
+ result.slot_type = slot_type;
+ result.offset = static_cast<uint32_t>(offset);
+
return result;
}
+// static
void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
HeapObject target) {
- RecordRelocSlotInfo info = PrepareRecordRelocSlot(host, rinfo, target);
- if (info.should_record) {
- // Access to TypeSlots need to be protected, since LocalHeaps might
- // publish code in the background thread.
- base::Optional<base::MutexGuard> opt_guard;
- if (FLAG_concurrent_sparkplug) {
- opt_guard.emplace(info.memory_chunk->mutex());
- }
- RememberedSet<OLD_TO_OLD>::InsertTyped(info.memory_chunk, info.slot_type,
- info.offset);
+ if (!ShouldRecordRelocSlot(host, rinfo, target)) return;
+ RecordRelocSlotInfo info = ProcessRelocInfo(host, rinfo, target);
+
+  // Access to TypedSlots needs to be protected, since LocalHeaps might
+ // publish code in the background thread.
+ base::Optional<base::MutexGuard> opt_guard;
+ if (FLAG_concurrent_sparkplug) {
+ opt_guard.emplace(info.memory_chunk->mutex());
}
+ RememberedSet<OLD_TO_OLD>::InsertTyped(info.memory_chunk, info.slot_type,
+ info.offset);
}
namespace {
@@ -3049,14 +3213,17 @@ static inline SlotCallbackResult UpdateSlot(PtrComprCageBase cage_base,
typename TSlot::TObject target = MakeSlotValue<TSlot, reference_type>(
map_word.ToForwardingAddress(host_cage_base));
if (access_mode == AccessMode::NON_ATOMIC) {
- slot.store(target);
+ // Needs to be atomic for map space compaction: This slot could be a map
+ // word which we update while loading the map word for updating the slot
+ // on another page.
+ slot.Relaxed_Store(target);
} else {
slot.Release_CompareAndSwap(old, target);
}
DCHECK(!Heap::InFromPage(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
} else {
- DCHECK(heap_obj.map(cage_base).IsMap(cage_base));
+ DCHECK(MarkCompactCollector::IsMapOrForwardedMap(map_word.ToMap()));
}
// OLD_TO_OLD slots are always removed after updating.
return REMOVE_SLOT;
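
The comment added above motivates Relaxed_Store: with map-space compaction the same word may be read concurrently as a map word while this thread rewrites it, so the store must at least be a relaxed atomic rather than a plain write. Below is a minimal illustration of the difference using std::atomic; it is a sketch, not V8's slot classes.

#include <atomic>
#include <cstdint>
#include <thread>

// A tagged word that one thread rewrites to a forwarding address while another
// thread concurrently reads it. A plain non-atomic store/load here would be a
// data race; relaxed atomics make the access well-defined without ordering cost.
std::atomic<uintptr_t> slot{0x1000};

int main() {
  std::thread updater([] {
    slot.store(0x2000, std::memory_order_relaxed);  // like Relaxed_Store(target)
  });
  std::thread reader([] {
    // Sees either the old or the new value, but never a torn or undefined one.
    uintptr_t value = slot.load(std::memory_order_relaxed);
    (void)value;
  });
  updater.join();
  reader.join();
  return slot.load(std::memory_order_relaxed) == 0x2000 ? 0 : 1;
}
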
@@ -3106,7 +3273,7 @@ static inline SlotCallbackResult UpdateStrongCodeSlot(
CodeDataContainer::cast(HeapObject::FromAddress(
slot.address() - CodeDataContainer::kCodeOffset));
Code code = code_data_container.code(code_cage_base);
- Isolate* isolate_for_sandbox = GetIsolateForHeapSandbox(host);
+ Isolate* isolate_for_sandbox = GetIsolateForSandbox(host);
code_data_container.UpdateCodeEntryPoint(isolate_for_sandbox, code);
return result;
}
@@ -3115,11 +3282,8 @@ static inline SlotCallbackResult UpdateStrongCodeSlot(
} // namespace
-static constexpr bool kClientHeap = true;
-
// Visitor for updating root pointers and to-space pointers.
// It does not expect to encounter pointers to dead objects.
-template <bool in_client_heap = false>
class PointersUpdatingVisitor final : public ObjectVisitorWithCageBases,
public RootVisitor {
public:
@@ -3175,34 +3339,14 @@ class PointersUpdatingVisitor final : public ObjectVisitorWithCageBases,
}
}
- void VisitMapPointer(HeapObject object) override {
- if (in_client_heap) {
- UpdateStrongSlotInternal(cage_base(), object.map_slot());
- } else {
- UNREACHABLE();
- }
- }
-
void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
- if (in_client_heap) {
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- CHECK_WITH_MSG(!target.InSharedHeap(),
- "refs into shared heap not yet supported here.");
- } else {
- // This visitor nevers visits code objects.
- UNREACHABLE();
- }
+    // This visitor never visits code objects.
+ UNREACHABLE();
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- if (in_client_heap) {
- HeapObject target = rinfo->target_object(cage_base());
- CHECK_WITH_MSG(!target.InSharedHeap(),
- "refs into shared heap not yet supported here.");
- } else {
- // This visitor nevers visits code objects.
- UNREACHABLE();
- }
+    // This visitor never visits code objects.
+ UNREACHABLE();
}
private:
@@ -3232,9 +3376,75 @@ class PointersUpdatingVisitor final : public ObjectVisitorWithCageBases,
}
};
+#ifdef VERIFY_HEAP
+// Visitor for verifying pointers in client heaps: slots that reference the
+// shared heap must not point at objects that were moved (forwarded).
+class ClientHeapVerifier final : public ObjectVisitorWithCageBases {
+ public:
+ explicit ClientHeapVerifier(Heap* heap) : ObjectVisitorWithCageBases(heap) {}
+
+ void VisitPointer(HeapObject host, ObjectSlot p) override {
+ VerifySlot(cage_base(), p);
+ }
+
+ void VisitPointer(HeapObject host, MaybeObjectSlot p) override {
+ VerifySlot(cage_base(), p);
+ }
+
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override {
+ for (ObjectSlot p = start; p < end; ++p) {
+ VerifySlot(cage_base(), p);
+ }
+ }
+
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ for (MaybeObjectSlot p = start; p < end; ++p) {
+ VerifySlot(cage_base(), p);
+ }
+ }
+
+ void VisitMapPointer(HeapObject host) override {
+ VerifySlot(cage_base(), host.map_slot());
+ }
+
+ void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
+ VerifySlot(code_cage_base(), ObjectSlot(slot.address()));
+ }
+
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {}
+
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {}
+
+ private:
+ void VerifySlot(PtrComprCageBase cage_base, ObjectSlot slot) {
+ HeapObject heap_object;
+ if (slot.load(cage_base).GetHeapObject(&heap_object)) {
+ VerifyHeapObject(heap_object);
+ }
+ }
+
+ void VerifySlot(PtrComprCageBase cage_base, MaybeObjectSlot slot) {
+ HeapObject heap_object;
+ if (slot.load(cage_base).GetHeapObject(&heap_object)) {
+ VerifyHeapObject(heap_object);
+ }
+ }
+
+ void VerifyHeapObject(HeapObject heap_object) {
+ if (BasicMemoryChunk::FromHeapObject(heap_object)->InReadOnlySpace())
+ return;
+ if (!heap_object.InSharedHeap()) return;
+ CHECK(!heap_object.map_word(kRelaxedLoad).IsForwardingAddress());
+ }
+};
+#endif // VERIFY_HEAP
+
static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
FullObjectSlot p) {
- MapWord map_word = HeapObject::cast(*p).map_word(kRelaxedLoad);
+ HeapObject old_string = HeapObject::cast(*p);
+ MapWord map_word = old_string.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
String new_string = String::cast(map_word.ToForwardingAddress());
@@ -3306,6 +3516,7 @@ void MarkCompactCollector::EvacuateEpilogue() {
// Old-to-old slot sets must be empty after evacuation.
for (Page* p : *heap()->old_space()) {
DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
+ DCHECK_NULL((p->slot_set<OLD_TO_SHARED, AccessMode::NON_ATOMIC>()));
DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
DCHECK_NULL(p->invalidated_slots<OLD_TO_OLD>());
DCHECK_NULL(p->invalidated_slots<OLD_TO_NEW>());
@@ -3313,6 +3524,16 @@ void MarkCompactCollector::EvacuateEpilogue() {
#endif
}
+namespace {
+ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
+ if (FLAG_shared_string_table && heap->isolate()->shared_isolate()) {
+ return new ConcurrentAllocator(nullptr, heap->shared_old_space());
+ }
+
+ return nullptr;
+}
+} // namespace
+
class Evacuator : public Malloced {
public:
enum EvacuationMode {
@@ -3359,14 +3580,17 @@ class Evacuator : public Malloced {
AlwaysPromoteYoung always_promote_young)
: heap_(heap),
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
- new_space_visitor_(heap_, local_allocator, record_visitor,
- &local_pretenuring_feedback_, always_promote_young),
+ shared_old_allocator_(CreateSharedOldAllocator(heap_)),
+ new_space_visitor_(heap_, local_allocator, shared_old_allocator_.get(),
+ record_visitor, &local_pretenuring_feedback_,
+ always_promote_young),
new_to_new_page_visitor_(heap_, record_visitor,
&local_pretenuring_feedback_),
new_to_old_page_visitor_(heap_, record_visitor,
&local_pretenuring_feedback_),
- old_space_visitor_(heap_, local_allocator, record_visitor),
+ old_space_visitor_(heap_, local_allocator, shared_old_allocator_.get(),
+ record_visitor),
local_allocator_(local_allocator),
duration_(0.0),
bytes_compacted_(0) {}
@@ -3405,6 +3629,9 @@ class Evacuator : public Malloced {
Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
+ // Allocator for the shared heap.
+ std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
+
// Visitors for the corresponding spaces.
EvacuateNewSpaceVisitor new_space_visitor_;
EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
@@ -3450,6 +3677,7 @@ void Evacuator::EvacuatePage(MemoryChunk* chunk) {
void Evacuator::Finalize() {
local_allocator_->Finalize();
+ if (shared_old_allocator_) shared_old_allocator_->FreeLinearAllocationArea();
heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
new_to_old_page_visitor_.moved_bytes());
@@ -3733,6 +3961,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
if (marking_state->IsBlack(object)) {
heap_->lo_space()->PromoteNewLargeObject(current);
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ promoted_large_pages_.push_back(current);
evacuation_items.emplace_back(ParallelWorkItem{}, current);
}
}
@@ -3915,10 +4144,9 @@ void MarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
for (Page* p : new_space_evacuation_pages_) {
- if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
- p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
- sweeper()->AddPageForIterability(p);
- } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+ // Full GCs don't promote pages within new space.
+ DCHECK(!p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+ if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
DCHECK_EQ(OLD_SPACE, p->owner_identity());
sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
@@ -3926,6 +4154,12 @@ void MarkCompactCollector::Evacuate() {
}
new_space_evacuation_pages_.clear();
+ for (LargePage* p : promoted_large_pages_) {
+ DCHECK(p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
+ p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ }
+ promoted_large_pages_.clear();
+
for (Page* p : old_space_evacuation_pages_) {
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
sweeper()->AddPage(p->owner_identity(), p, Sweeper::REGULAR);
@@ -4037,7 +4271,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
void ProcessVisitAll() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"ToSpaceUpdatingItem::ProcessVisitAll");
- PointersUpdatingVisitor<> visitor(heap_);
+ PointersUpdatingVisitor visitor(heap_);
for (Address cur = start_; cur < end_;) {
HeapObject object = HeapObject::FromAddress(cur);
Map map = object.map(visitor.cage_base());
@@ -4052,7 +4286,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
"ToSpaceUpdatingItem::ProcessVisitLive");
// For young generation evacuations we want to visit grey objects, for
// full MC, we need to visit black objects.
- PointersUpdatingVisitor<> visitor(heap_);
+ PointersUpdatingVisitor visitor(heap_);
for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
chunk_, marking_state_->bitmap(chunk_))) {
object_and_size.first.IterateBodyFast(visitor.cage_base(), &visitor);
@@ -4380,7 +4614,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
// The external string table is updated at the end.
- PointersUpdatingVisitor<> updating_visitor(heap());
+ PointersUpdatingVisitor updating_visitor(heap());
heap_->IterateRootsIncludingClients(
&updating_visitor,
base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable});
@@ -4405,8 +4639,10 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
RememberedSetUpdatingMode::ALL);
CollectRememberedSetUpdatingItems(&updating_items, heap()->code_lo_space(),
RememberedSetUpdatingMode::ALL);
- CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
- RememberedSetUpdatingMode::ALL);
+ if (heap()->map_space()) {
+ CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
+ RememberedSetUpdatingMode::ALL);
+ }
// Iterating to space may require a valid body descriptor for e.g.
// WasmStruct which races with updating a slot in Map. Since to space is
@@ -4440,18 +4676,54 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
void MarkCompactCollector::UpdatePointersInClientHeaps() {
if (!isolate()->is_shared()) return;
- PointersUpdatingVisitor<kClientHeap> visitor(heap());
-
isolate()->global_safepoint()->IterateClientIsolates(
- [&visitor](Isolate* client) {
- Heap* heap = client->heap();
- HeapObjectIterator iterator(heap, HeapObjectIterator::kNoFiltering);
- PtrComprCageBase cage_base(client);
- for (HeapObject obj = iterator.Next(); !obj.is_null();
- obj = iterator.Next()) {
- obj.IterateFast(cage_base, &visitor);
- }
- });
+ [this](Isolate* client) { UpdatePointersInClientHeap(client); });
+}
+
+void MarkCompactCollector::UpdatePointersInClientHeap(Isolate* client) {
+ PtrComprCageBase cage_base(client);
+ MemoryChunkIterator chunk_iterator(client->heap());
+
+ while (chunk_iterator.HasNext()) {
+ MemoryChunk* chunk = chunk_iterator.Next();
+ CodePageMemoryModificationScope unprotect_code_page(chunk);
+
+ RememberedSet<OLD_TO_SHARED>::Iterate(
+ chunk,
+ [cage_base](MaybeObjectSlot slot) {
+ return UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
+
+ chunk->ReleaseSlotSet<OLD_TO_SHARED>();
+
+ RememberedSet<OLD_TO_SHARED>::IterateTyped(
+ chunk, [this](SlotType slot_type, Address slot) {
+ // Using UpdateStrongSlot is OK here, because there are no weak
+ // typed slots.
+ PtrComprCageBase cage_base = heap_->isolate();
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ heap_, slot_type, slot, [cage_base](FullMaybeObjectSlot slot) {
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base,
+ slot);
+ });
+ });
+
+ chunk->ReleaseTypedSlotSet<OLD_TO_SHARED>();
+ }
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ ClientHeapVerifier verifier_visitor(client->heap());
+
+ HeapObjectIterator iterator(client->heap(),
+ HeapObjectIterator::kNoFiltering);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
+ obj.IterateFast(cage_base, &verifier_visitor);
+ }
+ }
+#endif // VERIFY_HEAP
}
void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToOOM(
@@ -4605,7 +4877,7 @@ void MarkCompactCollector::StartSweepSpaces() {
heap()->tracer(), GCTracer::Scope::MC_SWEEP_CODE, ThreadKind::kMain);
StartSweepSpace(heap()->code_space());
}
- {
+ if (heap()->map_space()) {
GCTracer::Scope sweep_scope(
heap()->tracer(), GCTracer::Scope::MC_SWEEP_MAP, ThreadKind::kMain);
StartSweepSpace(heap()->map_space());
@@ -4614,8 +4886,6 @@ void MarkCompactCollector::StartSweepSpaces() {
}
}
-#ifdef ENABLE_MINOR_MC
-
namespace {
#ifdef VERIFY_HEAP
@@ -4668,8 +4938,7 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
VerifyHeapObjectImpl(target);
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- PtrComprCageBase cage_base = host.main_cage_base();
- VerifyHeapObjectImpl(rinfo->target_object(cage_base));
+ VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
}
void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
VerifyPointersImpl(start, end);
@@ -4707,7 +4976,7 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
VerifyEvacuation(heap_->new_space());
VerifyEvacuation(heap_->old_space());
VerifyEvacuation(heap_->code_space());
- VerifyEvacuation(heap_->map_space());
+ if (heap_->map_space()) VerifyEvacuation(heap_->map_space());
}
protected:
@@ -4871,14 +5140,18 @@ MinorMarkCompactCollector::~MinorMarkCompactCollector() {
delete main_marking_visitor_;
}
-void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
- for (Page* p : sweep_to_iterate_pages_) {
- if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
- p->ClearFlag(Page::SWEEP_TO_ITERATE);
- non_atomic_marking_state()->ClearLiveness(p);
- }
+void MinorMarkCompactCollector::CleanupPromotedPages() {
+ for (Page* p : promoted_pages_) {
+ p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
+ p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ non_atomic_marking_state()->ClearLiveness(p);
+ }
+ promoted_pages_.clear();
+
+ for (LargePage* p : promoted_large_pages_) {
+ p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
}
- sweep_to_iterate_pages_.clear();
+ promoted_large_pages_.clear();
}
void MinorMarkCompactCollector::SweepArrayBufferExtensions() {
@@ -4962,7 +5235,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor<> updating_visitor(heap());
+ PointersUpdatingVisitor updating_visitor(heap());
std::vector<std::unique_ptr<UpdatingItem>> updating_items;
// Create batches of global handles.
@@ -4971,8 +5244,11 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
CollectRememberedSetUpdatingItems(&updating_items, heap()->code_space(),
RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ if (heap()->map_space()) {
+ CollectRememberedSetUpdatingItems(
+ &updating_items, heap()->map_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ }
CollectRememberedSetUpdatingItems(&updating_items, heap()->lo_space(),
RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
CollectRememberedSetUpdatingItems(&updating_items, heap()->code_lo_space(),
@@ -5044,7 +5320,6 @@ void MinorMarkCompactCollector::CollectGarbage() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
- CleanupSweepToIteratePages();
}
heap()->array_buffer_sweeper()->EnsureFinished();
@@ -5068,14 +5343,15 @@ void MinorMarkCompactCollector::CollectGarbage() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
- heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
+ heap()->incremental_marking()->UpdateMarkingWorklistAfterYoungGenGC();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
for (Page* p :
PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
- DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
+ DCHECK_EQ(promoted_pages_.end(),
+ std::find(promoted_pages_.begin(), promoted_pages_.end(), p));
non_atomic_marking_state()->ClearLiveness(p);
if (FLAG_concurrent_marking) {
// Ensure that concurrent marker does not track pages that are
@@ -5090,12 +5366,13 @@ void MinorMarkCompactCollector::CollectGarbage() {
heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
}
+ CleanupPromotedPages();
+
SweepArrayBufferExtensions();
}
void MinorMarkCompactCollector::MakeIterable(
- Page* p, MarkingTreatmentMode marking_mode,
- FreeSpaceTreatmentMode free_space_mode) {
+ Page* p, FreeSpaceTreatmentMode free_space_mode) {
CHECK(!p->IsLargePage());
// We have to clear the full collectors markbits for the areas that we
// remove here.
@@ -5137,11 +5414,6 @@ void MinorMarkCompactCollector::MakeIterable(
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
-
- if (marking_mode == MarkingTreatmentMode::CLEAR) {
- non_atomic_marking_state()->ClearLiveness(p);
- p->ClearFlag(Page::SWEEP_TO_ITERATE);
- }
}
namespace {
@@ -5637,7 +5909,7 @@ void MinorMarkCompactCollector::Evacuate() {
EvacuatePagesInParallel();
}
- UpdatePointersAfterEvacuation();
+ if (!FLAG_minor_mc_sweeping) UpdatePointersAfterEvacuation();
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
@@ -5651,10 +5923,7 @@ void MinorMarkCompactCollector::Evacuate() {
for (Page* p : new_space_evacuation_pages_) {
if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
- p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
- p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
- p->SetFlag(Page::SWEEP_TO_ITERATE);
- sweep_to_iterate_pages_.push_back(p);
+ promoted_pages_.push_back(p);
}
}
new_space_evacuation_pages_.clear();
@@ -5703,6 +5972,7 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
*live_bytes = marking_state->live_bytes(chunk);
switch (ComputeEvacuationMode(chunk)) {
case kObjectsNewToOld:
+ DCHECK(!FLAG_minor_mc_sweeping);
LiveObjectVisitor::VisitGreyObjectsNoFail(
chunk, marking_state, &new_space_visitor_,
LiveObjectVisitor::kClearMarkbits);
@@ -5715,14 +5985,12 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
marking_state->live_bytes(chunk));
if (!chunk->IsLargePage()) {
if (heap()->ShouldZapGarbage()) {
- collector_->MakeIterable(static_cast<Page*>(chunk),
- MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
+ collector_->MakeIterable(static_cast<Page*>(chunk), ZAP_FREE_SPACE);
} else if (heap()->incremental_marking()->IsMarking()) {
// When incremental marking is on, we need to clear the mark bits of
// the full collector. We cannot yet discard the young generation mark
// bits as they are still relevant for pointers updating.
collector_->MakeIterable(static_cast<Page*>(chunk),
- MarkingTreatmentMode::KEEP,
IGNORE_FREE_SPACE);
}
}
@@ -5735,14 +6003,12 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
marking_state->live_bytes(chunk));
DCHECK(!chunk->IsLargePage());
if (heap()->ShouldZapGarbage()) {
- collector_->MakeIterable(static_cast<Page*>(chunk),
- MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
+ collector_->MakeIterable(static_cast<Page*>(chunk), ZAP_FREE_SPACE);
} else if (heap()->incremental_marking()->IsMarking()) {
// When incremental marking is on, we need to clear the mark bits of
// the full collector. We cannot yet discard the young generation mark
// bits as they are still relevant for pointers updating.
- collector_->MakeIterable(static_cast<Page*>(chunk),
- MarkingTreatmentMode::KEEP, IGNORE_FREE_SPACE);
+ collector_->MakeIterable(static_cast<Page*>(chunk), IGNORE_FREE_SPACE);
}
break;
case kObjectsOldToOld:
@@ -5760,7 +6026,8 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
if (live_bytes_on_page == 0) continue;
live_bytes += live_bytes_on_page;
- if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kNo)) {
+ if (FLAG_minor_mc_sweeping ||
+ ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kNo)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
} else {
@@ -5780,6 +6047,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
if (non_atomic_marking_state_.IsGrey(object)) {
heap_->lo_space()->PromoteNewLargeObject(current);
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ promoted_large_pages_.push_back(current);
evacuation_items.emplace_back(ParallelWorkItem{}, current);
}
}
@@ -5797,7 +6065,5 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
}
}
-#endif // ENABLE_MINOR_MC
-
} // namespace internal
} // namespace v8
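The new UpdatePointersInClientHeap() above walks every memory chunk of a client isolate, rewrites the recorded OLD_TO_SHARED slots to the forwarding addresses of moved shared objects, and then drops the slot sets. The following standalone sketch models that pass with simplified stand-in types (SharedObject and Chunk are illustrative, not the V8 classes):

#include <cstdint>
#include <vector>

struct SharedObject {
  uintptr_t address;
  uintptr_t forwarding_address;  // 0 if the object was not moved.
};

struct Chunk {
  // Remembered OLD_TO_SHARED slots: locations that hold pointers into the
  // shared heap.
  std::vector<SharedObject**> old_to_shared_slots;
};

void UpdatePointersInClientChunk(Chunk& chunk) {
  for (SharedObject** slot : chunk.old_to_shared_slots) {
    SharedObject* target = *slot;
    if (target->forwarding_address != 0) {
      *slot = reinterpret_cast<SharedObject*>(target->forwarding_address);
    }
  }
  // Mirrors chunk->ReleaseSlotSet<OLD_TO_SHARED>() above: the set is only
  // needed for this single update pass after shared-heap evacuation.
  chunk.old_to_shared_slots.clear();
}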
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index ecfb5adc64..ea9173f5be 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -26,6 +26,7 @@ namespace internal {
class EvacuationJobTraits;
class HeapObjectVisitor;
class ItemParallelJob;
+class LargePage;
class MigrationObserver;
class ReadOnlySpace;
class RecordMigratedSlotVisitor;
@@ -187,7 +188,6 @@ class LiveObjectVisitor : AllStatic {
enum class AlwaysPromoteYoung { kYes, kNo };
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
-enum class MarkingTreatmentMode { KEEP, CLEAR };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
// Base class for minor and full MC collectors.
@@ -508,11 +508,16 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
struct RecordRelocSlotInfo {
MemoryChunk* memory_chunk;
SlotType slot_type;
- bool should_record;
uint32_t offset;
};
- static RecordRelocSlotInfo PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
- HeapObject target);
+
+ static V8_EXPORT_PRIVATE bool IsMapOrForwardedMap(Map map);
+
+ static bool ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
+ HeapObject target);
+ static RecordRelocSlotInfo ProcessRelocInfo(Code host, RelocInfo* rinfo,
+ HeapObject target);
+
static void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target);
V8_INLINE static void RecordSlot(HeapObject object, ObjectSlot slot,
HeapObject target);
@@ -525,10 +530,15 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
bool is_compacting() const { return compacting_; }
bool is_shared_heap() const { return is_shared_heap_; }
+ void FinishSweepingIfOutOfWork();
+
+ enum class SweepingForcedFinalizationMode { kUnifiedHeap, kV8Only };
+
// Ensures that sweeping is finished.
//
// Note: Can only be called safely from main thread.
- V8_EXPORT_PRIVATE void EnsureSweepingCompleted();
+ V8_EXPORT_PRIVATE void EnsureSweepingCompleted(
+ SweepingForcedFinalizationMode mode);
void EnsurePageIsSwept(Page* page);
@@ -622,6 +632,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Free unmarked ArrayBufferExtensions.
void SweepArrayBufferExtensions();
+ // Free unmarked entries in the ExternalPointerTable.
+ void SweepExternalPointerTable();
+
void MarkLiveObjects() override;
// Marks the object grey and adds it to the marking work list.
@@ -642,6 +655,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Updates pointers to shared objects from client heaps.
void UpdatePointersInClientHeaps();
+ void UpdatePointersInClientHeap(Isolate* client);
// Marks object reachable from harmony weak maps and wrapper tracing.
void ProcessEphemeronMarking();
@@ -799,6 +813,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
aborted_evacuation_candidates_due_to_oom_;
std::vector<std::pair<Address, Page*>>
aborted_evacuation_candidates_due_to_flags_;
+ std::vector<LargePage*> promoted_large_pages_;
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
@@ -835,8 +850,6 @@ class V8_NODISCARD EvacuationScope {
MarkCompactCollector* collector_;
};
-#ifdef ENABLE_MINOR_MC
-
// Collector for young-generation only.
class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
public:
@@ -858,9 +871,8 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void TearDown() override;
void CollectGarbage() override;
- void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
- FreeSpaceTreatmentMode free_space_mode);
- void CleanupSweepToIteratePages();
+ void MakeIterable(Page* page, FreeSpaceTreatmentMode free_space_mode);
+ void CleanupPromotedPages();
private:
using MarkingWorklist =
@@ -909,15 +921,14 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
YoungGenerationMarkingVisitor* main_marking_visitor_;
base::Semaphore page_parallel_job_semaphore_;
std::vector<Page*> new_space_evacuation_pages_;
- std::vector<Page*> sweep_to_iterate_pages_;
+ std::vector<Page*> promoted_pages_;
+ std::vector<LargePage*> promoted_large_pages_;
friend class YoungGenerationMarkingTask;
friend class YoungGenerationMarkingJob;
friend class YoungGenerationMarkingVisitor;
};
-#endif // ENABLE_MINOR_MC
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/marking-barrier.cc b/deps/v8/src/heap/marking-barrier.cc
index 6a7571af79..fc82ff50f2 100644
--- a/deps/v8/src/heap/marking-barrier.cc
+++ b/deps/v8/src/heap/marking-barrier.cc
@@ -121,15 +121,16 @@ void MarkingBarrier::Write(DescriptorArray descriptor_array,
void MarkingBarrier::RecordRelocSlot(Code host, RelocInfo* rinfo,
HeapObject target) {
DCHECK(IsCurrentMarkingBarrier());
+ if (!MarkCompactCollector::ShouldRecordRelocSlot(host, rinfo, target)) return;
+
MarkCompactCollector::RecordRelocSlotInfo info =
- MarkCompactCollector::PrepareRecordRelocSlot(host, rinfo, target);
- if (info.should_record) {
- auto& typed_slots = typed_slots_map_[info.memory_chunk];
- if (!typed_slots) {
- typed_slots.reset(new TypedSlots());
- }
- typed_slots->Insert(info.slot_type, info.offset);
+ MarkCompactCollector::ProcessRelocInfo(host, rinfo, target);
+
+ auto& typed_slots = typed_slots_map_[info.memory_chunk];
+ if (!typed_slots) {
+ typed_slots.reset(new TypedSlots());
}
+ typed_slots->Insert(info.slot_type, info.offset);
}
// static
@@ -193,7 +194,7 @@ void MarkingBarrier::Deactivate() {
is_compacting_ = false;
if (is_main_thread_barrier_) {
DeactivateSpace(heap_->old_space());
- DeactivateSpace(heap_->map_space());
+ if (heap_->map_space()) DeactivateSpace(heap_->map_space());
DeactivateSpace(heap_->code_space());
DeactivateSpace(heap_->new_space());
for (LargePage* p : *heap_->new_lo_space()) {
@@ -232,7 +233,7 @@ void MarkingBarrier::Activate(bool is_compacting) {
is_activated_ = true;
if (is_main_thread_barrier_) {
ActivateSpace(heap_->old_space());
- ActivateSpace(heap_->map_space());
+ if (heap_->map_space()) ActivateSpace(heap_->map_space());
ActivateSpace(heap_->code_space());
ActivateSpace(heap_->new_space());
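The marking barrier above now consumes the split API declared in mark-compact.h: a cheap ShouldRecordRelocSlot() predicate first, and ProcessRelocInfo() only when recording actually happens, replacing the old struct-with-should_record-flag pattern. A minimal standalone sketch of that calling pattern (all types and the compaction-candidate condition are placeholders):

#include <cstdint>
#include <map>
#include <vector>

struct SlotInfo {
  int memory_chunk_id;
  int slot_type;
  uint32_t offset;
};

// Cheap predicate, evaluated first; no SlotInfo is built when it fails.
bool ShouldRecordSlot(bool target_is_compaction_candidate) {
  return target_is_compaction_candidate;
}

// Only called when recording really happens.
SlotInfo ProcessRelocInfo(int chunk_id, int slot_type, uint32_t offset) {
  return {chunk_id, slot_type, offset};
}

void RecordRelocSlot(std::map<int, std::vector<SlotInfo>>& typed_slots_map,
                     int chunk_id, int slot_type, uint32_t offset,
                     bool target_is_compaction_candidate) {
  if (!ShouldRecordSlot(target_is_compaction_candidate)) return;
  SlotInfo info = ProcessRelocInfo(chunk_id, slot_type, offset);
  typed_slots_map[info.memory_chunk_id].push_back(info);
}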
diff --git a/deps/v8/src/heap/marking-visitor-inl.h b/deps/v8/src/heap/marking-visitor-inl.h
index 8f65a61dab..c59ae55d2d 100644
--- a/deps/v8/src/heap/marking-visitor-inl.h
+++ b/deps/v8/src/heap/marking-visitor-inl.h
@@ -6,6 +6,7 @@
#define V8_HEAP_MARKING_VISITOR_INL_H_
#include "src/heap/marking-visitor.h"
+#include "src/heap/marking-worklist.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/progress-bar.h"
@@ -25,6 +26,7 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkObject(
HeapObject host, HeapObject object) {
DCHECK(ReadOnlyHeap::Contains(object) || heap_->Contains(object));
concrete_visitor()->SynchronizePageAccess(object);
+ AddStrongReferenceForReferenceSummarizer(host, object);
if (concrete_visitor()->marking_state()->WhiteToGrey(object)) {
local_marking_worklists_->Push(object);
if (V8_UNLIKELY(concrete_visitor()->retaining_path_mode() ==
@@ -41,8 +43,7 @@ template <typename THeapObjectSlot>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::ProcessStrongHeapObject(
HeapObject host, THeapObjectSlot slot, HeapObject heap_object) {
concrete_visitor()->SynchronizePageAccess(heap_object);
- BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(heap_object);
- if (!is_shared_heap_ && target_page->InSharedHeap()) return;
+ if (!is_shared_heap_ && heap_object.InSharedHeap()) return;
MarkObject(host, heap_object);
concrete_visitor()->RecordSlot(host, slot, heap_object);
}
@@ -64,6 +65,7 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::ProcessWeakHeapObject(
// the reference when we know the liveness of the whole transitive
// closure.
local_weak_objects_->weak_references_local.Push(std::make_pair(host, slot));
+ AddWeakReferenceForReferenceSummarizer(host, heap_object);
}
}
@@ -112,10 +114,13 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEmbeddedPointer(
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
HeapObject object =
rinfo->target_object(ObjectVisitorWithCageBases::cage_base());
+ if (!is_shared_heap_ && object.InSharedHeap()) return;
+
if (!concrete_visitor()->marking_state()->IsBlackOrGrey(object)) {
if (host.IsWeakObject(object)) {
local_weak_objects_->weak_objects_in_code_local.Push(
std::make_pair(object, host));
+ AddWeakReferenceForReferenceSummarizer(host, object);
} else {
MarkObject(host, object);
}
@@ -128,6 +133,8 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitCodeTarget(
Code host, RelocInfo* rinfo) {
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+
+ if (!is_shared_heap_ && target.InSharedHeap()) return;
MarkObject(host, target);
concrete_visitor()->RecordRelocSlot(host, rinfo, target);
}
@@ -243,7 +250,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitFixedArray(
// in the large object space.
ProgressBar& progress_bar =
MemoryChunk::FromHeapObject(object)->ProgressBar();
- return progress_bar.IsEnabled()
+ return CanUpdateValuesInHeap() && progress_bar.IsEnabled()
? VisitFixedArrayWithProgressBar(map, object, progress_bar)
: concrete_visitor()->VisitLeftTrimmableArray(map, object);
}
@@ -260,17 +267,45 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitFixedDoubleArray(
template <typename ConcreteVisitor, typename MarkingState>
template <typename T>
+inline int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
+ VisitEmbedderTracingSubClassNoEmbedderTracing(Map map, T object) {
+ return concrete_visitor()->VisitJSObjectSubclass(map, object);
+}
+
+template <typename ConcreteVisitor, typename MarkingState>
+template <typename T>
+inline int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
+ VisitEmbedderTracingSubClassWithEmbedderTracing(Map map, T object) {
+ const bool requires_snapshot =
+ local_marking_worklists_->SupportsExtractWrapper();
+ MarkingWorklists::Local::WrapperSnapshot wrapper_snapshot;
+ const bool valid_snapshot =
+ requires_snapshot &&
+ local_marking_worklists_->ExtractWrapper(map, object, wrapper_snapshot);
+ const int size = concrete_visitor()->VisitJSObjectSubclass(map, object);
+ if (size) {
+ if (valid_snapshot) {
+ // Success: The object needs to be processed for embedder references.
+ local_marking_worklists_->PushExtractedWrapper(wrapper_snapshot);
+ } else if (!requires_snapshot) {
+      // Snapshots are not supported. Fall back to pushing the wrapper itself,
+      // which will then be processed on the main thread.
+ local_marking_worklists_->PushWrapper(object);
+ }
+ }
+ return size;
+}
+
+template <typename ConcreteVisitor, typename MarkingState>
+template <typename T>
int MarkingVisitorBase<ConcreteVisitor,
MarkingState>::VisitEmbedderTracingSubclass(Map map,
T object) {
- DCHECK(object.IsApiWrapper());
- int size = concrete_visitor()->VisitJSObjectSubclass(map, object);
- if (size && is_embedder_tracing_enabled_) {
- // Success: The object needs to be processed for embedder references on
- // the main thread.
- local_marking_worklists_->PushEmbedder(object);
+ DCHECK(object.MayHaveEmbedderFields());
+ if (V8_LIKELY(is_embedder_tracing_enabled_)) {
+ return VisitEmbedderTracingSubClassWithEmbedderTracing(map, object);
}
- return size;
+ return VisitEmbedderTracingSubClassNoEmbedderTracing(map, object);
}
template <typename ConcreteVisitor, typename MarkingState>
@@ -315,11 +350,13 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEphemeronHashTable(
concrete_visitor()->SynchronizePageAccess(key);
concrete_visitor()->RecordSlot(table, key_slot, key);
+ AddWeakReferenceForReferenceSummarizer(table, key);
ObjectSlot value_slot =
table.RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
- if (concrete_visitor()->marking_state()->IsBlackOrGrey(key)) {
+ if ((!is_shared_heap_ && key.InSharedHeap()) ||
+ concrete_visitor()->marking_state()->IsBlackOrGrey(key)) {
VisitPointer(table, value_slot);
} else {
Object value_obj = table.ValueAt(i);
@@ -328,6 +365,9 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEphemeronHashTable(
HeapObject value = HeapObject::cast(value_obj);
concrete_visitor()->SynchronizePageAccess(value);
concrete_visitor()->RecordSlot(table, value_slot, value);
+ AddWeakReferenceForReferenceSummarizer(table, value);
+
+ if (!is_shared_heap_ && value.InSharedHeap()) continue;
// Revisit ephemerons with both key and value unreachable at end
// of concurrent marking cycle.
@@ -358,6 +398,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSWeakRef(
// JSWeakRef points to a potentially dead object. We have to process
// them when we know the liveness of the whole transitive closure.
local_weak_objects_->js_weak_refs_local.Push(weak_ref);
+ AddWeakReferenceForReferenceSummarizer(weak_ref, target);
}
}
return size;
@@ -388,6 +429,8 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitWeakCell(
// token. We have to process them when we know the liveness of the whole
// transitive closure.
local_weak_objects_->weak_cells_local.Push(weak_cell);
+ AddWeakReferenceForReferenceSummarizer(weak_cell, target);
+ AddWeakReferenceForReferenceSummarizer(weak_cell, unregister_token);
}
return size;
}
@@ -414,8 +457,11 @@ template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitDescriptors(
DescriptorArray descriptor_array, int number_of_own_descriptors) {
int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
- int16_t old_marked = descriptor_array.UpdateNumberOfMarkedDescriptors(
- mark_compact_epoch_, new_marked);
+ int16_t old_marked = 0;
+ if (CanUpdateValuesInHeap()) {
+ old_marked = descriptor_array.UpdateNumberOfMarkedDescriptors(
+ mark_compact_epoch_, new_marked);
+ }
if (old_marked < new_marked) {
VisitPointers(
descriptor_array,
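VisitEmbedderTracingSubClassWithEmbedderTracing() above tries to extract an embedder-data snapshot when the local worklists carry a CppMarkingState and otherwise queues the wrapper object for main-thread processing. The control flow, reduced to a standalone sketch with simplified stand-in types:

#include <cstdint>
#include <optional>
#include <vector>

struct WrapperSnapshot {
  uintptr_t type_field;
  uintptr_t instance_field;
};

struct LocalWorklists {
  bool supports_snapshots;                         // SupportsExtractWrapper()
  std::vector<WrapperSnapshot> snapshot_worklist;  // drained concurrently
  std::vector<uintptr_t> wrapper_worklist;         // drained on the main thread

  std::optional<WrapperSnapshot> ExtractWrapper(uintptr_t object) {
    if (!supports_snapshots) return std::nullopt;
    // Fake embedder fields; extraction can legitimately fail in V8 when the
    // object carries no traceable embedder data.
    return WrapperSnapshot{object, object + sizeof(uintptr_t)};
  }
};

int VisitWrapper(LocalWorklists& worklists, uintptr_t object,
                 int visited_size) {
  std::optional<WrapperSnapshot> snapshot = worklists.ExtractWrapper(object);
  if (visited_size != 0) {
    if (snapshot) {
      // Success: process the snapshot for embedder references concurrently.
      worklists.snapshot_worklist.push_back(*snapshot);
    } else if (!worklists.supports_snapshots) {
      // No snapshot support: hand the wrapper itself to the main thread.
      worklists.wrapper_worklist.push_back(object);
    }
  }
  return visited_size;
}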
diff --git a/deps/v8/src/heap/marking-visitor.h b/deps/v8/src/heap/marking-visitor.h
index 26ebf5713f..94670b4d73 100644
--- a/deps/v8/src/heap/marking-visitor.h
+++ b/deps/v8/src/heap/marking-visitor.h
@@ -25,6 +25,11 @@ struct EphemeronMarking {
template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
public:
+ // Declares that this marking state is not collecting retainers, so the
+ // marking visitor may update the heap state to store information about
+ // progress, and may avoid fully visiting an object if it is safe to do so.
+ static constexpr bool kCollectRetainers = false;
+
explicit MarkingStateBase(PtrComprCageBase cage_base)
#if V8_COMPRESS_POINTERS
: cage_base_(cage_base)
@@ -102,6 +107,15 @@ class MarkingStateBase {
static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
}
+ void AddStrongReferenceForReferenceSummarizer(HeapObject host,
+ HeapObject obj) {
+ // This is not a reference summarizer, so there is nothing to do here.
+ }
+
+ void AddWeakReferenceForReferenceSummarizer(HeapObject host, HeapObject obj) {
+ // This is not a reference summarizer, so there is nothing to do here.
+ }
+
private:
#if V8_COMPRESS_POINTERS
const PtrComprCageBase cage_base_;
@@ -127,9 +141,8 @@ template <typename ConcreteVisitor, typename MarkingState>
class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
public:
MarkingVisitorBase(MarkingWorklists::Local* local_marking_worklists,
- WeakObjects::Local* local_weak_objects,
- // WeakObjects* weak_objects,
- Heap* heap, unsigned mark_compact_epoch,
+ WeakObjects::Local* local_weak_objects, Heap* heap,
+ unsigned mark_compact_epoch,
base::EnumSet<CodeFlushMode> code_flush_mode,
bool is_embedder_tracing_enabled,
bool should_keep_ages_unchanged)
@@ -141,7 +154,13 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
code_flush_mode_(code_flush_mode),
is_embedder_tracing_enabled_(is_embedder_tracing_enabled),
should_keep_ages_unchanged_(should_keep_ages_unchanged),
- is_shared_heap_(heap->IsShared()) {}
+ is_shared_heap_(heap->IsShared())
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ ,
+ external_pointer_table_(&heap->isolate()->external_pointer_table())
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
+ {
+ }
V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
V8_INLINE int VisitDescriptorArray(Map map, DescriptorArray object);
@@ -161,10 +180,9 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
// ObjectVisitor overrides.
void VisitMapPointer(HeapObject host) final {
- // Note that we are skipping the recording the slot because map objects
- // can't move, so this is safe (see ProcessStrongHeapObject for comparison)
- MarkObject(host, HeapObject::cast(
- host.map(ObjectVisitorWithCageBases::cage_base())));
+ Map map = host.map(ObjectVisitorWithCageBases::cage_base());
+ MarkObject(host, map);
+ concrete_visitor()->RecordSlot(host, host.map_slot(), map);
}
V8_INLINE void VisitPointer(HeapObject host, ObjectSlot p) final {
VisitPointersImpl(host, p, p + 1);
@@ -191,6 +209,14 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
// reconstructed after GC.
}
+ V8_INLINE void VisitExternalPointer(HeapObject host,
+ ExternalPointer_t ptr) final {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ uint32_t index = ptr >> kExternalPointerIndexShift;
+ external_pointer_table_->Mark(index);
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
+ }
+
protected:
ConcreteVisitor* concrete_visitor() {
return static_cast<ConcreteVisitor*>(this);
@@ -219,6 +245,11 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
template <typename T>
int VisitEmbedderTracingSubclass(Map map, T object);
+ template <typename T>
+ int VisitEmbedderTracingSubClassWithEmbedderTracing(Map map, T object);
+ template <typename T>
+ int VisitEmbedderTracingSubClassNoEmbedderTracing(Map map, T object);
+
V8_INLINE int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
ProgressBar& progress_bar);
// Marks the descriptor array black without pushing it on the marking work
@@ -228,6 +259,23 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
// Marks the object grey and pushes it on the marking work list.
V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
+ V8_INLINE void AddStrongReferenceForReferenceSummarizer(HeapObject host,
+ HeapObject obj) {
+ concrete_visitor()
+ ->marking_state()
+ ->AddStrongReferenceForReferenceSummarizer(host, obj);
+ }
+
+ V8_INLINE void AddWeakReferenceForReferenceSummarizer(HeapObject host,
+ HeapObject obj) {
+ concrete_visitor()->marking_state()->AddWeakReferenceForReferenceSummarizer(
+ host, obj);
+ }
+
+ constexpr bool CanUpdateValuesInHeap() {
+ return !MarkingState::kCollectRetainers;
+ }
+
MarkingWorklists::Local* const local_marking_worklists_;
WeakObjects::Local* const local_weak_objects_;
Heap* const heap_;
@@ -236,6 +284,9 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
const bool is_embedder_tracing_enabled_;
const bool should_keep_ages_unchanged_;
const bool is_shared_heap_;
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ ExternalPointerTable* const external_pointer_table_;
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
};
} // namespace internal
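The AddStrong/AddWeakReferenceForReferenceSummarizer() hooks above are deliberately empty in MarkingStateBase, which sets kCollectRetainers to false. A hypothetical retainer-collecting state, sketched below with stand-in types, would flip the constant and record every edge; CanUpdateValuesInHeap() then evaluates to false, which is what disables the progress-bar and marked-descriptor shortcuts in the visitor:

#include <cstdint>
#include <set>
#include <utility>

using ObjectId = uintptr_t;

struct ReferenceSummarizingState {
  static constexpr bool kCollectRetainers = true;

  std::set<std::pair<ObjectId, ObjectId>> strong_edges;
  std::set<std::pair<ObjectId, ObjectId>> weak_edges;

  void AddStrongReferenceForReferenceSummarizer(ObjectId host, ObjectId obj) {
    strong_edges.emplace(host, obj);
  }
  void AddWeakReferenceForReferenceSummarizer(ObjectId host, ObjectId obj) {
    weak_edges.emplace(host, obj);
  }
};

// Mirrors MarkingVisitorBase::CanUpdateValuesInHeap() above.
template <typename MarkingState>
constexpr bool CanUpdateValuesInHeap() {
  return !MarkingState::kCollectRetainers;
}

static_assert(!CanUpdateValuesInHeap<ReferenceSummarizingState>(),
              "a retainer-collecting state must re-visit objects fully");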
diff --git a/deps/v8/src/heap/marking-worklist-inl.h b/deps/v8/src/heap/marking-worklist-inl.h
index 7e4c49667f..8a1551f1a2 100644
--- a/deps/v8/src/heap/marking-worklist-inl.h
+++ b/deps/v8/src/heap/marking-worklist-inl.h
@@ -5,9 +5,11 @@
#define V8_HEAP_MARKING_WORKLIST_INL_H_
#include <unordered_map>
-#include <vector>
+#include "src/heap/cppgc-js/cpp-marking-state-inl.h"
#include "src/heap/marking-worklist.h"
+#include "src/objects/embedder-data-slot.h"
+#include "src/objects/js-objects-inl.h"
namespace v8 {
namespace internal {
@@ -16,7 +18,7 @@ template <typename Callback>
void MarkingWorklists::Update(Callback callback) {
shared_.Update(callback);
on_hold_.Update(callback);
- embedder_.Update(callback);
+ wrapper_.Update(callback);
other_.Update(callback);
for (auto cw : context_worklists_) {
if (cw.context == kSharedContext || cw.context == kOtherContext) {
@@ -45,12 +47,30 @@ bool MarkingWorklists::Local::PopOnHold(HeapObject* object) {
return on_hold_.Pop(object);
}
-void MarkingWorklists::Local::PushEmbedder(HeapObject object) {
- embedder_.Push(object);
+bool MarkingWorklists::Local::SupportsExtractWrapper() {
+ return cpp_marking_state_.get();
}
-bool MarkingWorklists::Local::PopEmbedder(HeapObject* object) {
- return embedder_.Pop(object);
+bool MarkingWorklists::Local::ExtractWrapper(Map map, JSObject object,
+ WrapperSnapshot& snapshot) {
+ DCHECK_NOT_NULL(cpp_marking_state_);
+ return cpp_marking_state_->ExtractEmbedderDataSnapshot(map, object, snapshot);
+}
+
+void MarkingWorklists::Local::PushExtractedWrapper(
+ const WrapperSnapshot& snapshot) {
+ DCHECK_NOT_NULL(cpp_marking_state_);
+ cpp_marking_state_->MarkAndPush(snapshot);
+}
+
+void MarkingWorklists::Local::PushWrapper(HeapObject object) {
+ DCHECK_NULL(cpp_marking_state_);
+ wrapper_.Push(object);
+}
+
+bool MarkingWorklists::Local::PopWrapper(HeapObject* object) {
+ DCHECK_NULL(cpp_marking_state_);
+ return wrapper_.Pop(object);
}
Address MarkingWorklists::Local::SwitchToContext(Address context) {
@@ -72,6 +92,12 @@ void MarkingWorklists::Local::SwitchToContext(
active_context_ = context;
}
+bool MarkingWorklists::Local::PublishWrapper() {
+ if (!cpp_marking_state_) return false;
+ cpp_marking_state_->Publish();
+ return true;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/marking-worklist.cc b/deps/v8/src/heap/marking-worklist.cc
index e5d3fbdf35..5dbbef5dcd 100644
--- a/deps/v8/src/heap/marking-worklist.cc
+++ b/deps/v8/src/heap/marking-worklist.cc
@@ -5,8 +5,11 @@
#include "src/heap/marking-worklist.h"
#include <algorithm>
+#include <cstddef>
#include <map>
+#include "src/heap/cppgc-js/cpp-heap.h"
+#include "src/heap/cppgc-js/cpp-marking-state.h"
#include "src/heap/marking-worklist-inl.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/heap-object.h"
@@ -29,7 +32,7 @@ MarkingWorklists::~MarkingWorklists() {
void MarkingWorklists::Clear() {
shared_.Clear();
on_hold_.Clear();
- embedder_.Clear();
+ wrapper_.Clear();
other_.Clear();
for (auto cw : context_worklists_) {
if (cw.context == kSharedContext || cw.context == kOtherContext) {
@@ -94,13 +97,17 @@ void MarkingWorklists::PrintWorklist(const char* worklist_name,
#endif
}
-const Address MarkingWorklists::Local::kSharedContext;
-const Address MarkingWorklists::Local::kOtherContext;
+constexpr Address MarkingWorklists::Local::kSharedContext;
+constexpr Address MarkingWorklists::Local::kOtherContext;
+constexpr std::nullptr_t MarkingWorklists::Local::kNoCppMarkingState;
-MarkingWorklists::Local::Local(MarkingWorklists* global)
+MarkingWorklists::Local::Local(
+ MarkingWorklists* global,
+ std::unique_ptr<CppMarkingState> cpp_marking_state)
: on_hold_(global->on_hold()),
- embedder_(global->embedder()),
- is_per_context_mode_(false) {
+ wrapper_(global->wrapper()),
+ is_per_context_mode_(false),
+ cpp_marking_state_(std::move(cpp_marking_state)) {
if (global->context_worklists().empty()) {
MarkingWorklist::Local shared(global->shared());
active_ = std::move(shared);
@@ -133,7 +140,7 @@ MarkingWorklists::Local::~Local() {
void MarkingWorklists::Local::Publish() {
active_.Publish();
on_hold_.Publish();
- embedder_.Publish();
+ wrapper_.Publish();
if (is_per_context_mode_) {
for (auto& cw : worklist_by_context_) {
if (cw.first != active_context_) {
@@ -141,6 +148,7 @@ void MarkingWorklists::Local::Publish() {
}
}
}
+ PublishWrapper();
}
bool MarkingWorklists::Local::IsEmpty() {
@@ -163,8 +171,12 @@ bool MarkingWorklists::Local::IsEmpty() {
return true;
}
-bool MarkingWorklists::Local::IsEmbedderEmpty() const {
- return embedder_.IsLocalEmpty() && embedder_.IsGlobalEmpty();
+bool MarkingWorklists::Local::IsWrapperEmpty() const {
+ if (cpp_marking_state_) {
+ DCHECK(wrapper_.IsLocalAndGlobalEmpty());
+ return cpp_marking_state_->IsLocalEmpty();
+ }
+ return wrapper_.IsLocalAndGlobalEmpty();
}
void MarkingWorklists::Local::ShareWork() {
diff --git a/deps/v8/src/heap/marking-worklist.h b/deps/v8/src/heap/marking-worklist.h
index 2be050c7e5..b202c09a70 100644
--- a/deps/v8/src/heap/marking-worklist.h
+++ b/deps/v8/src/heap/marking-worklist.h
@@ -5,21 +5,27 @@
#ifndef V8_HEAP_MARKING_WORKLIST_H_
#define V8_HEAP_MARKING_WORKLIST_H_
+#include <cstddef>
+#include <memory>
#include <unordered_map>
#include <vector>
#include "src/heap/base/worklist.h"
+#include "src/heap/cppgc-js/cpp-marking-state.h"
#include "src/heap/marking.h"
#include "src/objects/heap-object.h"
namespace v8 {
namespace internal {
+class CppMarkingState;
+class JSObject;
+
// The index of the main thread task used by concurrent/parallel GC.
const int kMainThreadTask = 0;
using MarkingWorklist = ::heap::base::Worklist<HeapObject, 64>;
-using EmbedderTracingWorklist = ::heap::base::Worklist<HeapObject, 16>;
+using WrapperTracingWorklist = ::heap::base::Worklist<HeapObject, 16>;
// We piggyback on marking to compute object sizes per native context that is
// needed for the new memory measurement API. The algorithm works as follows:
@@ -82,7 +88,7 @@ class V8_EXPORT_PRIVATE MarkingWorklists {
MarkingWorklist* shared() { return &shared_; }
MarkingWorklist* on_hold() { return &on_hold_; }
- EmbedderTracingWorklist* embedder() { return &embedder_; }
+ WrapperTracingWorklist* wrapper() { return &wrapper_; }
// A list of (context, worklist) pairs that was set up at the start of
// marking by CreateContextWorklists.
@@ -115,7 +121,7 @@ class V8_EXPORT_PRIVATE MarkingWorklists {
// Worklist for objects that potentially require embedder tracing, i.e.,
// these objects need to be handed over to the embedder to find the full
// transitive closure.
- EmbedderTracingWorklist embedder_;
+ WrapperTracingWorklist wrapper_;
// Per-context worklists.
std::vector<ContextWorklistPair> context_worklists_;
@@ -137,10 +143,13 @@ class V8_EXPORT_PRIVATE MarkingWorklists {
// been moved to active_.
class V8_EXPORT_PRIVATE MarkingWorklists::Local {
public:
- static const Address kSharedContext = MarkingWorklists::kSharedContext;
- static const Address kOtherContext = MarkingWorklists::kOtherContext;
+ static constexpr Address kSharedContext = MarkingWorklists::kSharedContext;
+ static constexpr Address kOtherContext = MarkingWorklists::kOtherContext;
+ static constexpr std::nullptr_t kNoCppMarkingState = nullptr;
- explicit Local(MarkingWorklists* global);
+ Local(
+ MarkingWorklists* global,
+ std::unique_ptr<CppMarkingState> cpp_marking_state = kNoCppMarkingState);
~Local();
inline void Push(HeapObject object);
@@ -149,12 +158,17 @@ class V8_EXPORT_PRIVATE MarkingWorklists::Local {
inline void PushOnHold(HeapObject object);
inline bool PopOnHold(HeapObject* object);
- inline void PushEmbedder(HeapObject object);
- inline bool PopEmbedder(HeapObject* object);
+ using WrapperSnapshot = CppMarkingState::EmbedderDataSnapshot;
+ inline bool ExtractWrapper(Map map, JSObject object,
+ WrapperSnapshot& snapshot);
+ inline void PushExtractedWrapper(const WrapperSnapshot& snapshot);
+ inline bool SupportsExtractWrapper();
+ inline void PushWrapper(HeapObject object);
+ inline bool PopWrapper(HeapObject* object);
void Publish();
bool IsEmpty();
- bool IsEmbedderEmpty() const;
+ bool IsWrapperEmpty() const;
// Publishes the local active marking worklist if its global worklist is
// empty. In the per-context marking mode it also publishes the shared
// worklist.
@@ -162,25 +176,35 @@ class V8_EXPORT_PRIVATE MarkingWorklists::Local {
// Merges the on-hold worklist to the shared worklist.
void MergeOnHold();
+ // Returns true if wrapper objects could be directly pushed. Otherwise,
+ // objects need to be processed one by one.
+ inline bool PublishWrapper();
+
// Returns the context of the active worklist.
Address Context() const { return active_context_; }
inline Address SwitchToContext(Address context);
inline Address SwitchToShared();
bool IsPerContextMode() const { return is_per_context_mode_; }
+ CppMarkingState* cpp_marking_state() const {
+ return cpp_marking_state_.get();
+ }
+
private:
bool PopContext(HeapObject* object);
Address SwitchToContextSlow(Address context);
inline void SwitchToContext(Address context,
MarkingWorklist::Local* worklist);
MarkingWorklist::Local on_hold_;
- EmbedderTracingWorklist::Local embedder_;
+ WrapperTracingWorklist::Local wrapper_;
MarkingWorklist::Local active_;
Address active_context_;
MarkingWorklist::Local* active_owner_;
bool is_per_context_mode_;
std::unordered_map<Address, std::unique_ptr<MarkingWorklist::Local>>
worklist_by_context_;
+
+ std::unique_ptr<CppMarkingState> cpp_marking_state_;
};
} // namespace internal
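MarkingWorklists::Local now optionally owns a CppMarkingState; when it is present, wrapper work is tracked on the cppgc side and the V8 wrapper worklist stays empty. A simplified standalone model of that dispatch (class names are stand-ins, not the V8 types):

#include <memory>
#include <vector>

struct CppMarkingStateModel {
  std::vector<int> local_items;
  bool IsLocalEmpty() const { return local_items.empty(); }
  void Publish() { local_items.clear(); }  // hand items to the global pool
};

class LocalWorklistsModel {
 public:
  explicit LocalWorklistsModel(
      std::unique_ptr<CppMarkingStateModel> cpp_state = nullptr)
      : cpp_state_(std::move(cpp_state)) {}

  bool IsWrapperEmpty() const {
    // With a cppgc state attached, the V8-side wrapper worklist stays empty.
    if (cpp_state_) return cpp_state_->IsLocalEmpty();
    return wrapper_worklist_.empty();
  }

  bool PublishWrapper() {
    // Matches MarkingWorklists::Local::PublishWrapper(): returns false when
    // wrappers have to be drained one by one instead.
    if (!cpp_state_) return false;
    cpp_state_->Publish();
    return true;
  }

 private:
  std::vector<int> wrapper_worklist_;
  std::unique_ptr<CppMarkingStateModel> cpp_state_;
};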
diff --git a/deps/v8/src/heap/memory-allocator.cc b/deps/v8/src/heap/memory-allocator.cc
index d9552149c2..9f467305bf 100644
--- a/deps/v8/src/heap/memory-allocator.cc
+++ b/deps/v8/src/heap/memory-allocator.cc
@@ -24,6 +24,9 @@ namespace internal {
// MemoryAllocator
//
+size_t MemoryAllocator::commit_page_size_ = 0;
+size_t MemoryAllocator::commit_page_size_bits_ = 0;
+
MemoryAllocator::MemoryAllocator(Isolate* isolate,
v8::PageAllocator* code_page_allocator,
size_t capacity)
@@ -87,8 +90,8 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryJob : public JobTask {
private:
void RunImpl(JobDelegate* delegate) {
- unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>(
- delegate);
+ unmapper_->PerformFreeMemoryOnQueuedChunks(FreeMode::kUncommitPooled,
+ delegate);
if (FLAG_trace_unmapper) {
PrintIsolate(unmapper_->heap_->isolate(), "UnmapFreeMemoryTask Done\n");
}
@@ -110,7 +113,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
}
}
} else {
- PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
+ PerformFreeMemoryOnQueuedChunks(FreeMode::kUncommitPooled);
}
}
@@ -131,21 +134,20 @@ void MemoryAllocator::Unmapper::PrepareForGC() {
void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
CancelAndWaitForPendingTasks();
- PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
+ PerformFreeMemoryOnQueuedChunks(FreeMode::kFreePooled);
}
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks(
JobDelegate* delegate) {
MemoryChunk* chunk = nullptr;
- while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
+ while ((chunk = GetMemoryChunkSafe(kNonRegular)) != nullptr) {
allocator_->PerformFreeMemory(chunk);
if (delegate && delegate->ShouldYield()) return;
}
}
-template <MemoryAllocator::Unmapper::FreeMode mode>
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
- JobDelegate* delegate) {
+ MemoryAllocator::Unmapper::FreeMode mode, JobDelegate* delegate) {
MemoryChunk* chunk = nullptr;
if (FLAG_trace_unmapper) {
PrintIsolate(
@@ -154,18 +156,18 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
NumberOfChunks());
}
// Regular chunks.
- while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
+ while ((chunk = GetMemoryChunkSafe(kRegular)) != nullptr) {
bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
allocator_->PerformFreeMemory(chunk);
- if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
+ if (pooled) AddMemoryChunkSafe(kPooled, chunk);
if (delegate && delegate->ShouldYield()) return;
}
- if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
+ if (mode == MemoryAllocator::Unmapper::FreeMode::kFreePooled) {
// The previous loop uncommitted any pages marked as pooled and added them
- // to the pooled list. In case of kReleasePooled we need to free them
- // though.
- while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
- allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
+    // to the pooled list. In case of kFreePooled those pages also need to be
+    // freed here.
+ while ((chunk = GetMemoryChunkSafe(kPooled)) != nullptr) {
+ allocator_->FreePooledChunk(chunk);
if (delegate && delegate->ShouldYield()) return;
}
}
@@ -174,7 +176,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
void MemoryAllocator::Unmapper::TearDown() {
CHECK(!job_handle_ || !job_handle_->IsValid());
- PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
+ PerformFreeMemoryOnQueuedChunks(FreeMode::kFreePooled);
for (int i = 0; i < kNumberOfChunkQueues; i++) {
DCHECK(chunks_[i].empty());
}
@@ -228,9 +230,9 @@ bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
return true;
}
-void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
- Address base, size_t size) {
- CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
+void MemoryAllocator::FreeMemoryRegion(v8::PageAllocator* page_allocator,
+ Address base, size_t size) {
+ FreePages(page_allocator, reinterpret_cast<void*>(base), size);
}
Address MemoryAllocator::AllocateAlignedMemory(
@@ -400,14 +402,15 @@ V8_EXPORT_PRIVATE BasicMemoryChunk* MemoryAllocator::AllocateBasicChunk(
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
+ PageSize page_size,
BaseSpace* owner) {
BasicMemoryChunk* basic_chunk = AllocateBasicChunk(
reserve_area_size, commit_area_size, executable, owner);
if (basic_chunk == nullptr) return nullptr;
- MemoryChunk* chunk =
- MemoryChunk::Initialize(basic_chunk, isolate_->heap(), executable);
+ MemoryChunk* chunk = MemoryChunk::Initialize(basic_chunk, isolate_->heap(),
+ executable, page_size);
#ifdef DEBUG
if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
@@ -440,7 +443,8 @@ void MemoryAllocator::PartialFreeMemory(BasicMemoryChunk* chunk,
size_ -= released_bytes;
}
-void MemoryAllocator::UnregisterSharedMemory(BasicMemoryChunk* chunk) {
+void MemoryAllocator::UnregisterSharedBasicMemoryChunk(
+ BasicMemoryChunk* chunk) {
VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
@@ -448,8 +452,8 @@ void MemoryAllocator::UnregisterSharedMemory(BasicMemoryChunk* chunk) {
size_ -= size;
}
-void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
- Executability executable) {
+void MemoryAllocator::UnregisterBasicMemoryChunk(BasicMemoryChunk* chunk,
+ Executability executable) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
@@ -469,15 +473,20 @@ void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
chunk->SetFlag(MemoryChunk::UNREGISTERED);
}
-void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
- UnregisterMemory(chunk, chunk->executable());
+void MemoryAllocator::UnregisterMemoryChunk(MemoryChunk* chunk) {
+ UnregisterBasicMemoryChunk(chunk, chunk->executable());
+}
+
+void MemoryAllocator::UnregisterReadOnlyPage(ReadOnlyPage* page) {
+ DCHECK(!page->executable());
+ UnregisterBasicMemoryChunk(page, NOT_EXECUTABLE);
}
void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
- UnregisterSharedMemory(chunk);
+ UnregisterSharedBasicMemoryChunk(chunk);
v8::PageAllocator* allocator = page_allocator(NOT_EXECUTABLE);
VirtualMemory* reservation = chunk->reserved_memory();
@@ -487,15 +496,15 @@ void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
// Only read-only pages can have a non-initialized reservation object. This
// happens when the pages are remapped to multiple locations and where the
// reservation would therefore be invalid.
- FreeMemory(allocator, chunk->address(),
- RoundUp(chunk->size(), allocator->AllocatePageSize()));
+ FreeMemoryRegion(allocator, chunk->address(),
+ RoundUp(chunk->size(), allocator->AllocatePageSize()));
}
}
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
- UnregisterMemory(chunk);
+ UnregisterMemoryChunk(chunk);
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
chunk->SetFlag(MemoryChunk::PRE_FREED);
@@ -516,25 +525,18 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
}
}
-template <MemoryAllocator::FreeMode mode>
-void MemoryAllocator::Free(MemoryChunk* chunk) {
+void MemoryAllocator::Free(MemoryAllocator::FreeMode mode, MemoryChunk* chunk) {
switch (mode) {
- case kFull:
+ case kImmediately:
PreFreeMemory(chunk);
PerformFreeMemory(chunk);
break;
- case kAlreadyPooled:
- // Pooled pages cannot be touched anymore as their memory is uncommitted.
- // Pooled pages are not-executable.
- FreeMemory(data_page_allocator(), chunk->address(),
- static_cast<size_t>(MemoryChunk::kPageSize));
- break;
- case kPooledAndQueue:
+ case kConcurrentlyAndPool:
DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
chunk->SetFlag(MemoryChunk::POOLED);
V8_FALLTHROUGH;
- case kPreFreeAndQueue:
+ case kConcurrently:
PreFreeMemory(chunk);
// The chunks added to this queue will be freed by a concurrent thread.
unmapper()->AddMemoryChunkSafe(chunk);
@@ -542,23 +544,18 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
}
}
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kFull>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
+void MemoryAllocator::FreePooledChunk(MemoryChunk* chunk) {
+ // Pooled pages cannot be touched anymore as their memory is uncommitted.
+ // Pooled pages are not-executable.
+ FreeMemoryRegion(data_page_allocator(), chunk->address(),
+ static_cast<size_t>(MemoryChunk::kPageSize));
+}
-template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
-Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
+Page* MemoryAllocator::AllocatePage(MemoryAllocator::AllocationMode alloc_mode,
+ size_t size, Space* owner,
Executability executable) {
MemoryChunk* chunk = nullptr;
- if (alloc_mode == kPooled) {
+ if (alloc_mode == kUsePool) {
DCHECK_EQ(size, static_cast<size_t>(
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
owner->identity())));
@@ -566,22 +563,12 @@ Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
chunk = AllocatePagePooled(owner);
}
if (chunk == nullptr) {
- chunk = AllocateChunk(size, size, executable, owner);
+ chunk = AllocateChunk(size, size, executable, PageSize::kRegular, owner);
}
if (chunk == nullptr) return nullptr;
return owner->InitializePage(chunk);
}
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
- size_t size, PagedSpace* owner, Executability executable);
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-
ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(size_t size,
ReadOnlySpace* owner) {
BasicMemoryChunk* chunk =
@@ -599,13 +586,13 @@ MemoryAllocator::RemapSharedPage(
LargePage* MemoryAllocator::AllocateLargePage(size_t size,
LargeObjectSpace* owner,
Executability executable) {
- MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
+ MemoryChunk* chunk =
+ AllocateChunk(size, size, executable, PageSize::kLarge, owner);
if (chunk == nullptr) return nullptr;
return LargePage::Initialize(isolate_->heap(), chunk, executable);
}
-template <typename SpaceType>
-MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
+MemoryChunk* MemoryAllocator::AllocatePagePooled(Space* owner) {
MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
if (chunk == nullptr) return nullptr;
const int size = MemoryChunk::kPageSize;
@@ -624,7 +611,8 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
BasicMemoryChunk* basic_chunk =
BasicMemoryChunk::Initialize(isolate_->heap(), start, size, area_start,
area_end, owner, std::move(reservation));
- MemoryChunk::Initialize(basic_chunk, isolate_->heap(), NOT_EXECUTABLE);
+ MemoryChunk::Initialize(basic_chunk, isolate_->heap(), NOT_EXECUTABLE,
+ PageSize::kRegular);
size_ += size;
return chunk;
}
@@ -637,18 +625,16 @@ void MemoryAllocator::ZapBlock(Address start, size_t size,
size >> kTaggedSizeLog2);
}
-intptr_t MemoryAllocator::GetCommitPageSize() {
- if (FLAG_v8_os_page_size != 0) {
- DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
- return FLAG_v8_os_page_size * KB;
- } else {
- return CommitPageSize();
- }
+void MemoryAllocator::InitializeOncePerProcess() {
+ commit_page_size_ =
+ FLAG_v8_os_page_size > 0 ? FLAG_v8_os_page_size * KB : CommitPageSize();
+ CHECK(base::bits::IsPowerOfTwo(commit_page_size_));
+ commit_page_size_bits_ = base::bits::WhichPowerOfTwo(commit_page_size_);
}
base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
size_t size) {
- size_t page_size = MemoryAllocator::GetCommitPageSize();
+ size_t page_size = GetCommitPageSize();
if (size < page_size + FreeSpace::kSize) {
return base::AddressRegion(0, 0);
}
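InitializeOncePerProcess() above turns the former per-call GetCommitPageSize() computation into two values cached once per process, plus a cached log2 for shifting. A standalone sketch of the same idea (flag plumbing and helper names are illustrative, not the V8 API):

#include <cassert>
#include <cstddef>

namespace commit_page_size_cache {

size_t commit_page_size = 0;
size_t commit_page_size_bits = 0;

bool IsPowerOfTwo(size_t x) { return x != 0 && (x & (x - 1)) == 0; }

size_t WhichPowerOfTwo(size_t x) {
  size_t bits = 0;
  while (x > 1) {
    x >>= 1;
    ++bits;
  }
  return bits;
}

// Called once during process start-up, mirroring
// MemoryAllocator::InitializeOncePerProcess().
void InitializeOncePerProcess(size_t page_size_override_kb,
                              size_t os_commit_page_size) {
  commit_page_size = page_size_override_kb > 0 ? page_size_override_kb * 1024
                                               : os_commit_page_size;
  assert(IsPowerOfTwo(commit_page_size));
  commit_page_size_bits = WhichPowerOfTwo(commit_page_size);
}

size_t GetCommitPageSize() {
  assert(commit_page_size > 0);  // InitializeOncePerProcess() must run first
  return commit_page_size;
}

}  // namespace commit_page_size_cache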
diff --git a/deps/v8/src/heap/memory-allocator.h b/deps/v8/src/heap/memory-allocator.h
index 49b5a769cf..f7a5da5c26 100644
--- a/deps/v8/src/heap/memory-allocator.h
+++ b/deps/v8/src/heap/memory-allocator.h
@@ -50,9 +50,9 @@ class MemoryAllocator {
void AddMemoryChunkSafe(MemoryChunk* chunk) {
if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
- AddMemoryChunkSafe<kRegular>(chunk);
+ AddMemoryChunkSafe(kRegular, chunk);
} else {
- AddMemoryChunkSafe<kNonRegular>(chunk);
+ AddMemoryChunkSafe(kNonRegular, chunk);
}
}
@@ -61,10 +61,10 @@ class MemoryAllocator {
// (1) Try to get a chunk that was declared as pooled and already has
// been uncommitted.
// (2) Try to steal any memory chunk of kPageSize that would've been
- // unmapped.
- MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
+ // uncommitted.
+ MemoryChunk* chunk = GetMemoryChunkSafe(kPooled);
if (chunk == nullptr) {
- chunk = GetMemoryChunkSafe<kRegular>();
+ chunk = GetMemoryChunkSafe(kRegular);
if (chunk != nullptr) {
// For stolen chunks we need to manually free any allocated memory.
chunk->ReleaseAllAllocatedMemory();
@@ -90,23 +90,24 @@ class MemoryAllocator {
kRegular, // Pages of kPageSize that do not live in a CodeRange and
// can thus be used for stealing.
kNonRegular, // Large chunks and executable chunks.
- kPooled, // Pooled chunks, already uncommited and ready for reuse.
+ kPooled, // Pooled chunks, already freed and ready for reuse.
kNumberOfChunkQueues,
};
enum class FreeMode {
+ // Disables any access on pooled pages before adding them to the pool.
kUncommitPooled,
- kReleasePooled,
+
+ // Free pooled pages. Only used on tear down and last-resort GCs.
+ kFreePooled,
};
- template <ChunkQueueType type>
- void AddMemoryChunkSafe(MemoryChunk* chunk) {
+ void AddMemoryChunkSafe(ChunkQueueType type, MemoryChunk* chunk) {
base::MutexGuard guard(&mutex_);
chunks_[type].push_back(chunk);
}
- template <ChunkQueueType type>
- MemoryChunk* GetMemoryChunkSafe() {
+ MemoryChunk* GetMemoryChunkSafe(ChunkQueueType type) {
base::MutexGuard guard(&mutex_);
if (chunks_[type].empty()) return nullptr;
MemoryChunk* chunk = chunks_[type].back();
@@ -116,8 +117,8 @@ class MemoryAllocator {
bool MakeRoomForNewTasks();
- template <FreeMode mode>
- void PerformFreeMemoryOnQueuedChunks(JobDelegate* delegate = nullptr);
+ void PerformFreeMemoryOnQueuedChunks(FreeMode mode,
+ JobDelegate* delegate = nullptr);
void PerformFreeMemoryOnQueuedNonRegularChunks(
JobDelegate* delegate = nullptr);
@@ -132,18 +133,38 @@ class MemoryAllocator {
};
enum AllocationMode {
+ // Regular allocation path. Does not use pool.
kRegular,
- kPooled,
+
+ // Uses the pool for allocation first.
+ kUsePool,
};
enum FreeMode {
- kFull,
- kAlreadyPooled,
- kPreFreeAndQueue,
- kPooledAndQueue,
+ // Frees page immediately on the main thread.
+ kImmediately,
+
+ // Frees page on background thread.
+ kConcurrently,
+
+ // Uncommits but does not free page on background thread. Page is added to
+ // pool. Used to avoid the munmap/mmap-cycle when we quickly reallocate
+ // pages.
+ kConcurrentlyAndPool,
};
- V8_EXPORT_PRIVATE static intptr_t GetCommitPageSize();
+ // Initialize page sizes field in V8::Initialize.
+ static void InitializeOncePerProcess();
+
+ V8_INLINE static intptr_t GetCommitPageSize() {
+ DCHECK_LT(0, commit_page_size_);
+ return commit_page_size_;
+ }
+
+ V8_INLINE static intptr_t GetCommitPageSizeBits() {
+ DCHECK_LT(0, commit_page_size_bits_);
+ return commit_page_size_bits_;
+ }
// Computes the memory area of discardable memory within a given memory area
// [addr, addr+size) and returns the result as base::AddressRegion. If the
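Editorial sketch: the renamed enums above (kUsePool, kConcurrentlyAndPool) document the pooling strategy: pages freed from the semispaces are parked in a pool instead of being unmapped, so the next allocation can skip an mmap round-trip. A standalone sketch of that idea, using illustrative types rather than the V8 MemoryAllocator:

  #include <cstddef>
  #include <cstdio>
  #include <vector>

  struct PooledPage {
    size_t size;
  };

  class PagePoolSketch {
   public:
    // Free(kConcurrentlyAndPool, ...): instead of unmapping, park the page.
    void FreeToPool(PooledPage page) { pool_.push_back(page); }

    // AllocatePage(kUsePool, ...): try the pool before mapping fresh memory.
    PooledPage Allocate(size_t size) {
      if (!pool_.empty()) {
        PooledPage page = pool_.back();
        pool_.pop_back();
        std::puts("reused pooled page");
        return page;
      }
      std::puts("mapped a fresh page");
      return PooledPage{size};
    }

   private:
    std::vector<PooledPage> pool_;
  };

  int main() {
    PagePoolSketch allocator;
    PooledPage page = allocator.Allocate(256 * 1024);  // fresh mapping
    allocator.FreeToPool(page);
    allocator.Allocate(256 * 1024);                    // reused from the pool
    return 0;
  }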
@@ -160,10 +181,9 @@ class MemoryAllocator {
// Allocates a Page from the allocator. AllocationMode is used to indicate
// whether pooled allocation, which only works for MemoryChunk::kPageSize,
// should be tried first.
- template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
- typename SpaceType>
- EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
+ V8_EXPORT_PRIVATE Page* AllocatePage(
+ MemoryAllocator::AllocationMode alloc_mode, size_t size, Space* owner,
+ Executability executable);
LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
Executability executable);
@@ -173,9 +193,8 @@ class MemoryAllocator {
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapSharedPage(
::v8::PageAllocator::SharedMemory* shared_memory, Address new_address);
- template <MemoryAllocator::FreeMode mode = kFull>
- EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- void Free(MemoryChunk* chunk);
+ V8_EXPORT_PRIVATE void Free(MemoryAllocator::FreeMode mode,
+ MemoryChunk* chunk);
void FreeReadOnlyPage(ReadOnlyPage* chunk);
// Returns allocated spaces in bytes.
@@ -197,27 +216,15 @@ class MemoryAllocator {
address >= highest_ever_allocated_;
}
- // Returns a BasicMemoryChunk in which the memory region from commit_area_size
- // to reserve_area_size of the chunk area is reserved but not committed, it
- // could be committed later by calling MemoryChunk::CommitArea.
- V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
- size_t reserve_area_size, size_t commit_area_size,
- Executability executable, BaseSpace* space);
-
// Returns a MemoryChunk in which the memory region from commit_area_size to
// reserve_area_size of the chunk area is reserved but not committed, it
// could be committed later by calling MemoryChunk::CommitArea.
V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
+ PageSize page_size,
BaseSpace* space);
- Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
- size_t alignment, Executability executable,
- void* hint, VirtualMemory* controller);
-
- void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);
-
// Partially release |bytes_to_free| bytes starting at |start_free|. Note that
// internally memory is freed from |start_free| to the end of the reservation.
// Additional memory beyond the page is not accounted though, so
@@ -234,23 +241,10 @@ class MemoryAllocator {
}
#endif // DEBUG
- // Commit memory region owned by given reservation object. Returns true if
- // it succeeded and false otherwise.
- bool CommitMemory(VirtualMemory* reservation);
-
- // Uncommit memory region owned by given reservation object. Returns true if
- // it succeeded and false otherwise.
- bool UncommitMemory(VirtualMemory* reservation);
-
// Zaps a contiguous block of memory [start..(start+size)[ with
// a given zap value.
void ZapBlock(Address start, size_t size, uintptr_t zap_value);
- V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
- Address start,
- size_t commit_size,
- size_t reserved_size);
-
// Page allocator instance for allocating non-executable pages.
// Guaranteed to be a valid pointer.
v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }
@@ -268,16 +262,37 @@ class MemoryAllocator {
Unmapper* unmapper() { return &unmapper_; }
- // Performs all necessary bookkeeping to free the memory, but does not free
- // it.
- void UnregisterMemory(MemoryChunk* chunk);
- void UnregisterMemory(BasicMemoryChunk* chunk,
- Executability executable = NOT_EXECUTABLE);
- void UnregisterSharedMemory(BasicMemoryChunk* chunk);
-
- void RegisterReadOnlyMemory(ReadOnlyPage* page);
+ void UnregisterReadOnlyPage(ReadOnlyPage* page);
private:
+ // Returns a BasicMemoryChunk in which the memory region from commit_area_size
+ // to reserve_area_size of the chunk area is reserved but not committed, it
+ // could be committed later by calling MemoryChunk::CommitArea.
+ V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
+ size_t reserve_area_size, size_t commit_area_size,
+ Executability executable, BaseSpace* space);
+
+ Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
+ size_t alignment, Executability executable,
+ void* hint, VirtualMemory* controller);
+
+ // Commit memory region owned by given reservation object. Returns true if
+ // it succeeded and false otherwise.
+ bool CommitMemory(VirtualMemory* reservation);
+
+ V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
+ Address start,
+ size_t commit_size,
+ size_t reserved_size);
+
+ // Disallows any access on memory region owned by given reservation object.
+ // Returns true if it succeeded and false otherwise.
+ bool UncommitMemory(VirtualMemory* reservation);
+
+ // Frees the given memory region.
+ void FreeMemoryRegion(v8::PageAllocator* page_allocator, Address addr,
+ size_t size);
+
// PreFreeMemory logically frees the object, i.e., it unregisters the
// memory, logs a delete event and adds the chunk to remembered unmapped
// pages.
@@ -289,8 +304,10 @@ class MemoryAllocator {
// See AllocatePage for public interface. Note that currently we only
// support pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
- template <typename SpaceType>
- MemoryChunk* AllocatePagePooled(SpaceType* owner);
+ MemoryChunk* AllocatePagePooled(Space* owner);
+
+ // Frees a pooled page. Only used on tear-down and last-resort GCs.
+ void FreePooledChunk(MemoryChunk* chunk);
// Initializes pages in a chunk. Returns the first page address.
// This function and GetChunkId() are provided for the mark-compact
@@ -313,6 +330,15 @@ class MemoryAllocator {
}
}
+ // Performs all necessary bookkeeping to free the memory, but does not free
+ // it.
+ void UnregisterMemoryChunk(MemoryChunk* chunk);
+ void UnregisterSharedBasicMemoryChunk(BasicMemoryChunk* chunk);
+ void UnregisterBasicMemoryChunk(BasicMemoryChunk* chunk,
+ Executability executable = NOT_EXECUTABLE);
+
+ void RegisterReadOnlyMemory(ReadOnlyPage* page);
+
#ifdef DEBUG
void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
base::MutexGuard guard(&executable_memory_mutex_);
@@ -369,35 +395,15 @@ class MemoryAllocator {
base::Mutex executable_memory_mutex_;
#endif // DEBUG
+ V8_EXPORT_PRIVATE static size_t commit_page_size_;
+ V8_EXPORT_PRIVATE static size_t commit_page_size_bits_;
+
friend class heap::TestCodePageAllocatorScope;
friend class heap::TestMemoryAllocatorScope;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
-extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
- size_t size, PagedSpace* owner, Executability executable);
-extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-
-extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void MemoryAllocator::
- Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
-extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void MemoryAllocator::
- Free<MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
-extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void MemoryAllocator::
- Free<MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
-extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void MemoryAllocator::
- Free<MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
-
} // namespace internal
} // namespace v8
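Editorial sketch: taken together, this header change is a template-to-runtime-parameter refactor: AllocatePage<alloc_mode>() and Free<mode>() become AllocatePage(alloc_mode, ...) and Free(mode, ...), which is why the explicit template instantiations at the bottom of the file disappear. A minimal standalone mock of the same pattern (illustrative names, not the V8 types):

  #include <cstdio>

  // Before: the mode was a template parameter, so every mode used anywhere
  // needed an extern template declaration plus an explicit instantiation:
  //
  //   template <FreeMode mode>
  //   void Free(MemoryChunk* chunk);
  //
  // After: the mode is an ordinary argument, one out-of-line definition
  // covers all callers and the EXPORT_TEMPLATE_DECLARE boilerplate goes away.

  enum class FreeMode { kImmediately, kConcurrently, kConcurrentlyAndPool };

  struct MemoryChunkMock {};

  void Free(FreeMode mode, MemoryChunkMock* /*chunk*/) {
    switch (mode) {
      case FreeMode::kImmediately:
        std::puts("free now on the main thread");
        break;
      case FreeMode::kConcurrently:
        std::puts("queue for the background unmapper");
        break;
      case FreeMode::kConcurrentlyAndPool:
        std::puts("uncommit and keep the page in the pool");
        break;
    }
  }

  int main() {
    MemoryChunkMock chunk;
    Free(FreeMode::kConcurrentlyAndPool, &chunk);  // was Free<kPooledAndQueue>(chunk)
    return 0;
  }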
diff --git a/deps/v8/src/heap/memory-chunk-layout.h b/deps/v8/src/heap/memory-chunk-layout.h
index 1b958f0cbf..9a76730e82 100644
--- a/deps/v8/src/heap/memory-chunk-layout.h
+++ b/deps/v8/src/heap/memory-chunk-layout.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_MEMORY_CHUNK_LAYOUT_H_
#define V8_HEAP_MEMORY_CHUNK_LAYOUT_H_
+#include "src/heap/base/active-system-pages.h"
#include "src/heap/heap.h"
#include "src/heap/list.h"
#include "src/heap/progress-bar.h"
@@ -27,10 +28,13 @@ class SlotSet;
enum RememberedSetType {
OLD_TO_NEW,
OLD_TO_OLD,
- OLD_TO_CODE = V8_EXTERNAL_CODE_SPACE_BOOL ? OLD_TO_OLD + 1 : OLD_TO_OLD,
+ OLD_TO_SHARED,
+ OLD_TO_CODE = V8_EXTERNAL_CODE_SPACE_BOOL ? OLD_TO_SHARED + 1 : OLD_TO_SHARED,
NUMBER_OF_REMEMBERED_SET_TYPES
};
+using ActiveSystemPages = ::heap::base::ActiveSystemPages;
+
class V8_EXPORT_PRIVATE MemoryChunkLayout {
public:
static const int kNumSets = NUMBER_OF_REMEMBERED_SET_TYPES;
@@ -67,6 +71,7 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
FIELD(Bitmap*, YoungGenerationBitmap),
FIELD(CodeObjectRegistry*, CodeObjectRegistry),
FIELD(PossiblyEmptyBuckets, PossiblyEmptyBuckets),
+ FIELD(ActiveSystemPages, ActiveSystemPages),
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
FIELD(ObjectStartBitmap, ObjectStartBitmap),
#endif
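Editorial note: the new OLD_TO_SHARED entry is inserted before OLD_TO_CODE, so NUMBER_OF_REMEMBERED_SET_TYPES grows by one and the per-chunk arrays sized by it (slot sets and typed slot sets in memory-chunk.cc below) gain an entry for old-to-shared references. A small standalone illustration of how the enumerator arithmetic works out:

  #include <cstdio>

  // Stand-in for V8_EXTERNAL_CODE_SPACE_BOOL.
  constexpr bool kExternalCodeSpace = true;

  enum RememberedSetType {
    OLD_TO_NEW,
    OLD_TO_OLD,
    OLD_TO_SHARED,
    OLD_TO_CODE = kExternalCodeSpace ? OLD_TO_SHARED + 1 : OLD_TO_SHARED,
    NUMBER_OF_REMEMBERED_SET_TYPES
  };

  int main() {
    // Prints 4 with an external code space, 3 without.
    std::printf("%d slot sets per chunk\n", NUMBER_OF_REMEMBERED_SET_TYPES);
    return 0;
  }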
diff --git a/deps/v8/src/heap/memory-chunk.cc b/deps/v8/src/heap/memory-chunk.cc
index d4d1116683..08baeee8b2 100644
--- a/deps/v8/src/heap/memory-chunk.cc
+++ b/deps/v8/src/heap/memory-chunk.cc
@@ -6,9 +6,11 @@
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
+#include "src/common/globals.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
+#include "src/heap/memory-chunk-layout.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"
@@ -117,11 +119,14 @@ PageAllocator::Permission DefaultWritableCodePermissions() {
} // namespace
MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
- Executability executable) {
+ Executability executable,
+ PageSize page_size) {
MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
+ base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_SHARED],
+ nullptr);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_CODE],
nullptr);
@@ -131,6 +136,8 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
nullptr);
+ base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_SHARED],
+ nullptr);
chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
@@ -176,6 +183,15 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
chunk->possibly_empty_buckets_.Initialize();
+ if (page_size == PageSize::kRegular) {
+ chunk->active_system_pages_.Init(MemoryChunkLayout::kMemoryChunkHeaderSize,
+ MemoryAllocator::GetCommitPageSizeBits(),
+ chunk->size());
+ } else {
+ // We do not track active system pages for large pages.
+ chunk->active_system_pages_.Clear();
+ }
+
// All pages of a shared heap need to be marked with this flag.
if (heap->IsShared()) chunk->SetFlag(IN_SHARED_HEAP);
@@ -191,9 +207,8 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
}
size_t MemoryChunk::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
- return size();
- return high_water_mark_;
+ if (!base::OS::HasLazyCommits() || IsLargePage()) return size();
+ return active_system_pages_.Size(MemoryAllocator::GetCommitPageSizeBits());
}
void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
@@ -259,6 +274,8 @@ void MemoryChunk::ReleaseAllAllocatedMemory() {
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
+template V8_EXPORT_PRIVATE SlotSet*
+MemoryChunk::AllocateSlotSet<OLD_TO_SHARED>();
#ifdef V8_EXTERNAL_CODE_SPACE
template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_CODE>();
#endif // V8_EXTERNAL_CODE_SPACE
@@ -286,6 +303,7 @@ SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
+template void MemoryChunk::ReleaseSlotSet<OLD_TO_SHARED>();
#ifdef V8_EXTERNAL_CODE_SPACE
template void MemoryChunk::ReleaseSlotSet<OLD_TO_CODE>();
#endif // V8_EXTERNAL_CODE_SPACE
@@ -308,6 +326,7 @@ void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
+template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_SHARED>();
template <RememberedSetType type>
TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
@@ -324,6 +343,7 @@ TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
+template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_SHARED>();
template <RememberedSetType type>
void MemoryChunk::ReleaseTypedSlotSet() {
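Editorial sketch: CommittedPhysicalMemory() for regular pages is now derived from an ActiveSystemPages bitmask rather than the high-water mark. A standalone mock of that bookkeeping, assuming a 64-page chunk and simplified rounding (the real heap::base::ActiveSystemPages from the diffstat has the same Add/Size shape):

  #include <bitset>
  #include <cassert>
  #include <cstddef>
  #include <cstdint>

  // One bit per OS commit page of a chunk. Add() marks the pages overlapping
  // a byte range and returns how many were newly marked; Size() converts the
  // popcount back to bytes.
  class ActiveSystemPagesMock {
   public:
    size_t Add(uintptr_t start, uintptr_t end, size_t page_size_bits) {
      const uintptr_t page_size = uintptr_t{1} << page_size_bits;
      const uintptr_t first = start >> page_size_bits;
      const uintptr_t last = (end + page_size - 1) >> page_size_bits;
      size_t added = 0;
      for (uintptr_t p = first; p < last; ++p) {
        if (!bits_.test(p)) {
          bits_.set(p);
          ++added;
        }
      }
      return added;
    }

    size_t Size(size_t page_size_bits) const {
      return bits_.count() << page_size_bits;
    }

   private:
    std::bitset<64> bits_;
  };

  int main() {
    ActiveSystemPagesMock pages;
    // Touch bytes [0, 5000) of a chunk with 4 KiB commit pages: two pages.
    assert(pages.Add(0, 5000, 12) == 2);
    // Touching the same range again adds nothing new.
    assert(pages.Add(0, 5000, 12) == 0);
    assert(pages.Size(12) == 2 * 4096);
    return 0;
  }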
diff --git a/deps/v8/src/heap/memory-chunk.h b/deps/v8/src/heap/memory-chunk.h
index de6f09234b..8a8f556426 100644
--- a/deps/v8/src/heap/memory-chunk.h
+++ b/deps/v8/src/heap/memory-chunk.h
@@ -10,6 +10,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
+#include "src/heap/base/active-system-pages.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
@@ -219,7 +220,7 @@ class MemoryChunk : public BasicMemoryChunk {
protected:
static MemoryChunk* Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
- Executability executable);
+ Executability executable, PageSize page_size);
// Release all memory allocated by the chunk. Should be called when memory
// chunk is about to be freed.
@@ -291,6 +292,8 @@ class MemoryChunk : public BasicMemoryChunk {
PossiblyEmptyBuckets possibly_empty_buckets_;
+ ActiveSystemPages active_system_pages_;
+
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
ObjectStartBitmap object_start_bitmap_;
#endif
diff --git a/deps/v8/src/heap/new-spaces-inl.h b/deps/v8/src/heap/new-spaces-inl.h
index 72112d2426..0f1a3a361a 100644
--- a/deps/v8/src/heap/new-spaces-inl.h
+++ b/deps/v8/src/heap/new-spaces-inl.h
@@ -102,17 +102,14 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
result = AllocateFastUnaligned(size_in_bytes, origin);
}
- if (!result.IsRetry()) {
- return result;
- } else {
- return AllocateRawSlow(size_in_bytes, alignment, origin);
- }
+ return result.IsFailure() ? AllocateRawSlow(size_in_bytes, alignment, origin)
+ : result;
}
AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
AllocationOrigin origin) {
if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
- return AllocationResult::Retry(NEW_SPACE);
+ return AllocationResult::Failure();
}
HeapObject obj =
HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes));
@@ -124,7 +121,7 @@ AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
UpdateAllocationOrigins(origin);
}
- return obj;
+ return AllocationResult::FromObject(obj);
}
AllocationResult NewSpace::AllocateFastAligned(
@@ -135,7 +132,7 @@ AllocationResult NewSpace::AllocateFastAligned(
int aligned_size_in_bytes = size_in_bytes + filler_size;
if (!allocation_info_->CanIncrementTop(aligned_size_in_bytes)) {
- return AllocationResult::Retry(NEW_SPACE);
+ return AllocationResult::Failure();
}
HeapObject obj = HeapObject::FromAddress(
allocation_info_->IncrementTop(aligned_size_in_bytes));
@@ -153,7 +150,7 @@ AllocationResult NewSpace::AllocateFastAligned(
UpdateAllocationOrigins(origin);
}
- return obj;
+ return AllocationResult::FromObject(obj);
}
V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
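Editorial sketch: the AllocateFast*/AllocateRaw changes above track the AllocationResult API rename: Retry(space) becomes Failure(), the implicit HeapObject conversion becomes FromObject(), and callers test IsFailure(). A minimal standalone mock of that result type and the fast-path/slow-path fallback (illustrative, not the real allocation-result.h):

  #include <cassert>
  #include <cstdint>

  struct HeapObjectMock {
    uintptr_t address = 0;
  };

  class AllocationResultMock {
   public:
    static AllocationResultMock Failure() { return AllocationResultMock{{0}}; }
    static AllocationResultMock FromObject(HeapObjectMock object) {
      return AllocationResultMock{object};
    }

    bool IsFailure() const { return object_.address == 0; }
    HeapObjectMock ToObjectChecked() const {
      assert(!IsFailure());
      return object_;
    }

   private:
    explicit AllocationResultMock(HeapObjectMock object) : object_(object) {}
    HeapObjectMock object_;
  };

  // Mirrors the rewritten AllocateRaw tail: fall back to the slow path only
  // when the fast path reports failure.
  AllocationResultMock AllocateRawSlowMock() {
    return AllocationResultMock::FromObject({0x1000});
  }

  AllocationResultMock AllocateRawMock(bool fast_path_has_room) {
    AllocationResultMock result =
        fast_path_has_room ? AllocationResultMock::FromObject({0x2000})
                           : AllocationResultMock::Failure();
    return result.IsFailure() ? AllocateRawSlowMock() : result;
  }

  int main() {
    assert(AllocateRawMock(true).ToObjectChecked().address == 0x2000);
    assert(AllocateRawMock(false).ToObjectChecked().address == 0x1000);
    return 0;
  }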
diff --git a/deps/v8/src/heap/new-spaces.cc b/deps/v8/src/heap/new-spaces.cc
index 6155a06f77..685e631f23 100644
--- a/deps/v8/src/heap/new-spaces.cc
+++ b/deps/v8/src/heap/new-spaces.cc
@@ -24,7 +24,6 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
Page* page = static_cast<Page*>(chunk);
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->list_node().Initialize();
-#ifdef ENABLE_MINOR_MC
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
heap()
@@ -32,7 +31,6 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
->non_atomic_marking_state()
->ClearLiveness(page);
}
-#endif // ENABLE_MINOR_MC
page->InitializationMemoryFence();
return page;
}
@@ -54,12 +52,14 @@ bool SemiSpace::EnsureCurrentCapacity() {
// Free all overallocated pages which are behind current_page.
while (current_page) {
MemoryChunk* next_current = current_page->list_node().next();
+ AccountUncommitted(Page::kPageSize);
+ DecrementCommittedPhysicalMemory(current_page->CommittedPhysicalMemory());
memory_chunk_list_.Remove(current_page);
// Clear new space flags to avoid this page being treated as a new
// space page that is potentially being swept.
current_page->ClearFlags(Page::kIsInYoungGenerationMask);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
- current_page);
+ heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
+ current_page);
current_page = next_current;
}
@@ -68,12 +68,14 @@ bool SemiSpace::EnsureCurrentCapacity() {
heap()->incremental_marking()->non_atomic_marking_state();
while (actual_pages < expected_pages) {
actual_pages++;
- current_page =
- heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
- NOT_EXECUTABLE);
+ current_page = heap()->memory_allocator()->AllocatePage(
+ MemoryAllocator::kUsePool,
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+ NOT_EXECUTABLE);
if (current_page == nullptr) return false;
DCHECK_NOT_NULL(current_page);
+ AccountCommitted(Page::kPageSize);
+ IncrementCommittedPhysicalMemory(current_page->CommittedPhysicalMemory());
memory_chunk_list_.PushBack(current_page);
marking_state->ClearLiveness(current_page);
current_page->SetFlags(first_page()->GetFlags(), Page::kAllFlagsMask);
@@ -105,22 +107,23 @@ void SemiSpace::TearDown() {
bool SemiSpace::Commit() {
DCHECK(!IsCommitted());
+ DCHECK_EQ(CommittedMemory(), size_t(0));
const int num_pages = static_cast<int>(target_capacity_ / Page::kPageSize);
DCHECK(num_pages);
for (int pages_added = 0; pages_added < num_pages; pages_added++) {
// Pages in the new spaces can be moved to the old space by the full
// collector. Therefore, they must be initialized with the same FreeList as
// old pages.
- Page* new_page =
- heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
- NOT_EXECUTABLE);
+ Page* new_page = heap()->memory_allocator()->AllocatePage(
+ MemoryAllocator::kUsePool,
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
DCHECK(!IsCommitted());
return false;
}
memory_chunk_list_.PushBack(new_page);
+ IncrementCommittedPhysicalMemory(new_page->CommittedPhysicalMemory());
}
Reset();
AccountCommitted(target_capacity_);
@@ -133,14 +136,22 @@ bool SemiSpace::Commit() {
bool SemiSpace::Uncommit() {
DCHECK(IsCommitted());
+ int actual_pages = 0;
while (!memory_chunk_list_.Empty()) {
+ actual_pages++;
MemoryChunk* chunk = memory_chunk_list_.front();
+ DecrementCommittedPhysicalMemory(chunk->CommittedPhysicalMemory());
memory_chunk_list_.Remove(chunk);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
+ heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
+ chunk);
}
current_page_ = nullptr;
current_capacity_ = 0;
- AccountUncommitted(target_capacity_);
+ size_t removed_page_size =
+ static_cast<size_t>(actual_pages * Page::kPageSize);
+ DCHECK_EQ(CommittedMemory(), removed_page_size);
+ DCHECK_EQ(CommittedPhysicalMemory(), 0);
+ AccountUncommitted(removed_page_size);
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
DCHECK(!IsCommitted());
return true;
@@ -148,11 +159,8 @@ bool SemiSpace::Uncommit() {
size_t SemiSpace::CommittedPhysicalMemory() {
if (!IsCommitted()) return 0;
- size_t size = 0;
- for (Page* p : *this) {
- size += p->CommittedPhysicalMemory();
- }
- return size;
+ if (!base::OS::HasLazyCommits()) return CommittedMemory();
+ return committed_physical_memory_;
}
bool SemiSpace::GrowTo(size_t new_capacity) {
@@ -169,16 +177,16 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
- Page* new_page =
- heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
- NOT_EXECUTABLE);
+ Page* new_page = heap()->memory_allocator()->AllocatePage(
+ MemoryAllocator::kUsePool,
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
return false;
}
memory_chunk_list_.PushBack(new_page);
marking_state->ClearLiveness(new_page);
+ IncrementCommittedPhysicalMemory(new_page->CommittedPhysicalMemory());
// Duplicate the flags that were set on the old page.
new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
}
@@ -193,7 +201,9 @@ void SemiSpace::RewindPages(int num_pages) {
while (num_pages > 0) {
MemoryChunk* last = last_page();
memory_chunk_list_.Remove(last);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
+ DecrementCommittedPhysicalMemory(last->CommittedPhysicalMemory());
+ heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
+ last);
num_pages--;
}
}
@@ -246,6 +256,8 @@ void SemiSpace::RemovePage(Page* page) {
}
}
memory_chunk_list_.Remove(page);
+ AccountUncommitted(Page::kPageSize);
+ DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
@@ -257,6 +269,8 @@ void SemiSpace::PrependPage(Page* page) {
page->set_owner(this);
memory_chunk_list_.PushFront(page);
current_capacity_ += Page::kPageSize;
+ AccountCommitted(Page::kPageSize);
+ IncrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
@@ -286,11 +300,39 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
std::swap(from->current_page_, to->current_page_);
std::swap(from->external_backing_store_bytes_,
to->external_backing_store_bytes_);
+ std::swap(from->committed_physical_memory_, to->committed_physical_memory_);
to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
from->FixPagesFlags(Page::NO_FLAGS, Page::NO_FLAGS);
}
+void SemiSpace::IncrementCommittedPhysicalMemory(size_t increment_value) {
+ if (!base::OS::HasLazyCommits()) return;
+ DCHECK_LE(committed_physical_memory_,
+ committed_physical_memory_ + increment_value);
+ committed_physical_memory_ += increment_value;
+}
+
+void SemiSpace::DecrementCommittedPhysicalMemory(size_t decrement_value) {
+ if (!base::OS::HasLazyCommits()) return;
+ DCHECK_LE(decrement_value, committed_physical_memory_);
+ committed_physical_memory_ -= decrement_value;
+}
+
+void SemiSpace::AddRangeToActiveSystemPages(Address start, Address end) {
+ Page* page = current_page();
+
+ DCHECK_LE(page->address(), start);
+ DCHECK_LT(start, end);
+ DCHECK_LE(end, page->address() + Page::kPageSize);
+
+ const size_t added_pages = page->active_system_pages()->Add(
+ start - page->address(), end - page->address(),
+ MemoryAllocator::GetCommitPageSizeBits());
+ IncrementCommittedPhysicalMemory(added_pages *
+ MemoryAllocator::GetCommitPageSize());
+}
+
void SemiSpace::set_age_mark(Address mark) {
DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
age_mark_ = mark;
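Editorial note: AddRangeToActiveSystemPages() maps the newly handed-out [start, end) range to offsets within the current page and charges any commit pages it newly touches to committed_physical_memory_. A worked example of the offset arithmetic, assuming 4 KiB commit pages (the exact rounding lives inside ActiveSystemPages::Add):

  #include <cassert>
  #include <cstdint>

  // An allocation range from page_address + 0x1f00 to page_address + 0x4100
  // spans commit pages 1..4 (offsets 0x1000..0x4fff), so up to four pages may
  // be newly charged, fewer if some were already active.
  int main() {
    const uintptr_t page_address = 0x100000;
    const uintptr_t start = page_address + 0x1f00;
    const uintptr_t end = page_address + 0x4100;
    const unsigned bits = 12;  // log2 of the assumed 4 KiB commit page size

    const uintptr_t first_page = (start - page_address) >> bits;     // 1
    const uintptr_t last_page = ((end - page_address) - 1) >> bits;  // 4
    assert(first_page == 1);
    assert(last_page == 4);
    assert(last_page - first_page + 1 == 4);
    return 0;
  }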
@@ -318,6 +360,9 @@ void SemiSpace::Verify() {
external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
+ int actual_pages = 0;
+ size_t computed_committed_physical_memory = 0;
+
for (Page* page : *this) {
CHECK_EQ(page->owner(), this);
CHECK(page->InNewSpace());
@@ -341,9 +386,15 @@ void SemiSpace::Verify() {
external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
}
+ computed_committed_physical_memory += page->CommittedPhysicalMemory();
+
CHECK_IMPLIES(page->list_node().prev(),
page->list_node().prev()->list_node().next() == page);
+ actual_pages++;
}
+ CHECK_EQ(actual_pages * size_t(Page::kPageSize), CommittedMemory());
+ CHECK_EQ(computed_committed_physical_memory, CommittedPhysicalMemory());
+
for (int i = 0; i < kNumTypes; i++) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
@@ -478,6 +529,8 @@ void NewSpace::UpdateLinearAllocationArea(Address known_top) {
original_limit_.store(limit(), std::memory_order_relaxed);
original_top_.store(top(), std::memory_order_release);
}
+
+ to_space_.AddRangeToActiveSystemPages(top(), limit());
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
UpdateInlineAllocationLimit(0);
@@ -621,13 +674,13 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, kTaggedAligned)) {
- return AllocationResult::Retry(NEW_SPACE);
+ return AllocationResult::Failure();
}
DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
AllocationResult result = AllocateFastUnaligned(size_in_bytes, origin);
- DCHECK(!result.IsRetry());
+ DCHECK(!result.IsFailure());
InvokeAllocationObservers(result.ToAddress(), size_in_bytes, size_in_bytes,
size_in_bytes);
@@ -640,7 +693,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, alignment)) {
- return AllocationResult::Retry(NEW_SPACE);
+ return AllocationResult::Failure();
}
DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
@@ -649,7 +702,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationResult result = AllocateFastAligned(
size_in_bytes, &aligned_size_in_bytes, alignment, origin);
- DCHECK(!result.IsRetry());
+ DCHECK(!result.IsFailure());
InvokeAllocationObservers(result.ToAddress(), size_in_bytes,
aligned_size_in_bytes, aligned_size_in_bytes);
@@ -704,6 +757,11 @@ void NewSpace::Verify(Isolate* isolate) {
external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
}
+ CHECK(!Page::FromAllocationAreaAddress(current)->IsFlagSet(
+ Page::PAGE_NEW_OLD_PROMOTION));
+ CHECK(!Page::FromAllocationAreaAddress(current)->IsFlagSet(
+ Page::PAGE_NEW_NEW_PROMOTION));
+
PtrComprCageBase cage_base(isolate);
while (current != top()) {
if (!Page::IsAlignedToPageSize(current)) {
@@ -717,7 +775,8 @@ void NewSpace::Verify(Isolate* isolate) {
// be in map space or read-only space.
Map map = object.map(cage_base);
CHECK(map.IsMap(cage_base));
- CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
+ CHECK(ReadOnlyHeap::Contains(map) ||
+ isolate->heap()->space_for_maps()->Contains(map));
// The object should not be code or a map.
CHECK(!object.IsMap(cage_base));
@@ -742,6 +801,8 @@ void NewSpace::Verify(Isolate* isolate) {
} else {
// At end of page, switch to next page.
Page* page = Page::FromAllocationAreaAddress(current)->next_page();
+ CHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
+ CHECK(!page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
current = page->area_start();
}
}
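Editorial sketch: the SemiSpace changes in this file keep a running committed_physical_memory_ counter that is updated whenever pages are added, removed, or swapped, and SemiSpace::Verify() now cross-checks it against the per-page values. A standalone sketch of the invariant being enforced (illustrative names, not the V8 classes):

  #include <cassert>
  #include <cstddef>
  #include <vector>

  struct PageSketch {
    size_t committed_physical = 0;
  };

  struct SemiSpaceSketch {
    static constexpr size_t kPageSize = 256 * 1024;

    std::vector<PageSketch> pages;
    size_t committed = 0;
    size_t committed_physical = 0;

    void AddPage(size_t page_committed_physical) {
      pages.push_back({page_committed_physical});
      committed += kPageSize;                          // AccountCommitted
      committed_physical += page_committed_physical;   // IncrementCommittedPhysicalMemory
    }

    // Mirrors the new CHECK_EQs in SemiSpace::Verify(): the cached counters
    // must match what a full page walk would compute.
    void Verify() const {
      size_t computed = 0;
      for (const PageSketch& page : pages) computed += page.committed_physical;
      assert(computed == committed_physical);
      assert(pages.size() * kPageSize == committed);
    }
  };

  int main() {
    SemiSpaceSketch to_space;
    to_space.AddPage(8 * 4096);  // page with 8 committed system pages
    to_space.AddPage(2 * 4096);
    to_space.Verify();
    return 0;
  }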
diff --git a/deps/v8/src/heap/new-spaces.h b/deps/v8/src/heap/new-spaces.h
index b1bec1b032..b31dfa28e4 100644
--- a/deps/v8/src/heap/new-spaces.h
+++ b/deps/v8/src/heap/new-spaces.h
@@ -6,7 +6,6 @@
#define V8_HEAP_NEW_SPACES_H_
#include <atomic>
-#include <map>
#include <memory>
#include "src/base/macros.h"
@@ -107,7 +106,7 @@ class SemiSpace : public Space {
void PrependPage(Page* page);
void MovePageToTheEnd(Page* page);
- Page* InitializePage(MemoryChunk* chunk);
+ Page* InitializePage(MemoryChunk* chunk) override;
// Age mark accessors.
Address age_mark() { return age_mark_; }
@@ -139,11 +138,18 @@ class SemiSpace : public Space {
size_t Available() override { UNREACHABLE(); }
- Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
- Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
+ Page* first_page() override {
+ return reinterpret_cast<Page*>(memory_chunk_list_.front());
+ }
+ Page* last_page() override {
+ return reinterpret_cast<Page*>(memory_chunk_list_.back());
+ }
- const Page* first_page() const {
- return reinterpret_cast<const Page*>(Space::first_page());
+ const Page* first_page() const override {
+ return reinterpret_cast<const Page*>(memory_chunk_list_.front());
+ }
+ const Page* last_page() const override {
+ return reinterpret_cast<const Page*>(memory_chunk_list_.back());
}
iterator begin() { return iterator(first_page()); }
@@ -169,12 +175,17 @@ class SemiSpace : public Space {
virtual void Verify();
#endif
+ void AddRangeToActiveSystemPages(Address start, Address end);
+
private:
void RewindPages(int num_pages);
// Copies the flags into the masked positions on all pages in the space.
void FixPagesFlags(Page::MainThreadFlags flags, Page::MainThreadFlags mask);
+ void IncrementCommittedPhysicalMemory(size_t increment_value);
+ void DecrementCommittedPhysicalMemory(size_t decrement_value);
+
// The currently committed space capacity.
size_t current_capacity_;
@@ -191,6 +202,8 @@ class SemiSpace : public Space {
// Used to govern object promotion during mark-compact collection.
Address age_mark_;
+ size_t committed_physical_memory_{0};
+
SemiSpaceId id_;
Page* current_page_;
@@ -447,8 +460,11 @@ class V8_EXPORT_PRIVATE NewSpace
SemiSpace* active_space() { return &to_space_; }
- Page* first_page() { return to_space_.first_page(); }
- Page* last_page() { return to_space_.last_page(); }
+ Page* first_page() override { return to_space_.first_page(); }
+ Page* last_page() override { return to_space_.last_page(); }
+
+ const Page* first_page() const override { return to_space_.first_page(); }
+ const Page* last_page() const override { return to_space_.last_page(); }
iterator begin() { return to_space_.begin(); }
iterator end() { return to_space_.end(); }
@@ -478,7 +494,7 @@ class V8_EXPORT_PRIVATE NewSpace
void MakeLinearAllocationAreaIterable();
// Creates a filler object in the linear allocation area and closes it.
- void FreeLinearAllocationArea();
+ void FreeLinearAllocationArea() override;
private:
static const int kAllocationBufferParkingThreshold = 4 * KB;
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 379356a797..a495d259ad 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -443,6 +443,11 @@ class ObjectStatsCollectorImpl {
void RecordVirtualArrayBoilerplateDescription(
ArrayBoilerplateDescription description);
+
+ PtrComprCageBase cage_base() const {
+ return field_stats_collector_.cage_base();
+ }
+
Heap* heap_;
ObjectStats* stats_;
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
@@ -488,7 +493,7 @@ void ObjectStatsCollectorImpl::RecordHashTableVirtualObjectStats(
bool ObjectStatsCollectorImpl::RecordSimpleVirtualObjectStats(
HeapObject parent, HeapObject obj, ObjectStats::VirtualInstanceType type) {
- return RecordVirtualObjectStats(parent, obj, type, obj.Size(),
+ return RecordVirtualObjectStats(parent, obj, type, obj.Size(cage_base()),
ObjectStats::kNoOverAllocation, kCheckCow);
}
@@ -657,13 +662,13 @@ static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
}
return ObjectStats::FEEDBACK_VECTOR_SLOT_LOAD_TYPE;
- case FeedbackSlotKind::kStoreNamedSloppy:
- case FeedbackSlotKind::kStoreNamedStrict:
- case FeedbackSlotKind::kStoreOwnNamed:
+ case FeedbackSlotKind::kSetNamedSloppy:
+ case FeedbackSlotKind::kSetNamedStrict:
+ case FeedbackSlotKind::kDefineNamedOwn:
case FeedbackSlotKind::kStoreGlobalSloppy:
case FeedbackSlotKind::kStoreGlobalStrict:
- case FeedbackSlotKind::kStoreKeyedSloppy:
- case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kSetKeyedSloppy:
+ case FeedbackSlotKind::kSetKeyedStrict:
if (obj == *isolate->factory()->uninitialized_symbol()) {
return ObjectStats::FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE;
}
@@ -711,7 +716,8 @@ void ObjectStatsCollectorImpl::RecordVirtualFeedbackVectorDetails(
MaybeObject raw_object = vector.Get(slot.WithOffset(i));
HeapObject object;
if (raw_object->GetHeapObject(&object)) {
- if (object.IsCell() || object.IsWeakFixedArray()) {
+ if (object.IsCell(cage_base()) ||
+ object.IsWeakFixedArray(cage_base())) {
RecordSimpleVirtualObjectStats(
vector, object, ObjectStats::FEEDBACK_VECTOR_ENTRY_TYPE);
}
@@ -733,51 +739,55 @@ void ObjectStatsCollectorImpl::RecordVirtualFixedArrayDetails(
void ObjectStatsCollectorImpl::CollectStatistics(
HeapObject obj, Phase phase, CollectFieldStats collect_field_stats) {
- Map map = obj.map();
+ DisallowGarbageCollection no_gc;
+ Map map = obj.map(cage_base());
+ InstanceType instance_type = map.instance_type();
switch (phase) {
case kPhase1:
- if (obj.IsFeedbackVector()) {
+ if (InstanceTypeChecker::IsFeedbackVector(instance_type)) {
RecordVirtualFeedbackVectorDetails(FeedbackVector::cast(obj));
- } else if (obj.IsMap()) {
+ } else if (InstanceTypeChecker::IsMap(instance_type)) {
RecordVirtualMapDetails(Map::cast(obj));
- } else if (obj.IsBytecodeArray()) {
+ } else if (InstanceTypeChecker::IsBytecodeArray(instance_type)) {
RecordVirtualBytecodeArrayDetails(BytecodeArray::cast(obj));
- } else if (obj.IsCode()) {
+ } else if (InstanceTypeChecker::IsCode(instance_type)) {
RecordVirtualCodeDetails(Code::cast(obj));
- } else if (obj.IsFunctionTemplateInfo()) {
+ } else if (InstanceTypeChecker::IsFunctionTemplateInfo(instance_type)) {
RecordVirtualFunctionTemplateInfoDetails(
FunctionTemplateInfo::cast(obj));
- } else if (obj.IsJSGlobalObject()) {
+ } else if (InstanceTypeChecker::IsJSGlobalObject(instance_type)) {
RecordVirtualJSGlobalObjectDetails(JSGlobalObject::cast(obj));
- } else if (obj.IsJSObject()) {
+ } else if (InstanceTypeChecker::IsJSObject(instance_type)) {
// This phase needs to come after RecordVirtualAllocationSiteDetails
// to properly split among boilerplates.
RecordVirtualJSObjectDetails(JSObject::cast(obj));
- } else if (obj.IsSharedFunctionInfo()) {
+ } else if (InstanceTypeChecker::IsSharedFunctionInfo(instance_type)) {
RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo::cast(obj));
- } else if (obj.IsContext()) {
+ } else if (InstanceTypeChecker::IsContext(instance_type)) {
RecordVirtualContext(Context::cast(obj));
- } else if (obj.IsScript()) {
+ } else if (InstanceTypeChecker::IsScript(instance_type)) {
RecordVirtualScriptDetails(Script::cast(obj));
- } else if (obj.IsArrayBoilerplateDescription()) {
+ } else if (InstanceTypeChecker::IsArrayBoilerplateDescription(
+ instance_type)) {
RecordVirtualArrayBoilerplateDescription(
ArrayBoilerplateDescription::cast(obj));
- } else if (obj.IsFixedArrayExact()) {
+ } else if (InstanceTypeChecker::IsFixedArrayExact(instance_type)) {
// Has to go last as it triggers too eagerly.
RecordVirtualFixedArrayDetails(FixedArray::cast(obj));
}
break;
case kPhase2:
- if (obj.IsExternalString()) {
+ if (InstanceTypeChecker::IsExternalString(instance_type)) {
// This has to be in Phase2 to avoid conflicting with recording Script
// sources. We still want to run RecordObjectStats after though.
RecordVirtualExternalStringDetails(ExternalString::cast(obj));
}
size_t over_allocated = ObjectStats::kNoOverAllocation;
- if (obj.IsJSObject()) {
+ if (InstanceTypeChecker::IsJSObject(instance_type)) {
over_allocated = map.instance_size() - map.UsedInstanceSize();
}
- RecordObjectStats(obj, map.instance_type(), obj.Size(), over_allocated);
+ RecordObjectStats(obj, instance_type, obj.Size(cage_base()),
+ over_allocated);
if (collect_field_stats == CollectFieldStats::kYes) {
field_stats_collector_.RecordStats(obj);
}
@@ -788,7 +798,7 @@ void ObjectStatsCollectorImpl::CollectStatistics(
void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
// Iterate boilerplates first to disambiguate them from regular JS objects.
Object list = heap_->allocation_sites_list();
- while (list.IsAllocationSite()) {
+ while (list.IsAllocationSite(cage_base())) {
AllocationSite site = AllocationSite::cast(list);
RecordVirtualAllocationSiteDetails(site);
list = site.weak_next();
@@ -829,7 +839,7 @@ bool ObjectStatsCollectorImpl::CanRecordFixedArray(FixedArrayBase array) {
}
bool ObjectStatsCollectorImpl::IsCowArray(FixedArrayBase array) {
- return array.map() == ReadOnlyRoots(heap_).fixed_cow_array_map();
+ return array.map(cage_base()) == ReadOnlyRoots(heap_).fixed_cow_array_map();
}
bool ObjectStatsCollectorImpl::SameLiveness(HeapObject obj1, HeapObject obj2) {
@@ -868,7 +878,7 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map map) {
// This will be logged as MAP_TYPE in Phase2.
}
- DescriptorArray array = map.instance_descriptors(isolate());
+ DescriptorArray array = map.instance_descriptors(cage_base());
if (map.owns_descriptors() &&
array != ReadOnlyRoots(heap_).empty_descriptor_array()) {
// Generally DescriptorArrays have their own instance type already
@@ -891,10 +901,10 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map map) {
}
if (map.is_prototype_map()) {
- if (map.prototype_info().IsPrototypeInfo()) {
+ if (map.prototype_info().IsPrototypeInfo(cage_base())) {
PrototypeInfo info = PrototypeInfo::cast(map.prototype_info());
Object users = info.prototype_users();
- if (users.IsWeakFixedArray()) {
+ if (users.IsWeakFixedArray(cage_base())) {
RecordSimpleVirtualObjectStats(map, WeakArrayList::cast(users),
ObjectStats::PROTOTYPE_USERS_TYPE);
}
@@ -909,7 +919,7 @@ void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script script) {
// Log the size of external source code.
Object raw_source = script.source();
- if (raw_source.IsExternalString()) {
+ if (raw_source.IsExternalString(cage_base())) {
// The contents of external strings aren't on the heap, so we have to record
// them manually. The on-heap String object is recorded independently in
// the normal pass.
@@ -922,7 +932,7 @@ void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script script) {
? ObjectStats::SCRIPT_SOURCE_EXTERNAL_ONE_BYTE_TYPE
: ObjectStats::SCRIPT_SOURCE_EXTERNAL_TWO_BYTE_TYPE,
off_heap_size);
- } else if (raw_source.IsString()) {
+ } else if (raw_source.IsString(cage_base())) {
String source = String::cast(raw_source);
RecordSimpleVirtualObjectStats(
script, source,
@@ -940,7 +950,7 @@ void ObjectStatsCollectorImpl::RecordVirtualExternalStringDetails(
size_t off_heap_size = string.ExternalPayloadSize();
RecordExternalResourceStats(
resource,
- string.IsOneByteRepresentation()
+ string.IsOneByteRepresentation(cage_base())
? ObjectStats::STRING_EXTERNAL_RESOURCE_ONE_BYTE_TYPE
: ObjectStats::STRING_EXTERNAL_RESOURCE_TWO_BYTE_TYPE,
off_heap_size);
@@ -967,7 +977,7 @@ void ObjectStatsCollectorImpl::
HeapObject parent, HeapObject object,
ObjectStats::VirtualInstanceType type) {
if (!RecordSimpleVirtualObjectStats(parent, object, type)) return;
- if (object.IsFixedArrayExact()) {
+ if (object.IsFixedArrayExact(cage_base())) {
FixedArray array = FixedArray::cast(object);
for (int i = 0; i < array.length(); i++) {
Object entry = array.get(i);
@@ -988,7 +998,7 @@ void ObjectStatsCollectorImpl::RecordVirtualBytecodeArrayDetails(
FixedArray constant_pool = FixedArray::cast(bytecode.constant_pool());
for (int i = 0; i < constant_pool.length(); i++) {
Object entry = constant_pool.get(i);
- if (entry.IsFixedArrayExact()) {
+ if (entry.IsFixedArrayExact(cage_base())) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
constant_pool, HeapObject::cast(entry),
ObjectStats::EMBEDDED_OBJECT_TYPE);
@@ -1041,11 +1051,10 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code code) {
}
}
int const mode_mask = RelocInfo::EmbeddedObjectModeMask();
- PtrComprCageBase cage_base(heap_->isolate());
for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
- Object target = it.rinfo()->target_object(cage_base);
- if (target.IsFixedArrayExact()) {
+ Object target = it.rinfo()->target_object(cage_base());
+ if (target.IsFixedArrayExact(cage_base())) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
code, HeapObject::cast(target), ObjectStats::EMBEDDED_OBJECT_TYPE);
}
@@ -1055,7 +1064,7 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code code) {
void ObjectStatsCollectorImpl::RecordVirtualContext(Context context) {
if (context.IsNativeContext()) {
RecordObjectStats(context, NATIVE_CONTEXT_TYPE, context.Size());
- if (context.retained_maps().IsWeakArrayList()) {
+ if (context.retained_maps().IsWeakArrayList(cage_base())) {
RecordSimpleVirtualObjectStats(
context, WeakArrayList::cast(context.retained_maps()),
ObjectStats::RETAINED_MAPS_TYPE);
@@ -1101,6 +1110,9 @@ class ObjectStatsVisitor {
namespace {
void IterateHeap(Heap* heap, ObjectStatsVisitor* visitor) {
+ // We don't perform a GC while collecting object stats but need this scope for
+ // the nested SafepointScope inside CombinedHeapObjectIterator.
+ AllowGarbageCollection allow_gc;
CombinedHeapObjectIterator iterator(heap);
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
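Editorial note: most of the object-stats changes are mechanical: the cage base is cached once and passed to every Is*() and map() call, and CollectStatistics() reads the map and instance type a single time, dispatching through InstanceTypeChecker instead of repeated obj.IsFoo() checks. A small standalone sketch of that load-once, compare-many pattern (illustrative types only):

  #include <cstdint>
  #include <cstdio>

  enum class InstanceTypeSketch : uint16_t { kFeedbackVector, kMap, kCode, kOther };

  struct HeapObjectSketch {
    // Stands in for obj.map(cage_base()).instance_type().
    InstanceTypeSketch instance_type;
  };

  void CollectStatisticsSketch(const HeapObjectSketch& obj) {
    // One load of the type...
    const InstanceTypeSketch instance_type = obj.instance_type;
    // ...many cheap comparisons against the cached value.
    if (instance_type == InstanceTypeSketch::kFeedbackVector) {
      std::puts("record feedback vector details");
    } else if (instance_type == InstanceTypeSketch::kMap) {
      std::puts("record map details");
    } else if (instance_type == InstanceTypeSketch::kCode) {
      std::puts("record code details");
    } else {
      std::puts("record generic stats");
    }
  }

  int main() {
    CollectStatisticsSketch({InstanceTypeSketch::kMap});
    return 0;
  }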
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 715b83b9ac..b3770fc6c8 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -133,6 +133,19 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
if (visitor->ShouldVisitMapPointer()) {
visitor->VisitMapPointer(object);
}
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+ // The following types have external pointers, which must be visited.
+ // TODO(v8:10391) Consider adding custom visitor IDs for these.
+ if (object.IsExternalOneByteString()) {
+ ExternalOneByteString::BodyDescriptor::IterateBody(map, object, size,
+ visitor);
+ } else if (object.IsExternalTwoByteString()) {
+ ExternalTwoByteString::BodyDescriptor::IterateBody(map, object, size,
+ visitor);
+ } else if (object.IsForeign()) {
+ Foreign::BodyDescriptor::IterateBody(map, object, size, visitor);
+ }
+#endif // V8_SANDBOXED_EXTERNAL_POINTERS
return static_cast<ResultType>(size);
}
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 858e279ec4..32df0a46d9 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -30,6 +30,7 @@ namespace internal {
V(FixedDoubleArray) \
V(JSArrayBuffer) \
V(JSDataView) \
+ V(JSExternalObject) \
V(JSFinalizationRegistry) \
V(JSFunction) \
V(JSObject) \
@@ -62,6 +63,7 @@ namespace internal {
IF_WASM(V, WasmJSFunctionData) \
IF_WASM(V, WasmStruct) \
IF_WASM(V, WasmSuspenderObject) \
+ IF_WASM(V, WasmOnFulfilledData) \
IF_WASM(V, WasmTypeInfo)
#define FORWARD_DECLARE(TypeName) class TypeName;
diff --git a/deps/v8/src/heap/paged-spaces-inl.h b/deps/v8/src/heap/paged-spaces-inl.h
index 22b07c7442..fbade0ea3d 100644
--- a/deps/v8/src/heap/paged-spaces-inl.h
+++ b/deps/v8/src/heap/paged-spaces-inl.h
@@ -95,9 +95,9 @@ bool PagedSpace::EnsureLabMain(int size_in_bytes, AllocationOrigin origin) {
AllocationResult PagedSpace::AllocateFastUnaligned(int size_in_bytes) {
if (!allocation_info_->CanIncrementTop(size_in_bytes)) {
- return AllocationResult::Retry(identity());
+ return AllocationResult::Failure();
}
- return AllocationResult(
+ return AllocationResult::FromObject(
HeapObject::FromAddress(allocation_info_->IncrementTop(size_in_bytes)));
}
@@ -108,7 +108,7 @@ AllocationResult PagedSpace::AllocateFastAligned(
int filler_size = Heap::GetFillToAlign(current_top, alignment);
int aligned_size = filler_size + size_in_bytes;
if (!allocation_info_->CanIncrementTop(aligned_size)) {
- return AllocationResult::Retry(identity());
+ return AllocationResult::Failure();
}
HeapObject obj =
HeapObject::FromAddress(allocation_info_->IncrementTop(aligned_size));
@@ -116,18 +116,18 @@ AllocationResult PagedSpace::AllocateFastAligned(
if (filler_size > 0) {
obj = heap()->PrecedeWithFiller(obj, filler_size);
}
- return AllocationResult(obj);
+ return AllocationResult::FromObject(obj);
}
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureLabMain(size_in_bytes, origin)) {
- return AllocationResult::Retry(identity());
+ return AllocationResult::Failure();
}
AllocationResult result = AllocateFastUnaligned(size_in_bytes);
- DCHECK(!result.IsRetry());
+ DCHECK(!result.IsFailure());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
size_in_bytes);
@@ -152,12 +152,12 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
if (!EnsureLabMain(allocation_size, origin)) {
- return AllocationResult::Retry(identity());
+ return AllocationResult::Failure();
}
int aligned_size_in_bytes;
AllocationResult result =
AllocateFastAligned(size_in_bytes, &aligned_size_in_bytes, alignment);
- DCHECK(!result.IsRetry());
+ DCHECK(!result.IsFailure());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(result.ToObjectChecked().address(),
size_in_bytes);
@@ -183,11 +183,8 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
result = AllocateFastUnaligned(size_in_bytes);
}
- if (!result.IsRetry()) {
- return result;
- } else {
- return AllocateRawSlow(size_in_bytes, alignment, origin);
- }
+ return result.IsFailure() ? AllocateRawSlow(size_in_bytes, alignment, origin)
+ : result;
}
} // namespace internal
diff --git a/deps/v8/src/heap/paged-spaces.cc b/deps/v8/src/heap/paged-spaces.cc
index 0db2d5f989..c5604254be 100644
--- a/deps/v8/src/heap/paged-spaces.cc
+++ b/deps/v8/src/heap/paged-spaces.cc
@@ -4,6 +4,8 @@
#include "src/heap/paged-spaces.h"
+#include <atomic>
+
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/execution/isolate.h"
@@ -13,8 +15,10 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
+#include "src/heap/memory-chunk-layout.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
+#include "src/heap/safepoint.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/string.h"
#include "src/utils/utils.h"
@@ -103,7 +107,7 @@ void PagedSpace::TearDown() {
while (!memory_chunk_list_.Empty()) {
MemoryChunk* chunk = memory_chunk_list_.front();
memory_chunk_list_.Remove(chunk);
- heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
+ heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, chunk);
}
accounting_stats_.Clear();
}
@@ -211,15 +215,42 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
}
size_t PagedSpace::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits()) return CommittedMemory();
+ if (!base::OS::HasLazyCommits()) {
+ DCHECK_EQ(0, committed_physical_memory());
+ return CommittedMemory();
+ }
BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
- base::MutexGuard guard(mutex());
+ return committed_physical_memory();
+}
+
+void PagedSpace::IncrementCommittedPhysicalMemory(size_t increment_value) {
+ if (!base::OS::HasLazyCommits() || increment_value == 0) return;
+ size_t old_value = committed_physical_memory_.fetch_add(
+ increment_value, std::memory_order_relaxed);
+ USE(old_value);
+ DCHECK_LT(old_value, old_value + increment_value);
+}
+
+void PagedSpace::DecrementCommittedPhysicalMemory(size_t decrement_value) {
+ if (!base::OS::HasLazyCommits() || decrement_value == 0) return;
+ size_t old_value = committed_physical_memory_.fetch_sub(
+ decrement_value, std::memory_order_relaxed);
+ USE(old_value);
+ DCHECK_GT(old_value, old_value - decrement_value);
+}
+
+#if DEBUG
+void PagedSpace::VerifyCommittedPhysicalMemory() {
+ heap()->safepoint()->AssertActive();
size_t size = 0;
for (Page* page : *this) {
+ DCHECK(page->SweepingDone());
size += page->CommittedPhysicalMemory();
}
- return size;
+ // Ensure that the space's counter matches the sum of all page counters.
+ DCHECK_EQ(size, CommittedPhysicalMemory());
}
+#endif // DEBUG
bool PagedSpace::ContainsSlow(Address addr) const {
Page* p = Page::FromAddress(addr);
@@ -264,6 +295,7 @@ size_t PagedSpace::AddPage(Page* page) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
+ IncrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
return RelinkFreeListCategories(page);
}
@@ -278,6 +310,7 @@ void PagedSpace::RemovePage(Page* page) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
+ DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
}
void PagedSpace::SetTopAndLimit(Address top, Address limit) {
@@ -319,8 +352,8 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
}
Page* PagedSpace::AllocatePage() {
- return heap()->memory_allocator()->AllocatePage(AreaSize(), this,
- executable());
+ return heap()->memory_allocator()->AllocatePage(
+ MemoryAllocator::kRegular, AreaSize(), this, executable());
}
Page* PagedSpace::Expand() {
@@ -334,7 +367,7 @@ Page* PagedSpace::Expand() {
}
base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
- LocalHeap* local_heap, size_t size_in_bytes) {
+ size_t size_in_bytes) {
Page* page = AllocatePage();
if (page == nullptr) return {};
base::MutexGuard lock(&space_mutex_);
@@ -346,6 +379,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
CHECK_LE(size_in_bytes, page->area_size());
Free(page->area_start() + size_in_bytes, page->area_size() - size_in_bytes,
SpaceAccountingMode::kSpaceAccounted);
+ AddRangeToActiveSystemPages(page, object_start, object_start + size_in_bytes);
return std::make_pair(object_start, size_in_bytes);
}
@@ -492,8 +526,9 @@ void PagedSpace::ReleasePage(Page* page) {
}
AccountUncommitted(page->size());
+ DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
accounting_stats_.DecreaseCapacity(page->area_size());
- heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+ heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
}
void PagedSpace::SetReadable() {
@@ -573,6 +608,7 @@ bool PagedSpace::TryAllocationFromFreeListMain(size_t size_in_bytes,
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
SetLinearAllocationArea(start, limit);
+ AddRangeToActiveSystemPages(page, start, limit);
return true;
}
@@ -585,10 +621,11 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
identity() == MAP_SPACE);
DCHECK(origin == AllocationOrigin::kRuntime ||
origin == AllocationOrigin::kGC);
+ DCHECK_IMPLIES(!local_heap, origin == AllocationOrigin::kGC);
base::Optional<std::pair<Address, size_t>> result =
- TryAllocationFromFreeListBackground(local_heap, min_size_in_bytes,
- max_size_in_bytes, alignment, origin);
+ TryAllocationFromFreeListBackground(min_size_in_bytes, max_size_in_bytes,
+ alignment, origin);
if (result) return result;
MarkCompactCollector* collector = heap()->mark_compact_collector();
@@ -600,7 +637,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
// Retry the free list allocation.
result = TryAllocationFromFreeListBackground(
- local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
if (IsSweepingAllowedOnThread(local_heap)) {
@@ -619,8 +656,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
result = TryAllocationFromFreeListBackground(
- local_heap, min_size_in_bytes, max_size_in_bytes, alignment,
- origin);
+ min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
}
}
@@ -628,7 +664,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
heap()->CanExpandOldGenerationBackground(local_heap, AreaSize())) {
- result = ExpandBackground(local_heap, max_size_in_bytes);
+ result = ExpandBackground(max_size_in_bytes);
if (result) {
DCHECK_EQ(Heap::GetFillToAlign(result->first, alignment), 0);
return result;
@@ -645,15 +681,14 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
// Last try to acquire memory from free list.
return TryAllocationFromFreeListBackground(
- local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ min_size_in_bytes, max_size_in_bytes, alignment, origin);
}
return {};
}
base::Optional<std::pair<Address, size_t>>
-PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
- size_t min_size_in_bytes,
+PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
@@ -694,13 +729,15 @@ PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
}
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
+ AddRangeToActiveSystemPages(page, start, limit);
return std::make_pair(start, used_size_in_bytes);
}
bool PagedSpace::IsSweepingAllowedOnThread(LocalHeap* local_heap) {
// Code space sweeping is only allowed on main thread.
- return local_heap->is_main_thread() || identity() != CODE_SPACE;
+ return (local_heap && local_heap->is_main_thread()) ||
+ identity() != CODE_SPACE;
}
#ifdef DEBUG
@@ -742,7 +779,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
Map map = object.map(cage_base);
CHECK(map.IsMap(cage_base));
CHECK(ReadOnlyHeap::Contains(map) ||
- isolate->heap()->map_space()->Contains(map));
+ isolate->heap()->space_for_maps()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
@@ -772,6 +809,9 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
external_space_bytes[t] += external_page_bytes[t];
}
+
+ CHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
+ CHECK(!page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
}
for (int i = 0; i < kNumTypes; i++) {
if (i == ExternalBackingStoreType::kArrayBuffer) continue;
@@ -1003,6 +1043,28 @@ AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
return result;
}
+void PagedSpace::AddRangeToActiveSystemPages(Page* page, Address start,
+ Address end) {
+ DCHECK_LE(page->address(), start);
+ DCHECK_LT(start, end);
+ DCHECK_LE(end, page->address() + Page::kPageSize);
+
+ const size_t added_pages = page->active_system_pages()->Add(
+ start - page->address(), end - page->address(),
+ MemoryAllocator::GetCommitPageSizeBits());
+
+ IncrementCommittedPhysicalMemory(added_pages *
+ MemoryAllocator::GetCommitPageSize());
+}
+
+void PagedSpace::ReduceActiveSystemPages(
+ Page* page, ActiveSystemPages active_system_pages) {
+ const size_t reduced_pages =
+ page->active_system_pages()->Reduce(active_system_pages);
+ DecrementCommittedPhysicalMemory(reduced_pages *
+ MemoryAllocator::GetCommitPageSize());
+}
+
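A minimal standalone sketch of the accounting idea behind AddRangeToActiveSystemPages: mark the commit-sized system pages overlapped by [start, end) and report how many became newly active, so the caller can bump the committed-physical-memory counter. The bitset tracker below is this sketch's own toy, not V8's ActiveSystemPages.

#include <bitset>
#include <cstddef>
#include <cstdint>

// Toy page tracker (illustrative only; V8's ActiveSystemPages differs).
struct ToyActiveSystemPages {
  static constexpr size_t kPageSizeBits = 12;  // assume 4 KiB commit pages
  std::bitset<64> pages_;                      // enough for a 256 KiB chunk

  // Marks every page overlapped by [start_offset, end_offset) and returns how
  // many of them were not active before. Offsets are relative to the chunk.
  size_t Add(uintptr_t start_offset, uintptr_t end_offset) {
    size_t added = 0;
    for (uintptr_t p = start_offset >> kPageSizeBits;
         p <= (end_offset - 1) >> kPageSizeBits; ++p) {
      if (!pages_.test(p)) {
        pages_.set(p);
        ++added;
      }
    }
    return added;
  }
};

// Example: Add(0x100, 0x2100) touches pages 0..2 of the chunk, so a fresh
// tracker returns 3 and the space would account 3 * 4 KiB of newly committed
// physical memory, mirroring IncrementCommittedPhysicalMemory above.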
// -----------------------------------------------------------------------------
// MapSpace implementation
diff --git a/deps/v8/src/heap/paged-spaces.h b/deps/v8/src/heap/paged-spaces.h
index bdc4dee23f..2df7083a84 100644
--- a/deps/v8/src/heap/paged-spaces.h
+++ b/deps/v8/src/heap/paged-spaces.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_PAGED_SPACES_H_
#define V8_HEAP_PAGED_SPACES_H_
+#include <atomic>
#include <memory>
#include <utility>
@@ -15,6 +16,7 @@
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/allocation-stats.h"
+#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
@@ -108,6 +110,13 @@ class V8_EXPORT_PRIVATE PagedSpace
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
+#if DEBUG
+ void VerifyCommittedPhysicalMemory();
+#endif // DEBUG
+
+ void IncrementCommittedPhysicalMemory(size_t increment_value);
+ void DecrementCommittedPhysicalMemory(size_t decrement_value);
+
// Sets the capacity, the available space and the wasted space to zero.
// The stats are rebuilt during sweeping by adding each page to the
// capacity and the size when it is encountered. As free spaces are
@@ -194,7 +203,7 @@ class V8_EXPORT_PRIVATE PagedSpace
void ResetFreeList();
// Empty space linear allocation area, returning unused area to free list.
- void FreeLinearAllocationArea();
+ void FreeLinearAllocationArea() override;
void MakeLinearAllocationAreaIterable();
@@ -216,7 +225,7 @@ class V8_EXPORT_PRIVATE PagedSpace
void RefineAllocatedBytesAfterSweeping(Page* page);
- Page* InitializePage(MemoryChunk* chunk);
+ Page* InitializePage(MemoryChunk* chunk) override;
void ReleasePage(Page* page);
@@ -289,9 +298,11 @@ class V8_EXPORT_PRIVATE PagedSpace
inline void UnlinkFreeListCategories(Page* page);
inline size_t RelinkFreeListCategories(Page* page);
- Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
- const Page* first_page() const {
- return reinterpret_cast<const Page*>(Space::first_page());
+ Page* first_page() override {
+ return reinterpret_cast<Page*>(memory_chunk_list_.front());
+ }
+ const Page* first_page() const override {
+ return reinterpret_cast<const Page*>(memory_chunk_list_.front());
}
iterator begin() { return iterator(first_page()); }
@@ -325,6 +336,10 @@ class V8_EXPORT_PRIVATE PagedSpace
return &pending_allocation_mutex_;
}
+ void AddRangeToActiveSystemPages(Page* page, Address start, Address end);
+ void ReduceActiveSystemPages(Page* page,
+ ActiveSystemPages active_system_pages);
+
private:
class ConcurrentAllocationMutex {
public:
@@ -374,7 +389,7 @@ class V8_EXPORT_PRIVATE PagedSpace
// a memory area of the given size in it. If successful the method returns
// the address and size of the area.
base::Optional<std::pair<Address, size_t>> ExpandBackground(
- LocalHeap* local_heap, size_t size_in_bytes);
+ size_t size_in_bytes);
Page* AllocatePage();
@@ -413,8 +428,7 @@ class V8_EXPORT_PRIVATE PagedSpace
AllocationOrigin origin);
V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
- TryAllocationFromFreeListBackground(LocalHeap* local_heap,
- size_t min_size_in_bytes,
+ TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin);
@@ -422,6 +436,10 @@ class V8_EXPORT_PRIVATE PagedSpace
V8_WARN_UNUSED_RESULT bool TryExpand(int size_in_bytes,
AllocationOrigin origin);
+ size_t committed_physical_memory() const {
+ return committed_physical_memory_.load(std::memory_order_relaxed);
+ }
+
Executability executable_;
CompactionSpaceKind compaction_space_kind_;
@@ -442,6 +460,8 @@ class V8_EXPORT_PRIVATE PagedSpace
// Protects original_top_ and original_limit_.
base::SharedMutex pending_allocation_mutex_;
+ std::atomic<size_t> committed_physical_memory_{0};
+
friend class IncrementalMarking;
friend class MarkCompactCollector;
@@ -485,6 +505,8 @@ class CompactionSpaceCollection : public Malloced {
CompactionSpaceKind compaction_space_kind)
: old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
compaction_space_kind),
+ map_space_(heap, MAP_SPACE, Executability::NOT_EXECUTABLE,
+ compaction_space_kind),
code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
compaction_space_kind) {}
@@ -492,6 +514,8 @@ class CompactionSpaceCollection : public Malloced {
switch (space) {
case OLD_SPACE:
return &old_space_;
+ case MAP_SPACE:
+ return &map_space_;
case CODE_SPACE:
return &code_space_;
default:
@@ -502,6 +526,7 @@ class CompactionSpaceCollection : public Malloced {
private:
CompactionSpace old_space_;
+ CompactionSpace map_space_;
CompactionSpace code_space_;
};
diff --git a/deps/v8/src/heap/read-only-spaces.cc b/deps/v8/src/heap/read-only-spaces.cc
index 3fa267d26c..9265ca5963 100644
--- a/deps/v8/src/heap/read-only-spaces.cc
+++ b/deps/v8/src/heap/read-only-spaces.cc
@@ -397,7 +397,7 @@ void ReadOnlySpace::Seal(SealMode ro_mode) {
DetachFromHeap();
for (ReadOnlyPage* p : pages_) {
if (ro_mode == SealMode::kDetachFromHeapAndUnregisterMemory) {
- memory_allocator->UnregisterMemory(p);
+ memory_allocator->UnregisterReadOnlyPage(p);
}
if (ReadOnlyHeap::IsReadOnlySpaceShared()) {
p->MakeHeaderRelocatable();
@@ -533,6 +533,9 @@ void ReadOnlySpace::Verify(Isolate* isolate) {
CHECK(!object.IsExternalString());
CHECK(!object.IsJSArrayBuffer());
}
+
+ CHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
+ CHECK(!page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
}
CHECK(allocation_pointer_found_in_space);
@@ -667,7 +670,7 @@ AllocationResult ReadOnlySpace::AllocateRawAligned(
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
- return object;
+ return AllocationResult::FromObject(object);
}
AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
@@ -687,7 +690,7 @@ AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
accounting_stats_.IncreaseAllocatedBytes(size_in_bytes, chunk);
chunk->IncreaseAllocatedBytes(size_in_bytes);
- return object;
+ return AllocationResult::FromObject(object);
}
AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
@@ -697,7 +700,7 @@ AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
? AllocateRawAligned(size_in_bytes, alignment)
: AllocateRawUnaligned(size_in_bytes);
HeapObject heap_obj;
- if (!result.IsRetry() && result.To(&heap_obj)) {
+ if (result.To(&heap_obj)) {
DCHECK(heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
}
return result;
diff --git a/deps/v8/src/heap/reference-summarizer.cc b/deps/v8/src/heap/reference-summarizer.cc
new file mode 100644
index 0000000000..fd2668e140
--- /dev/null
+++ b/deps/v8/src/heap/reference-summarizer.cc
@@ -0,0 +1,116 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/reference-summarizer.h"
+
+#include "src/heap/mark-compact-inl.h"
+#include "src/heap/marking-visitor-inl.h"
+#include "src/objects/embedder-data-array-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// A class which acts as a MarkingState but does not actually update any marking
+// bits. It reports all objects as white and all transitions as successful. It
+// also tracks which objects are retained by the primary object according to the
+// marking visitor.
+class ReferenceSummarizerMarkingState final {
+ public:
+ // Declares that this marking state is collecting retainers, so the marking
+ // visitor must fully visit each object and can't update on-heap state.
+ static constexpr bool kCollectRetainers = true;
+
+ explicit ReferenceSummarizerMarkingState(HeapObject object)
+ : primary_object_(object),
+ local_marking_worklists_(&marking_worklists_),
+ local_weak_objects_(&weak_objects_) {}
+
+ ~ReferenceSummarizerMarkingState() {
+ // Clean up temporary state.
+ local_weak_objects_.Publish();
+ weak_objects_.Clear();
+ local_marking_worklists_.Publish();
+ marking_worklists_.Clear();
+ }
+
+ // Retrieves the references that were collected by this marker. This operation
+ // transfers ownership of the set, so calling it again would yield an empty
+ // result.
+ ReferenceSummary DestructivelyRetrieveReferences() {
+ ReferenceSummary tmp = std::move(references_);
+ references_.Clear();
+ return tmp;
+ }
+
+ // Standard marking visitor functions:
+
+ bool IsWhite(HeapObject obj) const { return true; }
+
+ bool IsBlackOrGrey(HeapObject obj) const { return false; }
+
+ bool WhiteToGrey(HeapObject obj) { return true; }
+
+ bool GreyToBlack(HeapObject obj) { return true; }
+
+ // Adds a retaining relationship found by the marking visitor.
+ void AddStrongReferenceForReferenceSummarizer(HeapObject host,
+ HeapObject obj) {
+ AddReference(host, obj, references_.strong_references());
+ }
+
+ // Adds a non-retaining weak reference found by the marking visitor. The value
+ // in an ephemeron hash table entry is also included here, since it is not
+ // known to be strong without further information about the key.
+ void AddWeakReferenceForReferenceSummarizer(HeapObject host, HeapObject obj) {
+ AddReference(host, obj, references_.weak_references());
+ }
+
+ // Other member functions, not part of the marking visitor contract:
+
+ MarkingWorklists::Local* local_marking_worklists() {
+ return &local_marking_worklists_;
+ }
+ WeakObjects::Local* local_weak_objects() { return &local_weak_objects_; }
+
+ private:
+ void AddReference(
+ HeapObject host, HeapObject obj,
+ std::unordered_set<HeapObject, Object::Hasher>& references) {
+ // It's possible that the marking visitor handles multiple objects at once,
+ // such as a Map and its DescriptorArray, but we're only interested in
+ // references from the primary object.
+ if (host == primary_object_) {
+ references.insert(obj);
+ }
+ }
+
+ ReferenceSummary references_;
+ HeapObject primary_object_;
+ MarkingWorklists marking_worklists_;
+ MarkingWorklists::Local local_marking_worklists_;
+ WeakObjects weak_objects_;
+ WeakObjects::Local local_weak_objects_;
+};
+
+} // namespace
+
+ReferenceSummary ReferenceSummary::SummarizeReferencesFrom(Heap* heap,
+ HeapObject obj) {
+ ReferenceSummarizerMarkingState marking_state(obj);
+
+ MainMarkingVisitor<ReferenceSummarizerMarkingState> visitor(
+ &marking_state, marking_state.local_marking_worklists(),
+ marking_state.local_weak_objects(), heap, 0 /*mark_compact_epoch*/,
+ {} /*code_flush_mode*/, false /*embedder_tracing_enabled*/,
+ true /*should_keep_ages_unchanged*/);
+ visitor.Visit(obj.map(heap->isolate()), obj);
+
+ return marking_state.DestructivelyRetrieveReferences();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/reference-summarizer.h b/deps/v8/src/heap/reference-summarizer.h
new file mode 100644
index 0000000000..a49ac597a1
--- /dev/null
+++ b/deps/v8/src/heap/reference-summarizer.h
@@ -0,0 +1,55 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_REFERENCE_SUMMARIZER_H_
+#define V8_HEAP_REFERENCE_SUMMARIZER_H_
+
+#include <unordered_set>
+
+#include "src/objects/heap-object.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+class ReferenceSummary {
+ public:
+ ReferenceSummary() = default;
+ ReferenceSummary(ReferenceSummary&& other) V8_NOEXCEPT
+ : strong_references_(std::move(other.strong_references_)),
+ weak_references_(std::move(other.weak_references_)) {}
+
+ // Produces a set of objects referred to by the object. This function uses a
+ // realistic marking visitor, so its results are likely to match real GC
+ // behavior. Intended only for verification.
+ static ReferenceSummary SummarizeReferencesFrom(Heap* heap, HeapObject obj);
+
+ // All objects which the chosen object has strong pointers to.
+ std::unordered_set<HeapObject, Object::Hasher>& strong_references() {
+ return strong_references_;
+ }
+
+ // All objects which the chosen object has weak pointers to. The values in
+ // ephemeron hash tables are also included here, even though they aren't
+ // normal weak pointers.
+ std::unordered_set<HeapObject, Object::Hasher>& weak_references() {
+ return weak_references_;
+ }
+
+ void Clear() {
+ strong_references_.clear();
+ weak_references_.clear();
+ }
+
+ private:
+ std::unordered_set<HeapObject, Object::Hasher> strong_references_;
+ std::unordered_set<HeapObject, Object::Hasher> weak_references_;
+ DISALLOW_GARBAGE_COLLECTION(no_gc)
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_REFERENCE_SUMMARIZER_H_
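A hedged usage sketch of the verification API declared above; the heap pointer, the host object, and the debug-only call site are assumptions of this example, not part of the patch.

//   ReferenceSummary summary =
//       ReferenceSummary::SummarizeReferencesFrom(heap, host);
//   for (HeapObject target : summary.strong_references()) {
//     // |host| keeps |target| alive through a strong pointer.
//   }
//   for (HeapObject target : summary.weak_references()) {
//     // Weak edges, including ephemeron-table values whose strength cannot
//     // be judged without knowing the key.
//   }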
diff --git a/deps/v8/src/heap/remembered-set-inl.h b/deps/v8/src/heap/remembered-set-inl.h
index f7358630d0..b0908839ea 100644
--- a/deps/v8/src/heap/remembered-set-inl.h
+++ b/deps/v8/src/heap/remembered-set-inl.h
@@ -17,26 +17,26 @@ SlotCallbackResult UpdateTypedSlotHelper::UpdateTypedSlot(Heap* heap,
Address addr,
Callback callback) {
switch (slot_type) {
- case CODE_TARGET_SLOT: {
+ case SlotType::kCodeEntry: {
RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, Code());
return UpdateCodeTarget(&rinfo, callback);
}
- case CODE_ENTRY_SLOT: {
+ case SlotType::kConstPoolCodeEntry: {
return UpdateCodeEntry(addr, callback);
}
- case COMPRESSED_EMBEDDED_OBJECT_SLOT: {
+ case SlotType::kEmbeddedObjectCompressed: {
RelocInfo rinfo(addr, RelocInfo::COMPRESSED_EMBEDDED_OBJECT, 0, Code());
return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
- case FULL_EMBEDDED_OBJECT_SLOT: {
+ case SlotType::kEmbeddedObjectFull: {
RelocInfo rinfo(addr, RelocInfo::FULL_EMBEDDED_OBJECT, 0, Code());
return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
- case DATA_EMBEDDED_OBJECT_SLOT: {
+ case SlotType::kEmbeddedObjectData: {
RelocInfo rinfo(addr, RelocInfo::DATA_EMBEDDED_OBJECT, 0, Code());
return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
- case COMPRESSED_OBJECT_SLOT: {
+ case SlotType::kConstPoolEmbeddedObjectCompressed: {
HeapObject old_target = HeapObject::cast(Object(
DecompressTaggedAny(heap->isolate(), base::Memory<Tagged_t>(addr))));
HeapObject new_target = old_target;
@@ -47,10 +47,10 @@ SlotCallbackResult UpdateTypedSlotHelper::UpdateTypedSlot(Heap* heap,
}
return result;
}
- case FULL_OBJECT_SLOT: {
+ case SlotType::kConstPoolEmbeddedObjectFull: {
return callback(FullMaybeObjectSlot(addr));
}
- case CLEARED_SLOT:
+ case SlotType::kCleared:
break;
}
UNREACHABLE();
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index 13a6fedf47..b4badca6d6 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -391,19 +391,6 @@ class RememberedSetSweeping {
}
};
-inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
- if (RelocInfo::IsCodeTargetMode(rmode)) {
- return CODE_TARGET_SLOT;
- } else if (RelocInfo::IsFullEmbeddedObject(rmode)) {
- return FULL_EMBEDDED_OBJECT_SLOT;
- } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
- return COMPRESSED_EMBEDDED_OBJECT_SLOT;
- } else if (RelocInfo::IsDataEmbeddedObject(rmode)) {
- return DATA_EMBEDDED_OBJECT_SLOT;
- }
- UNREACHABLE();
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/safepoint.cc b/deps/v8/src/heap/safepoint.cc
index bd4c610004..12f6706f76 100644
--- a/deps/v8/src/heap/safepoint.cc
+++ b/deps/v8/src/heap/safepoint.cc
@@ -33,14 +33,13 @@ void IsolateSafepoint::EnterLocalSafepointScope() {
DCHECK_NULL(LocalHeap::Current());
DCHECK(AllowGarbageCollection::IsAllowed());
- LockMutex(heap_->isolate()->main_thread_local_heap());
+ LockMutex(isolate()->main_thread_local_heap());
if (++active_safepoint_scopes_ > 1) return;
// Local safepoint can only be initiated on the isolate's main thread.
- DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
+ DCHECK_EQ(ThreadId::Current(), isolate()->thread_id());
- TimedHistogramScope timer(
- heap_->isolate()->counters()->gc_time_to_safepoint());
+ TimedHistogramScope timer(isolate()->counters()->gc_time_to_safepoint());
TRACE_GC(heap_->tracer(), GCTracer::Scope::TIME_TO_SAFEPOINT);
barrier_.Arm();
@@ -72,6 +71,7 @@ class PerClientSafepointData final {
void IsolateSafepoint::InitiateGlobalSafepointScope(
Isolate* initiator, PerClientSafepointData* client_data) {
+ shared_isolate()->global_safepoint()->AssertActive();
IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
LockMutex(initiator->main_thread_local_heap());
InitiateGlobalSafepointScopeRaw(initiator, client_data);
@@ -79,6 +79,7 @@ void IsolateSafepoint::InitiateGlobalSafepointScope(
void IsolateSafepoint::TryInitiateGlobalSafepointScope(
Isolate* initiator, PerClientSafepointData* client_data) {
+ shared_isolate()->global_safepoint()->AssertActive();
if (!local_heaps_mutex_.TryLock()) return;
InitiateGlobalSafepointScopeRaw(initiator, client_data);
}
@@ -95,7 +96,7 @@ void IsolateSafepoint::InitiateGlobalSafepointScopeRaw(
IsolateSafepoint::IncludeMainThread
IsolateSafepoint::IncludeMainThreadUnlessInitiator(Isolate* initiator) {
- const bool is_initiator = heap_->isolate() == initiator;
+ const bool is_initiator = isolate() == initiator;
return is_initiator ? IncludeMainThread::kNo : IncludeMainThread::kYes;
}
@@ -233,23 +234,6 @@ void IsolateSafepoint::Barrier::WaitInUnpark() {
}
}
-bool IsolateSafepoint::ContainsLocalHeap(LocalHeap* local_heap) {
- base::RecursiveMutexGuard guard(&local_heaps_mutex_);
- LocalHeap* current = local_heaps_head_;
-
- while (current) {
- if (current == local_heap) return true;
- current = current->next_;
- }
-
- return false;
-}
-
-bool IsolateSafepoint::ContainsAnyLocalHeap() {
- base::RecursiveMutexGuard guard(&local_heaps_mutex_);
- return local_heaps_head_ != nullptr;
-}
-
void IsolateSafepoint::Iterate(RootVisitor* visitor) {
AssertActive();
for (LocalHeap* current = local_heaps_head_; current;
@@ -263,6 +247,12 @@ void IsolateSafepoint::AssertMainThreadIsOnlyThread() {
DCHECK_NULL(heap_->main_thread_local_heap()->next_);
}
+Isolate* IsolateSafepoint::isolate() const { return heap_->isolate(); }
+
+Isolate* IsolateSafepoint::shared_isolate() const {
+ return isolate()->shared_isolate();
+}
+
SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
safepoint_->EnterLocalSafepointScope();
}
diff --git a/deps/v8/src/heap/safepoint.h b/deps/v8/src/heap/safepoint.h
index 8a6823c603..b64df46f3a 100644
--- a/deps/v8/src/heap/safepoint.h
+++ b/deps/v8/src/heap/safepoint.h
@@ -26,9 +26,6 @@ class IsolateSafepoint final {
public:
explicit IsolateSafepoint(Heap* heap);
- V8_EXPORT_PRIVATE bool ContainsLocalHeap(LocalHeap* local_heap);
- V8_EXPORT_PRIVATE bool ContainsAnyLocalHeap();
-
// Iterate handles in local heaps
void Iterate(RootVisitor* visitor);
@@ -44,7 +41,7 @@ class IsolateSafepoint final {
void AssertActive() { local_heaps_mutex_.AssertHeld(); }
- void AssertMainThreadIsOnlyThread();
+ V8_EXPORT_PRIVATE void AssertMainThreadIsOnlyThread();
private:
class Barrier {
@@ -135,6 +132,9 @@ class IsolateSafepoint final {
local_heaps_head_ = local_heap->next_;
}
+ Isolate* isolate() const;
+ Isolate* shared_isolate() const;
+
Barrier barrier_;
Heap* heap_;
@@ -145,11 +145,9 @@ class IsolateSafepoint final {
int active_safepoint_scopes_;
- friend class Heap;
friend class GlobalSafepoint;
friend class GlobalSafepointScope;
friend class LocalHeap;
- friend class PersistentHandles;
friend class SafepointScope;
};
@@ -181,6 +179,8 @@ class GlobalSafepoint final {
void AssertNoClients();
+ void AssertActive() { clients_mutex_.AssertHeld(); }
+
private:
void EnterGlobalSafepointScope(Isolate* initiator);
void LeaveGlobalSafepointScope(Isolate* initiator);
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 8a0a1da96b..14306c0910 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -5,8 +5,8 @@
#ifndef V8_HEAP_SCAVENGER_INL_H_
#define V8_HEAP_SCAVENGER_INL_H_
+#include "src/heap/evacuation-allocator-inl.h"
#include "src/heap/incremental-marking-inl.h"
-#include "src/heap/local-allocator-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/scavenger.h"
#include "src/objects/map.h"
@@ -83,7 +83,8 @@ void Scavenger::PageMemoryFence(MaybeObject object) {
}
bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
- int size) {
+ int size,
+ PromotionHeapChoice promotion_heap_choice) {
// Copy the content of source to target.
target.set_map_word(MapWord::FromMap(map), kRelaxedStore);
heap()->CopyBlock(target.address() + kTaggedSize,
@@ -100,7 +101,8 @@ bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
heap()->OnMoveEvent(target, source, size);
}
- if (is_incremental_marking_) {
+ if (is_incremental_marking_ &&
+ promotion_heap_choice != kPromoteIntoSharedHeap) {
heap()->incremental_marking()->TransferColor(source, target);
}
heap()->UpdateAllocationSite(map, source, &local_pretenuring_feedback_);
@@ -123,7 +125,8 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
if (allocation.To(&target)) {
DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
target));
- const bool self_success = MigrateObject(map, object, target, object_size);
+ const bool self_success =
+ MigrateObject(map, object, target, object_size, kPromoteIntoLocalHeap);
if (!self_success) {
allocator_.FreeLast(NEW_SPACE, target, object_size);
MapWord map_word = object.map_word(kAcquireLoad);
@@ -171,7 +174,8 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
if (allocation.To(&target)) {
DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
target));
- const bool self_success = MigrateObject(map, object, target, object_size);
+ const bool self_success =
+ MigrateObject(map, object, target, object_size, promotion_heap_choice);
if (!self_success) {
allocator_.FreeLast(OLD_SPACE, target, object_size);
MapWord map_word = object.map_word(kAcquireLoad);
@@ -182,7 +186,11 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
: CopyAndForwardResult::SUCCESS_OLD_GENERATION;
}
HeapObjectReference::Update(slot, target);
- if (object_fields == ObjectFields::kMaybePointers) {
+
+ // During incremental marking we want to push every object in order to
+ // record slots for map words. Necessary for map space compaction.
+ if (object_fields == ObjectFields::kMaybePointers ||
+ is_compacting_including_map_space_) {
promotion_list_local_.PushRegularObject(target, object_size);
}
promoted_size_ += object_size;
@@ -203,7 +211,6 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
// TODO(hpayer): Make this check size based, i.e.
// object_size > kMaxRegularHeapObjectSize
if (V8_UNLIKELY(
- FLAG_young_generation_large_objects &&
BasicMemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
MemoryChunk::FromHeapObject(object)->owner_identity());
@@ -377,7 +384,8 @@ SlotCallbackResult Scavenger::EvacuateObject(THeapObjectSlot slot, Map map,
map, slot, String::unchecked_cast(source), size,
ObjectFields::kMaybePointers);
case kVisitDataObject: // External strings have kVisitDataObject.
- if (String::IsInPlaceInternalizable(map.instance_type())) {
+ if (String::IsInPlaceInternalizableExcludingExternal(
+ map.instance_type())) {
return EvacuateInPlaceInternalizableString(
map, slot, String::unchecked_cast(source), size,
ObjectFields::kDataOnly);
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 3e3a67a5e6..56e002a98c 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -4,14 +4,18 @@
#include "src/heap/scavenger.h"
+#include "src/common/globals.h"
#include "src/handles/global-handles.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/barrier.h"
+#include "src/heap/concurrent-allocator.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
+#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk-inl.h"
+#include "src/heap/memory-chunk.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/scavenger-inl.h"
@@ -32,6 +36,17 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
bool record_slots)
: scavenger_(scavenger), record_slots_(record_slots) {}
+ V8_INLINE void VisitMapPointer(HeapObject host) final {
+ if (!record_slots_) return;
+ MapWord map_word = host.map_word(kRelaxedLoad);
+ if (map_word.IsForwardingAddress()) {
+ // Surviving new large objects have forwarding pointers in the map word.
+ DCHECK(MemoryChunk::FromHeapObject(host)->InNewLargeObjectSpace());
+ return;
+ }
+ HandleSlot(host, HeapObjectSlot(host.map_slot()), map_word.ToMap());
+ }
+
V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) final {
VisitPointersImpl(host, start, end);
@@ -118,10 +133,9 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
slot.address());
}
}
- SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
- HeapObject::cast(target)));
- } else if (record_slots_ && MarkCompactCollector::IsOnEvacuationCandidate(
- HeapObject::cast(target))) {
+ SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
+ } else if (record_slots_ &&
+ MarkCompactCollector::IsOnEvacuationCandidate(target)) {
// We should never try to record off-heap slots.
DCHECK((std::is_same<THeapObjectSlot, HeapObjectSlot>::value));
// Code slots never appear in new space because CodeDataContainers, the
@@ -281,18 +295,8 @@ void ScavengerCollector::CollectGarbage() {
{
Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();
- // Try to finish sweeping here, such that the following code doesn't need to
- // pause & resume sweeping.
- if (sweeper->sweeping_in_progress() && FLAG_concurrent_sweeping &&
- !sweeper->AreSweeperTasksRunning()) {
- // At this point we know that all concurrent sweeping tasks have run
- // out-of-work and quit: all pages are swept. The main thread still needs
- // to complete sweeping though.
- heap_->mark_compact_collector()->EnsureSweepingCompleted();
- }
-
// Pause the concurrent sweeper.
- Sweeper::PauseOrCompleteScope pause_scope(sweeper);
+ Sweeper::PauseScope pause_scope(sweeper);
// Filter out pages from the sweeper that need to be processed for old to
// new slots by the Scavenger. After processing, the Scavenger adds back
    // pages that are still unswept. This way the Scavenger has exclusive
@@ -400,7 +404,7 @@ void ScavengerCollector::CollectGarbage() {
heap_->UpdateYoungReferencesInExternalStringTable(
&Heap::UpdateYoungReferenceInExternalStringTableEntry);
- heap_->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
+ heap_->incremental_marking()->UpdateMarkingWorklistAfterYoungGenGC();
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->UpdateRetainersAfterScavenge();
@@ -497,6 +501,10 @@ void ScavengerCollector::SweepArrayBufferExtensions() {
}
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
+ const bool is_compacting = heap_->incremental_marking()->IsCompacting();
+ MajorAtomicMarkingState* marking_state =
+ heap_->incremental_marking()->atomic_marking_state();
+
for (SurvivingNewLargeObjectMapEntry update_info :
surviving_new_large_objects_) {
HeapObject object = update_info.first;
@@ -504,6 +512,12 @@ void ScavengerCollector::HandleSurvivingNewLargeObjects() {
// Order is important here. We have to re-install the map to have access
// to meta-data like size during page promotion.
object.set_map_word(MapWord::FromMap(map), kRelaxedStore);
+
+ if (is_compacting && marking_state->IsBlack(object) &&
+ MarkCompactCollector::IsOnEvacuationCandidate(map)) {
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(
+ MemoryChunk::FromHeapObject(object), object.map_slot().address());
+ }
LargePage* page = LargePage::FromHeapObject(object);
heap_->lo_space()->PromoteNewLargeObject(page);
}
@@ -540,6 +554,15 @@ Scavenger::PromotionList::Local::Local(Scavenger::PromotionList* promotion_list)
large_object_promotion_list_local_(
&promotion_list->large_object_promotion_list_) {}
+namespace {
+ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
+ if (FLAG_shared_string_table && heap->isolate()->shared_isolate()) {
+ return new ConcurrentAllocator(nullptr, heap->shared_old_space());
+ }
+ return nullptr;
+}
+} // namespace
+
Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
EmptyChunksList* empty_chunks, CopiedList* copied_list,
PromotionList* promotion_list,
@@ -554,12 +577,12 @@ Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
copied_size_(0),
promoted_size_(0),
allocator_(heap, CompactionSpaceKind::kCompactionSpaceForScavenge),
- shared_old_allocator_(heap_->shared_old_allocator_.get()),
+ shared_old_allocator_(CreateSharedOldAllocator(heap_)),
is_logging_(is_logging),
is_incremental_marking_(heap->incremental_marking()->IsMarking()),
is_compacting_(heap->incremental_marking()->IsCompacting()),
- shared_string_table_(FLAG_shared_string_table &&
- (heap->isolate()->shared_isolate() != nullptr)) {}
+ is_compacting_including_map_space_(is_compacting_ && FLAG_compact_maps),
+ shared_string_table_(shared_old_allocator_.get() != nullptr) {}
void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
int size) {
@@ -574,7 +597,13 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);
IterateAndScavengePromotedObjectsVisitor visitor(this, record_slots);
- target.IterateBodyFast(map, size, &visitor);
+
+ if (is_compacting_including_map_space_) {
+ // When we compact map space, we also want to visit the map word.
+ target.IterateFast(map, size, &visitor);
+ } else {
+ target.IterateBodyFast(map, size, &visitor);
+ }
if (map.IsJSArrayBufferMap()) {
DCHECK(!BasicMemoryChunk::FromHeapObject(target)->IsLargePage());
@@ -741,6 +770,7 @@ void Scavenger::Finalize() {
heap()->IncrementPromotedObjectsSize(promoted_size_);
collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
allocator_.Finalize();
+ if (shared_old_allocator_) shared_old_allocator_->FreeLinearAllocationArea();
empty_chunks_local_.Publish();
ephemeron_table_list_local_.Publish();
for (auto it = ephemeron_remembered_set_.begin();
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 0eb12a5f3d..0dff0ec133 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -7,8 +7,8 @@
#include "src/base/platform/condition-variable.h"
#include "src/heap/base/worklist.h"
+#include "src/heap/evacuation-allocator.h"
#include "src/heap/index-generator.h"
-#include "src/heap/local-allocator.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/parallel-work-item.h"
#include "src/heap/slot-set.h"
@@ -111,6 +111,8 @@ class Scavenger {
size_t bytes_promoted() const { return promoted_size_; }
private:
+ enum PromotionHeapChoice { kPromoteIntoLocalHeap, kPromoteIntoSharedHeap };
+
// Number of objects to process before interrupting for potentially waking
// up other tasks.
static const int kInterruptThreshold = 128;
@@ -135,7 +137,8 @@ class Scavenger {
// Copies |source| to |target| and sets the forwarding pointer in |source|.
V8_INLINE bool MigrateObject(Map map, HeapObject source, HeapObject target,
- int size);
+ int size,
+ PromotionHeapChoice promotion_heap_choice);
V8_INLINE SlotCallbackResult
RememberedSetEntryNeeded(CopyAndForwardResult result);
@@ -145,8 +148,6 @@ class Scavenger {
SemiSpaceCopyObject(Map map, THeapObjectSlot slot, HeapObject object,
int object_size, ObjectFields object_fields);
- enum PromotionHeapChoice { kPromoteIntoLocalHeap, kPromoteIntoSharedHeap };
-
template <typename THeapObjectSlot,
PromotionHeapChoice promotion_heap_choice = kPromoteIntoLocalHeap>
V8_INLINE CopyAndForwardResult PromoteObject(Map map, THeapObjectSlot slot,
@@ -197,13 +198,14 @@ class Scavenger {
size_t copied_size_;
size_t promoted_size_;
EvacuationAllocator allocator_;
- ConcurrentAllocator* shared_old_allocator_ = nullptr;
+ std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
SurvivingNewLargeObjectsMap surviving_new_large_objects_;
EphemeronRememberedSet ephemeron_remembered_set_;
const bool is_logging_;
const bool is_incremental_marking_;
const bool is_compacting_;
+ const bool is_compacting_including_map_space_;
const bool shared_string_table_;
friend class IterateAndScavengePromotedObjectsVisitor;
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 4e7b2afbdc..806da907c0 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -13,6 +13,7 @@
#include "src/init/setup-isolate.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/arguments.h"
+#include "src/objects/call-site-info.h"
#include "src/objects/cell-inl.h"
#include "src/objects/contexts.h"
#include "src/objects/data-handler.h"
@@ -37,7 +38,6 @@
#include "src/objects/shared-function-info.h"
#include "src/objects/smi.h"
#include "src/objects/source-text-module.h"
-#include "src/objects/stack-frame-info.h"
#include "src/objects/string.h"
#include "src/objects/synthetic-module.h"
#include "src/objects/template-objects-inl.h"
@@ -151,9 +151,9 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
SKIP_WRITE_BARRIER);
Map map = isolate()->factory()->InitializeMap(
Map::cast(result), instance_type, instance_size, elements_kind,
- inobject_properties);
+ inobject_properties, this);
- return map;
+ return AllocationResult::FromObject(map);
}
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
@@ -184,7 +184,7 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
DCHECK(!map.is_in_retained_map_list());
map.clear_padding();
map.set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
- return map;
+ return AllocationResult::FromObject(map);
}
void Heap::FinalizePartialMap(Map map) {
@@ -208,7 +208,7 @@ AllocationResult Heap::Allocate(Handle<Map> map,
allocation_type == AllocationType::kYoung ? SKIP_WRITE_BARRIER
: UPDATE_WRITE_BARRIER;
result.set_map_after_allocation(*map, write_barrier_mode);
- return result;
+ return AllocationResult::FromObject(result);
}
bool Heap::CreateInitialMaps() {
@@ -250,7 +250,6 @@ bool Heap::CreateInitialMaps() {
#undef ALLOCATE_PARTIAL_MAP
}
- // Allocate the empty array.
{
AllocationResult alloc =
AllocateRaw(FixedArray::SizeFor(0), AllocationType::kReadOnly);
@@ -476,6 +475,9 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(NUMBER_DICTIONARY_TYPE, number_dictionary)
ALLOCATE_VARSIZE_MAP(SIMPLE_NUMBER_DICTIONARY_TYPE,
simple_number_dictionary)
+ ALLOCATE_VARSIZE_MAP(NAME_TO_INDEX_HASH_TABLE_TYPE,
+ name_to_index_hash_table)
+ ALLOCATE_VARSIZE_MAP(REGISTERED_SYMBOL_TABLE_TYPE, registered_symbol_table)
ALLOCATE_VARSIZE_MAP(EMBEDDER_DATA_ARRAY_TYPE, embedder_data_array)
ALLOCATE_VARSIZE_MAP(EPHEMERON_HASH_TABLE_TYPE, ephemeron_hash_table)
@@ -516,6 +518,8 @@ bool Heap::CreateInitialMaps() {
WasmInternalFunction::kSize, wasm_internal_function)
IF_WASM(ALLOCATE_MAP, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData::kSize,
wasm_js_function_data)
+ IF_WASM(ALLOCATE_MAP, WASM_ON_FULFILLED_DATA_TYPE,
+ WasmOnFulfilledData::kSize, wasm_onfulfilled_data)
IF_WASM(ALLOCATE_MAP, WASM_TYPE_INFO_TYPE, WasmTypeInfo::kSize,
wasm_type_info)
@@ -523,13 +527,22 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kHeaderSize,
message_object)
- ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kEmbedderDataSlotSize,
+ ALLOCATE_MAP(JS_EXTERNAL_OBJECT_TYPE, JSExternalObject::kHeaderSize,
external)
external_map().set_is_extensible(false);
#undef ALLOCATE_PRIMITIVE_MAP
#undef ALLOCATE_VARSIZE_MAP
#undef ALLOCATE_MAP
}
+ {
+ AllocationResult alloc = AllocateRaw(
+ ArrayList::SizeFor(ArrayList::kFirstIndex), AllocationType::kReadOnly);
+ if (!alloc.To(&obj)) return false;
+ obj.set_map_after_allocation(roots.array_list_map(), SKIP_WRITE_BARRIER);
+ ArrayList::cast(obj).set_length(ArrayList::kFirstIndex);
+ ArrayList::cast(obj).SetLength(0);
+ }
+ set_empty_array_list(ArrayList::cast(obj));
{
AllocationResult alloc =
@@ -781,16 +794,20 @@ void Heap::CreateInitialObjects() {
Handle<NameDictionary> empty_property_dictionary = NameDictionary::New(
isolate(), 1, AllocationType::kReadOnly, USE_CUSTOM_MINIMUM_CAPACITY);
DCHECK(!empty_property_dictionary->HasSufficientCapacityToAdd(1));
+
set_empty_property_dictionary(*empty_property_dictionary);
- set_public_symbol_table(*empty_property_dictionary);
- set_api_symbol_table(*empty_property_dictionary);
- set_api_private_symbol_table(*empty_property_dictionary);
+ Handle<RegisteredSymbolTable> empty_symbol_table = RegisteredSymbolTable::New(
+ isolate(), 1, AllocationType::kReadOnly, USE_CUSTOM_MINIMUM_CAPACITY);
+ DCHECK(!empty_symbol_table->HasSufficientCapacityToAdd(1));
+ set_public_symbol_table(*empty_symbol_table);
+ set_api_symbol_table(*empty_symbol_table);
+ set_api_private_symbol_table(*empty_symbol_table);
set_number_string_cache(*factory->NewFixedArray(
kInitialNumberStringCacheSize * 2, AllocationType::kOld));
- set_basic_block_profiling_data(ArrayList::cast(roots.empty_fixed_array()));
+ set_basic_block_profiling_data(roots.empty_array_list());
// Allocate cache for string split and regexp-multiple.
set_string_split_cache(*factory->NewFixedArray(
@@ -811,6 +828,7 @@ void Heap::CreateInitialObjects() {
set_shared_wasm_memories(roots.empty_weak_array_list());
#ifdef V8_ENABLE_WEBASSEMBLY
set_active_continuation(roots.undefined_value());
+ set_active_suspender(roots.undefined_value());
#endif // V8_ENABLE_WEBASSEMBLY
set_script_list(roots.empty_weak_array_list());
diff --git a/deps/v8/src/heap/slot-set.cc b/deps/v8/src/heap/slot-set.cc
index 92540574a0..ae4dd7a79f 100644
--- a/deps/v8/src/heap/slot-set.cc
+++ b/deps/v8/src/heap/slot-set.cc
@@ -64,7 +64,7 @@ void TypedSlotSet::ClearInvalidSlots(
while (chunk != nullptr) {
for (TypedSlot& slot : chunk->buffer) {
SlotType type = TypeField::decode(slot.type_and_offset);
- if (type == CLEARED_SLOT) continue;
+ if (type == SlotType::kCleared) continue;
uint32_t offset = OffsetField::decode(slot.type_and_offset);
std::map<uint32_t, uint32_t>::const_iterator upper_bound =
invalid_ranges.upper_bound(offset);
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index 5e70cbc33d..7f6f8c3c41 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -602,15 +602,43 @@ class SlotSet {
STATIC_ASSERT(std::is_standard_layout<SlotSet>::value);
STATIC_ASSERT(std::is_standard_layout<SlotSet::Bucket>::value);
-enum SlotType {
- FULL_EMBEDDED_OBJECT_SLOT,
- COMPRESSED_EMBEDDED_OBJECT_SLOT,
- DATA_EMBEDDED_OBJECT_SLOT,
- FULL_OBJECT_SLOT,
- COMPRESSED_OBJECT_SLOT,
- CODE_TARGET_SLOT,
- CODE_ENTRY_SLOT,
- CLEARED_SLOT
+enum class SlotType : uint8_t {
+ // Full pointer sized slot storing an object start address.
+ // RelocInfo::target_object/RelocInfo::set_target_object methods are used for
+ // accessing. Used when pointer is stored in the instruction stream.
+ kEmbeddedObjectFull,
+
+ // Tagged sized slot storing an object start address.
+ // RelocInfo::target_object/RelocInfo::set_target_object methods are used for
+ // accessing. Used when pointer is stored in the instruction stream.
+ kEmbeddedObjectCompressed,
+
+ // Full pointer sized slot storing an object start address.
+ // RelocInfo::target_object/RelocInfo::set_target_object methods are used for
+ // accessing. Used when pointer is stored in the instruction stream.
+ kEmbeddedObjectData,
+
+ // Full pointer sized slot storing instruction start of Code object.
+ // RelocInfo::target_address/RelocInfo::set_target_address methods are used
+ // for accessing. Used when pointer is stored in the instruction stream.
+ kCodeEntry,
+
+ // Raw full pointer sized slot. Slot is accessed directly. Used when pointer
+ // is stored in constant pool.
+ kConstPoolEmbeddedObjectFull,
+
+ // Raw tagged sized slot. Slot is accessed directly. Used when pointer is
+ // stored in constant pool.
+ kConstPoolEmbeddedObjectCompressed,
+
+ // Raw full pointer sized slot storing instruction start of Code object. Slot
+ // is accessed directly. Used when pointer is stored in constant pool.
+ kConstPoolCodeEntry,
+
+ // Slot got cleared but has not been removed from the slot set.
+ kCleared,
+
+ kLast = kCleared
};
// Data structure for maintaining a list of typed slots in a page.
@@ -669,7 +697,7 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
// This can run concurrently to ClearInvalidSlots().
template <typename Callback>
int Iterate(Callback callback, IterationMode mode) {
- STATIC_ASSERT(CLEARED_SLOT < 8);
+ STATIC_ASSERT(static_cast<uint8_t>(SlotType::kLast) < 8);
Chunk* chunk = head_;
Chunk* previous = nullptr;
int new_count = 0;
@@ -677,7 +705,7 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
bool empty = true;
for (TypedSlot& slot : chunk->buffer) {
SlotType type = TypeField::decode(slot.type_and_offset);
- if (type != CLEARED_SLOT) {
+ if (type != SlotType::kCleared) {
uint32_t offset = OffsetField::decode(slot.type_and_offset);
Address addr = page_start_ + offset;
if (callback(type, addr) == KEEP_SLOT) {
@@ -727,7 +755,8 @@ class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
}
static TypedSlot ClearedTypedSlot() {
- return TypedSlot{TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0)};
+ return TypedSlot{TypeField::encode(SlotType::kCleared) |
+ OffsetField::encode(0)};
}
Address page_start_;
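A self-contained sketch of the packing property that the STATIC_ASSERT above guards: with SlotType::kLast == 7, the slot type fits in three bits, leaving the rest of a 32-bit word for the on-page offset. The field widths here are illustrative assumptions; the real layout comes from TypedSlots' bit-field definitions.

#include <cstdint>

enum class ToySlotType : uint8_t {
  kEmbeddedObjectFull = 0,
  kCleared = 7,
  kLast = kCleared
};
static_assert(static_cast<uint8_t>(ToySlotType::kLast) < 8,
              "slot type must fit in three bits");

// Pack the type into the low 3 bits and the page offset into the rest.
constexpr uint32_t Encode(ToySlotType type, uint32_t offset) {
  return static_cast<uint32_t>(type) | (offset << 3);
}
constexpr ToySlotType DecodeType(uint32_t packed) {
  return static_cast<ToySlotType>(packed & 0x7);
}
constexpr uint32_t DecodeOffset(uint32_t packed) { return packed >> 3; }

static_assert(DecodeType(Encode(ToySlotType::kCleared, 0)) ==
                  ToySlotType::kCleared,
              "a cleared slot round-trips");
static_assert(DecodeOffset(Encode(ToySlotType::kEmbeddedObjectFull, 4096)) ==
                  4096,
              "offsets round-trip");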
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 796d118988..fb290feee5 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -93,7 +93,8 @@ OldGenerationMemoryChunkIterator::OldGenerationMemoryChunkIterator(Heap* heap)
state_(kOldSpaceState),
old_iterator_(heap->old_space()->begin()),
code_iterator_(heap->code_space()->begin()),
- map_iterator_(heap->map_space()->begin()),
+ map_iterator_(heap->map_space() ? heap->map_space()->begin()
+ : PageRange::iterator(nullptr)),
lo_iterator_(heap->lo_space()->begin()),
code_lo_iterator_(heap->code_lo_space()->begin()) {}
@@ -140,21 +141,19 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
int filler_size = Heap::GetFillToAlign(current_top, alignment);
int aligned_size = filler_size + size_in_bytes;
if (!allocation_info_.CanIncrementTop(aligned_size)) {
- return AllocationResult::Retry(NEW_SPACE);
+ return AllocationResult::Failure();
}
HeapObject object =
HeapObject::FromAddress(allocation_info_.IncrementTop(aligned_size));
- if (filler_size > 0) {
- return heap_->PrecedeWithFiller(object, filler_size);
- }
-
- return AllocationResult(object);
+ return filler_size > 0 ? AllocationResult::FromObject(
+ heap_->PrecedeWithFiller(object, filler_size))
+ : AllocationResult::FromObject(object);
}
LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
AllocationResult result,
intptr_t size) {
- if (result.IsRetry()) return InvalidBuffer();
+ if (result.IsFailure()) return InvalidBuffer();
HeapObject obj;
bool ok = result.To(&obj);
USE(ok);
@@ -175,6 +174,24 @@ bool LocalAllocationBuffer::TryFreeLast(HeapObject object, int object_size) {
return false;
}
+bool MemoryChunkIterator::HasNext() {
+ if (current_chunk_) return true;
+
+ while (space_iterator_.HasNext()) {
+ Space* space = space_iterator_.Next();
+ current_chunk_ = space->first_page();
+ if (current_chunk_) return true;
+ }
+
+ return false;
+}
+
+MemoryChunk* MemoryChunkIterator::Next() {
+ MemoryChunk* chunk = current_chunk_;
+ current_chunk_ = chunk->list_node().next();
+ return chunk;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 43d01f3989..77be40f779 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -13,6 +13,7 @@
#include "src/base/macros.h"
#include "src/base/sanitizer/msan.h"
#include "src/common/globals.h"
+#include "src/heap/base/active-system-pages.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
@@ -22,6 +23,7 @@
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
@@ -251,24 +253,20 @@ void Space::RemoveAllocationObserver(AllocationObserver* observer) {
allocation_counter_.RemoveAllocationObserver(observer);
}
-void Space::PauseAllocationObservers() {
- allocation_observers_paused_depth_++;
- if (allocation_observers_paused_depth_ == 1) allocation_counter_.Pause();
-}
+void Space::PauseAllocationObservers() { allocation_counter_.Pause(); }
-void Space::ResumeAllocationObservers() {
- allocation_observers_paused_depth_--;
- if (allocation_observers_paused_depth_ == 0) allocation_counter_.Resume();
-}
+void Space::ResumeAllocationObservers() { allocation_counter_.Resume(); }
Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
size_t min_size) {
DCHECK_GE(end - start, min_size);
- if (heap()->inline_allocation_disabled()) {
- // Fit the requested area exactly.
+ if (!use_lab_) {
+ // LABs are disabled, so we fit the requested area exactly.
return start + min_size;
- } else if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
+ }
+
+ if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
// Ensure there are no unaccounted allocations.
DCHECK_EQ(allocation_info_->start(), allocation_info_->top());
@@ -283,10 +281,27 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
static_cast<uint64_t>(start) + std::max(min_size, rounded_step);
uint64_t new_end = std::min(step_end, static_cast<uint64_t>(end));
return static_cast<Address>(new_end);
- } else {
- // The entire node can be used as the linear allocation area.
- return end;
}
+
+ // LABs are enabled and no observers attached. Return the whole node for the
+ // LAB.
+ return end;
+}
+
+void SpaceWithLinearArea::DisableInlineAllocation() {
+ if (!use_lab_) return;
+
+ use_lab_ = false;
+ FreeLinearAllocationArea();
+ UpdateInlineAllocationLimit(0);
+}
+
+void SpaceWithLinearArea::EnableInlineAllocation() {
+ if (use_lab_) return;
+
+ use_lab_ = true;
+ AdvanceAllocationObservers();
+ UpdateInlineAllocationLimit(0);
}
void SpaceWithLinearArea::UpdateAllocationOrigins(AllocationOrigin origin) {
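An illustrative, self-contained model of the limit computation above (the names and literal values are this sketch's own): with LABs disabled the requested area is fitted exactly, with active observers the limit is clamped to one observer step, and otherwise the whole node backs the LAB.

#include <algorithm>
#include <cstdint>

uint64_t ComputeToyLimit(uint64_t start, uint64_t end, uint64_t min_size,
                         bool use_lab, bool observers_active,
                         uint64_t rounded_step) {
  if (!use_lab) return start + min_size;  // fit the request exactly
  if (observers_active) {
    uint64_t step_end = start + std::max(min_size, rounded_step);
    return std::min(step_end, end);       // stop after one observer step
  }
  return end;                             // the whole node becomes the LAB
}

// E.g. start=0x1000, end=0x5000, min_size=0x100, rounded_step=0x800:
// observers active -> 0x1800; LABs disabled -> 0x1100; otherwise -> 0x5000.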
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 3ac1e00208..18b760e1a5 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -7,18 +7,19 @@
#include <atomic>
#include <memory>
-#include <vector>
#include "src/base/iterator.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/allocation-observer.h"
#include "src/heap/base-space.h"
+#include "src/heap/base/active-system-pages.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/free-list.h"
#include "src/heap/heap.h"
#include "src/heap/linear-allocation-area.h"
#include "src/heap/list.h"
+#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/objects/objects.h"
#include "src/utils/allocation.h"
@@ -170,14 +171,23 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
return external_backing_store_bytes_[type];
}
- MemoryChunk* first_page() { return memory_chunk_list_.front(); }
- MemoryChunk* last_page() { return memory_chunk_list_.back(); }
+ virtual MemoryChunk* first_page() { return memory_chunk_list_.front(); }
+ virtual MemoryChunk* last_page() { return memory_chunk_list_.back(); }
- const MemoryChunk* first_page() const { return memory_chunk_list_.front(); }
- const MemoryChunk* last_page() const { return memory_chunk_list_.back(); }
+ virtual const MemoryChunk* first_page() const {
+ return memory_chunk_list_.front();
+ }
+ virtual const MemoryChunk* last_page() const {
+ return memory_chunk_list_.back();
+ }
heap::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
+ virtual Page* InitializePage(MemoryChunk* chunk) {
+ UNREACHABLE();
+ return nullptr;
+ }
+
FreeList* free_list() { return free_list_.get(); }
Address FirstPageAddress() const { return first_page()->address(); }
@@ -187,8 +197,6 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
#endif
protected:
- int allocation_observers_paused_depth_ = 0;
-
AllocationCounter allocation_counter_;
// The List manages the pages that belong to the given space.
@@ -301,6 +309,8 @@ class Page : public MemoryChunk {
void MoveOldToNewRememberedSetForSweeping();
void MergeOldToNewRememberedSets();
+ ActiveSystemPages* active_system_pages() { return &active_system_pages_; }
+
private:
friend class MemoryAllocator;
};
@@ -468,6 +478,7 @@ class SpaceWithLinearArea : public Space {
size_t allocation_size);
void MarkLabStartInitialized();
+ virtual void FreeLinearAllocationArea() = 0;
// When allocation observers are active we may use a lower limit to allow the
// observers to 'interrupt' earlier than the natural limit. Given a linear
@@ -478,18 +489,35 @@ class SpaceWithLinearArea : public Space {
V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
size_t min_size) = 0;
- V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
+ void DisableInlineAllocation();
+ void EnableInlineAllocation();
+ bool IsInlineAllocationEnabled() const { return use_lab_; }
void PrintAllocationsOrigins();
protected:
- // TODO(ofrobots): make these private after refactoring is complete.
+ V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
+
LinearAllocationArea* const allocation_info_;
+ bool use_lab_ = true;
size_t allocations_origins_[static_cast<int>(
AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
};
+// Iterates over all memory chunks in the heap (across all spaces).
+class MemoryChunkIterator {
+ public:
+ explicit MemoryChunkIterator(Heap* heap) : space_iterator_(heap) {}
+
+ V8_INLINE bool HasNext();
+ V8_INLINE MemoryChunk* Next();
+
+ private:
+ SpaceIterator space_iterator_;
+ MemoryChunk* current_chunk_ = nullptr;
+};
+
} // namespace internal
} // namespace v8
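A hedged usage sketch for the new iterator above; the Heap pointer and the loop body are assumptions of this example.

//   MemoryChunkIterator it(heap);
//   while (it.HasNext()) {
//     MemoryChunk* chunk = it.Next();
//     // Visit |chunk|; chunks are yielded space by space, following each
//     // space's memory_chunk_list().
//   }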
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 1b9a9b4eb7..5745c4bf1f 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -6,6 +6,7 @@
#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h"
+#include "src/heap/base/active-system-pages.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/free-list-inl.h"
#include "src/heap/gc-tracer.h"
@@ -27,36 +28,27 @@ Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
iterability_task_started_(false),
should_reduce_memory_(false) {}
-Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
- : sweeper_(sweeper) {
+Sweeper::PauseScope::PauseScope(Sweeper* sweeper) : sweeper_(sweeper) {
if (!sweeper_->sweeping_in_progress()) return;
if (sweeper_->job_handle_ && sweeper_->job_handle_->IsValid())
sweeper_->job_handle_->Cancel();
-
- // Complete sweeping if there's nothing more to do.
- if (sweeper_->IsDoneSweeping()) {
- sweeper_->heap_->mark_compact_collector()->EnsureSweepingCompleted();
- DCHECK(!sweeper_->sweeping_in_progress());
- } else {
- // Unless sweeping is complete the flag still indicates that the sweeper
- // is enabled. It just cannot use tasks anymore.
- DCHECK(sweeper_->sweeping_in_progress());
- }
}
-Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
+Sweeper::PauseScope::~PauseScope() {
if (!sweeper_->sweeping_in_progress()) return;
sweeper_->StartSweeperTasks();
}
Sweeper::FilterSweepingPagesScope::FilterSweepingPagesScope(
- Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope)
+ Sweeper* sweeper, const PauseScope& pause_scope)
: sweeper_(sweeper),
- pause_or_complete_scope_(pause_or_complete_scope),
sweeping_in_progress_(sweeper_->sweeping_in_progress()) {
- USE(pause_or_complete_scope_);
+ // The PauseScope here only serves as a witness that concurrent sweeping has
+ // been paused.
+ USE(pause_scope);
+
if (!sweeping_in_progress_) return;
int old_space_index = GetSweepSpaceIndex(OLD_SPACE);
@@ -135,7 +127,6 @@ class Sweeper::IncrementalSweeperTask final : public CancelableTask {
void RunInternal() final {
VMState<GC> state(isolate_);
TRACE_EVENT_CALL_STATS_SCOPED(isolate_, "v8", "V8.Task");
-
sweeper_->incremental_sweeper_pending_ = false;
if (sweeper_->sweeping_in_progress()) {
@@ -165,16 +156,14 @@ void Sweeper::StartSweeping() {
// evacuating a page, already swept pages will have enough free bytes to
// hold the objects to move (and therefore, we won't need to wait for more
// pages to be swept in order to move those objects).
- // Since maps don't move, there is no need to sort the pages from MAP_SPACE
- // before sweeping them.
- if (space != MAP_SPACE) {
- int space_index = GetSweepSpaceIndex(space);
- std::sort(
- sweeping_list_[space_index].begin(),
- sweeping_list_[space_index].end(), [marking_state](Page* a, Page* b) {
- return marking_state->live_bytes(a) > marking_state->live_bytes(b);
- });
- }
+ // We sort in descending order of live bytes, i.e., ascending order of free
+ // bytes, because GetSweepingPageSafe returns pages in reverse order.
+ int space_index = GetSweepSpaceIndex(space);
+ std::sort(
+ sweeping_list_[space_index].begin(), sweeping_list_[space_index].end(),
+ [marking_state](Page* a, Page* b) {
+ return marking_state->live_bytes(a) > marking_state->live_bytes(b);
+ });
});
}
@@ -337,6 +326,15 @@ int Sweeper::RawSweep(
CodeObjectRegistry* code_object_registry = p->GetCodeObjectRegistry();
if (code_object_registry) code_object_registry->Clear();
+ base::Optional<ActiveSystemPages> active_system_pages_after_sweeping;
+ if (should_reduce_memory_) {
+ // Only decrement counter when we discard unused system pages.
+ active_system_pages_after_sweeping = ActiveSystemPages();
+ active_system_pages_after_sweeping->Init(
+ MemoryChunkLayout::kMemoryChunkHeaderSize,
+ MemoryAllocator::GetCommitPageSizeBits(), Page::kPageSize);
+ }
+
  // Phase 2: Free the non-live memory and clean up the regular remembered set
  // entries.
@@ -385,11 +383,18 @@ int Sweeper::RawSweep(
&old_to_new_cleanup);
}
Map map = object.map(cage_base, kAcquireLoad);
- DCHECK(map.IsMap(cage_base));
+ // Map might be forwarded during GC.
+ DCHECK(MarkCompactCollector::IsMapOrForwardedMap(map));
int size = object.SizeFromMap(map);
live_bytes += size;
free_start = free_end + size;
+ if (active_system_pages_after_sweeping) {
+ active_system_pages_after_sweeping->Add(
+ free_end - p->address(), free_start - p->address(),
+ MemoryAllocator::GetCommitPageSizeBits());
+ }
+
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
p->object_start_bitmap()->SetBit(object.address());
#endif
@@ -412,6 +417,13 @@ int Sweeper::RawSweep(
CleanupInvalidTypedSlotsOfFreeRanges(p, free_ranges_map);
ClearMarkBitsAndHandleLivenessStatistics(p, live_bytes, free_list_mode);
+ if (active_system_pages_after_sweeping) {
+ // Decrement accounted memory for discarded memory.
+ PagedSpace* paged_space = static_cast<PagedSpace*>(p->owner());
+ paged_space->ReduceActiveSystemPages(p,
+ *active_system_pages_after_sweeping);
+ }
+
if (code_object_registry) code_object_registry->Finalize();
p->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kDone);
if (free_list_mode == IGNORE_FREE_LIST) return 0;
@@ -441,6 +453,8 @@ bool Sweeper::ConcurrentSweepSpace(AllocationSpace identity,
}
bool Sweeper::IncrementalSweepSpace(AllocationSpace identity) {
+ TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING,
+ ThreadKind::kMain);
if (Page* page = GetSweepingPageSafe(identity)) {
ParallelSweepPage(page, identity);
}
diff --git a/deps/v8/src/heap/sweeper.h b/deps/v8/src/heap/sweeper.h
index f6a362d596..9ac9172b51 100644
--- a/deps/v8/src/heap/sweeper.h
+++ b/deps/v8/src/heap/sweeper.h
@@ -5,7 +5,6 @@
#ifndef V8_HEAP_SWEEPER_H_
#define V8_HEAP_SWEEPER_H_
-#include <deque>
#include <map>
#include <vector>
@@ -32,11 +31,11 @@ class Sweeper {
using SweptList = std::vector<Page*>;
using FreeRangesMap = std::map<uint32_t, uint32_t>;
- // Pauses the sweeper tasks or completes sweeping.
- class V8_NODISCARD PauseOrCompleteScope final {
+ // Pauses the sweeper tasks.
+ class V8_NODISCARD PauseScope final {
public:
- explicit PauseOrCompleteScope(Sweeper* sweeper);
- ~PauseOrCompleteScope();
+ explicit PauseScope(Sweeper* sweeper);
+ ~PauseScope();
private:
Sweeper* const sweeper_;
@@ -48,8 +47,7 @@ class Sweeper {
// after exiting this scope.
class V8_NODISCARD FilterSweepingPagesScope final {
public:
- FilterSweepingPagesScope(
- Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope);
+ FilterSweepingPagesScope(Sweeper* sweeper, const PauseScope& pause_scope);
~FilterSweepingPagesScope();
template <typename Callback>
@@ -70,7 +68,6 @@ class Sweeper {
private:
Sweeper* const sweeper_;
SweepingList old_space_sweeping_list_;
- const PauseOrCompleteScope& pause_or_complete_scope_;
bool sweeping_in_progress_;
};
diff --git a/deps/v8/src/heap/third-party/heap-api-stub.cc b/deps/v8/src/heap/third-party/heap-api-stub.cc
index 3ca562fe58..9690a8ca27 100644
--- a/deps/v8/src/heap/third-party/heap-api-stub.cc
+++ b/deps/v8/src/heap/third-party/heap-api-stub.cc
@@ -51,13 +51,25 @@ bool Heap::InOldSpace(Address) { return false; }
bool Heap::InReadOnlySpace(Address) { return false; }
// static
+bool Heap::InLargeObjectSpace(Address address) { return false; }
+
+// static
bool Heap::IsValidHeapObject(HeapObject) { return false; }
// static
+bool Heap::IsImmovable(HeapObject) { return false; }
+
+// static
bool Heap::IsValidCodeObject(HeapObject) { return false; }
+void Heap::ResetIterator() {}
+
+HeapObject Heap::NextObject() { return HeapObject(); }
+
bool Heap::CollectGarbage() { return false; }
+size_t Heap::Capacity() { return 0; }
+
} // namespace third_party_heap
} // namespace internal
} // namespace v8