author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-12 14:27:29 +0200
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-13 09:35:20 +0000
commit    c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree      e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/v8/src/heap
parent    7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
download  qtwebengine-chromium-85-based.tar.gz

BASELINE: Update Chromium to 85.0.4183.140

Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8/src/heap')
-rw-r--r--  chromium/v8/src/heap/allocation-stats.h | 117
-rw-r--r--  chromium/v8/src/heap/base-space.cc | 33
-rw-r--r--  chromium/v8/src/heap/base-space.h | 81
-rw-r--r--  chromium/v8/src/heap/base/asm/arm/push_registers_asm.cc (renamed from chromium/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc) | 0
-rw-r--r--  chromium/v8/src/heap/base/asm/arm64/push_registers_asm.cc (renamed from chromium/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc) | 0
-rw-r--r--  chromium/v8/src/heap/base/asm/arm64/push_registers_masm.S (renamed from chromium/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S) | 0
-rw-r--r--  chromium/v8/src/heap/base/asm/ia32/push_registers_asm.cc (renamed from chromium/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc) | 0
-rw-r--r--  chromium/v8/src/heap/base/asm/ia32/push_registers_masm.S (renamed from chromium/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S) | 0
-rw-r--r--  chromium/v8/src/heap/base/asm/mips/push_registers_asm.cc (renamed from chromium/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc) | 0
-rw-r--r--  chromium/v8/src/heap/base/asm/mips64/push_registers_asm.cc (renamed from chromium/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc) | 0
-rw-r--r--  chromium/v8/src/heap/base/asm/ppc/push_registers_asm.cc (renamed from chromium/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc) | 0
-rw-r--r--  chromium/v8/src/heap/base/asm/s390/push_registers_asm.cc (renamed from chromium/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc) | 0
-rw-r--r--  chromium/v8/src/heap/base/asm/x64/push_registers_asm.cc (renamed from chromium/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc) | 0
-rw-r--r--  chromium/v8/src/heap/base/asm/x64/push_registers_masm.S (renamed from chromium/v8/src/heap/cppgc/asm/x64/push_registers_masm.S) | 0
-rw-r--r--  chromium/v8/src/heap/base/stack.cc (renamed from chromium/v8/src/heap/cppgc/stack.cc) | 10
-rw-r--r--  chromium/v8/src/heap/base/stack.h (renamed from chromium/v8/src/heap/cppgc/stack.h) | 14
-rw-r--r--  chromium/v8/src/heap/basic-memory-chunk.cc | 38
-rw-r--r--  chromium/v8/src/heap/basic-memory-chunk.h | 231
-rw-r--r--  chromium/v8/src/heap/code-object-registry.cc | 75
-rw-r--r--  chromium/v8/src/heap/code-object-registry.h | 38
-rw-r--r--  chromium/v8/src/heap/code-stats.cc | 2
-rw-r--r--  chromium/v8/src/heap/combined-heap.cc | 3
-rw-r--r--  chromium/v8/src/heap/combined-heap.h | 2
-rw-r--r--  chromium/v8/src/heap/concurrent-allocator-inl.h | 18
-rw-r--r--  chromium/v8/src/heap/concurrent-allocator.cc | 78
-rw-r--r--  chromium/v8/src/heap/concurrent-allocator.h | 20
-rw-r--r--  chromium/v8/src/heap/concurrent-marking.cc | 6
-rw-r--r--  chromium/v8/src/heap/cppgc-js/cpp-heap.cc | 141
-rw-r--r--  chromium/v8/src/heap/cppgc-js/cpp-heap.h | 42
-rw-r--r--  chromium/v8/src/heap/cppgc/allocation.cc | 16
-rw-r--r--  chromium/v8/src/heap/cppgc/caged-heap-local-data.cc | 36
-rw-r--r--  chromium/v8/src/heap/cppgc/caged-heap.cc | 85
-rw-r--r--  chromium/v8/src/heap/cppgc/caged-heap.h | 53
-rw-r--r--  chromium/v8/src/heap/cppgc/free-list.cc | 9
-rw-r--r--  chromium/v8/src/heap/cppgc/garbage-collector.h | 56
-rw-r--r--  chromium/v8/src/heap/cppgc/gc-info-table.cc | 5
-rw-r--r--  chromium/v8/src/heap/cppgc/gc-info-table.h | 3
-rw-r--r--  chromium/v8/src/heap/cppgc/gc-info.cc | 5
-rw-r--r--  chromium/v8/src/heap/cppgc/gc-invoker.cc | 105
-rw-r--r--  chromium/v8/src/heap/cppgc/gc-invoker.h | 47
-rw-r--r--  chromium/v8/src/heap/cppgc/globals.h | 7
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-base.cc | 88
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-base.h | 151
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-growing.cc | 99
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-growing.h | 53
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-inl.h | 33
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-object-header-inl.h | 5
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-object-header.h | 3
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-page-inl.h | 30
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-page.cc | 141
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-page.h | 56
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-space.cc | 20
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-space.h | 5
-rw-r--r--  chromium/v8/src/heap/cppgc/heap.cc | 110
-rw-r--r--  chromium/v8/src/heap/cppgc/heap.h | 134
-rw-r--r--  chromium/v8/src/heap/cppgc/marker.cc | 140
-rw-r--r--  chromium/v8/src/heap/cppgc/marker.h | 90
-rw-r--r--  chromium/v8/src/heap/cppgc/marking-visitor.cc | 67
-rw-r--r--  chromium/v8/src/heap/cppgc/marking-visitor.h | 22
-rw-r--r--  chromium/v8/src/heap/cppgc/object-allocator-inl.h | 4
-rw-r--r--  chromium/v8/src/heap/cppgc/object-allocator.cc | 145
-rw-r--r--  chromium/v8/src/heap/cppgc/object-allocator.h | 41
-rw-r--r--  chromium/v8/src/heap/cppgc/object-start-bitmap-inl.h | 1
-rw-r--r--  chromium/v8/src/heap/cppgc/page-memory-inl.h | 10
-rw-r--r--  chromium/v8/src/heap/cppgc/page-memory.h | 16
-rw-r--r--  chromium/v8/src/heap/cppgc/persistent-node.cc | 12
-rw-r--r--  chromium/v8/src/heap/cppgc/platform.cc | 10
-rw-r--r--  chromium/v8/src/heap/cppgc/pointer-policies.cc | 4
-rw-r--r--  chromium/v8/src/heap/cppgc/prefinalizer-handler.cc | 19
-rw-r--r--  chromium/v8/src/heap/cppgc/prefinalizer-handler.h | 2
-rw-r--r--  chromium/v8/src/heap/cppgc/process-heap.cc | 13
-rw-r--r--  chromium/v8/src/heap/cppgc/raw-heap.cc | 2
-rw-r--r--  chromium/v8/src/heap/cppgc/raw-heap.h | 10
-rw-r--r--  chromium/v8/src/heap/cppgc/stats-collector.cc | 114
-rw-r--r--  chromium/v8/src/heap/cppgc/stats-collector.h | 130
-rw-r--r--  chromium/v8/src/heap/cppgc/sweeper.cc | 479
-rw-r--r--  chromium/v8/src/heap/cppgc/sweeper.h | 7
-rw-r--r--  chromium/v8/src/heap/cppgc/task-handle.h | 47
-rw-r--r--  chromium/v8/src/heap/cppgc/virtual-memory.cc | 56
-rw-r--r--  chromium/v8/src/heap/cppgc/virtual-memory.h | 60
-rw-r--r--  chromium/v8/src/heap/cppgc/visitor.cc | 76
-rw-r--r--  chromium/v8/src/heap/cppgc/visitor.h | 34
-rw-r--r--  chromium/v8/src/heap/cppgc/write-barrier.cc | 84
-rw-r--r--  chromium/v8/src/heap/factory-base.cc | 2
-rw-r--r--  chromium/v8/src/heap/factory.cc | 81
-rw-r--r--  chromium/v8/src/heap/factory.h | 13
-rw-r--r--  chromium/v8/src/heap/finalization-registry-cleanup-task.h | 3
-rw-r--r--  chromium/v8/src/heap/free-list-inl.h | 36
-rw-r--r--  chromium/v8/src/heap/free-list.cc | 596
-rw-r--r--  chromium/v8/src/heap/free-list.h | 520
-rw-r--r--  chromium/v8/src/heap/heap-inl.h | 30
-rw-r--r--  chromium/v8/src/heap/heap.cc | 274
-rw-r--r--  chromium/v8/src/heap/heap.h | 54
-rw-r--r--  chromium/v8/src/heap/incremental-marking.cc | 74
-rw-r--r--  chromium/v8/src/heap/incremental-marking.h | 13
-rw-r--r--  chromium/v8/src/heap/large-spaces.cc | 13
-rw-r--r--  chromium/v8/src/heap/list.h | 12
-rw-r--r--  chromium/v8/src/heap/local-allocator.h | 2
-rw-r--r--  chromium/v8/src/heap/local-heap.cc | 8
-rw-r--r--  chromium/v8/src/heap/local-heap.h | 27
-rw-r--r--  chromium/v8/src/heap/mark-compact-inl.h | 6
-rw-r--r--  chromium/v8/src/heap/mark-compact.cc | 71
-rw-r--r--  chromium/v8/src/heap/mark-compact.h | 45
-rw-r--r--  chromium/v8/src/heap/marking-visitor.h | 11
-rw-r--r--  chromium/v8/src/heap/memory-allocator.cc | 778
-rw-r--r--  chromium/v8/src/heap/memory-allocator.h | 451
-rw-r--r--  chromium/v8/src/heap/memory-chunk.cc | 307
-rw-r--r--  chromium/v8/src/heap/memory-chunk.h | 197
-rw-r--r--  chromium/v8/src/heap/memory-measurement.cc | 16
-rw-r--r--  chromium/v8/src/heap/memory-measurement.h | 3
-rw-r--r--  chromium/v8/src/heap/new-spaces-inl.h | 179
-rw-r--r--  chromium/v8/src/heap/new-spaces.cc | 653
-rw-r--r--  chromium/v8/src/heap/new-spaces.h | 501
-rw-r--r--  chromium/v8/src/heap/object-stats.cc | 3
-rw-r--r--  chromium/v8/src/heap/off-thread-heap.cc | 191
-rw-r--r--  chromium/v8/src/heap/off-thread-heap.h | 25
-rw-r--r--  chromium/v8/src/heap/paged-spaces-inl.h | 208
-rw-r--r--  chromium/v8/src/heap/paged-spaces.cc | 1047
-rw-r--r--  chromium/v8/src/heap/paged-spaces.h | 588
-rw-r--r--  chromium/v8/src/heap/read-only-heap.cc | 30
-rw-r--r--  chromium/v8/src/heap/read-only-heap.h | 5
-rw-r--r--  chromium/v8/src/heap/read-only-spaces.cc | 437
-rw-r--r--  chromium/v8/src/heap/read-only-spaces.h | 90
-rw-r--r--  chromium/v8/src/heap/remembered-set-inl.h | 446
-rw-r--r--  chromium/v8/src/heap/remembered-set.h | 406
-rw-r--r--  chromium/v8/src/heap/safepoint.cc | 26
-rw-r--r--  chromium/v8/src/heap/safepoint.h | 8
-rw-r--r--  chromium/v8/src/heap/scavenger-inl.h | 4
-rw-r--r--  chromium/v8/src/heap/scavenger.cc | 3
-rw-r--r--  chromium/v8/src/heap/setup-heap-internal.cc | 20
-rw-r--r--  chromium/v8/src/heap/spaces-inl.h | 405
-rw-r--r--  chromium/v8/src/heap/spaces.cc | 3441
-rw-r--r--  chromium/v8/src/heap/spaces.h | 2276
-rw-r--r--  chromium/v8/src/heap/sweeper.cc | 26
-rw-r--r--  chromium/v8/src/heap/sweeper.h | 5
135 files changed, 11091 insertions, 7574 deletions
diff --git a/chromium/v8/src/heap/allocation-stats.h b/chromium/v8/src/heap/allocation-stats.h
new file mode 100644
index 00000000000..b05158f91b4
--- /dev/null
+++ b/chromium/v8/src/heap/allocation-stats.h
@@ -0,0 +1,117 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_ALLOCATION_STATS_H_
+#define V8_HEAP_ALLOCATION_STATS_H_
+
+#include <atomic>
+#include <unordered_map>
+
+#include "src/base/macros.h"
+#include "src/heap/basic-memory-chunk.h"
+
+namespace v8 {
+namespace internal {
+
+// An abstraction of the accounting statistics of a page-structured space.
+//
+// The stats are only set by functions that ensure they stay balanced. These
+// functions increase or decrease one of the non-capacity stats in conjunction
+// with capacity, or else they always balance increases and decreases to the
+// non-capacity stats.
+class AllocationStats {
+ public:
+ AllocationStats() { Clear(); }
+
+ AllocationStats& operator=(const AllocationStats& stats) V8_NOEXCEPT {
+ capacity_ = stats.capacity_.load();
+ max_capacity_ = stats.max_capacity_;
+ size_.store(stats.size_);
+#ifdef DEBUG
+ allocated_on_page_ = stats.allocated_on_page_;
+#endif
+ return *this;
+ }
+
+ // Zero out all the allocation statistics (i.e., no capacity).
+ void Clear() {
+ capacity_ = 0;
+ max_capacity_ = 0;
+ ClearSize();
+ }
+
+ void ClearSize() {
+ size_ = 0;
+#ifdef DEBUG
+ allocated_on_page_.clear();
+#endif
+ }
+
+ // Accessors for the allocation statistics.
+ size_t Capacity() { return capacity_; }
+ size_t MaxCapacity() { return max_capacity_; }
+ size_t Size() { return size_; }
+#ifdef DEBUG
+ size_t AllocatedOnPage(BasicMemoryChunk* page) {
+ return allocated_on_page_[page];
+ }
+#endif
+
+ void IncreaseAllocatedBytes(size_t bytes, BasicMemoryChunk* page) {
+#ifdef DEBUG
+ size_t size = size_;
+ DCHECK_GE(size + bytes, size);
+#endif
+ size_.fetch_add(bytes);
+#ifdef DEBUG
+ allocated_on_page_[page] += bytes;
+#endif
+ }
+
+ void DecreaseAllocatedBytes(size_t bytes, BasicMemoryChunk* page) {
+ DCHECK_GE(size_, bytes);
+ size_.fetch_sub(bytes);
+#ifdef DEBUG
+ DCHECK_GE(allocated_on_page_[page], bytes);
+ allocated_on_page_[page] -= bytes;
+#endif
+ }
+
+ void DecreaseCapacity(size_t bytes) {
+ DCHECK_GE(capacity_, bytes);
+ DCHECK_GE(capacity_ - bytes, size_);
+ capacity_ -= bytes;
+ }
+
+ void IncreaseCapacity(size_t bytes) {
+ DCHECK_GE(capacity_ + bytes, capacity_);
+ capacity_ += bytes;
+ if (capacity_ > max_capacity_) {
+ max_capacity_ = capacity_;
+ }
+ }
+
+ private:
+ // |capacity_|: The number of object-area bytes (i.e., not including page
+ // bookkeeping structures) currently in the space.
+ // During evacuation, the capacity of the main spaces is accessed from
+ // multiple threads to check the old generation hard limit.
+ std::atomic<size_t> capacity_;
+
+ // |max_capacity_|: The maximum capacity ever observed.
+ size_t max_capacity_;
+
+ // |size_|: The number of allocated bytes.
+ std::atomic<size_t> size_;
+
+#ifdef DEBUG
+ std::unordered_map<BasicMemoryChunk*, size_t, BasicMemoryChunk::Hasher>
+ allocated_on_page_;
+#endif
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_ALLOCATION_STATS_H_
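
The AllocationStats comment above describes a balancing rule: size_ only moves within the budget established by capacity_. A minimal standalone sketch of that invariant (this is not the V8 class; the names and assert placement are illustrative):

#include <atomic>
#include <cassert>
#include <cstddef>

// Toy restatement of the balancing rule described above, not the V8 class.
struct ToyStats {
  std::atomic<std::size_t> capacity{0};
  std::atomic<std::size_t> size{0};

  void IncreaseCapacity(std::size_t bytes) { capacity += bytes; }
  void IncreaseAllocatedBytes(std::size_t bytes) {
    size += bytes;
    assert(size <= capacity);  // allocated bytes never exceed capacity
  }
  void DecreaseAllocatedBytes(std::size_t bytes) {
    assert(size >= bytes);
    size -= bytes;
  }
};

int main() {
  ToyStats stats;
  stats.IncreaseCapacity(4096);       // a page's object area becomes usable
  stats.IncreaseAllocatedBytes(128);  // an object is allocated on it
  stats.DecreaseAllocatedBytes(128);  // ... and later freed
  assert(stats.size == 0 && stats.capacity == 4096);
}
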
diff --git a/chromium/v8/src/heap/base-space.cc b/chromium/v8/src/heap/base-space.cc
new file mode 100644
index 00000000000..aabbeaebf54
--- /dev/null
+++ b/chromium/v8/src/heap/base-space.cc
@@ -0,0 +1,33 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/base-space.h"
+
+namespace v8 {
+namespace internal {
+
+const char* BaseSpace::GetSpaceName(AllocationSpace space) {
+ switch (space) {
+ case NEW_SPACE:
+ return "new_space";
+ case OLD_SPACE:
+ return "old_space";
+ case MAP_SPACE:
+ return "map_space";
+ case CODE_SPACE:
+ return "code_space";
+ case LO_SPACE:
+ return "large_object_space";
+ case NEW_LO_SPACE:
+ return "new_large_object_space";
+ case CODE_LO_SPACE:
+ return "code_large_object_space";
+ case RO_SPACE:
+ return "read_only_space";
+ }
+ UNREACHABLE();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/heap/base-space.h b/chromium/v8/src/heap/base-space.h
new file mode 100644
index 00000000000..4b121e470cd
--- /dev/null
+++ b/chromium/v8/src/heap/base-space.h
@@ -0,0 +1,81 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_BASE_SPACE_H_
+#define V8_HEAP_BASE_SPACE_H_
+
+#include <atomic>
+
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+#include "src/logging/log.h"
+#include "src/utils/allocation.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+// ----------------------------------------------------------------------------
+// BaseSpace is the abstract superclass for all allocation spaces.
+class V8_EXPORT_PRIVATE BaseSpace : public Malloced {
+ public:
+ Heap* heap() const {
+ DCHECK_NOT_NULL(heap_);
+ return heap_;
+ }
+
+ AllocationSpace identity() { return id_; }
+
+ // Returns name of the space.
+ static const char* GetSpaceName(AllocationSpace space);
+
+ const char* name() { return GetSpaceName(id_); }
+
+ void AccountCommitted(size_t bytes) {
+ DCHECK_GE(committed_ + bytes, committed_);
+ committed_ += bytes;
+ if (committed_ > max_committed_) {
+ max_committed_ = committed_;
+ }
+ }
+
+ void AccountUncommitted(size_t bytes) {
+ DCHECK_GE(committed_, committed_ - bytes);
+ committed_ -= bytes;
+ }
+
+ // Returns the total amount of committed memory for this space, i.e., allocatable
+ // memory and page headers.
+ virtual size_t CommittedMemory() { return committed_; }
+
+ virtual size_t MaximumCommittedMemory() { return max_committed_; }
+
+ // Approximate amount of physical memory committed for this space.
+ virtual size_t CommittedPhysicalMemory() = 0;
+
+ // Returns allocated size.
+ virtual size_t Size() = 0;
+
+ protected:
+ BaseSpace(Heap* heap, AllocationSpace id)
+ : heap_(heap), id_(id), committed_(0), max_committed_(0) {}
+
+ virtual ~BaseSpace() = default;
+
+ protected:
+ Heap* heap_;
+ AllocationSpace id_;
+
+ // Keeps track of committed memory in a space.
+ std::atomic<size_t> committed_;
+ size_t max_committed_;
+
+ DISALLOW_COPY_AND_ASSIGN(BaseSpace);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_BASE_SPACE_H_
diff --git a/chromium/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/arm/push_registers_asm.cc
index 5246c3f6c3e..5246c3f6c3e 100644
--- a/chromium/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/arm/push_registers_asm.cc
diff --git a/chromium/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/arm64/push_registers_asm.cc
index 30d4de1f308..30d4de1f308 100644
--- a/chromium/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/arm64/push_registers_asm.cc
diff --git a/chromium/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S b/chromium/v8/src/heap/base/asm/arm64/push_registers_masm.S
index 9773654ffcf..9773654ffcf 100644
--- a/chromium/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S
+++ b/chromium/v8/src/heap/base/asm/arm64/push_registers_masm.S
diff --git a/chromium/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/ia32/push_registers_asm.cc
index ed9c14a50e9..ed9c14a50e9 100644
--- a/chromium/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/ia32/push_registers_asm.cc
diff --git a/chromium/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S b/chromium/v8/src/heap/base/asm/ia32/push_registers_masm.S
index a35fd6e527d..a35fd6e527d 100644
--- a/chromium/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S
+++ b/chromium/v8/src/heap/base/asm/ia32/push_registers_masm.S
diff --git a/chromium/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/mips/push_registers_asm.cc
index 4a46caa6c52..4a46caa6c52 100644
--- a/chromium/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/mips/push_registers_asm.cc
diff --git a/chromium/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/mips64/push_registers_asm.cc
index 6befa3bcc0c..6befa3bcc0c 100644
--- a/chromium/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/mips64/push_registers_asm.cc
diff --git a/chromium/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/ppc/push_registers_asm.cc
index 6936819ba2b..6936819ba2b 100644
--- a/chromium/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/ppc/push_registers_asm.cc
diff --git a/chromium/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/s390/push_registers_asm.cc
index 6b9b2c08536..6b9b2c08536 100644
--- a/chromium/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/s390/push_registers_asm.cc
diff --git a/chromium/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/x64/push_registers_asm.cc
index 68f7918c93c..68f7918c93c 100644
--- a/chromium/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/x64/push_registers_asm.cc
diff --git a/chromium/v8/src/heap/cppgc/asm/x64/push_registers_masm.S b/chromium/v8/src/heap/base/asm/x64/push_registers_masm.S
index 627843830fa..627843830fa 100644
--- a/chromium/v8/src/heap/cppgc/asm/x64/push_registers_masm.S
+++ b/chromium/v8/src/heap/base/asm/x64/push_registers_masm.S
diff --git a/chromium/v8/src/heap/cppgc/stack.cc b/chromium/v8/src/heap/base/stack.cc
index b99693708c6..cd284444747 100644
--- a/chromium/v8/src/heap/cppgc/stack.cc
+++ b/chromium/v8/src/heap/base/stack.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/heap/cppgc/stack.h"
+#include "src/heap/base/stack.h"
#include <limits>
@@ -10,8 +10,8 @@
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/sanitizers.h"
-namespace cppgc {
-namespace internal {
+namespace heap {
+namespace base {
using IterateStackCallback = void (*)(const Stack*, StackVisitor*, intptr_t*);
extern "C" void PushAllRegistersAndIterateStack(const Stack*, StackVisitor*,
@@ -125,5 +125,5 @@ void Stack::IteratePointers(StackVisitor* visitor) const {
IterateSafeStackIfNecessary(visitor);
}
-} // namespace internal
-} // namespace cppgc
+} // namespace base
+} // namespace heap
diff --git a/chromium/v8/src/heap/cppgc/stack.h b/chromium/v8/src/heap/base/stack.h
index 3f561aed08e..a46e6e660ed 100644
--- a/chromium/v8/src/heap/cppgc/stack.h
+++ b/chromium/v8/src/heap/base/stack.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_CPPGC_STACK_H_
-#define V8_HEAP_CPPGC_STACK_H_
+#ifndef V8_HEAP_BASE_STACK_H_
+#define V8_HEAP_BASE_STACK_H_
#include "src/base/macros.h"
-namespace cppgc {
-namespace internal {
+namespace heap {
+namespace base {
class StackVisitor {
public:
@@ -37,7 +37,7 @@ class V8_EXPORT_PRIVATE Stack final {
const void* stack_start_;
};
-} // namespace internal
-} // namespace cppgc
+} // namespace base
+} // namespace heap
-#endif // V8_HEAP_CPPGC_STACK_H_
+#endif // V8_HEAP_BASE_STACK_H_
diff --git a/chromium/v8/src/heap/basic-memory-chunk.cc b/chromium/v8/src/heap/basic-memory-chunk.cc
index fa94f60f4ec..50eb8392915 100644
--- a/chromium/v8/src/heap/basic-memory-chunk.cc
+++ b/chromium/v8/src/heap/basic-memory-chunk.cc
@@ -7,8 +7,8 @@
#include <cstdlib>
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/heap/incremental-marking.h"
#include "src/objects/heap-object.h"
-#include "src/objects/slots-inl.h"
namespace v8 {
namespace internal {
@@ -39,5 +39,41 @@ void BasicMemoryChunk::ReleaseMarkingBitmap() {
marking_bitmap_ = nullptr;
}
+// static
+BasicMemoryChunk* BasicMemoryChunk::Initialize(Heap* heap, Address base,
+ size_t size, Address area_start,
+ Address area_end,
+ BaseSpace* owner,
+ VirtualMemory reservation) {
+ BasicMemoryChunk* chunk = FromAddress(base);
+ DCHECK_EQ(base, chunk->address());
+ new (chunk) BasicMemoryChunk(size, area_start, area_end);
+
+ chunk->heap_ = heap;
+ chunk->set_owner(owner);
+ chunk->reservation_ = std::move(reservation);
+ chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
+ chunk->allocated_bytes_ = chunk->area_size();
+ chunk->wasted_memory_ = 0;
+
+ return chunk;
+}
+
+bool BasicMemoryChunk::InOldSpace() const {
+ return owner()->identity() == OLD_SPACE;
+}
+
+bool BasicMemoryChunk::InLargeObjectSpace() const {
+ return owner()->identity() == LO_SPACE;
+}
+
+#ifdef THREAD_SANITIZER
+void BasicMemoryChunk::SynchronizedHeapLoad() {
+ CHECK(reinterpret_cast<Heap*>(base::Acquire_Load(
+ reinterpret_cast<base::AtomicWord*>(&heap_))) != nullptr ||
+ InReadOnlySpace());
+}
+#endif
+
} // namespace internal
} // namespace v8
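
BasicMemoryChunk::Initialize above placement-news the chunk header at the page-aligned base address, which is what later lets FromAddress() recover the header from any interior pointer by masking off the low bits. A self-contained sketch of that pattern, using a hypothetical chunk size and header type (std::aligned_alloc, C++17, stands in for the real page allocator):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <new>

constexpr std::size_t kDemoChunkSize = std::size_t{1} << 18;  // 256 KiB, power of two

struct DemoChunkHeader {
  std::size_t size;
};

// Mirrors the FromAddress() idea: mask an interior address down to the
// chunk-aligned base where the header was placement-new'ed.
DemoChunkHeader* HeaderFromAddress(std::uintptr_t addr) {
  return reinterpret_cast<DemoChunkHeader*>(
      addr & ~(std::uintptr_t{kDemoChunkSize} - 1));
}

int main() {
  void* base = std::aligned_alloc(kDemoChunkSize, kDemoChunkSize);
  assert(base != nullptr);
  auto* header = new (base) DemoChunkHeader{kDemoChunkSize};  // header at the base

  std::uintptr_t interior = reinterpret_cast<std::uintptr_t>(base) + 1024;
  assert(HeaderFromAddress(interior) == header);
  assert(HeaderFromAddress(interior)->size == kDemoChunkSize);

  header->~DemoChunkHeader();
  std::free(base);
}
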
diff --git a/chromium/v8/src/heap/basic-memory-chunk.h b/chromium/v8/src/heap/basic-memory-chunk.h
index 205d02ce247..8d8fff39fbe 100644
--- a/chromium/v8/src/heap/basic-memory-chunk.h
+++ b/chromium/v8/src/heap/basic-memory-chunk.h
@@ -6,25 +6,29 @@
#define V8_HEAP_BASIC_MEMORY_CHUNK_H_
#include <type_traits>
+#include <unordered_map>
#include "src/base/atomic-utils.h"
#include "src/common/globals.h"
+#include "src/flags/flags.h"
#include "src/heap/marking.h"
-#include "src/heap/slot-set.h"
+#include "src/objects/heap-object.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
-class MemoryChunk;
-
-enum RememberedSetType {
- OLD_TO_NEW,
- OLD_TO_OLD,
- NUMBER_OF_REMEMBERED_SET_TYPES
-};
+class BaseSpace;
class BasicMemoryChunk {
public:
+ // Use with std data structures.
+ struct Hasher {
+ size_t operator()(BasicMemoryChunk* const chunk) const {
+ return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
+ }
+ };
+
enum Flag {
NO_FLAGS = 0u,
IS_EXECUTABLE = 1u << 0,
@@ -109,11 +113,30 @@ class BasicMemoryChunk {
Address address() const { return reinterpret_cast<Address>(this); }
+ // Returns the offset of a given address to this page.
+ inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
+
+ // Returns the address for a given offset in this page.
+ Address OffsetToAddress(size_t offset) {
+ Address address_in_page = address() + offset;
+ DCHECK_GE(address_in_page, area_start());
+ DCHECK_LT(address_in_page, area_end());
+ return address_in_page;
+ }
+
+ // Some callers rely on the fact that this can operate on both
+ // tagged and aligned object addresses.
+ inline uint32_t AddressToMarkbitIndex(Address addr) const {
+ return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
+ }
+
+ inline Address MarkbitIndexToAddress(uint32_t index) const {
+ return this->address() + (index << kTaggedSizeLog2);
+ }
+
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
- size_t buckets() const { return SlotSet::BucketsForSize(size()); }
-
Address area_start() const { return area_start_; }
Address area_end() const { return area_end_; }
@@ -123,6 +146,16 @@ class BasicMemoryChunk {
return static_cast<size_t>(area_end() - area_start());
}
+ Heap* heap() const {
+ DCHECK_NOT_NULL(heap_);
+ return heap_;
+ }
+
+ // Gets the chunk's owner or null if the space has been detached.
+ BaseSpace* owner() const { return owner_; }
+
+ void set_owner(BaseSpace* space) { owner_ = space; }
+
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
void SetFlag(Flag flag) {
if (access_mode == AccessMode::NON_ATOMIC) {
@@ -155,9 +188,69 @@ class BasicMemoryChunk {
}
}
+ using Flags = uintptr_t;
+
+ static const Flags kPointersToHereAreInterestingMask =
+ POINTERS_TO_HERE_ARE_INTERESTING;
+
+ static const Flags kPointersFromHereAreInterestingMask =
+ POINTERS_FROM_HERE_ARE_INTERESTING;
+
+ static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
+
+ static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
+
+ static const Flags kIsLargePageMask = LARGE_PAGE;
+
+ static const Flags kSkipEvacuationSlotsRecordingMask =
+ kEvacuationCandidateMask | kIsInYoungGenerationMask;
+
bool InReadOnlySpace() const { return IsFlagSet(READ_ONLY_HEAP); }
- // TODO(v8:7464): Add methods for down casting to MemoryChunk.
+ bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
+
+ void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
+
+ bool CanAllocate() {
+ return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
+ }
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ bool IsEvacuationCandidate() {
+ DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
+ IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
+ return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
+ }
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ bool ShouldSkipEvacuationSlotRecording() {
+ uintptr_t flags = GetFlags<access_mode>();
+ return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
+ ((flags & COMPACTION_WAS_ABORTED) == 0);
+ }
+
+ Executability executable() {
+ return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+ }
+
+ bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
+ bool IsToPage() const { return IsFlagSet(TO_PAGE); }
+ bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
+ bool InYoungGeneration() const {
+ return (GetFlags() & kIsInYoungGenerationMask) != 0;
+ }
+ bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
+ bool InNewLargeObjectSpace() const {
+ return InYoungGeneration() && IsLargePage();
+ }
+ bool InOldSpace() const;
+ V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
+
+ bool IsWritable() const {
+ // If this is a read-only space chunk but heap_ is non-null, it has not yet
+ // been sealed and can be written to.
+ return !InReadOnlySpace() || heap_ != nullptr;
+ }
bool Contains(Address addr) const {
return addr >= area_start() && addr < area_end();
@@ -171,23 +264,92 @@ class BasicMemoryChunk {
void ReleaseMarkingBitmap();
+ static BasicMemoryChunk* Initialize(Heap* heap, Address base, size_t size,
+ Address area_start, Address area_end,
+ BaseSpace* owner,
+ VirtualMemory reservation);
+
+ size_t wasted_memory() { return wasted_memory_; }
+ void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
+ size_t allocated_bytes() { return allocated_bytes_; }
+
static const intptr_t kSizeOffset = 0;
static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
static const intptr_t kHeapOffset = kMarkBitmapOffset + kSystemPointerSize;
static const intptr_t kAreaStartOffset = kHeapOffset + kSystemPointerSize;
static const intptr_t kAreaEndOffset = kAreaStartOffset + kSystemPointerSize;
- static const intptr_t kOldToNewSlotSetOffset =
- kAreaEndOffset + kSystemPointerSize;
static const size_t kHeaderSize =
- kSizeOffset + kSizetSize // size_t size
- + kUIntptrSize // uintptr_t flags_
- + kSystemPointerSize // Bitmap* marking_bitmap_
- + kSystemPointerSize // Heap* heap_
- + kSystemPointerSize // Address area_start_
- + kSystemPointerSize // Address area_end_
- + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES; // SlotSet* array
+ kSizeOffset + kSizetSize // size_t size
+ + kUIntptrSize // uintptr_t flags_
+ + kSystemPointerSize // Bitmap* marking_bitmap_
+ + kSystemPointerSize // Heap* heap_
+ + kSystemPointerSize // Address area_start_
+ + kSystemPointerSize // Address area_end_
+ + kSizetSize // size_t allocated_bytes_
+ + kSizetSize // size_t wasted_memory_
+ + kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
+ + kSystemPointerSize // Address owner_
+ + 3 * kSystemPointerSize; // VirtualMemory reservation_
+
+ // Only works if the pointer is in the first kPageSize of the MemoryChunk.
+ static BasicMemoryChunk* FromAddress(Address a) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
+ return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(a));
+ }
+
+ // Only works if the object is in the first kPageSize of the MemoryChunk.
+ static BasicMemoryChunk* FromHeapObject(HeapObject o) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
+ return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(o.ptr()));
+ }
+
+ template <AccessMode mode>
+ ConcurrentBitmap<mode>* marking_bitmap() const {
+ return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
+ }
+
+ Address HighWaterMark() { return address() + high_water_mark_; }
+
+ static inline void UpdateHighWaterMark(Address mark) {
+ if (mark == kNullAddress) return;
+ // Need to subtract one from the mark because when a chunk is full the
+ // top points to the next address after the chunk, which effectively belongs
+ // to another chunk. See the comment to Page::FromAllocationAreaAddress.
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromAddress(mark - 1);
+ intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
+ intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
+ while ((new_mark > old_mark) &&
+ !chunk->high_water_mark_.compare_exchange_weak(
+ old_mark, new_mark, std::memory_order_acq_rel)) {
+ }
+ }
+
+ VirtualMemory* reserved_memory() { return &reservation_; }
+
+ void ResetAllocationStatistics() {
+ allocated_bytes_ = area_size();
+ wasted_memory_ = 0;
+ }
+
+ void IncreaseAllocatedBytes(size_t bytes) {
+ DCHECK_LE(bytes, area_size());
+ allocated_bytes_ += bytes;
+ }
+
+ void DecreaseAllocatedBytes(size_t bytes) {
+ DCHECK_LE(bytes, area_size());
+ DCHECK_GE(allocated_bytes(), bytes);
+ allocated_bytes_ -= bytes;
+ }
+
+#ifdef THREAD_SANITIZER
+ // Perform a dummy acquire load to tell TSAN that there is no data race in
+ // mark-bit initialization. See MemoryChunk::Initialize for the corresponding
+ // release store.
+ void SynchronizedHeapLoad();
+#endif
protected:
// Overall size of the chunk, including the header and guards.
@@ -207,12 +369,31 @@ class BasicMemoryChunk {
Address area_start_;
Address area_end_;
- // A single slot set for small pages (of size kPageSize) or an array of slot
- // set for large pages. In the latter case the number of entries in the array
- // is ceil(size() / kPageSize).
- SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
+ // Bytes allocated on the page, which includes all objects on the page and the
+ // linear allocation area.
+ size_t allocated_bytes_;
+ // Freed memory that was not added to the free list.
+ size_t wasted_memory_;
+
+ // Assuming the initial allocation on a page is sequential, this tracks the
+ // highest number of bytes ever allocated on the page.
+ std::atomic<intptr_t> high_water_mark_;
+
+ // The space owning this memory chunk.
+ std::atomic<BaseSpace*> owner_;
+
+ // If the chunk needs to remember its memory reservation, it is stored here.
+ VirtualMemory reservation_;
friend class BasicMemoryChunkValidator;
+ friend class ConcurrentMarkingState;
+ friend class MajorMarkingState;
+ friend class MajorAtomicMarkingState;
+ friend class MajorNonAtomicMarkingState;
+ friend class MemoryAllocator;
+ friend class MinorMarkingState;
+ friend class MinorNonAtomicMarkingState;
+ friend class PagedSpace;
};
STATIC_ASSERT(std::is_standard_layout<BasicMemoryChunk>::value);
@@ -227,8 +408,6 @@ class BasicMemoryChunkValidator {
offsetof(BasicMemoryChunk, marking_bitmap_));
STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
offsetof(BasicMemoryChunk, heap_));
- STATIC_ASSERT(BasicMemoryChunk::kOldToNewSlotSetOffset ==
- offsetof(BasicMemoryChunk, slot_set_));
};
} // namespace internal
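
UpdateHighWaterMark() above raises the per-chunk watermark monotonically: a relaxed load followed by a compare_exchange_weak retry loop, so concurrent updaters can only push the mark upward. A standalone illustration of the same lock-free maximum update (thread counts, values, and memory orders here are illustrative, not taken from V8):

#include <atomic>
#include <cstdint>
#include <iostream>
#include <thread>
#include <vector>

// Raise `mark` to `candidate` if the candidate is higher; otherwise lose the
// race gracefully. Same shape as UpdateHighWaterMark() in the hunk above.
void UpdateMax(std::atomic<std::intptr_t>& mark, std::intptr_t candidate) {
  std::intptr_t old_mark = mark.load(std::memory_order_relaxed);
  while (candidate > old_mark &&
         !mark.compare_exchange_weak(old_mark, candidate,
                                     std::memory_order_acq_rel)) {
    // compare_exchange_weak reloaded old_mark; retry while candidate is higher.
  }
}

int main() {
  std::atomic<std::intptr_t> high_water_mark{0};
  std::vector<std::thread> threads;
  for (int t = 0; t < 4; ++t) {
    threads.emplace_back([&high_water_mark, t] {
      for (std::intptr_t i = 0; i < 10000; ++i) UpdateMax(high_water_mark, i + t);
    });
  }
  for (auto& thread : threads) thread.join();
  std::cout << high_water_mark.load() << "\n";  // prints 10002
}
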
diff --git a/chromium/v8/src/heap/code-object-registry.cc b/chromium/v8/src/heap/code-object-registry.cc
new file mode 100644
index 00000000000..ebaa29fbaeb
--- /dev/null
+++ b/chromium/v8/src/heap/code-object-registry.cc
@@ -0,0 +1,75 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/code-object-registry.h"
+
+#include <algorithm>
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
+ auto result = code_object_registry_newly_allocated_.insert(code);
+ USE(result);
+ DCHECK(result.second);
+}
+
+void CodeObjectRegistry::RegisterAlreadyExistingCodeObject(Address code) {
+ code_object_registry_already_existing_.push_back(code);
+}
+
+void CodeObjectRegistry::Clear() {
+ code_object_registry_already_existing_.clear();
+ code_object_registry_newly_allocated_.clear();
+}
+
+void CodeObjectRegistry::Finalize() {
+ code_object_registry_already_existing_.shrink_to_fit();
+}
+
+bool CodeObjectRegistry::Contains(Address object) const {
+ return (code_object_registry_newly_allocated_.find(object) !=
+ code_object_registry_newly_allocated_.end()) ||
+ (std::binary_search(code_object_registry_already_existing_.begin(),
+ code_object_registry_already_existing_.end(),
+ object));
+}
+
+Address CodeObjectRegistry::GetCodeObjectStartFromInnerAddress(
+ Address address) const {
+ // Let's first find the object which comes right before address in the vector
+ // of already existing code objects.
+ Address already_existing_set_ = 0;
+ Address newly_allocated_set_ = 0;
+ if (!code_object_registry_already_existing_.empty()) {
+ auto it =
+ std::upper_bound(code_object_registry_already_existing_.begin(),
+ code_object_registry_already_existing_.end(), address);
+ if (it != code_object_registry_already_existing_.begin()) {
+ already_existing_set_ = *(--it);
+ }
+ }
+
+ // Next, let's find the object which comes right before address in the set
+ // of newly allocated code objects.
+ if (!code_object_registry_newly_allocated_.empty()) {
+ auto it = code_object_registry_newly_allocated_.upper_bound(address);
+ if (it != code_object_registry_newly_allocated_.begin()) {
+ newly_allocated_set_ = *(--it);
+ }
+ }
+
+ // The code object which contains the address has to be in one of the two
+ // data structures.
+ DCHECK(already_existing_set_ != 0 || newly_allocated_set_ != 0);
+
+ // The candidate closest to the given address is the start of the code object.
+ return already_existing_set_ > newly_allocated_set_ ? already_existing_set_
+ : newly_allocated_set_;
+}
+
+} // namespace internal
+} // namespace v8
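
GetCodeObjectStartFromInnerAddress() above finds, in each of the two containers, the greatest registered start address that does not exceed the query address, using upper_bound and stepping back one element. A self-contained sketch of that lookup over a single sorted vector (the addresses are made up):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

using Address = std::uintptr_t;

// Given a sorted vector of object start addresses, return the greatest start
// that is <= `inner`: the same upper_bound-then-step-back pattern used by
// GetCodeObjectStartFromInnerAddress() above.
Address ObjectStartFromInnerAddress(const std::vector<Address>& starts,
                                    Address inner) {
  auto it = std::upper_bound(starts.begin(), starts.end(), inner);
  assert(it != starts.begin());  // `inner` must not precede the first object
  return *(--it);
}

int main() {
  const std::vector<Address> starts = {0x1000, 0x1400, 0x2000};  // kept sorted
  assert(ObjectStartFromInnerAddress(starts, 0x1408) == 0x1400);  // inner pointer
  assert(ObjectStartFromInnerAddress(starts, 0x2000) == 0x2000);  // exact start
}
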
diff --git a/chromium/v8/src/heap/code-object-registry.h b/chromium/v8/src/heap/code-object-registry.h
new file mode 100644
index 00000000000..beab1766256
--- /dev/null
+++ b/chromium/v8/src/heap/code-object-registry.h
@@ -0,0 +1,38 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CODE_OBJECT_REGISTRY_H_
+#define V8_HEAP_CODE_OBJECT_REGISTRY_H_
+
+#include <set>
+#include <vector>
+
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// The CodeObjectRegistry holds all start addresses of code objects of a given
+// MemoryChunk. Each MemoryChunk owns a separate CodeObjectRegistry. The
+// CodeObjectRegistry allows fast lookup from an inner pointer of a code object
+// to the actual code object.
+class V8_EXPORT_PRIVATE CodeObjectRegistry {
+ public:
+ void RegisterNewlyAllocatedCodeObject(Address code);
+ void RegisterAlreadyExistingCodeObject(Address code);
+ void Clear();
+ void Finalize();
+ bool Contains(Address code) const;
+ Address GetCodeObjectStartFromInnerAddress(Address address) const;
+
+ private:
+ std::vector<Address> code_object_registry_already_existing_;
+ std::set<Address> code_object_registry_newly_allocated_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CODE_OBJECT_REGISTRY_H_
diff --git a/chromium/v8/src/heap/code-stats.cc b/chromium/v8/src/heap/code-stats.cc
index 27b1315c6b5..6e685c47b38 100644
--- a/chromium/v8/src/heap/code-stats.cc
+++ b/chromium/v8/src/heap/code-stats.cc
@@ -7,7 +7,7 @@
#include "src/codegen/code-comments.h"
#include "src/codegen/reloc-info.h"
#include "src/heap/large-spaces.h"
-#include "src/heap/spaces-inl.h" // For PagedSpaceObjectIterator.
+#include "src/heap/paged-spaces-inl.h" // For PagedSpaceObjectIterator.
#include "src/objects/objects-inl.h"
namespace v8 {
diff --git a/chromium/v8/src/heap/combined-heap.cc b/chromium/v8/src/heap/combined-heap.cc
index 0416bb62a42..3079e600f22 100644
--- a/chromium/v8/src/heap/combined-heap.cc
+++ b/chromium/v8/src/heap/combined-heap.cc
@@ -10,7 +10,8 @@ namespace internal {
CombinedHeapObjectIterator::CombinedHeapObjectIterator(
Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering)
- : heap_iterator_(heap, filtering),
+ : safepoint_scope_(heap),
+ heap_iterator_(heap, filtering),
ro_heap_iterator_(heap->isolate()->read_only_heap()) {}
HeapObject CombinedHeapObjectIterator::Next() {
diff --git a/chromium/v8/src/heap/combined-heap.h b/chromium/v8/src/heap/combined-heap.h
index d7e58dfb87c..55664114d39 100644
--- a/chromium/v8/src/heap/combined-heap.h
+++ b/chromium/v8/src/heap/combined-heap.h
@@ -7,6 +7,7 @@
#include "src/heap/heap.h"
#include "src/heap/read-only-heap.h"
+#include "src/heap/safepoint.h"
#include "src/heap/third-party/heap-api.h"
#include "src/objects/objects.h"
@@ -25,6 +26,7 @@ class V8_EXPORT_PRIVATE CombinedHeapObjectIterator final {
HeapObject Next();
private:
+ SafepointScope safepoint_scope_;
HeapObjectIterator heap_iterator_;
ReadOnlyHeapObjectIterator ro_heap_iterator_;
};
diff --git a/chromium/v8/src/heap/concurrent-allocator-inl.h b/chromium/v8/src/heap/concurrent-allocator-inl.h
index 65f1be313f8..15a4ef5f89c 100644
--- a/chromium/v8/src/heap/concurrent-allocator-inl.h
+++ b/chromium/v8/src/heap/concurrent-allocator-inl.h
@@ -8,8 +8,8 @@
#include "include/v8-internal.h"
#include "src/common/globals.h"
#include "src/heap/concurrent-allocator.h"
-
#include "src/heap/heap.h"
+#include "src/heap/incremental-marking.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"
@@ -23,15 +23,7 @@ AllocationResult ConcurrentAllocator::Allocate(int object_size,
// TODO(dinfuehr): Add support for allocation observers
CHECK(FLAG_concurrent_allocation);
if (object_size > kMaxLabObjectSize) {
- auto result = space_->SlowGetLinearAllocationAreaBackground(
- local_heap_, object_size, object_size, alignment, origin);
-
- if (result) {
- HeapObject object = HeapObject::FromAddress(result->first);
- return AllocationResult(object);
- } else {
- return AllocationResult::Retry(OLD_SPACE);
- }
+ return AllocateOutsideLab(object_size, alignment, origin);
}
return AllocateInLab(object_size, alignment, origin);
@@ -69,6 +61,12 @@ bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
if (!result) return false;
+ if (local_heap_->heap()->incremental_marking()->black_allocation()) {
+ Address top = result->first;
+ Address limit = top + result->second;
+ Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
+ }
+
HeapObject object = HeapObject::FromAddress(result->first);
LocalAllocationBuffer saved_lab = std::move(lab_);
lab_ = LocalAllocationBuffer::FromResult(
diff --git a/chromium/v8/src/heap/concurrent-allocator.cc b/chromium/v8/src/heap/concurrent-allocator.cc
index 7fd29110215..9625bdb13aa 100644
--- a/chromium/v8/src/heap/concurrent-allocator.cc
+++ b/chromium/v8/src/heap/concurrent-allocator.cc
@@ -4,12 +4,52 @@
#include "src/heap/concurrent-allocator.h"
+#include "src/execution/isolate.h"
+#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator-inl.h"
#include "src/heap/local-heap.h"
+#include "src/heap/marking.h"
namespace v8 {
namespace internal {
+void StressConcurrentAllocatorTask::RunInternal() {
+ Heap* heap = isolate_->heap();
+ LocalHeap local_heap(heap);
+ ConcurrentAllocator* allocator = local_heap.old_space_allocator();
+
+ const int kNumIterations = 2000;
+ const int kObjectSize = 10 * kTaggedSize;
+ const int kLargeObjectSize = 8 * KB;
+
+ for (int i = 0; i < kNumIterations; i++) {
+ Address address = allocator->AllocateOrFail(
+ kObjectSize, AllocationAlignment::kWordAligned,
+ AllocationOrigin::kRuntime);
+ heap->CreateFillerObjectAtBackground(
+ address, kObjectSize, ClearFreedMemoryMode::kDontClearFreedMemory);
+ address = allocator->AllocateOrFail(kLargeObjectSize,
+ AllocationAlignment::kWordAligned,
+ AllocationOrigin::kRuntime);
+ heap->CreateFillerObjectAtBackground(
+ address, kLargeObjectSize, ClearFreedMemoryMode::kDontClearFreedMemory);
+ if (i % 10 == 0) {
+ local_heap.Safepoint();
+ }
+ }
+
+ Schedule(isolate_);
+}
+
+// static
+void StressConcurrentAllocatorTask::Schedule(Isolate* isolate) {
+ CHECK(FLAG_local_heaps && FLAG_concurrent_allocation);
+ auto task = std::make_unique<StressConcurrentAllocatorTask>(isolate);
+ const double kDelayInSeconds = 0.1;
+ V8::GetCurrentPlatform()->CallDelayedOnWorkerThread(std::move(task),
+ kDelayInSeconds);
+}
+
Address ConcurrentAllocator::PerformCollectionAndAllocateAgain(
int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
Heap* heap = local_heap_->heap();
@@ -39,5 +79,43 @@ void ConcurrentAllocator::MakeLinearAllocationAreaIterable() {
lab_.MakeIterable();
}
+void ConcurrentAllocator::MarkLinearAllocationAreaBlack() {
+ Address top = lab_.top();
+ Address limit = lab_.limit();
+
+ if (top != kNullAddress && top != limit) {
+ Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
+ }
+}
+
+void ConcurrentAllocator::UnmarkLinearAllocationArea() {
+ Address top = lab_.top();
+ Address limit = lab_.limit();
+
+ if (top != kNullAddress && top != limit) {
+ Page::FromAllocationAreaAddress(top)->DestroyBlackAreaBackground(top,
+ limit);
+ }
+}
+
+AllocationResult ConcurrentAllocator::AllocateOutsideLab(
+ int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
+ auto result = space_->SlowGetLinearAllocationAreaBackground(
+ local_heap_, object_size, object_size, alignment, origin);
+
+ if (result) {
+ HeapObject object = HeapObject::FromAddress(result->first);
+
+ if (local_heap_->heap()->incremental_marking()->black_allocation()) {
+ local_heap_->heap()->incremental_marking()->MarkBlackBackground(
+ object, object_size);
+ }
+
+ return AllocationResult(object);
+ } else {
+ return AllocationResult::Retry(OLD_SPACE);
+ }
+}
+
} // namespace internal
} // namespace v8
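
The split above between Allocate(), AllocateInLab() and AllocateOutsideLab() is a classic TLAB scheme: small objects are bump-allocated from a thread-local buffer, while oversized objects or an exhausted buffer fall back to a slower shared path. A toy standalone version of that decision (all names, sizes, and the malloc fallback are invented for illustration):

#include <cstddef>
#include <cstdlib>
#include <iostream>

// A toy thread-local allocation buffer: bump-pointer allocation inside the
// buffer, with a fallback path for oversized objects or an exhausted buffer.
class ToyLab {
 public:
  ToyLab(unsigned char* start, std::size_t size)
      : top_(start), limit_(start + size) {}

  void* TryAllocate(std::size_t bytes) {
    if (static_cast<std::size_t>(limit_ - top_) < bytes) return nullptr;
    void* result = top_;
    top_ += bytes;  // bump the pointer
    return result;
  }

 private:
  unsigned char* top_;
  unsigned char* limit_;
};

constexpr std::size_t kMaxToyLabObjectSize = 2 * 1024;

void* Allocate(ToyLab& lab, std::size_t bytes) {
  if (bytes > kMaxToyLabObjectSize) return std::malloc(bytes);  // too large for a LAB
  if (void* fast = lab.TryAllocate(bytes)) return fast;         // fast path
  return std::malloc(bytes);                                    // LAB exhausted
}

int main() {
  unsigned char buffer[4096];
  ToyLab lab(buffer, sizeof(buffer));
  void* small_object = Allocate(lab, 64);    // served from the LAB
  void* large_object = Allocate(lab, 8192);  // bypasses the LAB
  std::cout << (small_object != nullptr) << (large_object != nullptr) << "\n";
  std::free(large_object);  // only the slow-path allocation is heap-owned
}
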
diff --git a/chromium/v8/src/heap/concurrent-allocator.h b/chromium/v8/src/heap/concurrent-allocator.h
index f165d009620..795e37d339c 100644
--- a/chromium/v8/src/heap/concurrent-allocator.h
+++ b/chromium/v8/src/heap/concurrent-allocator.h
@@ -8,12 +8,27 @@
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
+#include "src/tasks/cancelable-task.h"
namespace v8 {
namespace internal {
class LocalHeap;
+class StressConcurrentAllocatorTask : public CancelableTask {
+ public:
+ explicit StressConcurrentAllocatorTask(Isolate* isolate)
+ : CancelableTask(isolate), isolate_(isolate) {}
+
+ void RunInternal() override;
+
+ // Schedules task on background thread
+ static void Schedule(Isolate* isolate);
+
+ private:
+ Isolate* isolate_;
+};
+
// Concurrent allocator for allocation from background threads/tasks.
// Allocations are served from a TLAB if possible.
class ConcurrentAllocator {
@@ -36,6 +51,8 @@ class ConcurrentAllocator {
void FreeLinearAllocationArea();
void MakeLinearAllocationAreaIterable();
+ void MarkLinearAllocationAreaBlack();
+ void UnmarkLinearAllocationArea();
private:
inline bool EnsureLab(AllocationOrigin origin);
@@ -43,6 +60,9 @@ class ConcurrentAllocator {
AllocationAlignment alignment,
AllocationOrigin origin);
+ V8_EXPORT_PRIVATE AllocationResult AllocateOutsideLab(
+ int object_size, AllocationAlignment alignment, AllocationOrigin origin);
+
V8_EXPORT_PRIVATE Address PerformCollectionAndAllocateAgain(
int object_size, AllocationAlignment alignment, AllocationOrigin origin);
diff --git a/chromium/v8/src/heap/concurrent-marking.cc b/chromium/v8/src/heap/concurrent-marking.cc
index 7b9385b441f..aef84c0637d 100644
--- a/chromium/v8/src/heap/concurrent-marking.cc
+++ b/chromium/v8/src/heap/concurrent-marking.cc
@@ -41,10 +41,10 @@ class ConcurrentMarkingState final
explicit ConcurrentMarkingState(MemoryChunkDataMap* memory_chunk_data)
: memory_chunk_data_(memory_chunk_data) {}
- ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) {
+ ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const BasicMemoryChunk* chunk) {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
- MemoryChunk::kMarkBitmapOffset);
+ BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::ATOMIC>();
}
@@ -298,7 +298,7 @@ class ConcurrentMarkingVisitor final
#ifdef THREAD_SANITIZER
// This is needed because TSAN does not process the memory fence
// emitted after page initialization.
- MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
+ BasicMemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
#endif
}
diff --git a/chromium/v8/src/heap/cppgc-js/cpp-heap.cc b/chromium/v8/src/heap/cppgc-js/cpp-heap.cc
new file mode 100644
index 00000000000..b9723ddb656
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -0,0 +1,141 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc-js/cpp-heap.h"
+
+#include "include/cppgc/platform.h"
+#include "include/v8-platform.h"
+#include "include/v8.h"
+#include "src/base/macros.h"
+#include "src/base/platform/time.h"
+#include "src/flags/flags.h"
+#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/object-allocator.h"
+#include "src/heap/cppgc/prefinalizer-handler.h"
+#include "src/heap/cppgc/stats-collector.h"
+#include "src/heap/cppgc/sweeper.h"
+#include "src/heap/marking-worklist.h"
+#include "src/heap/sweeper.h"
+#include "src/init/v8.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class CppgcPlatformAdapter final : public cppgc::Platform {
+ public:
+ explicit CppgcPlatformAdapter(v8::Isolate* isolate)
+ : platform_(V8::GetCurrentPlatform()), isolate_(isolate) {}
+
+ CppgcPlatformAdapter(const CppgcPlatformAdapter&) = delete;
+ CppgcPlatformAdapter& operator=(const CppgcPlatformAdapter&) = delete;
+
+ PageAllocator* GetPageAllocator() final {
+ return platform_->GetPageAllocator();
+ }
+
+ double MonotonicallyIncreasingTime() final {
+ return platform_->MonotonicallyIncreasingTime();
+ }
+
+ std::shared_ptr<TaskRunner> GetForegroundTaskRunner() final {
+ return platform_->GetForegroundTaskRunner(isolate_);
+ }
+
+ std::unique_ptr<JobHandle> PostJob(TaskPriority priority,
+ std::unique_ptr<JobTask> job_task) final {
+ return platform_->PostJob(priority, std::move(job_task));
+ }
+
+ private:
+ v8::Platform* platform_;
+ v8::Isolate* isolate_;
+};
+
+class UnifiedHeapMarker : public cppgc::internal::Marker {
+ public:
+ explicit UnifiedHeapMarker(cppgc::internal::HeapBase& heap);
+
+ void AddObject(void*);
+
+ // TODO(chromium:1056170): Implement unified heap specific
+ // CreateMutatorThreadMarkingVisitor and AdvanceMarkingWithDeadline.
+};
+
+UnifiedHeapMarker::UnifiedHeapMarker(cppgc::internal::HeapBase& heap)
+ : cppgc::internal::Marker(heap) {}
+
+void UnifiedHeapMarker::AddObject(void* object) {
+ auto& header = cppgc::internal::HeapObjectHeader::FromPayload(object);
+ marking_visitor_->MarkObject(header);
+}
+
+} // namespace
+
+CppHeap::CppHeap(v8::Isolate* isolate, size_t custom_spaces)
+ : cppgc::internal::HeapBase(std::make_shared<CppgcPlatformAdapter>(isolate),
+ custom_spaces) {
+ CHECK(!FLAG_incremental_marking_wrappers);
+}
+
+void CppHeap::RegisterV8References(
+ const std::vector<std::pair<void*, void*> >& embedder_fields) {
+ DCHECK(marker_);
+ for (auto& tuple : embedder_fields) {
+ // First field points to type.
+ // Second field points to object.
+ static_cast<UnifiedHeapMarker*>(marker_.get())->AddObject(tuple.second);
+ }
+ marking_done_ = false;
+}
+
+void CppHeap::TracePrologue(TraceFlags flags) {
+ marker_ = std::make_unique<UnifiedHeapMarker>(AsBase());
+ const UnifiedHeapMarker::MarkingConfig marking_config{
+ UnifiedHeapMarker::MarkingConfig::CollectionType::kMajor,
+ cppgc::Heap::StackState::kNoHeapPointers,
+ UnifiedHeapMarker::MarkingConfig::MarkingType::kAtomic};
+ marker_->StartMarking(marking_config);
+ marking_done_ = false;
+}
+
+bool CppHeap::AdvanceTracing(double deadline_in_ms) {
+ marking_done_ = marker_->AdvanceMarkingWithDeadline(
+ v8::base::TimeDelta::FromMillisecondsD(deadline_in_ms));
+ return marking_done_;
+}
+
+bool CppHeap::IsTracingDone() { return marking_done_; }
+
+void CppHeap::EnterFinalPause(EmbedderStackState stack_state) {
+ const UnifiedHeapMarker::MarkingConfig marking_config{
+ UnifiedHeapMarker::MarkingConfig::CollectionType::kMajor,
+ cppgc::Heap::StackState::kNoHeapPointers,
+ UnifiedHeapMarker::MarkingConfig::MarkingType::kAtomic};
+ marker_->EnterAtomicPause(marking_config);
+}
+
+void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
+ CHECK(marking_done_);
+ marker_->LeaveAtomicPause();
+ {
+ // Prefinalizers are forbidden from allocating objects.
+ cppgc::internal::ObjectAllocator::NoAllocationScope no_allocation_scope_(
+ object_allocator_);
+ marker()->ProcessWeakness();
+ prefinalizer_handler()->InvokePreFinalizers();
+ }
+ {
+ NoGCScope no_gc(*this);
+ sweeper().Start(cppgc::internal::Sweeper::Config::kAtomic);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/heap/cppgc-js/cpp-heap.h b/chromium/v8/src/heap/cppgc-js/cpp-heap.h
new file mode 100644
index 00000000000..469bee5e882
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc-js/cpp-heap.h
@@ -0,0 +1,42 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_JS_CPP_HEAP_H_
+#define V8_HEAP_CPPGC_JS_CPP_HEAP_H_
+
+#include "include/v8.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/heap-base.h"
+
+namespace v8 {
+
+class Isolate;
+
+namespace internal {
+
+// A C++ heap implementation used with V8 to implement unified heap.
+class V8_EXPORT_PRIVATE CppHeap final : public cppgc::internal::HeapBase,
+ public v8::EmbedderHeapTracer {
+ public:
+ CppHeap(v8::Isolate* isolate, size_t custom_spaces);
+
+ HeapBase& AsBase() { return *this; }
+ const HeapBase& AsBase() const { return *this; }
+
+ void RegisterV8References(
+ const std::vector<std::pair<void*, void*> >& embedder_fields) final;
+ void TracePrologue(TraceFlags flags) final;
+ bool AdvanceTracing(double deadline_in_ms) final;
+ bool IsTracingDone() final;
+ void TraceEpilogue(TraceSummary* trace_summary) final;
+ void EnterFinalPause(EmbedderStackState stack_state) final;
+
+ private:
+ bool marking_done_ = false;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CPPGC_JS_CPP_HEAP_H_
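
CppHeap plugs into V8 as an EmbedderHeapTracer, so V8's GC drives it through a fixed phase order: TracePrologue, repeated AdvanceTracing steps (interleaved with RegisterV8References) until tracing reports completion, EnterFinalPause, then TraceEpilogue. A hypothetical stand-in that only demonstrates this call order; it is not the v8::EmbedderHeapTracer interface:

#include <iostream>

// Stand-in class whose only purpose is to log the phase order described above.
class PhaseLogger {
 public:
  void TracePrologue() { std::cout << "TracePrologue\n"; }
  bool AdvanceTracing(double deadline_in_ms) {
    std::cout << "AdvanceTracing(" << deadline_in_ms << " ms)\n";
    return ++steps_ >= 3;  // pretend marking finishes after three increments
  }
  void EnterFinalPause() { std::cout << "EnterFinalPause\n"; }
  void TraceEpilogue() { std::cout << "TraceEpilogue\n"; }

 private:
  int steps_ = 0;
};

int main() {
  PhaseLogger tracer;
  tracer.TracePrologue();
  while (!tracer.AdvanceTracing(1.0)) {
    // The embedder may also receive RegisterV8References() between steps.
  }
  tracer.EnterFinalPause();
  tracer.TraceEpilogue();
}
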
diff --git a/chromium/v8/src/heap/cppgc/allocation.cc b/chromium/v8/src/heap/cppgc/allocation.cc
index 32f917da5ac..04bcea82d03 100644
--- a/chromium/v8/src/heap/cppgc/allocation.cc
+++ b/chromium/v8/src/heap/cppgc/allocation.cc
@@ -6,7 +6,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
-#include "src/heap/cppgc/heap-inl.h"
+#include "src/heap/cppgc/object-allocator-inl.h"
namespace cppgc {
namespace internal {
@@ -15,19 +15,17 @@ STATIC_ASSERT(api_constants::kLargeObjectSizeThreshold ==
kLargeObjectSizeThreshold);
// static
-void* MakeGarbageCollectedTraitInternal::Allocate(cppgc::Heap* heap,
- size_t size,
- GCInfoIndex index) {
- DCHECK_NOT_NULL(heap);
- return Heap::From(heap)->Allocate(size, index);
+void* MakeGarbageCollectedTraitInternal::Allocate(
+ cppgc::AllocationHandle& handle, size_t size, GCInfoIndex index) {
+ return static_cast<ObjectAllocator&>(handle).AllocateObject(size, index);
}
// static
void* MakeGarbageCollectedTraitInternal::Allocate(
- cppgc::Heap* heap, size_t size, GCInfoIndex index,
+ cppgc::AllocationHandle& handle, size_t size, GCInfoIndex index,
CustomSpaceIndex space_index) {
- DCHECK_NOT_NULL(heap);
- return Heap::From(heap)->Allocate(size, index, space_index);
+ return static_cast<ObjectAllocator&>(handle).AllocateObject(size, index,
+ space_index);
}
} // namespace internal
diff --git a/chromium/v8/src/heap/cppgc/caged-heap-local-data.cc b/chromium/v8/src/heap/cppgc/caged-heap-local-data.cc
new file mode 100644
index 00000000000..55ededdc087
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/caged-heap-local-data.cc
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/caged-heap-local-data.h"
+
+#include <algorithm>
+#include <type_traits>
+
+#include "include/cppgc/platform.h"
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+#if defined(CPPGC_YOUNG_GENERATION)
+
+static_assert(
+ std::is_trivially_default_constructible<AgeTable>::value,
+ "To support lazy committing, AgeTable must be trivially constructible");
+
+void AgeTable::Reset(PageAllocator* allocator) {
+ // TODO(chromium:1029379): Consider MADV_DONTNEED instead of MADV_FREE on
+ // POSIX platforms.
+ std::fill(table_.begin(), table_.end(), Age::kOld);
+ const uintptr_t begin = RoundUp(reinterpret_cast<uintptr_t>(table_.begin()),
+ allocator->CommitPageSize());
+ const uintptr_t end = RoundDown(reinterpret_cast<uintptr_t>(table_.end()),
+ allocator->CommitPageSize());
+ allocator->DiscardSystemPages(reinterpret_cast<void*>(begin), end - begin);
+}
+
+#endif
+
+} // namespace internal
+} // namespace cppgc
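
AgeTable::Reset() above discards only whole commit pages by rounding the table's begin address up and its end address down to page boundaries before calling DiscardSystemPages(). A self-contained sketch of that inward rounding, assuming a power-of-two page size (the 4 KiB value and the addresses are illustrative):

#include <cassert>
#include <cstdint>

constexpr std::uintptr_t kDemoCommitPageSize = 4096;

// Power-of-two rounding helpers, matching the RoundUp/RoundDown usage above.
constexpr std::uintptr_t RoundUp(std::uintptr_t x, std::uintptr_t n) {
  return (x + n - 1) & ~(n - 1);
}
constexpr std::uintptr_t RoundDown(std::uintptr_t x, std::uintptr_t n) {
  return x & ~(n - 1);
}

int main() {
  const std::uintptr_t table_begin = 0x10010;
  const std::uintptr_t table_end = 0x23f00;

  const std::uintptr_t begin = RoundUp(table_begin, kDemoCommitPageSize);
  const std::uintptr_t end = RoundDown(table_end, kDemoCommitPageSize);

  assert(begin == 0x11000 && end == 0x23000);
  // [begin, end) is page-aligned and safe to hand to a DiscardSystemPages-like
  // call; the partial pages at either end are left untouched.
  assert(end > begin && (end - begin) % kDemoCommitPageSize == 0);
}
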
diff --git a/chromium/v8/src/heap/cppgc/caged-heap.cc b/chromium/v8/src/heap/cppgc/caged-heap.cc
new file mode 100644
index 00000000000..16cb30aa281
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/caged-heap.cc
@@ -0,0 +1,85 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !defined(CPPGC_CAGED_HEAP)
+#error "Must be compiled with caged heap enabled"
+#endif
+
+#include "src/heap/cppgc/caged-heap.h"
+
+#include "include/cppgc/internal/caged-heap-local-data.h"
+#include "src/base/bounded-page-allocator.h"
+#include "src/base/logging.h"
+#include "src/heap/cppgc/globals.h"
+
+namespace cppgc {
+namespace internal {
+
+STATIC_ASSERT(api_constants::kCagedHeapReservationSize ==
+ kCagedHeapReservationSize);
+STATIC_ASSERT(api_constants::kCagedHeapReservationAlignment ==
+ kCagedHeapReservationAlignment);
+
+namespace {
+
+VirtualMemory ReserveCagedHeap(PageAllocator* platform_allocator) {
+ DCHECK_NOT_NULL(platform_allocator);
+ DCHECK_EQ(0u,
+ kCagedHeapReservationSize % platform_allocator->AllocatePageSize());
+
+ static constexpr size_t kAllocationTries = 4;
+ for (size_t i = 0; i < kAllocationTries; ++i) {
+ void* hint = reinterpret_cast<void*>(RoundDown(
+ reinterpret_cast<uintptr_t>(platform_allocator->GetRandomMmapAddr()),
+ kCagedHeapReservationAlignment));
+
+ VirtualMemory memory(platform_allocator, kCagedHeapReservationSize,
+ kCagedHeapReservationAlignment, hint);
+ if (memory.IsReserved()) return memory;
+ }
+
+ FATAL("Fatal process out of memory: Failed to reserve memory for caged heap");
+ UNREACHABLE();
+}
+
+std::unique_ptr<CagedHeap::AllocatorType> CreateBoundedAllocator(
+ v8::PageAllocator* platform_allocator, void* caged_heap_start) {
+ DCHECK(caged_heap_start);
+
+ auto start =
+ reinterpret_cast<CagedHeap::AllocatorType::Address>(caged_heap_start);
+
+ return std::make_unique<CagedHeap::AllocatorType>(
+ platform_allocator, start, kCagedHeapReservationSize, kPageSize);
+}
+
+} // namespace
+
+CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
+ : reserved_area_(ReserveCagedHeap(platform_allocator)) {
+ DCHECK_NOT_NULL(heap_base);
+
+ void* caged_heap_start = reserved_area_.address();
+ CHECK(platform_allocator->SetPermissions(
+ reserved_area_.address(),
+ RoundUp(sizeof(CagedHeapLocalData), platform_allocator->CommitPageSize()),
+ PageAllocator::kReadWrite));
+
+ auto* local_data =
+ new (reserved_area_.address()) CagedHeapLocalData(heap_base);
+#if defined(CPPGC_YOUNG_GENERATION)
+ local_data->age_table.Reset(platform_allocator);
+#endif
+ USE(local_data);
+
+ caged_heap_start = reinterpret_cast<void*>(
+ RoundUp(reinterpret_cast<uintptr_t>(caged_heap_start) +
+ sizeof(CagedHeapLocalData),
+ kPageSize));
+ bounded_allocator_ =
+ CreateBoundedAllocator(platform_allocator, caged_heap_start);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/caged-heap.h b/chromium/v8/src/heap/cppgc/caged-heap.h
new file mode 100644
index 00000000000..7ac34624a0a
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/caged-heap.h
@@ -0,0 +1,53 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_CAGED_HEAP_H_
+#define V8_HEAP_CPPGC_CAGED_HEAP_H_
+
+#include <memory>
+
+#include "include/cppgc/platform.h"
+#include "src/base/bounded-page-allocator.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/virtual-memory.h"
+
+namespace cppgc {
+namespace internal {
+
+struct CagedHeapLocalData;
+class HeapBase;
+
+class CagedHeap final {
+ public:
+ using AllocatorType = v8::base::BoundedPageAllocator;
+
+ CagedHeap(HeapBase* heap, PageAllocator* platform_allocator);
+
+ CagedHeap(const CagedHeap&) = delete;
+ CagedHeap& operator=(const CagedHeap&) = delete;
+
+ AllocatorType& allocator() { return *bounded_allocator_; }
+ const AllocatorType& allocator() const { return *bounded_allocator_; }
+
+ CagedHeapLocalData& local_data() {
+ return *static_cast<CagedHeapLocalData*>(reserved_area_.address());
+ }
+ const CagedHeapLocalData& local_data() const {
+ return *static_cast<CagedHeapLocalData*>(reserved_area_.address());
+ }
+
+ static uintptr_t OffsetFromAddress(void* address) {
+ return reinterpret_cast<uintptr_t>(address) &
+ (kCagedHeapReservationAlignment - 1);
+ }
+
+ private:
+ VirtualMemory reserved_area_;
+ std::unique_ptr<AllocatorType> bounded_allocator_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_CAGED_HEAP_H_
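
Note: OffsetFromAddress() above relies on the reservation alignment being equal to the 4 GB reservation size (see globals.h later in this diff), so masking with (alignment - 1) yields the offset of any address inside the cage. A stand-alone illustration; the cage base below is hypothetical:

    #include <cassert>
    #include <cstdint>

    // 4 GB, mirroring kCagedHeapReservationAlignment from this diff.
    constexpr uint64_t kAlignment = uint64_t{4} * 1024 * 1024 * 1024;

    constexpr uint64_t OffsetFromAddress(uint64_t address) {
      return address & (kAlignment - 1);
    }

    int main() {
      const uint64_t cage_base = 0x7f4000000000;  // hypothetical, 4 GB aligned
      const uint64_t object = cage_base + 0x1234;
      assert(OffsetFromAddress(cage_base) == 0);    // base maps to offset 0
      assert(OffsetFromAddress(object) == 0x1234);  // low 32 bits are the offset
      return 0;
    }
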
diff --git a/chromium/v8/src/heap/cppgc/free-list.cc b/chromium/v8/src/heap/cppgc/free-list.cc
index e5e6b70793d..8f649059323 100644
--- a/chromium/v8/src/heap/cppgc/free-list.cc
+++ b/chromium/v8/src/heap/cppgc/free-list.cc
@@ -68,12 +68,17 @@ void FreeList::Add(FreeList::Block block) {
if (block.size < sizeof(Entry)) {
// Create wasted entry. This can happen when an almost emptied linear
// allocation buffer is returned to the freelist.
+ // This could be SET_MEMORY_ACCESSIBLE. Since there's no payload, the next
+  // operation overwrites the memory completely, and we can thus avoid
+ // zeroing it out.
+ ASAN_UNPOISON_MEMORY_REGION(block.address, sizeof(HeapObjectHeader));
new (block.address) HeapObjectHeader(size, kFreeListGCInfoIndex);
return;
}
- // Make sure the freelist header is writable.
- SET_MEMORY_ACCESIBLE(block.address, sizeof(Entry));
+ // Make sure the freelist header is writable. SET_MEMORY_ACCESSIBLE is not
+ // needed as we write the whole payload of Entry.
+ ASAN_UNPOISON_MEMORY_REGION(block.address, sizeof(Entry));
Entry* entry = new (block.address) Entry(size);
const size_t index = BucketIndexForSize(static_cast<uint32_t>(size));
entry->Link(&free_list_heads_[index]);
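
Note: the hunk above distinguishes two cases when a block is returned to the free list: blocks smaller than a full Entry only receive a "wasted" HeapObjectHeader, while larger blocks become an Entry linked into a size bucket. A stand-alone sketch of that threshold; the size constant is hypothetical, not the real sizeof value:

    #include <cstddef>
    #include <cstdio>

    constexpr size_t kEntrySize = 16;  // hypothetical sizeof(FreeList::Entry)

    // Mirrors the branch at the top of FreeList::Add().
    const char* ClassifyFreedBlock(size_t block_size) {
      return block_size < kEntrySize ? "wasted header only (not linked)"
                                     : "free-list Entry (linked into a bucket)";
    }

    int main() {
      std::printf("8 bytes  -> %s\n", ClassifyFreedBlock(8));
      std::printf("64 bytes -> %s\n", ClassifyFreedBlock(64));
    }
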
diff --git a/chromium/v8/src/heap/cppgc/garbage-collector.h b/chromium/v8/src/heap/cppgc/garbage-collector.h
new file mode 100644
index 00000000000..6c906fd501a
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/garbage-collector.h
@@ -0,0 +1,56 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_GARBAGE_COLLECTOR_H_
+#define V8_HEAP_CPPGC_GARBAGE_COLLECTOR_H_
+
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/sweeper.h"
+
+namespace cppgc {
+namespace internal {
+
+// GC interface that allows abstraction over the actual GC invocation. This is
+// needed to mock/fake GC for testing.
+class GarbageCollector {
+ public:
+ struct Config {
+ using CollectionType = Marker::MarkingConfig::CollectionType;
+ using StackState = cppgc::Heap::StackState;
+ using MarkingType = Marker::MarkingConfig::MarkingType;
+ using SweepingType = Sweeper::Config;
+
+ static constexpr Config ConservativeAtomicConfig() {
+ return {CollectionType::kMajor, StackState::kMayContainHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ static constexpr Config PreciseAtomicConfig() {
+ return {CollectionType::kMajor, StackState::kNoHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ static constexpr Config MinorPreciseAtomicConfig() {
+ return {CollectionType::kMinor, StackState::kNoHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ CollectionType collection_type = CollectionType::kMajor;
+ StackState stack_state = StackState::kMayContainHeapPointers;
+ MarkingType marking_type = MarkingType::kAtomic;
+ SweepingType sweeping_type = SweepingType::kAtomic;
+ };
+
+ // Executes a garbage collection specified in config.
+ virtual void CollectGarbage(Config config) = 0;
+
+ // The current epoch that the GC maintains. The epoch is increased on every
+ // GC invocation.
+ virtual size_t epoch() const = 0;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_GARBAGE_COLLECTOR_H_
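
Note: a minimal sketch (not part of the diff) of how the Config presets above can be exercised through the GarbageCollector interface, for example from a test inside the V8 tree; RecordingCollector is a hypothetical helper, not an existing class:

    #include <cstddef>
    #include <vector>

    #include "src/heap/cppgc/garbage-collector.h"

    namespace cppgc {
    namespace internal {

    // Records every config it receives and bumps the epoch, mimicking what a
    // real collector is expected to do per invocation.
    class RecordingCollector final : public GarbageCollector {
     public:
      void CollectGarbage(Config config) final {
        configs_.push_back(config);
        ++epoch_;
      }
      size_t epoch() const final { return epoch_; }

      const std::vector<Config>& configs() const { return configs_; }

     private:
      std::vector<Config> configs_;
      size_t epoch_ = 0;
    };

    }  // namespace internal
    }  // namespace cppgc

    // Usage:
    //   cppgc::internal::RecordingCollector collector;
    //   collector.CollectGarbage(
    //       cppgc::internal::GarbageCollector::Config::PreciseAtomicConfig());
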
diff --git a/chromium/v8/src/heap/cppgc/gc-info-table.cc b/chromium/v8/src/heap/cppgc/gc-info-table.cc
index dda5f0a7e83..8f2ee965011 100644
--- a/chromium/v8/src/heap/cppgc/gc-info-table.cc
+++ b/chromium/v8/src/heap/cppgc/gc-info-table.cc
@@ -18,6 +18,11 @@ namespace internal {
namespace {
+// GCInfoTable::table_, the table which holds GCInfos, is maintained as a
+// contiguous array reserved upfront. Subparts of the array are (re-)committed
+// as read/write or read-only in OS pages, whose size is a power of 2. To avoid
+// having GCInfos that cross the boundaries between these subparts we force the
+// size of GCInfo to be a power of 2 as well.
constexpr size_t kEntrySize = sizeof(GCInfo);
static_assert(v8::base::bits::IsPowerOfTwo(kEntrySize),
"GCInfoTable entries size must be power of "
diff --git a/chromium/v8/src/heap/cppgc/gc-info-table.h b/chromium/v8/src/heap/cppgc/gc-info-table.h
index 25141f5d1cc..749f30b258c 100644
--- a/chromium/v8/src/heap/cppgc/gc-info-table.h
+++ b/chromium/v8/src/heap/cppgc/gc-info-table.h
@@ -22,7 +22,10 @@ namespace internal {
// inherit from GarbageCollected.
struct GCInfo final {
FinalizationCallback finalize;
+ TraceCallback trace;
bool has_v_table;
+ // Keep sizeof(GCInfo) a power of 2.
+ size_t padding = 0;
};
class V8_EXPORT GCInfoTable final {
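
Note: the padding field added above exists only to keep sizeof(GCInfo) a power of two, so that entries never straddle the OS-page-sized committed chunks described in gc-info-table.cc. An illustrative stand-alone check of that layout; GCInfoLike merely mirrors the shape of the real struct on a typical 64-bit target:

    #include <cstddef>

    // Shape mirror of GCInfo for illustration only (callback signatures are
    // approximations, not the real cppgc typedefs).
    struct GCInfoLike {
      void (*finalize)(void*);
      void (*trace)(void*, const void*);
      bool has_v_table;
      size_t padding = 0;  // lifts the size from 24 to 32 bytes on LP64
    };

    constexpr bool IsPowerOfTwo(size_t v) { return v && ((v & (v - 1)) == 0); }

    static_assert(IsPowerOfTwo(sizeof(GCInfoLike)),
                  "entries must not straddle committed page boundaries");

    int main() { return 0; }
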
diff --git a/chromium/v8/src/heap/cppgc/gc-info.cc b/chromium/v8/src/heap/cppgc/gc-info.cc
index 007eab3a338..70970139b17 100644
--- a/chromium/v8/src/heap/cppgc/gc-info.cc
+++ b/chromium/v8/src/heap/cppgc/gc-info.cc
@@ -10,9 +10,10 @@ namespace cppgc {
namespace internal {
RegisteredGCInfoIndex::RegisteredGCInfoIndex(
- FinalizationCallback finalization_callback, bool has_v_table)
+ FinalizationCallback finalization_callback, TraceCallback trace_callback,
+ bool has_v_table)
: index_(GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
- {finalization_callback, has_v_table})) {}
+ {finalization_callback, trace_callback, has_v_table})) {}
} // namespace internal
} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/gc-invoker.cc b/chromium/v8/src/heap/cppgc/gc-invoker.cc
new file mode 100644
index 00000000000..a1212d80523
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/gc-invoker.cc
@@ -0,0 +1,105 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/gc-invoker.h"
+
+#include <memory>
+
+#include "include/cppgc/platform.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/task-handle.h"
+
+namespace cppgc {
+namespace internal {
+
+class GCInvoker::GCInvokerImpl final : public GarbageCollector {
+ public:
+ GCInvokerImpl(GarbageCollector*, cppgc::Platform*, cppgc::Heap::StackSupport);
+ ~GCInvokerImpl();
+
+ GCInvokerImpl(const GCInvokerImpl&) = delete;
+ GCInvokerImpl& operator=(const GCInvokerImpl&) = delete;
+
+ void CollectGarbage(GarbageCollector::Config) final;
+ size_t epoch() const final { return collector_->epoch(); }
+
+ private:
+ class GCTask final : public cppgc::Task {
+ public:
+ using Handle = SingleThreadedHandle;
+
+ static Handle Post(GarbageCollector* collector, cppgc::TaskRunner* runner) {
+ auto task = std::make_unique<GCInvoker::GCInvokerImpl::GCTask>(collector);
+ auto handle = task->GetHandle();
+ runner->PostNonNestableTask(std::move(task));
+ return handle;
+ }
+
+ explicit GCTask(GarbageCollector* collector)
+ : collector_(collector), saved_epoch_(collector->epoch()) {}
+
+ private:
+ void Run() final {
+ if (handle_.IsCanceled() || (collector_->epoch() != saved_epoch_)) return;
+
+ collector_->CollectGarbage(
+ GarbageCollector::Config::PreciseAtomicConfig());
+ handle_.Cancel();
+ }
+
+ Handle GetHandle() { return handle_; }
+
+ GarbageCollector* collector_;
+ Handle handle_;
+ size_t saved_epoch_;
+ };
+
+ GarbageCollector* collector_;
+ cppgc::Platform* platform_;
+ cppgc::Heap::StackSupport stack_support_;
+ GCTask::Handle gc_task_handle_;
+};
+
+GCInvoker::GCInvokerImpl::GCInvokerImpl(GarbageCollector* collector,
+ cppgc::Platform* platform,
+ cppgc::Heap::StackSupport stack_support)
+ : collector_(collector),
+ platform_(platform),
+ stack_support_(stack_support) {}
+
+GCInvoker::GCInvokerImpl::~GCInvokerImpl() {
+ if (gc_task_handle_) {
+ gc_task_handle_.Cancel();
+ }
+}
+
+void GCInvoker::GCInvokerImpl::CollectGarbage(GarbageCollector::Config config) {
+ if ((config.stack_state ==
+ GarbageCollector::Config::StackState::kNoHeapPointers) ||
+ (stack_support_ ==
+ cppgc::Heap::StackSupport::kSupportsConservativeStackScan)) {
+ collector_->CollectGarbage(config);
+ } else if (platform_->GetForegroundTaskRunner()->NonNestableTasksEnabled()) {
+ if (!gc_task_handle_) {
+ gc_task_handle_ =
+ GCTask::Post(collector_, platform_->GetForegroundTaskRunner().get());
+ }
+ }
+}
+
+GCInvoker::GCInvoker(GarbageCollector* collector, cppgc::Platform* platform,
+ cppgc::Heap::StackSupport stack_support)
+ : impl_(std::make_unique<GCInvoker::GCInvokerImpl>(collector, platform,
+ stack_support)) {}
+
+GCInvoker::~GCInvoker() = default;
+
+void GCInvoker::CollectGarbage(GarbageCollector::Config config) {
+ impl_->CollectGarbage(config);
+}
+
+size_t GCInvoker::epoch() const { return impl_->epoch(); }
+
+} // namespace internal
+} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/gc-invoker.h b/chromium/v8/src/heap/cppgc/gc-invoker.h
new file mode 100644
index 00000000000..a9e3369b3e9
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/gc-invoker.h
@@ -0,0 +1,47 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_GC_INVOKER_H_
+#define V8_HEAP_CPPGC_GC_INVOKER_H_
+
+#include "include/cppgc/heap.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/garbage-collector.h"
+
+namespace cppgc {
+
+class Platform;
+
+namespace internal {
+
+// GC invoker that dispatches GC depending on StackSupport and StackState:
+// 1. If StackState specifies no stack scan needed the GC is invoked
+// synchronously.
+// 2. If StackState specifies conservative GC and StackSupport prohibits stack
+// scanning: Delay GC until it can be invoked without accessing the stack.
+// To do so, a precise GC without stack scan is scheduled using the platform
+// if non-nestable tasks are supported, and otherwise no operation is carried
+//    out. This means that the heuristic allows the heap to go arbitrarily over
+//    the limit when non-nestable tasks are not supported and only conservative
+//    GCs are requested.
+class V8_EXPORT_PRIVATE GCInvoker final : public GarbageCollector {
+ public:
+ GCInvoker(GarbageCollector*, cppgc::Platform*, cppgc::Heap::StackSupport);
+ ~GCInvoker();
+
+ GCInvoker(const GCInvoker&) = delete;
+ GCInvoker& operator=(const GCInvoker&) = delete;
+
+ void CollectGarbage(GarbageCollector::Config) final;
+ size_t epoch() const final;
+
+ private:
+ class GCInvokerImpl;
+ std::unique_ptr<GCInvokerImpl> impl_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_GC_INVOKER_H_
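
Note: a compact stand-alone restatement of the dispatch policy documented above and implemented in GCInvokerImpl::CollectGarbage(); the enums are illustrative stand-ins rather than the real cppgc types:

    #include <cstdio>

    enum class StackState { kMayContainHeapPointers, kNoHeapPointers };
    enum class StackSupport {
      kSupportsConservativeStackScan,
      kNoConservativeStackScan
    };
    enum class Dispatch {
      kRunSynchronously,   // GC can run right away.
      kPostPreciseGCTask,  // delay: schedule a non-nestable precise GC task.
      kDropRequest         // no non-nestable tasks available: nothing happens.
    };

    // Mirrors the decision made in GCInvokerImpl::CollectGarbage() above.
    Dispatch Decide(StackState stack_state, StackSupport stack_support,
                    bool non_nestable_tasks_enabled) {
      if (stack_state == StackState::kNoHeapPointers ||
          stack_support == StackSupport::kSupportsConservativeStackScan) {
        return Dispatch::kRunSynchronously;
      }
      return non_nestable_tasks_enabled ? Dispatch::kPostPreciseGCTask
                                        : Dispatch::kDropRequest;
    }

    int main() {
      // Conservative GC requested, stack scanning prohibited, tasks available:
      // the invoker schedules a precise GC instead of scanning the stack.
      const Dispatch d = Decide(StackState::kMayContainHeapPointers,
                                StackSupport::kNoConservativeStackScan,
                                /*non_nestable_tasks_enabled=*/true);
      std::printf("%d\n", static_cast<int>(d));  // prints 1 (kPostPreciseGCTask)
    }
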
diff --git a/chromium/v8/src/heap/cppgc/globals.h b/chromium/v8/src/heap/cppgc/globals.h
index 734abd508ef..d286a7fa428 100644
--- a/chromium/v8/src/heap/cppgc/globals.h
+++ b/chromium/v8/src/heap/cppgc/globals.h
@@ -16,6 +16,10 @@ namespace internal {
using Address = uint8_t*;
using ConstAddress = const uint8_t*;
+constexpr size_t kKB = 1024;
+constexpr size_t kMB = kKB * 1024;
+constexpr size_t kGB = kMB * 1024;
+
// See 6.7.6 (http://eel.is/c++draft/basic.align) for alignment restrictions. We
// do not fully support all alignment restrictions (following
// alignof(std​::​max_­align_­t)) but limit to alignof(double).
@@ -42,6 +46,9 @@ constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
constexpr GCInfoIndex kFreeListGCInfoIndex = 0;
constexpr size_t kFreeListEntrySize = 2 * sizeof(uintptr_t);
+constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
+constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
+
} // namespace internal
} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/heap-base.cc b/chromium/v8/src/heap/cppgc/heap-base.cc
new file mode 100644
index 00000000000..7963df0af3f
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/heap-base.cc
@@ -0,0 +1,88 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap-base.h"
+
+#include "src/base/bounded-page-allocator.h"
+#include "src/base/platform/platform.h"
+#include "src/heap/base/stack.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-page-inl.h"
+#include "src/heap/cppgc/heap-visitor.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/prefinalizer-handler.h"
+#include "src/heap/cppgc/stats-collector.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
+ friend class HeapVisitor<ObjectSizeCounter>;
+
+ public:
+ size_t GetSize(RawHeap* heap) {
+ Traverse(heap);
+ return accumulated_size_;
+ }
+
+ private:
+ static size_t ObjectSize(const HeapObjectHeader* header) {
+ const size_t size =
+ header->IsLargeObject()
+ ? static_cast<const LargePage*>(BasePage::FromPayload(header))
+ ->PayloadSize()
+ : header->GetSize();
+ DCHECK_GE(size, sizeof(HeapObjectHeader));
+ return size - sizeof(HeapObjectHeader);
+ }
+
+ bool VisitHeapObjectHeader(HeapObjectHeader* header) {
+ if (header->IsFree()) return true;
+ accumulated_size_ += ObjectSize(header);
+ return true;
+ }
+
+ size_t accumulated_size_ = 0;
+};
+
+} // namespace
+
+HeapBase::HeapBase(std::shared_ptr<cppgc::Platform> platform,
+ size_t custom_spaces)
+ : raw_heap_(this, custom_spaces),
+ platform_(std::move(platform)),
+#if defined(CPPGC_CAGED_HEAP)
+ caged_heap_(this, platform_->GetPageAllocator()),
+ page_backend_(std::make_unique<PageBackend>(&caged_heap_.allocator())),
+#else
+ page_backend_(
+ std::make_unique<PageBackend>(platform_->GetPageAllocator())),
+#endif
+ stats_collector_(std::make_unique<StatsCollector>()),
+ stack_(std::make_unique<heap::base::Stack>(
+ v8::base::Stack::GetStackStart())),
+ prefinalizer_handler_(std::make_unique<PreFinalizerHandler>()),
+ object_allocator_(&raw_heap_, page_backend_.get(),
+ stats_collector_.get()),
+ sweeper_(&raw_heap_, platform_.get(), stats_collector_.get()) {
+}
+
+HeapBase::~HeapBase() = default;
+
+size_t HeapBase::ObjectPayloadSize() const {
+ return ObjectSizeCounter().GetSize(const_cast<RawHeap*>(&raw_heap()));
+}
+
+HeapBase::NoGCScope::NoGCScope(HeapBase& heap) : heap_(heap) {
+ heap_.no_gc_scope_++;
+}
+
+HeapBase::NoGCScope::~NoGCScope() { heap_.no_gc_scope_--; }
+
+} // namespace internal
+} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/heap-base.h b/chromium/v8/src/heap/cppgc/heap-base.h
new file mode 100644
index 00000000000..cc61ed32fc8
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/heap-base.h
@@ -0,0 +1,151 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_BASE_H_
+#define V8_HEAP_CPPGC_HEAP_BASE_H_
+
+#include <memory>
+#include <set>
+
+#include "include/cppgc/internal/persistent-node.h"
+#include "include/cppgc/macros.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/object-allocator.h"
+#include "src/heap/cppgc/raw-heap.h"
+#include "src/heap/cppgc/sweeper.h"
+
+#if defined(CPPGC_CAGED_HEAP)
+#include "src/heap/cppgc/caged-heap.h"
+#endif
+
+namespace heap {
+namespace base {
+class Stack;
+} // namespace base
+} // namespace heap
+
+namespace cppgc {
+
+class Platform;
+
+namespace internal {
+
+namespace testing {
+class TestWithHeap;
+}
+
+class Marker;
+class PageBackend;
+class PreFinalizerHandler;
+class StatsCollector;
+
+// Base class for heap implementations.
+class V8_EXPORT_PRIVATE HeapBase {
+ public:
+  // NoGCScope allows going over heap limits and suppresses garbage collection,
+  // whether triggered through allocations or requested explicitly.
+ class V8_EXPORT_PRIVATE NoGCScope final {
+ CPPGC_STACK_ALLOCATED();
+
+ public:
+ explicit NoGCScope(HeapBase& heap);
+ ~NoGCScope();
+
+ NoGCScope(const NoGCScope&) = delete;
+ NoGCScope& operator=(const NoGCScope&) = delete;
+
+ private:
+ HeapBase& heap_;
+ };
+
+ HeapBase(std::shared_ptr<cppgc::Platform> platform, size_t custom_spaces);
+ virtual ~HeapBase();
+
+ HeapBase(const HeapBase&) = delete;
+ HeapBase& operator=(const HeapBase&) = delete;
+
+ RawHeap& raw_heap() { return raw_heap_; }
+ const RawHeap& raw_heap() const { return raw_heap_; }
+
+ cppgc::Platform* platform() { return platform_.get(); }
+ const cppgc::Platform* platform() const { return platform_.get(); }
+
+ PageBackend* page_backend() { return page_backend_.get(); }
+ const PageBackend* page_backend() const { return page_backend_.get(); }
+
+ StatsCollector* stats_collector() { return stats_collector_.get(); }
+ const StatsCollector* stats_collector() const {
+ return stats_collector_.get();
+ }
+
+#if defined(CPPGC_CAGED_HEAP)
+ CagedHeap& caged_heap() { return caged_heap_; }
+ const CagedHeap& caged_heap() const { return caged_heap_; }
+#endif
+
+ heap::base::Stack* stack() { return stack_.get(); }
+
+ PreFinalizerHandler* prefinalizer_handler() {
+ return prefinalizer_handler_.get();
+ }
+
+ Marker* marker() const { return marker_.get(); }
+
+ ObjectAllocator& object_allocator() { return object_allocator_; }
+
+ Sweeper& sweeper() { return sweeper_; }
+
+ PersistentRegion& GetStrongPersistentRegion() {
+ return strong_persistent_region_;
+ }
+ const PersistentRegion& GetStrongPersistentRegion() const {
+ return strong_persistent_region_;
+ }
+ PersistentRegion& GetWeakPersistentRegion() {
+ return weak_persistent_region_;
+ }
+ const PersistentRegion& GetWeakPersistentRegion() const {
+ return weak_persistent_region_;
+ }
+
+#if defined(CPPGC_YOUNG_GENERATION)
+ std::set<void*>& remembered_slots() { return remembered_slots_; }
+#endif
+
+ size_t ObjectPayloadSize() const;
+
+ protected:
+ bool in_no_gc_scope() const { return no_gc_scope_ > 0; }
+
+ RawHeap raw_heap_;
+ std::shared_ptr<cppgc::Platform> platform_;
+#if defined(CPPGC_CAGED_HEAP)
+ CagedHeap caged_heap_;
+#endif
+ std::unique_ptr<PageBackend> page_backend_;
+
+ std::unique_ptr<StatsCollector> stats_collector_;
+ std::unique_ptr<heap::base::Stack> stack_;
+ std::unique_ptr<PreFinalizerHandler> prefinalizer_handler_;
+ std::unique_ptr<Marker> marker_;
+
+ ObjectAllocator object_allocator_;
+ Sweeper sweeper_;
+
+ PersistentRegion strong_persistent_region_;
+ PersistentRegion weak_persistent_region_;
+
+#if defined(CPPGC_YOUNG_GENERATION)
+ std::set<void*> remembered_slots_;
+#endif
+
+ size_t no_gc_scope_ = 0;
+
+ friend class testing::TestWithHeap;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_BASE_H_
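
Note: the NoGCScope above is a simple counting scope: GC stays suppressed while at least one scope object is alive, and scopes may nest freely. A stand-alone illustration of the counting scheme (FakeHeap is a stand-in, not a cppgc class):

    #include <cassert>
    #include <cstddef>

    class FakeHeap {
     public:
      class NoGCScope {
       public:
        explicit NoGCScope(FakeHeap& heap) : heap_(heap) { ++heap_.no_gc_scope_; }
        ~NoGCScope() { --heap_.no_gc_scope_; }
        NoGCScope(const NoGCScope&) = delete;
        NoGCScope& operator=(const NoGCScope&) = delete;

       private:
        FakeHeap& heap_;
      };

      bool in_no_gc_scope() const { return no_gc_scope_ > 0; }

     private:
      size_t no_gc_scope_ = 0;
    };

    int main() {
      FakeHeap heap;
      assert(!heap.in_no_gc_scope());
      {
        FakeHeap::NoGCScope outer(heap);
        FakeHeap::NoGCScope inner(heap);  // scopes nest; the counter handles it
        assert(heap.in_no_gc_scope());
      }
      assert(!heap.in_no_gc_scope());
    }
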
diff --git a/chromium/v8/src/heap/cppgc/heap-growing.cc b/chromium/v8/src/heap/cppgc/heap-growing.cc
new file mode 100644
index 00000000000..751d32b0e6d
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/heap-growing.cc
@@ -0,0 +1,99 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap-growing.h"
+
+#include <memory>
+
+#include "include/cppgc/platform.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/stats-collector.h"
+#include "src/heap/cppgc/task-handle.h"
+
+namespace cppgc {
+namespace internal {
+
+class HeapGrowing::HeapGrowingImpl final
+ : public StatsCollector::AllocationObserver {
+ public:
+ HeapGrowingImpl(GarbageCollector*, StatsCollector*,
+ cppgc::Heap::ResourceConstraints);
+ ~HeapGrowingImpl();
+
+ HeapGrowingImpl(const HeapGrowingImpl&) = delete;
+ HeapGrowingImpl& operator=(const HeapGrowingImpl&) = delete;
+
+ void AllocatedObjectSizeIncreased(size_t) final;
+ // Only trigger GC on growing.
+ void AllocatedObjectSizeDecreased(size_t) final {}
+ void ResetAllocatedObjectSize(size_t) final;
+
+ size_t limit() const { return limit_; }
+
+ private:
+ void ConfigureLimit(size_t allocated_object_size);
+
+ GarbageCollector* collector_;
+ StatsCollector* stats_collector_;
+  // Allow 1 MB heap by default.
+ size_t initial_heap_size_ = 1 * kMB;
+ size_t limit_ = 0; // See ConfigureLimit().
+
+ SingleThreadedHandle gc_task_handle_;
+};
+
+HeapGrowing::HeapGrowingImpl::HeapGrowingImpl(
+ GarbageCollector* collector, StatsCollector* stats_collector,
+ cppgc::Heap::ResourceConstraints constraints)
+ : collector_(collector),
+ stats_collector_(stats_collector),
+ gc_task_handle_(SingleThreadedHandle::NonEmptyTag{}) {
+ if (constraints.initial_heap_size_bytes > 0) {
+ initial_heap_size_ = constraints.initial_heap_size_bytes;
+ }
+ constexpr size_t kNoAllocatedBytes = 0;
+ ConfigureLimit(kNoAllocatedBytes);
+ stats_collector->RegisterObserver(this);
+}
+
+HeapGrowing::HeapGrowingImpl::~HeapGrowingImpl() {
+ stats_collector_->UnregisterObserver(this);
+}
+
+void HeapGrowing::HeapGrowingImpl::AllocatedObjectSizeIncreased(size_t) {
+ if (stats_collector_->allocated_object_size() > limit_) {
+ collector_->CollectGarbage(
+ GarbageCollector::Config::ConservativeAtomicConfig());
+ }
+}
+
+void HeapGrowing::HeapGrowingImpl::ResetAllocatedObjectSize(
+ size_t allocated_object_size) {
+ ConfigureLimit(allocated_object_size);
+}
+
+void HeapGrowing::HeapGrowingImpl::ConfigureLimit(
+ size_t allocated_object_size) {
+ const size_t size = std::max(allocated_object_size, initial_heap_size_);
+ limit_ = std::max(static_cast<size_t>(size * kGrowingFactor),
+ size + kMinLimitIncrease);
+}
+
+HeapGrowing::HeapGrowing(GarbageCollector* collector,
+ StatsCollector* stats_collector,
+ cppgc::Heap::ResourceConstraints constraints)
+ : impl_(std::make_unique<HeapGrowing::HeapGrowingImpl>(
+ collector, stats_collector, constraints)) {}
+
+HeapGrowing::~HeapGrowing() = default;
+
+size_t HeapGrowing::limit() const { return impl_->limit(); }
+
+// static
+constexpr double HeapGrowing::kGrowingFactor;
+
+} // namespace internal
+} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/heap-growing.h b/chromium/v8/src/heap/cppgc/heap-growing.h
new file mode 100644
index 00000000000..772fc2db55f
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/heap-growing.h
@@ -0,0 +1,53 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_GROWING_H_
+#define V8_HEAP_CPPGC_HEAP_GROWING_H_
+
+#include "include/cppgc/heap.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/raw-heap.h"
+
+namespace cppgc {
+
+class Platform;
+
+namespace internal {
+
+class GarbageCollector;
+class StatsCollector;
+
+// Growing strategy that invokes garbage collection using GarbageCollector based
+// on allocation statistics provided by StatsCollector and ResourceConstraints.
+//
+// Implements a fixed-ratio growing strategy with an initial heap size that the
+// GC can ignore to avoid excessive GCs for smaller heaps.
+class V8_EXPORT_PRIVATE HeapGrowing final {
+ public:
+ // Constant growing factor for growing the heap limit.
+ static constexpr double kGrowingFactor = 1.5;
+  // For smaller heaps, allow allocating at least a LAB in each regular space
+ // before triggering GC again.
+ static constexpr size_t kMinLimitIncrease =
+ kPageSize * RawHeap::kNumberOfRegularSpaces;
+
+ HeapGrowing(GarbageCollector*, StatsCollector*,
+ cppgc::Heap::ResourceConstraints);
+ ~HeapGrowing();
+
+ HeapGrowing(const HeapGrowing&) = delete;
+ HeapGrowing& operator=(const HeapGrowing&) = delete;
+
+ size_t limit() const;
+
+ private:
+ class HeapGrowingImpl;
+ std::unique_ptr<HeapGrowingImpl> impl_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_GROWING_H_
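
Note: a worked example of the limit computation in ConfigureLimit() from heap-growing.cc above. kGrowingFactor = 1.5 comes from this diff; kMinLimitIncrease below is a hypothetical 1 MB stand-in, since the real value is kPageSize * RawHeap::kNumberOfRegularSpaces:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      constexpr double kGrowingFactor = 1.5;         // from the diff
      constexpr size_t kMB = 1024 * 1024;
      constexpr size_t kMinLimitIncrease = 1 * kMB;  // hypothetical stand-in
      constexpr size_t kInitialHeapSize = 1 * kMB;   // default from the diff

      // Mirrors HeapGrowing::HeapGrowingImpl::ConfigureLimit().
      const auto configure_limit = [&](size_t allocated_object_size) {
        const size_t size = std::max(allocated_object_size, kInitialHeapSize);
        return std::max(static_cast<size_t>(size * kGrowingFactor),
                        size + kMinLimitIncrease);
      };

      // Fresh heap: limit = max(1.5 MB, 1 MB + 1 MB) = 2 MB.
      std::printf("%zu\n", configure_limit(0));
      // 8 MB live after a GC: limit = max(12 MB, 9 MB) = 12 MB.
      std::printf("%zu\n", configure_limit(8 * kMB));
    }
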
diff --git a/chromium/v8/src/heap/cppgc/heap-inl.h b/chromium/v8/src/heap/cppgc/heap-inl.h
deleted file mode 100644
index 4fe3186230f..00000000000
--- a/chromium/v8/src/heap/cppgc/heap-inl.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_CPPGC_HEAP_INL_H_
-#define V8_HEAP_CPPGC_HEAP_INL_H_
-
-#include "src/heap/cppgc/globals.h"
-#include "src/heap/cppgc/heap.h"
-#include "src/heap/cppgc/object-allocator-inl.h"
-
-namespace cppgc {
-namespace internal {
-
-void* Heap::Allocate(size_t size, GCInfoIndex index) {
- DCHECK(is_allocation_allowed());
- void* result = object_allocator_.AllocateObject(size, index);
- objects_.push_back(&HeapObjectHeader::FromPayload(result));
- return result;
-}
-
-void* Heap::Allocate(size_t size, GCInfoIndex index,
- CustomSpaceIndex space_index) {
- DCHECK(is_allocation_allowed());
- void* result = object_allocator_.AllocateObject(size, index, space_index);
- objects_.push_back(&HeapObjectHeader::FromPayload(result));
- return result;
-}
-
-} // namespace internal
-} // namespace cppgc
-
-#endif // V8_HEAP_CPPGC_HEAP_INL_H_
diff --git a/chromium/v8/src/heap/cppgc/heap-object-header-inl.h b/chromium/v8/src/heap/cppgc/heap-object-header-inl.h
index cba7b24a4cb..0348013e08b 100644
--- a/chromium/v8/src/heap/cppgc/heap-object-header-inl.h
+++ b/chromium/v8/src/heap/cppgc/heap-object-header-inl.h
@@ -113,6 +113,11 @@ bool HeapObjectHeader::TryMarkAtomic() {
}
template <HeapObjectHeader::AccessMode mode>
+bool HeapObjectHeader::IsYoung() const {
+ return !IsMarked<mode>();
+}
+
+template <HeapObjectHeader::AccessMode mode>
bool HeapObjectHeader::IsFree() const {
return GetGCInfoIndex() == kFreeListGCInfoIndex;
}
diff --git a/chromium/v8/src/heap/cppgc/heap-object-header.h b/chromium/v8/src/heap/cppgc/heap-object-header.h
index b517617dd1e..9a2b5283888 100644
--- a/chromium/v8/src/heap/cppgc/heap-object-header.h
+++ b/chromium/v8/src/heap/cppgc/heap-object-header.h
@@ -80,6 +80,9 @@ class HeapObjectHeader {
inline bool TryMarkAtomic();
template <AccessMode = AccessMode::kNonAtomic>
+ bool IsYoung() const;
+
+ template <AccessMode = AccessMode::kNonAtomic>
bool IsFree() const;
inline bool IsFinalizable() const;
diff --git a/chromium/v8/src/heap/cppgc/heap-page-inl.h b/chromium/v8/src/heap/cppgc/heap-page-inl.h
new file mode 100644
index 00000000000..a416a62e492
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/heap-page-inl.h
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_PAGE_INL_H_
+#define V8_HEAP_CPPGC_HEAP_PAGE_INL_H_
+
+#include "src/heap/cppgc/heap-page.h"
+
+namespace cppgc {
+namespace internal {
+
+// static
+BasePage* BasePage::FromPayload(void* payload) {
+ return reinterpret_cast<BasePage*>(
+ (reinterpret_cast<uintptr_t>(payload) & kPageBaseMask) + kGuardPageSize);
+}
+
+// static
+const BasePage* BasePage::FromPayload(const void* payload) {
+ return reinterpret_cast<const BasePage*>(
+ (reinterpret_cast<uintptr_t>(const_cast<void*>(payload)) &
+ kPageBaseMask) +
+ kGuardPageSize);
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_PAGE_INL_H_
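
Note: FromPayload() above recovers the page header from any payload address by masking off the in-page offset and then skipping the leading guard page. A stand-alone sketch with hypothetical constants; the real kPageSize, kGuardPageSize and kPageBaseMask live in globals.h and are not restated in this diff:

    #include <cassert>
    #include <cstdint>

    // Hypothetical page layout constants for illustration only.
    constexpr uintptr_t kPageSize = 1 << 17;       // e.g. 128 KiB pages
    constexpr uintptr_t kGuardPageSize = 1 << 12;  // e.g. 4 KiB guard page
    constexpr uintptr_t kPageBaseMask = ~(kPageSize - 1);

    // Mirrors BasePage::FromPayload(): the page header lives right after the
    // guard page at the start of the page-aligned region.
    uintptr_t PageHeaderFromPayload(uintptr_t payload) {
      return (payload & kPageBaseMask) + kGuardPageSize;
    }

    int main() {
      const uintptr_t page_region = 0x20000000;            // page-aligned region
      const uintptr_t header = page_region + kGuardPageSize;
      const uintptr_t some_object = page_region + 0x5000;  // inside the payload
      assert(PageHeaderFromPayload(some_object) == header);
    }
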
diff --git a/chromium/v8/src/heap/cppgc/heap-page.cc b/chromium/v8/src/heap/cppgc/heap-page.cc
index e8afbafbd2a..f95f4a37eb6 100644
--- a/chromium/v8/src/heap/cppgc/heap-page.cc
+++ b/chromium/v8/src/heap/cppgc/heap-page.cc
@@ -14,7 +14,7 @@
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/object-start-bitmap-inl.h"
#include "src/heap/cppgc/object-start-bitmap.h"
-#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/page-memory-inl.h"
#include "src/heap/cppgc/raw-heap.h"
namespace cppgc {
@@ -27,63 +27,120 @@ Address AlignAddress(Address address, size_t alignment) {
RoundUp(reinterpret_cast<uintptr_t>(address), alignment));
}
-} // namespace
+const HeapObjectHeader* ObjectHeaderFromInnerAddressImpl(const BasePage* page,
+ const void* address) {
+ if (page->is_large()) {
+ return LargePage::From(page)->ObjectHeader();
+ }
+ const ObjectStartBitmap& bitmap =
+ NormalPage::From(page)->object_start_bitmap();
+ const HeapObjectHeader* header =
+ bitmap.FindHeader(static_cast<ConstAddress>(address));
+ DCHECK_LT(address,
+ reinterpret_cast<ConstAddress>(header) +
+ header->GetSize<HeapObjectHeader::AccessMode::kAtomic>());
+ return header;
+}
-STATIC_ASSERT(kPageSize == api_constants::kPageAlignment);
+} // namespace
// static
-BasePage* BasePage::FromPayload(void* payload) {
- return reinterpret_cast<BasePage*>(
- (reinterpret_cast<uintptr_t>(payload) & kPageBaseMask) + kGuardPageSize);
+BasePage* BasePage::FromInnerAddress(const HeapBase* heap, void* address) {
+ return const_cast<BasePage*>(
+ FromInnerAddress(heap, const_cast<const void*>(address)));
}
// static
-const BasePage* BasePage::FromPayload(const void* payload) {
+const BasePage* BasePage::FromInnerAddress(const HeapBase* heap,
+ const void* address) {
return reinterpret_cast<const BasePage*>(
- (reinterpret_cast<uintptr_t>(const_cast<void*>(payload)) &
- kPageBaseMask) +
- kGuardPageSize);
+ heap->page_backend()->Lookup(static_cast<ConstAddress>(address)));
}
-HeapObjectHeader* BasePage::ObjectHeaderFromInnerAddress(void* address) {
- return const_cast<HeapObjectHeader*>(
+// static
+void BasePage::Destroy(BasePage* page) {
+ if (page->is_large()) {
+ LargePage::Destroy(LargePage::From(page));
+ } else {
+ NormalPage::Destroy(NormalPage::From(page));
+ }
+}
+
+Address BasePage::PayloadStart() {
+ return is_large() ? LargePage::From(this)->PayloadStart()
+ : NormalPage::From(this)->PayloadStart();
+}
+
+ConstAddress BasePage::PayloadStart() const {
+ return const_cast<BasePage*>(this)->PayloadStart();
+}
+
+Address BasePage::PayloadEnd() {
+ return is_large() ? LargePage::From(this)->PayloadEnd()
+ : NormalPage::From(this)->PayloadEnd();
+}
+
+ConstAddress BasePage::PayloadEnd() const {
+ return const_cast<BasePage*>(this)->PayloadEnd();
+}
+
+HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(void* address) const {
+ return const_cast<HeapObjectHeader&>(
ObjectHeaderFromInnerAddress(const_cast<const void*>(address)));
}
-const HeapObjectHeader* BasePage::ObjectHeaderFromInnerAddress(
- const void* address) {
+const HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(
+ const void* address) const {
+ const HeapObjectHeader* header =
+ ObjectHeaderFromInnerAddressImpl(this, address);
+ DCHECK_NE(kFreeListGCInfoIndex, header->GetGCInfoIndex());
+ return *header;
+}
+
+HeapObjectHeader* BasePage::TryObjectHeaderFromInnerAddress(
+ void* address) const {
+ return const_cast<HeapObjectHeader*>(
+ TryObjectHeaderFromInnerAddress(const_cast<const void*>(address)));
+}
+
+const HeapObjectHeader* BasePage::TryObjectHeaderFromInnerAddress(
+ const void* address) const {
if (is_large()) {
- return LargePage::From(this)->ObjectHeader();
+ if (!LargePage::From(this)->PayloadContains(
+ static_cast<ConstAddress>(address)))
+ return nullptr;
+ } else {
+ const NormalPage* normal_page = NormalPage::From(this);
+ if (!normal_page->PayloadContains(static_cast<ConstAddress>(address)))
+ return nullptr;
+ // Check that the space has no linear allocation buffer.
+ DCHECK(!NormalPageSpace::From(normal_page->space())
+ ->linear_allocation_buffer()
+ .size());
}
- ObjectStartBitmap& bitmap = NormalPage::From(this)->object_start_bitmap();
- HeapObjectHeader* header =
- bitmap.FindHeader(static_cast<ConstAddress>(address));
- DCHECK_LT(address,
- reinterpret_cast<ConstAddress>(header) +
- header->GetSize<HeapObjectHeader::AccessMode::kAtomic>());
- DCHECK_NE(kFreeListGCInfoIndex,
- header->GetGCInfoIndex<HeapObjectHeader::AccessMode::kAtomic>());
+
+  // |address| is on the heap, so the header can be looked up from it.
+ const HeapObjectHeader* header =
+ ObjectHeaderFromInnerAddressImpl(this, address);
+ if (header->IsFree()) return nullptr;
+ DCHECK_NE(kFreeListGCInfoIndex, header->GetGCInfoIndex());
return header;
}
-BasePage::BasePage(Heap* heap, BaseSpace* space, PageType type)
+BasePage::BasePage(HeapBase* heap, BaseSpace* space, PageType type)
: heap_(heap), space_(space), type_(type) {
DCHECK_EQ(0u, (reinterpret_cast<uintptr_t>(this) - kGuardPageSize) &
kPageOffsetMask);
- DCHECK_EQ(reinterpret_cast<void*>(&heap_),
- FromPayload(this) + api_constants::kHeapOffset);
DCHECK_EQ(&heap_->raw_heap(), space_->raw_heap());
}
// static
-NormalPage* NormalPage::Create(NormalPageSpace* space) {
- DCHECK(space);
- Heap* heap = space->raw_heap()->heap();
- DCHECK(heap);
- void* memory = heap->page_backend()->AllocateNormalPageMemory(space->index());
- auto* normal_page = new (memory) NormalPage(heap, space);
- space->AddPage(normal_page);
- space->AddToFreeList(normal_page->PayloadStart(), normal_page->PayloadSize());
+NormalPage* NormalPage::Create(PageBackend* page_backend,
+ NormalPageSpace* space) {
+ DCHECK_NOT_NULL(page_backend);
+ DCHECK_NOT_NULL(space);
+ void* memory = page_backend->AllocateNormalPageMemory(space->index());
+ auto* normal_page = new (memory) NormalPage(space->raw_heap()->heap(), space);
return normal_page;
}
@@ -98,7 +155,7 @@ void NormalPage::Destroy(NormalPage* page) {
reinterpret_cast<Address>(page));
}
-NormalPage::NormalPage(Heap* heap, BaseSpace* space)
+NormalPage::NormalPage(HeapBase* heap, BaseSpace* space)
: BasePage(heap, space, PageType::kNormal),
object_start_bitmap_(PayloadStart()) {
DCHECK_LT(kLargeObjectSizeThreshold,
@@ -142,23 +199,25 @@ size_t NormalPage::PayloadSize() {
return kPageSize - 2 * kGuardPageSize - header_size;
}
-LargePage::LargePage(Heap* heap, BaseSpace* space, size_t size)
+LargePage::LargePage(HeapBase* heap, BaseSpace* space, size_t size)
: BasePage(heap, space, PageType::kLarge), payload_size_(size) {}
LargePage::~LargePage() = default;
// static
-LargePage* LargePage::Create(LargePageSpace* space, size_t size) {
- DCHECK(space);
+LargePage* LargePage::Create(PageBackend* page_backend, LargePageSpace* space,
+ size_t size) {
+ DCHECK_NOT_NULL(page_backend);
+ DCHECK_NOT_NULL(space);
DCHECK_LE(kLargeObjectSizeThreshold, size);
+
const size_t page_header_size =
RoundUp(sizeof(LargePage), kAllocationGranularity);
const size_t allocation_size = page_header_size + size;
- Heap* heap = space->raw_heap()->heap();
- void* memory = heap->page_backend()->AllocateLargePageMemory(allocation_size);
+ auto* heap = space->raw_heap()->heap();
+ void* memory = page_backend->AllocateLargePageMemory(allocation_size);
LargePage* page = new (memory) LargePage(heap, space, size);
- space->AddPage(page);
return page;
}
diff --git a/chromium/v8/src/heap/cppgc/heap-page.h b/chromium/v8/src/heap/cppgc/heap-page.h
index c676bc4bde0..7559d5f1ece 100644
--- a/chromium/v8/src/heap/cppgc/heap-page.h
+++ b/chromium/v8/src/heap/cppgc/heap-page.h
@@ -17,19 +17,24 @@ namespace internal {
class BaseSpace;
class NormalPageSpace;
class LargePageSpace;
-class Heap;
+class HeapBase;
class PageBackend;
class V8_EXPORT_PRIVATE BasePage {
public:
- static BasePage* FromPayload(void*);
- static const BasePage* FromPayload(const void*);
+ static inline BasePage* FromPayload(void*);
+ static inline const BasePage* FromPayload(const void*);
+
+ static BasePage* FromInnerAddress(const HeapBase*, void*);
+ static const BasePage* FromInnerAddress(const HeapBase*, const void*);
+
+ static void Destroy(BasePage*);
BasePage(const BasePage&) = delete;
BasePage& operator=(const BasePage&) = delete;
- Heap* heap() { return heap_; }
- const Heap* heap() const { return heap_; }
+ HeapBase* heap() { return heap_; }
+ const HeapBase* heap() const { return heap_; }
BaseSpace* space() { return space_; }
const BaseSpace* space() const { return space_; }
@@ -37,16 +42,29 @@ class V8_EXPORT_PRIVATE BasePage {
bool is_large() const { return type_ == PageType::kLarge; }
+ Address PayloadStart();
+ ConstAddress PayloadStart() const;
+ Address PayloadEnd();
+ ConstAddress PayloadEnd() const;
+
// |address| must refer to real object.
- HeapObjectHeader* ObjectHeaderFromInnerAddress(void* address);
- const HeapObjectHeader* ObjectHeaderFromInnerAddress(const void* address);
+ HeapObjectHeader& ObjectHeaderFromInnerAddress(void* address) const;
+ const HeapObjectHeader& ObjectHeaderFromInnerAddress(
+ const void* address) const;
+
+  // |address| is guaranteed to point into the page, but not necessarily into
+  // the payload. Returns nullptr when |address| points into a free-list entry
+  // and the valid header otherwise.
+ HeapObjectHeader* TryObjectHeaderFromInnerAddress(void* address) const;
+ const HeapObjectHeader* TryObjectHeaderFromInnerAddress(
+ const void* address) const;
protected:
enum class PageType { kNormal, kLarge };
- BasePage(Heap*, BaseSpace*, PageType);
+ BasePage(HeapBase*, BaseSpace*, PageType);
private:
- Heap* heap_;
+ HeapBase* heap_;
BaseSpace* space_;
PageType type_;
};
@@ -98,8 +116,8 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
using iterator = IteratorImpl<HeapObjectHeader>;
using const_iterator = IteratorImpl<const HeapObjectHeader>;
- // Allocates a new page.
- static NormalPage* Create(NormalPageSpace*);
+ // Allocates a new page in the detached state.
+ static NormalPage* Create(PageBackend*, NormalPageSpace*);
// Destroys and frees the page. The page must be detached from the
// corresponding space (i.e. be swept when called).
static void Destroy(NormalPage*);
@@ -130,13 +148,17 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
static size_t PayloadSize();
+ bool PayloadContains(ConstAddress address) const {
+ return (PayloadStart() <= address) && (address < PayloadEnd());
+ }
+
ObjectStartBitmap& object_start_bitmap() { return object_start_bitmap_; }
const ObjectStartBitmap& object_start_bitmap() const {
return object_start_bitmap_;
}
private:
- NormalPage(Heap* heap, BaseSpace* space);
+ NormalPage(HeapBase* heap, BaseSpace* space);
~NormalPage();
ObjectStartBitmap object_start_bitmap_;
@@ -144,8 +166,8 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
class V8_EXPORT_PRIVATE LargePage final : public BasePage {
public:
- // Allocates a new page.
- static LargePage* Create(LargePageSpace*, size_t);
+ // Allocates a new page in the detached state.
+ static LargePage* Create(PageBackend*, LargePageSpace*, size_t);
// Destroys and frees the page. The page must be detached from the
// corresponding space (i.e. be swept when called).
static void Destroy(LargePage*);
@@ -168,8 +190,12 @@ class V8_EXPORT_PRIVATE LargePage final : public BasePage {
size_t PayloadSize() const { return payload_size_; }
+ bool PayloadContains(ConstAddress address) const {
+ return (PayloadStart() <= address) && (address < PayloadEnd());
+ }
+
private:
- LargePage(Heap* heap, BaseSpace* space, size_t);
+ LargePage(HeapBase* heap, BaseSpace* space, size_t);
~LargePage();
size_t payload_size_;
diff --git a/chromium/v8/src/heap/cppgc/heap-space.cc b/chromium/v8/src/heap/cppgc/heap-space.cc
index 70ddb935314..3a213dc18ad 100644
--- a/chromium/v8/src/heap/cppgc/heap-space.cc
+++ b/chromium/v8/src/heap/cppgc/heap-space.cc
@@ -7,7 +7,8 @@
#include <algorithm>
#include "src/base/logging.h"
-#include "src/heap/cppgc/heap-page.h"
+#include "src/base/platform/mutex.h"
+#include "src/heap/cppgc/heap-page-inl.h"
#include "src/heap/cppgc/object-start-bitmap-inl.h"
namespace cppgc {
@@ -17,11 +18,13 @@ BaseSpace::BaseSpace(RawHeap* heap, size_t index, PageType type)
: heap_(heap), index_(index), type_(type) {}
void BaseSpace::AddPage(BasePage* page) {
+ v8::base::LockGuard<v8::base::Mutex> lock(&pages_mutex_);
DCHECK_EQ(pages_.cend(), std::find(pages_.cbegin(), pages_.cend(), page));
pages_.push_back(page);
}
void BaseSpace::RemovePage(BasePage* page) {
+ v8::base::LockGuard<v8::base::Mutex> lock(&pages_mutex_);
auto it = std::find(pages_.cbegin(), pages_.cend(), page);
DCHECK_NE(pages_.cend(), it);
pages_.erase(it);
@@ -36,21 +39,6 @@ BaseSpace::Pages BaseSpace::RemoveAllPages() {
NormalPageSpace::NormalPageSpace(RawHeap* heap, size_t index)
: BaseSpace(heap, index, PageType::kNormal) {}
-void NormalPageSpace::AddToFreeList(void* address, size_t size) {
- free_list_.Add({address, size});
- NormalPage::From(BasePage::FromPayload(address))
- ->object_start_bitmap()
- .SetBit(static_cast<Address>(address));
-}
-
-void NormalPageSpace::ResetLinearAllocationBuffer() {
- if (current_lab_.size()) {
- DCHECK_NOT_NULL(current_lab_.start());
- AddToFreeList(current_lab_.start(), current_lab_.size());
- current_lab_.Set(nullptr, 0);
- }
-}
-
LargePageSpace::LargePageSpace(RawHeap* heap, size_t index)
: BaseSpace(heap, index, PageType::kLarge) {}
diff --git a/chromium/v8/src/heap/cppgc/heap-space.h b/chromium/v8/src/heap/cppgc/heap-space.h
index d84207c2cd4..a7e50d4f48d 100644
--- a/chromium/v8/src/heap/cppgc/heap-space.h
+++ b/chromium/v8/src/heap/cppgc/heap-space.h
@@ -9,6 +9,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/free-list.h"
namespace cppgc {
@@ -53,6 +54,7 @@ class V8_EXPORT_PRIVATE BaseSpace {
private:
RawHeap* heap_;
Pages pages_;
+ v8::base::Mutex pages_mutex_;
const size_t index_;
const PageType type_;
};
@@ -92,9 +94,6 @@ class V8_EXPORT_PRIVATE NormalPageSpace final : public BaseSpace {
NormalPageSpace(RawHeap* heap, size_t index);
- void AddToFreeList(void*, size_t);
- void ResetLinearAllocationBuffer();
-
LinearAllocationBuffer& linear_allocation_buffer() { return current_lab_; }
const LinearAllocationBuffer& linear_allocation_buffer() const {
return current_lab_;
diff --git a/chromium/v8/src/heap/cppgc/heap.cc b/chromium/v8/src/heap/cppgc/heap.cc
index ee400cee28c..431ad8df668 100644
--- a/chromium/v8/src/heap/cppgc/heap.cc
+++ b/chromium/v8/src/heap/cppgc/heap.cc
@@ -4,15 +4,13 @@
#include "src/heap/cppgc/heap.h"
-#include <memory>
-
-#include "src/base/platform/platform.h"
+#include "src/heap/base/stack.h"
+#include "src/heap/cppgc/garbage-collector.h"
+#include "src/heap/cppgc/gc-invoker.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
-#include "src/heap/cppgc/heap-object-header.h"
-#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-visitor.h"
-#include "src/heap/cppgc/stack.h"
-#include "src/heap/cppgc/sweeper.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/prefinalizer-handler.h"
namespace cppgc {
@@ -31,49 +29,49 @@ void VerifyCustomSpaces(
} // namespace
-std::unique_ptr<Heap> Heap::Create(cppgc::Heap::HeapOptions options) {
+std::unique_ptr<Heap> Heap::Create(std::shared_ptr<cppgc::Platform> platform,
+ cppgc::Heap::HeapOptions options) {
+ DCHECK(platform.get());
VerifyCustomSpaces(options.custom_spaces);
- return std::make_unique<internal::Heap>(options.custom_spaces.size());
+ return std::make_unique<internal::Heap>(std::move(platform),
+ std::move(options));
}
void Heap::ForceGarbageCollectionSlow(const char* source, const char* reason,
Heap::StackState stack_state) {
- internal::Heap::From(this)->CollectGarbage({stack_state});
+ internal::Heap::From(this)->CollectGarbage(
+ {internal::GarbageCollector::Config::CollectionType::kMajor,
+ stack_state});
+}
+
+AllocationHandle& Heap::GetAllocationHandle() {
+ return internal::Heap::From(this)->object_allocator();
}
namespace internal {
namespace {
-class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
- friend class HeapVisitor<ObjectSizeCounter>;
+class Unmarker final : private HeapVisitor<Unmarker> {
+ friend class HeapVisitor<Unmarker>;
public:
- size_t GetSize(RawHeap* heap) {
- Traverse(heap);
- return accumulated_size_;
- }
+ explicit Unmarker(RawHeap* heap) { Traverse(heap); }
private:
- static size_t ObjectSize(const HeapObjectHeader* header) {
- const size_t size =
- header->IsLargeObject()
- ? static_cast<const LargePage*>(BasePage::FromPayload(header))
- ->PayloadSize()
- : header->GetSize();
- DCHECK_GE(size, sizeof(HeapObjectHeader));
- return size - sizeof(HeapObjectHeader);
- }
-
bool VisitHeapObjectHeader(HeapObjectHeader* header) {
- if (header->IsFree()) return true;
- accumulated_size_ += ObjectSize(header);
+ if (header->IsMarked()) header->Unmark();
return true;
}
-
- size_t accumulated_size_ = 0;
};
+void CheckConfig(Heap::Config config) {
+ CHECK_WITH_MSG(
+ (config.collection_type != Heap::Config::CollectionType::kMinor) ||
+ (config.stack_state == Heap::Config::StackState::kNoHeapPointers),
+      "Minor GCs with stack are currently not supported");
+}
+
} // namespace
// static
@@ -81,56 +79,50 @@ cppgc::LivenessBroker LivenessBrokerFactory::Create() {
return cppgc::LivenessBroker();
}
-Heap::Heap(size_t custom_spaces)
- : raw_heap_(this, custom_spaces),
- page_backend_(std::make_unique<PageBackend>(&system_allocator_)),
- object_allocator_(&raw_heap_),
- sweeper_(&raw_heap_),
- stack_(std::make_unique<Stack>(v8::base::Stack::GetStackStart())),
- prefinalizer_handler_(std::make_unique<PreFinalizerHandler>()) {}
+Heap::Heap(std::shared_ptr<cppgc::Platform> platform,
+ cppgc::Heap::HeapOptions options)
+ : HeapBase(platform, options.custom_spaces.size()),
+ gc_invoker_(this, platform_.get(), options.stack_support),
+ growing_(&gc_invoker_, stats_collector_.get(),
+ options.resource_constraints) {}
Heap::~Heap() {
- NoGCScope no_gc(this);
+ NoGCScope no_gc(*this);
// Finish already running GC if any, but don't finalize live objects.
sweeper_.Finish();
}
-void Heap::CollectGarbage(GCConfig config) {
+void Heap::CollectGarbage(Config config) {
+ CheckConfig(config);
+
if (in_no_gc_scope()) return;
epoch_++;
- // TODO(chromium:1056170): Replace with proper mark-sweep algorithm.
+#if defined(CPPGC_YOUNG_GENERATION)
+ if (config.collection_type == Config::CollectionType::kMajor)
+ Unmarker unmarker(&raw_heap());
+#endif
+
// "Marking".
- marker_ = std::make_unique<Marker>(this);
- marker_->StartMarking(Marker::MarkingConfig(config.stack_state));
- marker_->FinishMarking();
+ marker_ = std::make_unique<Marker>(AsBase());
+ const Marker::MarkingConfig marking_config{
+ config.collection_type, config.stack_state, config.marking_type};
+ marker_->StartMarking(marking_config);
+ marker_->FinishMarking(marking_config);
// "Sweeping and finalization".
{
// Pre finalizers are forbidden from allocating objects
- NoAllocationScope no_allocation_scope_(this);
+ ObjectAllocator::NoAllocationScope no_allocation_scope_(object_allocator_);
marker_->ProcessWeakness();
prefinalizer_handler_->InvokePreFinalizers();
}
marker_.reset();
{
- NoGCScope no_gc(this);
- sweeper_.Start(Sweeper::Config::kAtomic);
+ NoGCScope no_gc(*this);
+ sweeper_.Start(config.sweeping_type);
}
}
-size_t Heap::ObjectPayloadSize() const {
- return ObjectSizeCounter().GetSize(const_cast<RawHeap*>(&raw_heap()));
-}
-
-Heap::NoGCScope::NoGCScope(Heap* heap) : heap_(heap) { heap_->no_gc_scope_++; }
-
-Heap::NoGCScope::~NoGCScope() { heap_->no_gc_scope_--; }
-
-Heap::NoAllocationScope::NoAllocationScope(Heap* heap) : heap_(heap) {
- heap_->no_allocation_scope_++;
-}
-Heap::NoAllocationScope::~NoAllocationScope() { heap_->no_allocation_scope_--; }
-
} // namespace internal
} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/heap.h b/chromium/v8/src/heap/cppgc/heap.h
index fa19b74be53..f96f81e3217 100644
--- a/chromium/v8/src/heap/cppgc/heap.h
+++ b/chromium/v8/src/heap/cppgc/heap.h
@@ -5,143 +5,47 @@
#ifndef V8_HEAP_CPPGC_HEAP_H_
#define V8_HEAP_CPPGC_HEAP_H_
-#include <memory>
-#include <vector>
-
#include "include/cppgc/heap.h"
-#include "include/cppgc/internal/gc-info.h"
-#include "include/cppgc/internal/persistent-node.h"
#include "include/cppgc/liveness-broker.h"
-#include "src/base/page-allocator.h"
-#include "src/heap/cppgc/heap-object-header.h"
-#include "src/heap/cppgc/marker.h"
-#include "src/heap/cppgc/object-allocator.h"
-#include "src/heap/cppgc/page-memory.h"
-#include "src/heap/cppgc/prefinalizer-handler.h"
-#include "src/heap/cppgc/raw-heap.h"
-#include "src/heap/cppgc/sweeper.h"
+#include "include/cppgc/macros.h"
+#include "src/heap/cppgc/garbage-collector.h"
+#include "src/heap/cppgc/gc-invoker.h"
+#include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/heap-growing.h"
namespace cppgc {
namespace internal {
-class Stack;
-
class V8_EXPORT_PRIVATE LivenessBrokerFactory {
public:
static LivenessBroker Create();
};
-class V8_EXPORT_PRIVATE Heap final : public cppgc::Heap {
+class V8_EXPORT_PRIVATE Heap final : public HeapBase,
+ public cppgc::Heap,
+ public GarbageCollector {
public:
- // NoGCScope allows going over limits and avoids triggering garbage
- // collection triggered through allocations or even explicitly.
- class V8_EXPORT_PRIVATE NoGCScope final {
- CPPGC_STACK_ALLOCATED();
-
- public:
- explicit NoGCScope(Heap* heap);
- ~NoGCScope();
-
- NoGCScope(const NoGCScope&) = delete;
- NoGCScope& operator=(const NoGCScope&) = delete;
-
- private:
- Heap* const heap_;
- };
-
- // NoAllocationScope is used in debug mode to catch unwanted allocations. E.g.
- // allocations during GC.
- class V8_EXPORT_PRIVATE NoAllocationScope final {
- CPPGC_STACK_ALLOCATED();
-
- public:
- explicit NoAllocationScope(Heap* heap);
- ~NoAllocationScope();
-
- NoAllocationScope(const NoAllocationScope&) = delete;
- NoAllocationScope& operator=(const NoAllocationScope&) = delete;
-
- private:
- Heap* const heap_;
- };
-
- struct GCConfig {
- using StackState = Heap::StackState;
-
- static GCConfig Default() { return {StackState::kMayContainHeapPointers}; }
-
- StackState stack_state = StackState::kMayContainHeapPointers;
- };
-
static Heap* From(cppgc::Heap* heap) { return static_cast<Heap*>(heap); }
-
- explicit Heap(size_t custom_spaces);
- ~Heap() final;
-
- inline void* Allocate(size_t size, GCInfoIndex index);
- inline void* Allocate(size_t size, GCInfoIndex index,
- CustomSpaceIndex space_index);
-
- void CollectGarbage(GCConfig config = GCConfig::Default());
-
- PreFinalizerHandler* prefinalizer_handler() {
- return prefinalizer_handler_.get();
- }
-
- PersistentRegion& GetStrongPersistentRegion() {
- return strong_persistent_region_;
+ static const Heap* From(const cppgc::Heap* heap) {
+ return static_cast<const Heap*>(heap);
}
- const PersistentRegion& GetStrongPersistentRegion() const {
- return strong_persistent_region_;
- }
- PersistentRegion& GetWeakPersistentRegion() {
- return weak_persistent_region_;
- }
- const PersistentRegion& GetWeakPersistentRegion() const {
- return weak_persistent_region_;
- }
-
- RawHeap& raw_heap() { return raw_heap_; }
- const RawHeap& raw_heap() const { return raw_heap_; }
- Stack* stack() { return stack_.get(); }
-
- PageBackend* page_backend() { return page_backend_.get(); }
- const PageBackend* page_backend() const { return page_backend_.get(); }
-
- Sweeper& sweeper() { return sweeper_; }
+ Heap(std::shared_ptr<cppgc::Platform> platform,
+ cppgc::Heap::HeapOptions options);
+ ~Heap() final;
- size_t epoch() const { return epoch_; }
+ HeapBase& AsBase() { return *this; }
+ const HeapBase& AsBase() const { return *this; }
- size_t ObjectPayloadSize() const;
+ void CollectGarbage(Config config) final;
- // Temporary getter until proper visitation of on-stack objects is
- // implemented.
- std::vector<HeapObjectHeader*>& objects() { return objects_; }
+ size_t epoch() const final { return epoch_; }
private:
- bool in_no_gc_scope() const { return no_gc_scope_ > 0; }
- bool is_allocation_allowed() const { return no_allocation_scope_ == 0; }
-
- RawHeap raw_heap_;
-
- v8::base::PageAllocator system_allocator_;
- std::unique_ptr<PageBackend> page_backend_;
- ObjectAllocator object_allocator_;
- Sweeper sweeper_;
-
- std::unique_ptr<Stack> stack_;
- std::unique_ptr<PreFinalizerHandler> prefinalizer_handler_;
- std::unique_ptr<Marker> marker_;
- std::vector<HeapObjectHeader*> objects_;
-
- PersistentRegion strong_persistent_region_;
- PersistentRegion weak_persistent_region_;
+ GCInvoker gc_invoker_;
+ HeapGrowing growing_;
size_t epoch_ = 0;
-
- size_t no_gc_scope_ = 0;
- size_t no_allocation_scope_ = 0;
};
} // namespace internal
diff --git a/chromium/v8/src/heap/cppgc/marker.cc b/chromium/v8/src/heap/cppgc/marker.cc
index 5a30c89f0dd..1ba6d766a4f 100644
--- a/chromium/v8/src/heap/cppgc/marker.cc
+++ b/chromium/v8/src/heap/cppgc/marker.cc
@@ -4,14 +4,75 @@
#include "src/heap/cppgc/marker.h"
+#include "include/cppgc/internal/process-heap.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-page-inl.h"
+#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/stats-collector.h"
+
+#if defined(CPPGC_CAGED_HEAP)
+#include "include/cppgc/internal/caged-heap-local-data.h"
+#endif
namespace cppgc {
namespace internal {
namespace {
+
+void EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
+ HeapBase& heap) {
+ if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
+ config.marking_type ==
+ Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ ProcessHeap::EnterIncrementalOrConcurrentMarking();
+ }
+#if defined(CPPGC_CAGED_HEAP)
+ heap.caged_heap().local_data().is_marking_in_progress = true;
+#endif
+}
+
+void ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
+ HeapBase& heap) {
+ if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
+ config.marking_type ==
+ Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ ProcessHeap::ExitIncrementalOrConcurrentMarking();
+ }
+#if defined(CPPGC_CAGED_HEAP)
+ heap.caged_heap().local_data().is_marking_in_progress = false;
+#endif
+}
+
+// Visits the remembered set that was recorded by the generational barrier.
+void VisitRememberedSlots(HeapBase& heap, MarkingVisitor* visitor) {
+#if defined(CPPGC_YOUNG_GENERATION)
+ for (void* slot : heap.remembered_slots()) {
+ auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
+ ->ObjectHeaderFromInnerAddress(slot);
+ if (slot_header.IsYoung()) continue;
+    // The design of the young generation requires collections to be executed
+    // at the top level (with the guarantee that no objects are currently in
+ // construction). This can be ensured by running young GCs from safe points
+ // or by reintroducing nested allocation scopes that avoid finalization.
+ DCHECK(!MarkingVisitor::IsInConstruction(slot_header));
+
+ void* value = *reinterpret_cast<void**>(slot);
+ visitor->DynamicallyMarkAddress(static_cast<Address>(value));
+ }
+#endif
+}
+
+// Assumes that all spaces have their LABs reset.
+void ResetRememberedSet(HeapBase& heap) {
+#if defined(CPPGC_YOUNG_GENERATION)
+ auto& local_data = heap.caged_heap().local_data();
+ local_data.age_table.Reset(&heap.caged_heap().allocator());
+ heap.remembered_slots().clear();
+#endif
+}
+
template <typename Worklist, typename Callback>
bool DrainWorklistWithDeadline(v8::base::TimeTicks deadline, Worklist* worklist,
Callback callback, int task_id) {
@@ -31,11 +92,12 @@ bool DrainWorklistWithDeadline(v8::base::TimeTicks deadline, Worklist* worklist,
}
return true;
}
+
} // namespace
constexpr int Marker::kMutatorThreadId;
-Marker::Marker(Heap* heap)
+Marker::Marker(HeapBase& heap)
: heap_(heap), marking_visitor_(CreateMutatorThreadMarkingVisitor()) {}
Marker::~Marker() {
@@ -44,17 +106,15 @@ Marker::~Marker() {
// and should thus already be marked.
if (!not_fully_constructed_worklist_.IsEmpty()) {
#if DEBUG
- DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state_);
+ DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state);
NotFullyConstructedItem item;
NotFullyConstructedWorklist::View view(&not_fully_constructed_worklist_,
kMutatorThreadId);
while (view.Pop(&item)) {
- // TODO(chromium:1056170): uncomment following check after implementing
- // FromInnerAddress.
- //
- // HeapObjectHeader* const header = HeapObjectHeader::FromInnerAddress(
- // reinterpret_cast<Address>(const_cast<void*>(item)));
- // DCHECK(header->IsMarked())
+ const HeapObjectHeader& header =
+ BasePage::FromPayload(item)->ObjectHeaderFromInnerAddress(
+ static_cast<ConstAddress>(item));
+ DCHECK(header.IsMarked());
}
#else
not_fully_constructed_worklist_.Clear();
@@ -63,19 +123,40 @@ Marker::~Marker() {
}
void Marker::StartMarking(MarkingConfig config) {
+ heap().stats_collector()->NotifyMarkingStarted();
+
config_ = config;
VisitRoots();
+ EnterIncrementalMarkingIfNeeded(config, heap());
}
-void Marker::FinishMarking() {
- if (config_.stack_state_ == MarkingConfig::StackState::kNoHeapPointers) {
+void Marker::EnterAtomicPause(MarkingConfig config) {
+ ExitIncrementalMarkingIfNeeded(config_, heap());
+ config_ = config;
+
+ // VisitRoots also resets the LABs.
+ VisitRoots();
+ if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
FlushNotFullyConstructedObjects();
+ } else {
+ MarkNotFullyConstructedObjects();
}
+}
+
+void Marker::LeaveAtomicPause() {
+ ResetRememberedSet(heap());
+ heap().stats_collector()->NotifyMarkingCompleted(
+ marking_visitor_->marked_bytes());
+}
+
+void Marker::FinishMarking(MarkingConfig config) {
+ EnterAtomicPause(config);
AdvanceMarkingWithDeadline(v8::base::TimeDelta::Max());
+ LeaveAtomicPause();
}
void Marker::ProcessWeakness() {
- heap_->GetWeakPersistentRegion().Trace(marking_visitor_.get());
+ heap().GetWeakPersistentRegion().Trace(marking_visitor_.get());
// Call weak callbacks on objects that may now be pointing to dead objects.
WeakCallbackItem item;
@@ -89,9 +170,17 @@ void Marker::ProcessWeakness() {
}
void Marker::VisitRoots() {
- heap_->GetStrongPersistentRegion().Trace(marking_visitor_.get());
- if (config_.stack_state_ != MarkingConfig::StackState::kNoHeapPointers)
- heap_->stack()->IteratePointers(marking_visitor_.get());
+ // Reset LABs before scanning roots. Clearing the LABs allows
+ // ObjectStartBitmap handling without having to consider them.
+ heap().object_allocator().ResetLinearAllocationBuffers();
+
+ heap().GetStrongPersistentRegion().Trace(marking_visitor_.get());
+ if (config_.stack_state != MarkingConfig::StackState::kNoHeapPointers) {
+ heap().stack()->IteratePointers(marking_visitor_.get());
+ }
+ if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
+ VisitRememberedSlots(heap(), marking_visitor_.get());
+ }
}
std::unique_ptr<MutatorThreadMarkingVisitor>
@@ -127,6 +216,19 @@ bool Marker::AdvanceMarkingWithDeadline(v8::base::TimeDelta duration) {
},
kMutatorThreadId))
return false;
+
+ if (!DrainWorklistWithDeadline(
+ deadline, &write_barrier_worklist_,
+ [visitor](HeapObjectHeader* header) {
+ DCHECK(header);
+ DCHECK(!MutatorThreadMarkingVisitor::IsInConstruction(*header));
+ const GCInfo& gcinfo =
+ GlobalGCInfoTable::GCInfoFromIndex(header->GetGCInfoIndex());
+ gcinfo.trace(visitor, header->Payload());
+ visitor->AccountMarkedBytes(*header);
+ },
+ kMutatorThreadId))
+ return false;
} while (!marking_worklist_.IsLocalViewEmpty(kMutatorThreadId));
return true;
@@ -141,10 +243,20 @@ void Marker::FlushNotFullyConstructedObjects() {
DCHECK(not_fully_constructed_worklist_.IsLocalViewEmpty(kMutatorThreadId));
}
+void Marker::MarkNotFullyConstructedObjects() {
+ NotFullyConstructedItem item;
+ NotFullyConstructedWorklist::View view(&not_fully_constructed_worklist_,
+ kMutatorThreadId);
+ while (view.Pop(&item)) {
+ marking_visitor_->TraceConservativelyIfNeeded(item);
+ }
+}
+
void Marker::ClearAllWorklistsForTesting() {
marking_worklist_.Clear();
not_fully_constructed_worklist_.Clear();
previously_not_fully_constructed_worklist_.Clear();
+ write_barrier_worklist_.Clear();
weak_callback_worklist_.Clear();
}
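
The marker above splits the old FinishMarking() into EnterAtomicPause(), AdvanceMarkingWithDeadline() and LeaveAtomicPause(), keeping FinishMarking(config) as the combined shortcut. A hedged sketch of how a collector could drive either path; SketchMarker and the microsecond deadline parameter are stand-ins for cppgc::internal::Marker and v8::base::TimeDelta:

#include <cstdint>
#include <limits>

// Stand-in for cppgc::internal::Marker; only the phase methods are sketched.
class SketchMarker {
 public:
  struct MarkingConfig {};  // collection type, stack state, marking type

  void StartMarking(MarkingConfig) {}
  bool AdvanceMarkingWithDeadline(int64_t /* max duration, microseconds */) {
    return true;  // true once all worklists are drained
  }
  void EnterAtomicPause(MarkingConfig) {}
  void LeaveAtomicPause() {}

  // Combined shortcut mirroring the diff: enter the pause, drain all
  // worklists without an effective deadline, then leave the pause.
  void FinishMarking(MarkingConfig config) {
    EnterAtomicPause(config);
    AdvanceMarkingWithDeadline(std::numeric_limits<int64_t>::max());
    LeaveAtomicPause();
  }
};

void DriveMarkingCycle(SketchMarker& marker) {
  SketchMarker::MarkingConfig config;
  marker.StartMarking(config);              // may start incremental marking
  marker.AdvanceMarkingWithDeadline(1000);  // optional incremental step
  marker.FinishMarking(config);             // atomic pause + finalization
}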
diff --git a/chromium/v8/src/heap/cppgc/marker.h b/chromium/v8/src/heap/cppgc/marker.h
index c18c23df2ca..3edba06c4b6 100644
--- a/chromium/v8/src/heap/cppgc/marker.h
+++ b/chromium/v8/src/heap/cppgc/marker.h
@@ -16,9 +16,19 @@
namespace cppgc {
namespace internal {
-class Heap;
+class HeapBase;
+class HeapObjectHeader;
class MutatorThreadMarkingVisitor;
+// Marking algorithm. Example for a valid call sequence creating the marking
+// phase:
+// 1. StartMarking()
+// 2. AdvanceMarkingWithDeadline() [Optional, depending on environment.]
+// 3. EnterAtomicPause()
+// 4. AdvanceMarkingWithDeadline()
+// 5. LeaveAtomicPause()
+//
+// Alternatively, FinishMarking combines steps 3.-5.
class V8_EXPORT_PRIVATE Marker {
static constexpr int kNumConcurrentMarkers = 0;
static constexpr int kNumMarkers = 1 + kNumConcurrentMarkers;
@@ -41,34 +51,29 @@ class V8_EXPORT_PRIVATE Marker {
Worklist<NotFullyConstructedItem, 16 /* local entries */, kNumMarkers>;
using WeakCallbackWorklist =
Worklist<WeakCallbackItem, 64 /* local entries */, kNumMarkers>;
+ using WriteBarrierWorklist =
+ Worklist<HeapObjectHeader*, 64 /* local entries */, kNumMarkers>;
struct MarkingConfig {
+ enum class CollectionType : uint8_t {
+ kMinor,
+ kMajor,
+ };
using StackState = cppgc::Heap::StackState;
- enum class IncrementalMarking : uint8_t { kDisabled };
- enum class ConcurrentMarking : uint8_t { kDisabled };
-
- static MarkingConfig Default() {
- return {StackState::kMayContainHeapPointers,
- IncrementalMarking::kDisabled, ConcurrentMarking::kDisabled};
- }
-
- explicit MarkingConfig(StackState stack_state)
- : MarkingConfig(stack_state, IncrementalMarking::kDisabled,
- ConcurrentMarking::kDisabled) {}
-
- MarkingConfig(StackState stack_state,
- IncrementalMarking incremental_marking_state,
- ConcurrentMarking concurrent_marking_state)
- : stack_state_(stack_state),
- incremental_marking_state_(incremental_marking_state),
- concurrent_marking_state_(concurrent_marking_state) {}
-
- StackState stack_state_;
- IncrementalMarking incremental_marking_state_;
- ConcurrentMarking concurrent_marking_state_;
+ enum MarkingType : uint8_t {
+ kAtomic,
+ kIncremental,
+ kIncrementalAndConcurrent
+ };
+
+ static constexpr MarkingConfig Default() { return {}; }
+
+ CollectionType collection_type = CollectionType::kMajor;
+ StackState stack_state = StackState::kMayContainHeapPointers;
+ MarkingType marking_type = MarkingType::kAtomic;
};
- explicit Marker(Heap* heap);
+ explicit Marker(HeapBase& heap);
virtual ~Marker();
Marker(const Marker&) = delete;
@@ -77,34 +82,56 @@ class V8_EXPORT_PRIVATE Marker {
// Initialize marking according to the given config. This method will
// trigger incremental/concurrent marking if needed.
void StartMarking(MarkingConfig config);
- // Finalize marking. This method stops incremental/concurrent marking
- // if exsists and performs atomic pause marking.
- void FinishMarking();
+
+ // Signals entering the atomic marking pause. The method
+ // - stops incremental/concurrent marking;
+ // - flushes back any in-construction worklists if needed;
+ // - updates the MarkingConfig if the stack state has changed.
+ void EnterAtomicPause(MarkingConfig config);
+
+ // Makes marking progress.
+ virtual bool AdvanceMarkingWithDeadline(v8::base::TimeDelta);
+
+ // Signals leaving the atomic marking pause. This method expects no more
+ // objects to be marked and merely updates marking states if needed.
+ void LeaveAtomicPause();
+
+ // Combines:
+ // - EnterAtomicPause()
+ // - AdvanceMarkingWithDeadline()
+ // - LeaveAtomicPause()
+ void FinishMarking(MarkingConfig config);
void ProcessWeakness();
- Heap* heap() { return heap_; }
+ HeapBase& heap() { return heap_; }
MarkingWorklist* marking_worklist() { return &marking_worklist_; }
NotFullyConstructedWorklist* not_fully_constructed_worklist() {
return &not_fully_constructed_worklist_;
}
+ WriteBarrierWorklist* write_barrier_worklist() {
+ return &write_barrier_worklist_;
+ }
WeakCallbackWorklist* weak_callback_worklist() {
return &weak_callback_worklist_;
}
void ClearAllWorklistsForTesting();
+ MutatorThreadMarkingVisitor* GetMarkingVisitorForTesting() {
+ return marking_visitor_.get();
+ }
+
protected:
virtual std::unique_ptr<MutatorThreadMarkingVisitor>
CreateMutatorThreadMarkingVisitor();
- private:
void VisitRoots();
- bool AdvanceMarkingWithDeadline(v8::base::TimeDelta);
void FlushNotFullyConstructedObjects();
+ void MarkNotFullyConstructedObjects();
- Heap* const heap_;
+ HeapBase& heap_;
MarkingConfig config_ = MarkingConfig::Default();
std::unique_ptr<MutatorThreadMarkingVisitor> marking_visitor_;
@@ -112,6 +139,7 @@ class V8_EXPORT_PRIVATE Marker {
MarkingWorklist marking_worklist_;
NotFullyConstructedWorklist not_fully_constructed_worklist_;
NotFullyConstructedWorklist previously_not_fully_constructed_worklist_;
+ WriteBarrierWorklist write_barrier_worklist_;
WeakCallbackWorklist weak_callback_worklist_;
};
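
MarkingConfig above is now a plain struct with defaulted fields (major collection, stack may contain heap pointers, atomic marking), replacing the previous three-argument constructor. A sketch of how call sites can rely on the defaults and override individual fields; the struct below mirrors the layout in the diff, with StackState simplified so the example is self-contained:

#include <cstdint>

// Mirrors the aggregate shown above; the real StackState aliases
// cppgc::Heap::StackState and is simplified here to keep the sketch
// self-contained.
struct MarkingConfig {
  enum class CollectionType : uint8_t { kMinor, kMajor };
  enum class StackState : uint8_t { kNoHeapPointers, kMayContainHeapPointers };
  enum MarkingType : uint8_t { kAtomic, kIncremental, kIncrementalAndConcurrent };

  static constexpr MarkingConfig Default() { return {}; }

  CollectionType collection_type = CollectionType::kMajor;
  StackState stack_state = StackState::kMayContainHeapPointers;
  MarkingType marking_type = MarkingType::kAtomic;
};

// Default() yields an atomic major GC that may find heap pointers on the
// stack; call sites override only the fields they care about.
MarkingConfig MakeYoungGenerationConfig() {
  MarkingConfig config = MarkingConfig::Default();
  config.collection_type = MarkingConfig::CollectionType::kMinor;
  config.stack_state = MarkingConfig::StackState::kNoHeapPointers;
  return config;
}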
diff --git a/chromium/v8/src/heap/cppgc/marking-visitor.cc b/chromium/v8/src/heap/cppgc/marking-visitor.cc
index 9647f9b3ca3..37d88e65ee3 100644
--- a/chromium/v8/src/heap/cppgc/marking-visitor.cc
+++ b/chromium/v8/src/heap/cppgc/marking-visitor.cc
@@ -5,8 +5,8 @@
#include "src/heap/cppgc/marking-visitor.h"
#include "include/cppgc/garbage-collected.h"
-#include "include/cppgc/internal/accessors.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-page-inl.h"
#include "src/heap/cppgc/heap.h"
namespace cppgc {
@@ -17,13 +17,14 @@ bool MarkingVisitor::IsInConstruction(const HeapObjectHeader& header) {
return header.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>();
}
-MarkingVisitor::MarkingVisitor(Marker* marking_handler, int task_id)
- : marker_(marking_handler),
- marking_worklist_(marking_handler->marking_worklist(), task_id),
- not_fully_constructed_worklist_(
- marking_handler->not_fully_constructed_worklist(), task_id),
- weak_callback_worklist_(marking_handler->weak_callback_worklist(),
- task_id) {}
+MarkingVisitor::MarkingVisitor(
+ HeapBase& heap, Marker::MarkingWorklist* marking_worklist,
+ Marker::NotFullyConstructedWorklist* not_fully_constructed_worklist,
+ Marker::WeakCallbackWorklist* weak_callback_worklist, int task_id)
+ : ConservativeTracingVisitor(heap, *heap.page_backend()),
+ marking_worklist_(marking_worklist, task_id),
+ not_fully_constructed_worklist_(not_fully_constructed_worklist, task_id),
+ weak_callback_worklist_(weak_callback_worklist, task_id) {}
void MarkingVisitor::AccountMarkedBytes(const HeapObjectHeader& header) {
marked_bytes_ +=
@@ -74,11 +75,22 @@ void MarkingVisitor::VisitWeakRoot(const void* object, TraceDescriptor desc,
// construction, then it should be reachable from the stack.
return;
}
- // Since weak roots arev only traced at the end of marking, we can execute
+ // Since weak roots are only traced at the end of marking, we can execute
// the callback instead of registering it.
weak_callback(LivenessBrokerFactory::Create(), weak_root);
}
+void MarkingVisitor::VisitPointer(const void* address) {
+ TraceConservativelyIfNeeded(address);
+}
+
+void MarkingVisitor::VisitConservatively(HeapObjectHeader& header,
+ TraceConservativelyCallback callback) {
+ MarkHeaderNoTracing(&header);
+ callback(this, header);
+ AccountMarkedBytes(header);
+}
+
void MarkingVisitor::MarkHeader(HeapObjectHeader* header,
TraceDescriptor desc) {
DCHECK(header);
@@ -94,7 +106,7 @@ void MarkingVisitor::MarkHeader(HeapObjectHeader* header,
bool MarkingVisitor::MarkHeaderNoTracing(HeapObjectHeader* header) {
DCHECK(header);
// A GC should only mark the objects that belong in its heap.
- DCHECK_EQ(marker_->heap(), BasePage::FromPayload(header)->heap());
+ DCHECK_EQ(&heap_, BasePage::FromPayload(header)->heap());
// Never mark free space objects. This would e.g. hint to marking a promptly
// freed backing store.
DCHECK(!header->IsFree());
@@ -114,30 +126,29 @@ void MarkingVisitor::FlushWorklists() {
}
void MarkingVisitor::DynamicallyMarkAddress(ConstAddress address) {
- for (auto* header : marker_->heap()->objects()) {
- if (address >= header->Payload() &&
- address < (header->Payload() + header->GetSize())) {
- header->TryMarkAtomic();
- }
+ HeapObjectHeader& header =
+ BasePage::FromPayload(address)->ObjectHeaderFromInnerAddress(
+ const_cast<Address>(address));
+ DCHECK(!IsInConstruction(header));
+ if (MarkHeaderNoTracing(&header)) {
+ marking_worklist_.Push(
+ {reinterpret_cast<void*>(header.Payload()),
+ GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
}
- // TODO(chromium:1056170): Implement dynamically getting HeapObjectHeader
- // for handling previously_not_fully_constructed objects. Requires object
- // start bitmap.
}
-void MarkingVisitor::VisitPointer(const void* address) {
- for (auto* header : marker_->heap()->objects()) {
- if (address >= header->Payload() &&
- address < (header->Payload() + header->GetSize())) {
- header->TryMarkAtomic();
- }
- }
- // TODO(chromium:1056170): Implement proper conservative scanning for
- // on-stack objects. Requires page bloom filter.
+void MarkingVisitor::MarkObject(HeapObjectHeader& header) {
+ MarkHeader(
+ &header,
+ {header.Payload(),
+ GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
}
MutatorThreadMarkingVisitor::MutatorThreadMarkingVisitor(Marker* marker)
- : MarkingVisitor(marker, Marker::kMutatorThreadId) {}
+ : MarkingVisitor(marker->heap(), marker->marking_worklist(),
+ marker->not_fully_constructed_worklist(),
+ marker->weak_callback_worklist(),
+ Marker::kMutatorThreadId) {}
} // namespace internal
} // namespace cppgc
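
DynamicallyMarkAddress() and conservative stack scanning above now resolve an interior pointer to its HeapObjectHeader through the owning page's object-start bitmap instead of scanning a vector of all objects. A simplified sketch of that lookup, assuming one bit per allocation granule; the real ObjectStartBitmap packs the bits into cells, and BasePage::FromPayload() locates the page first:

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative stand-ins; cppgc's ObjectStartBitmap additionally packs the
// bits into cells and scans them word-wise.
constexpr size_t kAllocationGranularity = 8;

class SketchObjectStartBitmap {
 public:
  SketchObjectStartBitmap(uintptr_t payload_start, size_t payload_size)
      : payload_start_(payload_start),
        bits_(payload_size / kAllocationGranularity, false) {}

  void SetBit(uintptr_t object_start) { bits_[Index(object_start)] = true; }

  // Walks backwards from the granule containing |inner_address| to the
  // closest recorded object start, which is where the header lives.
  uintptr_t FindHeader(uintptr_t inner_address) const {
    size_t index = Index(inner_address);
    while (index > 0 && !bits_[index]) --index;
    return payload_start_ + index * kAllocationGranularity;
  }

 private:
  size_t Index(uintptr_t address) const {
    return (address - payload_start_) / kAllocationGranularity;
  }

  const uintptr_t payload_start_;
  std::vector<bool> bits_;
};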
diff --git a/chromium/v8/src/heap/cppgc/marking-visitor.h b/chromium/v8/src/heap/cppgc/marking-visitor.h
index 33616b37844..50427162a14 100644
--- a/chromium/v8/src/heap/cppgc/marking-visitor.h
+++ b/chromium/v8/src/heap/cppgc/marking-visitor.h
@@ -8,20 +8,25 @@
#include "include/cppgc/source-location.h"
#include "include/cppgc/trace-trait.h"
#include "include/v8config.h"
+#include "src/base/macros.h"
+#include "src/heap/base/stack.h"
#include "src/heap/cppgc/globals.h"
-#include "src/heap/cppgc/heap-object-header.h"
-#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marker.h"
-#include "src/heap/cppgc/stack.h"
#include "src/heap/cppgc/visitor.h"
namespace cppgc {
namespace internal {
-class MarkingVisitor : public VisitorBase, public StackVisitor {
+class BasePage;
+class HeapObjectHeader;
+
+class MarkingVisitor : public ConservativeTracingVisitor,
+ public heap::base::StackVisitor {
public:
- MarkingVisitor(Marker*, int);
+ MarkingVisitor(HeapBase&, Marker::MarkingWorklist*,
+ Marker::NotFullyConstructedWorklist*,
+ Marker::WeakCallbackWorklist*, int);
virtual ~MarkingVisitor() = default;
MarkingVisitor(const MarkingVisitor&) = delete;
@@ -30,6 +35,7 @@ class MarkingVisitor : public VisitorBase, public StackVisitor {
void FlushWorklists();
void DynamicallyMarkAddress(ConstAddress);
+ void MarkObject(HeapObjectHeader&);
void AccountMarkedBytes(const HeapObjectHeader&);
size_t marked_bytes() const { return marked_bytes_; }
@@ -43,7 +49,10 @@ class MarkingVisitor : public VisitorBase, public StackVisitor {
void VisitRoot(const void*, TraceDescriptor) override;
void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
const void*) override;
+ void VisitConservatively(HeapObjectHeader&,
+ TraceConservativelyCallback) override;
+ // StackVisitor interface.
void VisitPointer(const void*) override;
private:
@@ -51,12 +60,11 @@ class MarkingVisitor : public VisitorBase, public StackVisitor {
bool MarkHeaderNoTracing(HeapObjectHeader*);
void RegisterWeakCallback(WeakCallback, const void*) override;
- Marker* const marker_;
Marker::MarkingWorklist::View marking_worklist_;
Marker::NotFullyConstructedWorklist::View not_fully_constructed_worklist_;
Marker::WeakCallbackWorklist::View weak_callback_worklist_;
- size_t marked_bytes_;
+ size_t marked_bytes_ = 0;
};
class V8_EXPORT_PRIVATE MutatorThreadMarkingVisitor : public MarkingVisitor {
diff --git a/chromium/v8/src/heap/cppgc/object-allocator-inl.h b/chromium/v8/src/heap/cppgc/object-allocator-inl.h
index 7d8d126d633..b75c296f51a 100644
--- a/chromium/v8/src/heap/cppgc/object-allocator-inl.h
+++ b/chromium/v8/src/heap/cppgc/object-allocator-inl.h
@@ -10,7 +10,7 @@
#include "src/base/logging.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
#include "src/heap/cppgc/heap-object-header.h"
-#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-page-inl.h"
#include "src/heap/cppgc/object-allocator.h"
#include "src/heap/cppgc/object-start-bitmap-inl.h"
#include "src/heap/cppgc/object-start-bitmap.h"
@@ -20,6 +20,7 @@ namespace cppgc {
namespace internal {
void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
+ DCHECK(is_allocation_allowed());
const size_t allocation_size =
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
const RawHeap::RegularSpaceType type =
@@ -30,6 +31,7 @@ void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo,
CustomSpaceIndex space_index) {
+ DCHECK(is_allocation_allowed());
const size_t allocation_size =
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
return AllocateObjectOnSpace(
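
AllocateObject() above pads the requested size with the HeapObjectHeader and rounds up to the allocation granularity before choosing a space. A worked example of that arithmetic, assuming for illustration an 8-byte granularity and an 8-byte header (the real constants come from globals.h and sizeof(HeapObjectHeader)):

#include <cstddef>

// Hypothetical constants for illustration only.
constexpr size_t kAllocationGranularity = 8;
constexpr size_t kHeaderSize = 8;

constexpr size_t RoundUpTo(size_t value, size_t granularity) {
  return (value + granularity - 1) & ~(granularity - 1);
}

// A 20-byte payload request becomes 20 + 8 = 28 bytes including the header,
// which the allocator rounds up to the next granule: 32 bytes.
static_assert(RoundUpTo(20 + kHeaderSize, kAllocationGranularity) == 32,
              "allocation size is padded to the granularity");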
diff --git a/chromium/v8/src/heap/cppgc/object-allocator.cc b/chromium/v8/src/heap/cppgc/object-allocator.cc
index df83d8ee9d3..b8203a1d8a2 100644
--- a/chromium/v8/src/heap/cppgc/object-allocator.cc
+++ b/chromium/v8/src/heap/cppgc/object-allocator.cc
@@ -4,36 +4,119 @@
#include "src/heap/cppgc/object-allocator.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/free-list.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/object-allocator-inl.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/sweeper.h"
namespace cppgc {
namespace internal {
namespace {
-void* AllocateLargeObject(RawHeap* raw_heap, LargePageSpace* space, size_t size,
+void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
+#if defined(CPPGC_YOUNG_GENERATION)
+ DCHECK_LT(begin, end);
+
+ static constexpr auto kEntrySize = AgeTable::kEntrySizeInBytes;
+
+ const uintptr_t offset_begin = CagedHeap::OffsetFromAddress(begin);
+ const uintptr_t offset_end = CagedHeap::OffsetFromAddress(end);
+
+ const uintptr_t young_offset_begin = (begin == page->PayloadStart())
+ ? RoundDown(offset_begin, kEntrySize)
+ : RoundUp(offset_begin, kEntrySize);
+ const uintptr_t young_offset_end = (end == page->PayloadEnd())
+ ? RoundUp(offset_end, kEntrySize)
+ : RoundDown(offset_end, kEntrySize);
+
+ auto& age_table = page->heap()->caged_heap().local_data().age_table;
+ for (auto offset = young_offset_begin; offset < young_offset_end;
+ offset += AgeTable::kEntrySizeInBytes) {
+ age_table[offset] = AgeTable::Age::kYoung;
+ }
+
+ // Set to kUnknown the first and the last regions of the newly allocated
+ // linear buffer.
+ if (begin != page->PayloadStart() && !IsAligned(offset_begin, kEntrySize))
+ age_table[offset_begin] = AgeTable::Age::kUnknown;
+ if (end != page->PayloadEnd() && !IsAligned(offset_end, kEntrySize))
+ age_table[offset_end] = AgeTable::Age::kUnknown;
+#endif
+}
+
+void AddToFreeList(NormalPageSpace* space, Address start, size_t size) {
+ auto& free_list = space->free_list();
+ free_list.Add({start, size});
+ NormalPage::From(BasePage::FromPayload(start))
+ ->object_start_bitmap()
+ .SetBit(start);
+}
+
+void ReplaceLinearAllocationBuffer(NormalPageSpace* space,
+ StatsCollector* stats_collector,
+ Address new_buffer, size_t new_size) {
+ DCHECK_NOT_NULL(space);
+ DCHECK_NOT_NULL(stats_collector);
+
+ auto& lab = space->linear_allocation_buffer();
+ if (lab.size()) {
+ AddToFreeList(space, lab.start(), lab.size());
+ stats_collector->NotifyExplicitFree(lab.size());
+ }
+
+ lab.Set(new_buffer, new_size);
+ if (new_size) {
+ DCHECK_NOT_NULL(new_buffer);
+ stats_collector->NotifyAllocation(new_size);
+ auto* page = NormalPage::From(BasePage::FromPayload(new_buffer));
+ page->object_start_bitmap().ClearBit(new_buffer);
+ MarkRangeAsYoung(page, new_buffer, new_buffer + new_size);
+ }
+}
+
+void* AllocateLargeObject(PageBackend* page_backend, LargePageSpace* space,
+ StatsCollector* stats_collector, size_t size,
GCInfoIndex gcinfo) {
- LargePage* page = LargePage::Create(space, size);
+ LargePage* page = LargePage::Create(page_backend, space, size);
+ space->AddPage(page);
+
auto* header = new (page->ObjectHeader())
HeapObjectHeader(HeapObjectHeader::kLargeObjectSizeInHeader, gcinfo);
+ stats_collector->NotifyAllocation(size);
+ MarkRangeAsYoung(page, page->PayloadStart(), page->PayloadEnd());
+
return header->Payload();
}
} // namespace
-ObjectAllocator::ObjectAllocator(RawHeap* heap) : raw_heap_(heap) {}
+ObjectAllocator::ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
+ StatsCollector* stats_collector)
+ : raw_heap_(heap),
+ page_backend_(page_backend),
+ stats_collector_(stats_collector) {}
void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace* space, size_t size,
GCInfoIndex gcinfo) {
+ void* memory = OutOfLineAllocateImpl(space, size, gcinfo);
+ stats_collector_->NotifySafePointForConservativeCollection();
+ return memory;
+}
+
+void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace* space,
+ size_t size, GCInfoIndex gcinfo) {
DCHECK_EQ(0, size & kAllocationMask);
DCHECK_LE(kFreeListEntrySize, size);
@@ -41,7 +124,8 @@ void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace* space, size_t size,
if (size >= kLargeObjectSizeThreshold) {
auto* large_space = LargePageSpace::From(
raw_heap_->Space(RawHeap::RegularSpaceType::kLarge));
- return AllocateLargeObject(raw_heap_, large_space, size, gcinfo);
+ return AllocateLargeObject(page_backend_, large_space, stats_collector_,
+ size, gcinfo);
}
// 2. Try to allocate from the freelist.
@@ -57,11 +141,17 @@ void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace* space, size_t size,
raw_heap_->heap()->sweeper().Finish();
// 5. Add a new page to this heap.
- NormalPage::Create(space);
+ auto* new_page = NormalPage::Create(page_backend_, space);
+ space->AddPage(new_page);
+
+ // 6. Set linear allocation buffer to new page.
+ ReplaceLinearAllocationBuffer(space, stats_collector_,
+ new_page->PayloadStart(),
+ new_page->PayloadSize());
- // 6. Try to allocate from the freelist. This allocation must succeed.
- void* result = AllocateFromFreeList(space, size, gcinfo);
- CPPGC_CHECK(result);
+ // 7. Allocate from it. The allocation must succeed.
+ void* result = AllocateObjectOnSpace(space, size, gcinfo);
+ CHECK(result);
return result;
}
@@ -71,17 +161,40 @@ void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace* space, size_t size,
const FreeList::Block entry = space->free_list().Allocate(size);
if (!entry.address) return nullptr;
- auto& current_lab = space->linear_allocation_buffer();
- if (current_lab.size()) {
- space->AddToFreeList(current_lab.start(), current_lab.size());
- }
+ ReplaceLinearAllocationBuffer(
+ space, stats_collector_, static_cast<Address>(entry.address), entry.size);
- current_lab.Set(static_cast<Address>(entry.address), entry.size);
- NormalPage::From(BasePage::FromPayload(current_lab.start()))
- ->object_start_bitmap()
- .ClearBit(current_lab.start());
return AllocateObjectOnSpace(space, size, gcinfo);
}
+void ObjectAllocator::ResetLinearAllocationBuffers() {
+ class Resetter : public HeapVisitor<Resetter> {
+ public:
+ explicit Resetter(StatsCollector* stats) : stats_collector_(stats) {}
+
+ bool VisitLargePageSpace(LargePageSpace*) { return true; }
+
+ bool VisitNormalPageSpace(NormalPageSpace* space) {
+ ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
+ return true;
+ }
+
+ private:
+ StatsCollector* stats_collector_;
+ } visitor(stats_collector_);
+
+ visitor.Traverse(raw_heap_);
+}
+
+ObjectAllocator::NoAllocationScope::NoAllocationScope(
+ ObjectAllocator& allocator)
+ : allocator_(allocator) {
+ allocator.no_allocation_scope_++;
+}
+
+ObjectAllocator::NoAllocationScope::~NoAllocationScope() {
+ allocator_.no_allocation_scope_--;
+}
+
} // namespace internal
} // namespace cppgc
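
MarkRangeAsYoung() above only marks age-table entries fully covered by the new linear allocation buffer as kYoung; partially covered boundary entries become kUnknown, and ranges touching the page payload edges may round outwards. A worked sketch of the boundary rounding with an illustrative entry size (the real value is AgeTable::kEntrySizeInBytes):

#include <cstdint>

// Illustrative entry size only.
constexpr uintptr_t kEntrySize = 4096;

constexpr uintptr_t RoundDownTo(uintptr_t v) { return v & ~(kEntrySize - 1); }
constexpr uintptr_t RoundUpTo(uintptr_t v) {
  return (v + kEntrySize - 1) & ~(kEntrySize - 1);
}

// A buffer covering caged-heap offsets [5000, 13000) away from the payload
// edges fully covers only the entry starting at 8192, so exactly that entry
// becomes kYoung; the partially covered entries holding offsets 5000 and
// 13000 are set to kUnknown because they may also contain other objects.
static_assert(RoundUpTo(5000) == 8192, "begin rounds inwards");
static_assert(RoundDownTo(13000) == 12288, "end rounds inwards");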
diff --git a/chromium/v8/src/heap/cppgc/object-allocator.h b/chromium/v8/src/heap/cppgc/object-allocator.h
index 510a935f565..1536ed63730 100644
--- a/chromium/v8/src/heap/cppgc/object-allocator.h
+++ b/chromium/v8/src/heap/cppgc/object-allocator.h
@@ -5,33 +5,70 @@
#ifndef V8_HEAP_CPPGC_OBJECT_ALLOCATOR_H_
#define V8_HEAP_CPPGC_OBJECT_ALLOCATOR_H_
+#include "include/cppgc/allocation.h"
#include "include/cppgc/internal/gc-info.h"
+#include "include/cppgc/macros.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/raw-heap.h"
namespace cppgc {
+
+class V8_EXPORT AllocationHandle {
+ private:
+ AllocationHandle() = default;
+ friend class internal::ObjectAllocator;
+};
+
namespace internal {
-class V8_EXPORT_PRIVATE ObjectAllocator final {
+class StatsCollector;
+class PageBackend;
+
+class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
public:
- explicit ObjectAllocator(RawHeap* heap);
+ // NoAllocationScope is used in debug mode to catch unwanted allocations,
+ // e.g. allocations during GC.
+ class V8_EXPORT_PRIVATE NoAllocationScope final {
+ CPPGC_STACK_ALLOCATED();
+
+ public:
+ explicit NoAllocationScope(ObjectAllocator&);
+ ~NoAllocationScope();
+
+ NoAllocationScope(const NoAllocationScope&) = delete;
+ NoAllocationScope& operator=(const NoAllocationScope&) = delete;
+
+ private:
+ ObjectAllocator& allocator_;
+ };
+
+ ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
+ StatsCollector* stats_collector);
inline void* AllocateObject(size_t size, GCInfoIndex gcinfo);
inline void* AllocateObject(size_t size, GCInfoIndex gcinfo,
CustomSpaceIndex space_index);
+ void ResetLinearAllocationBuffers();
+
private:
// Returns the initially tried SpaceType to allocate an object of |size| bytes
// on. Returns the largest regular object size bucket for large objects.
inline static RawHeap::RegularSpaceType GetInitialSpaceIndexForSize(
size_t size);
+ bool is_allocation_allowed() const { return no_allocation_scope_ == 0; }
+
inline void* AllocateObjectOnSpace(NormalPageSpace* space, size_t size,
GCInfoIndex gcinfo);
void* OutOfLineAllocate(NormalPageSpace*, size_t, GCInfoIndex);
+ void* OutOfLineAllocateImpl(NormalPageSpace*, size_t, GCInfoIndex);
void* AllocateFromFreeList(NormalPageSpace*, size_t, GCInfoIndex);
RawHeap* raw_heap_;
+ PageBackend* page_backend_;
+ StatsCollector* stats_collector_;
+ size_t no_allocation_scope_ = 0;
};
} // namespace internal
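
The allocator now owns the no-allocation counter that AllocateObject() DCHECKs, so phases that must not allocate (for example the atomic GC pause) can be expressed as an RAII guard. A sketch with a hypothetical allocator that reproduces only the scope counting:

#include <cassert>
#include <cstddef>
#include <new>

// Hypothetical allocator; only the scope counting of the real ObjectAllocator
// is reproduced here.
class SketchAllocator {
 public:
  class NoAllocationScope {
   public:
    explicit NoAllocationScope(SketchAllocator& allocator)
        : allocator_(allocator) {
      ++allocator_.no_allocation_scope_;
    }
    ~NoAllocationScope() { --allocator_.no_allocation_scope_; }

    NoAllocationScope(const NoAllocationScope&) = delete;
    NoAllocationScope& operator=(const NoAllocationScope&) = delete;

   private:
    SketchAllocator& allocator_;
  };

  void* Allocate(size_t size) {
    // Mirrors the DCHECK(is_allocation_allowed()) added to AllocateObject().
    assert(no_allocation_scope_ == 0 && "allocation inside NoAllocationScope");
    return ::operator new(size);
  }

 private:
  size_t no_allocation_scope_ = 0;  // nesting depth, not a boolean
};

void AtomicPauseBody(SketchAllocator& allocator) {
  SketchAllocator::NoAllocationScope no_allocation(allocator);
  // allocator.Allocate(16);  // would trip the assert while the scope is live
}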
diff --git a/chromium/v8/src/heap/cppgc/object-start-bitmap-inl.h b/chromium/v8/src/heap/cppgc/object-start-bitmap-inl.h
index 93243979aac..6d963cc9486 100644
--- a/chromium/v8/src/heap/cppgc/object-start-bitmap-inl.h
+++ b/chromium/v8/src/heap/cppgc/object-start-bitmap-inl.h
@@ -19,6 +19,7 @@ ObjectStartBitmap::ObjectStartBitmap(Address offset) : offset_(offset) {
HeapObjectHeader* ObjectStartBitmap::FindHeader(
ConstAddress address_maybe_pointing_to_the_middle_of_object) const {
+ DCHECK_LE(offset_, address_maybe_pointing_to_the_middle_of_object);
size_t object_offset =
address_maybe_pointing_to_the_middle_of_object - offset_;
size_t object_start_number = object_offset / kAllocationGranularity;
diff --git a/chromium/v8/src/heap/cppgc/page-memory-inl.h b/chromium/v8/src/heap/cppgc/page-memory-inl.h
index 23ce061b435..8b2022eeb26 100644
--- a/chromium/v8/src/heap/cppgc/page-memory-inl.h
+++ b/chromium/v8/src/heap/cppgc/page-memory-inl.h
@@ -16,19 +16,19 @@ inline bool SupportsCommittingGuardPages(PageAllocator* allocator) {
return kGuardPageSize % allocator->CommitPageSize() == 0;
}
-Address NormalPageMemoryRegion::Lookup(Address address) const {
+Address NormalPageMemoryRegion::Lookup(ConstAddress address) const {
size_t index = GetIndex(address);
if (!page_memories_in_use_[index]) return nullptr;
const MemoryRegion writeable_region = GetPageMemory(index).writeable_region();
return writeable_region.Contains(address) ? writeable_region.base() : nullptr;
}
-Address LargePageMemoryRegion::Lookup(Address address) const {
+Address LargePageMemoryRegion::Lookup(ConstAddress address) const {
const MemoryRegion writeable_region = GetPageMemory().writeable_region();
return writeable_region.Contains(address) ? writeable_region.base() : nullptr;
}
-Address PageMemoryRegion::Lookup(Address address) const {
+Address PageMemoryRegion::Lookup(ConstAddress address) const {
DCHECK(reserved_region().Contains(address));
return is_large()
? static_cast<const LargePageMemoryRegion*>(this)->Lookup(address)
@@ -36,7 +36,7 @@ Address PageMemoryRegion::Lookup(Address address) const {
address);
}
-PageMemoryRegion* PageMemoryRegionTree::Lookup(Address address) const {
+PageMemoryRegion* PageMemoryRegionTree::Lookup(ConstAddress address) const {
auto it = set_.upper_bound(address);
// This check also covers set_.size() > 0, since for empty vectors it is
// guaranteed that begin() == end().
@@ -46,7 +46,7 @@ PageMemoryRegion* PageMemoryRegionTree::Lookup(Address address) const {
return nullptr;
}
-Address PageBackend::Lookup(Address address) const {
+Address PageBackend::Lookup(ConstAddress address) const {
PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(address);
return pmr ? pmr->Lookup(address) : nullptr;
}
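
PageMemoryRegionTree::Lookup() above keys regions by their reserved base address, so resolving an arbitrary address is an upper_bound() query followed by a containment check on the preceding entry. A sketch with a simplified region type standing in for PageMemoryRegion:

#include <cstdint>
#include <iterator>
#include <map>

// Simplified stand-in for PageMemoryRegion: just a reserved [base, base+size)
// range.
struct SketchRegion {
  uintptr_t base;
  uintptr_t size;
  bool Contains(uintptr_t address) const {
    return address - base < size;  // unsigned wrap filters address < base
  }
};

class SketchRegionTree {
 public:
  void Add(SketchRegion* region) { set_.emplace(region->base, region); }

  SketchRegion* Lookup(uintptr_t address) const {
    // upper_bound() returns the first region whose base is strictly greater
    // than |address|; the only possible match is therefore its predecessor.
    auto it = set_.upper_bound(address);
    if (it == set_.begin()) return nullptr;
    SketchRegion* candidate = std::prev(it)->second;
    return candidate->Contains(address) ? candidate : nullptr;
  }

 private:
  std::map<uintptr_t, SketchRegion*> set_;
};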
diff --git a/chromium/v8/src/heap/cppgc/page-memory.h b/chromium/v8/src/heap/cppgc/page-memory.h
index f3bc685fa31..b7f1917be7f 100644
--- a/chromium/v8/src/heap/cppgc/page-memory.h
+++ b/chromium/v8/src/heap/cppgc/page-memory.h
@@ -30,7 +30,7 @@ class V8_EXPORT_PRIVATE MemoryRegion final {
size_t size() const { return size_; }
Address end() const { return base_ + size_; }
- bool Contains(Address addr) const {
+ bool Contains(ConstAddress addr) const {
return (reinterpret_cast<uintptr_t>(addr) -
reinterpret_cast<uintptr_t>(base_)) < size_;
}
@@ -70,7 +70,7 @@ class V8_EXPORT_PRIVATE PageMemoryRegion {
// Lookup writeable base for an |address| that's contained in
// PageMemoryRegion. Filters out addresses that are contained in non-writeable
// regions (e.g. guard pages).
- inline Address Lookup(Address address) const;
+ inline Address Lookup(ConstAddress address) const;
// Disallow copy/move.
PageMemoryRegion(const PageMemoryRegion&) = delete;
@@ -111,7 +111,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
// protection.
void Free(Address);
- inline Address Lookup(Address) const;
+ inline Address Lookup(ConstAddress) const;
void UnprotectForTesting() final;
@@ -122,7 +122,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
page_memories_in_use_[index] = value;
}
- size_t GetIndex(Address address) const {
+ size_t GetIndex(ConstAddress address) const {
return static_cast<size_t>(address - reserved_region().base()) >>
kPageSizeLog2;
}
@@ -143,7 +143,7 @@ class V8_EXPORT_PRIVATE LargePageMemoryRegion final : public PageMemoryRegion {
reserved_region().size() - 2 * kGuardPageSize));
}
- inline Address Lookup(Address) const;
+ inline Address Lookup(ConstAddress) const;
void UnprotectForTesting() final;
};
@@ -161,10 +161,10 @@ class V8_EXPORT_PRIVATE PageMemoryRegionTree final {
void Add(PageMemoryRegion*);
void Remove(PageMemoryRegion*);
- inline PageMemoryRegion* Lookup(Address) const;
+ inline PageMemoryRegion* Lookup(ConstAddress) const;
private:
- std::map<Address, PageMemoryRegion*> set_;
+ std::map<ConstAddress, PageMemoryRegion*> set_;
};
// A pool of PageMemory objects represented by the writeable base addresses.
@@ -216,7 +216,7 @@ class V8_EXPORT_PRIVATE PageBackend final {
// Returns the writeable base if |address| is contained in a valid page
// memory.
- inline Address Lookup(Address) const;
+ inline Address Lookup(ConstAddress) const;
// Disallow copy/move.
PageBackend(const PageBackend&) = delete;
diff --git a/chromium/v8/src/heap/cppgc/persistent-node.cc b/chromium/v8/src/heap/cppgc/persistent-node.cc
index 299cefc5210..9c5113f86a2 100644
--- a/chromium/v8/src/heap/cppgc/persistent-node.cc
+++ b/chromium/v8/src/heap/cppgc/persistent-node.cc
@@ -7,9 +7,21 @@
#include <algorithm>
#include <numeric>
+#include "include/cppgc/persistent.h"
+
namespace cppgc {
namespace internal {
+PersistentRegion::~PersistentRegion() {
+ for (auto& slots : nodes_) {
+ for (auto& node : *slots) {
+ if (node.IsUsed()) {
+ static_cast<PersistentBase*>(node.owner())->ClearFromGC();
+ }
+ }
+ }
+}
+
size_t PersistentRegion::NodesInUse() const {
return std::accumulate(
nodes_.cbegin(), nodes_.cend(), 0u, [](size_t acc, const auto& slots) {
diff --git a/chromium/v8/src/heap/cppgc/platform.cc b/chromium/v8/src/heap/cppgc/platform.cc
index 3b20060392d..e96d69b2257 100644
--- a/chromium/v8/src/heap/cppgc/platform.cc
+++ b/chromium/v8/src/heap/cppgc/platform.cc
@@ -8,18 +8,12 @@
#include "src/heap/cppgc/gc-info-table.h"
namespace cppgc {
-namespace internal {
-
-static PageAllocator* g_page_allocator;
-
-} // namespace internal
-void InitializePlatform(PageAllocator* page_allocator) {
- internal::g_page_allocator = page_allocator;
+void InitializeProcess(PageAllocator* page_allocator) {
internal::GlobalGCInfoTable::Create(page_allocator);
}
-void ShutdownPlatform() { internal::g_page_allocator = nullptr; }
+void ShutdownProcess() {}
namespace internal {
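
InitializePlatform()/ShutdownPlatform() become InitializeProcess()/ShutdownProcess() above, and the page allocator is no longer cached globally; only the GCInfo table is created. A minimal embedder-setup sketch; the declarations mirror the definitions above so the sketch stands alone, and GetPlatformPageAllocator() is a hypothetical helper the embedder would provide:

// Declarations mirroring the definitions above; in a real embedder they come
// from the public cppgc headers.
namespace cppgc {
class PageAllocator;
void InitializeProcess(PageAllocator* page_allocator);
void ShutdownProcess();
}  // namespace cppgc

// Hypothetical helper for however the embedder obtains its page allocator.
cppgc::PageAllocator* GetPlatformPageAllocator();

void EmbedderStartup() {
  // Must run once per process before any cppgc heap is created; after this
  // change it only sets up the global GCInfo table.
  cppgc::InitializeProcess(GetPlatformPageAllocator());
}

void EmbedderShutdown() { cppgc::ShutdownProcess(); }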
diff --git a/chromium/v8/src/heap/cppgc/pointer-policies.cc b/chromium/v8/src/heap/cppgc/pointer-policies.cc
index e9dfcecdf3e..5048d1bd59f 100644
--- a/chromium/v8/src/heap/cppgc/pointer-policies.cc
+++ b/chromium/v8/src/heap/cppgc/pointer-policies.cc
@@ -3,10 +3,10 @@
// found in the LICENSE file.
#include "include/cppgc/internal/pointer-policies.h"
-#include "include/cppgc/internal/persistent-node.h"
+#include "include/cppgc/internal/persistent-node.h"
#include "src/base/macros.h"
-#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-page-inl.h"
#include "src/heap/cppgc/heap.h"
namespace cppgc {
diff --git a/chromium/v8/src/heap/cppgc/prefinalizer-handler.cc b/chromium/v8/src/heap/cppgc/prefinalizer-handler.cc
index 40107c15262..c28cedfbab9 100644
--- a/chromium/v8/src/heap/cppgc/prefinalizer-handler.cc
+++ b/chromium/v8/src/heap/cppgc/prefinalizer-handler.cc
@@ -8,6 +8,7 @@
#include <memory>
#include "src/base/platform/platform.h"
+#include "src/heap/cppgc/heap-page-inl.h"
#include "src/heap/cppgc/heap.h"
namespace cppgc {
@@ -15,14 +16,16 @@ namespace internal {
// static
void PreFinalizerRegistrationDispatcher::RegisterPrefinalizer(
- cppgc::Heap* heap, PreFinalizer prefinalzier) {
- internal::Heap::From(heap)->prefinalizer_handler()->RegisterPrefinalizer(
- prefinalzier);
+ PreFinalizer pre_finalizer) {
+ BasePage::FromPayload(pre_finalizer.object)
+ ->heap()
+ ->prefinalizer_handler()
+ ->RegisterPrefinalizer(pre_finalizer);
}
bool PreFinalizerRegistrationDispatcher::PreFinalizer::operator==(
const PreFinalizer& other) {
- return (object_ == other.object_) && (callback_ == other.callback_);
+ return (object == other.object) && (callback == other.callback);
}
PreFinalizerHandler::PreFinalizerHandler()
@@ -32,12 +35,12 @@ PreFinalizerHandler::PreFinalizerHandler()
{
}
-void PreFinalizerHandler::RegisterPrefinalizer(PreFinalizer prefinalizer) {
+void PreFinalizerHandler::RegisterPrefinalizer(PreFinalizer pre_finalizer) {
DCHECK(CurrentThreadIsCreationThread());
DCHECK_EQ(ordered_pre_finalizers_.end(),
std::find(ordered_pre_finalizers_.begin(),
- ordered_pre_finalizers_.end(), prefinalizer));
- ordered_pre_finalizers_.push_back(prefinalizer);
+ ordered_pre_finalizers_.end(), pre_finalizer));
+ ordered_pre_finalizers_.push_back(pre_finalizer);
}
void PreFinalizerHandler::InvokePreFinalizers() {
@@ -48,7 +51,7 @@ void PreFinalizerHandler::InvokePreFinalizers() {
std::remove_if(ordered_pre_finalizers_.rbegin(),
ordered_pre_finalizers_.rend(),
[liveness_broker](const PreFinalizer& pf) {
- return (pf.callback_)(liveness_broker, pf.object_);
+ return (pf.callback)(liveness_broker, pf.object);
})
.base());
ordered_pre_finalizers_.shrink_to_fit();
diff --git a/chromium/v8/src/heap/cppgc/prefinalizer-handler.h b/chromium/v8/src/heap/cppgc/prefinalizer-handler.h
index a6255534710..15d24e862cf 100644
--- a/chromium/v8/src/heap/cppgc/prefinalizer-handler.h
+++ b/chromium/v8/src/heap/cppgc/prefinalizer-handler.h
@@ -19,7 +19,7 @@ class PreFinalizerHandler final {
PreFinalizerHandler();
- void RegisterPrefinalizer(PreFinalizer prefinalzier);
+ void RegisterPrefinalizer(PreFinalizer pre_finalizer);
void InvokePreFinalizers();
diff --git a/chromium/v8/src/heap/cppgc/process-heap.cc b/chromium/v8/src/heap/cppgc/process-heap.cc
new file mode 100644
index 00000000000..14089883967
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/process-heap.cc
@@ -0,0 +1,13 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/process-heap.h"
+
+namespace cppgc {
+namespace internal {
+
+AtomicEntryFlag ProcessHeap::concurrent_marking_flag_;
+
+} // namespace internal
+} // namespace cppgc
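
ProcessHeap's AtomicEntryFlag gives fast paths (presumably the write barrier) a cheap, process-wide way to ask whether any heap might currently be in incremental or concurrent marking; only the static flag definition appears above. An illustrative enter/exit flag built on a relaxed atomic counter; this is not the actual cppgc AtomicEntryFlag implementation:

#include <atomic>
#include <cstdint>

// Illustrative enter/exit flag; not the actual cppgc AtomicEntryFlag.
class SketchAtomicEntryFlag {
 public:
  void Enter() { entries_.fetch_add(1, std::memory_order_relaxed); }
  void Exit() { entries_.fetch_sub(1, std::memory_order_relaxed); }

  // Fast check for the hot path: false means no heap is marking for sure,
  // true means a slower, precise check is needed.
  bool MightBeEntered() const {
    return entries_.load(std::memory_order_relaxed) != 0;
  }

 private:
  std::atomic<int32_t> entries_{0};
};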
diff --git a/chromium/v8/src/heap/cppgc/raw-heap.cc b/chromium/v8/src/heap/cppgc/raw-heap.cc
index cf7311b46f2..19200ae8a20 100644
--- a/chromium/v8/src/heap/cppgc/raw-heap.cc
+++ b/chromium/v8/src/heap/cppgc/raw-heap.cc
@@ -12,7 +12,7 @@ namespace internal {
// static
constexpr size_t RawHeap::kNumberOfRegularSpaces;
-RawHeap::RawHeap(Heap* heap, size_t custom_spaces) : main_heap_(heap) {
+RawHeap::RawHeap(HeapBase* heap, size_t custom_spaces) : main_heap_(heap) {
size_t i = 0;
for (; i < static_cast<size_t>(RegularSpaceType::kLarge); ++i) {
spaces_.push_back(std::make_unique<NormalPageSpace>(this, i));
diff --git a/chromium/v8/src/heap/cppgc/raw-heap.h b/chromium/v8/src/heap/cppgc/raw-heap.h
index 0591fa87ab7..e63fc32c439 100644
--- a/chromium/v8/src/heap/cppgc/raw-heap.h
+++ b/chromium/v8/src/heap/cppgc/raw-heap.h
@@ -16,7 +16,7 @@
namespace cppgc {
namespace internal {
-class Heap;
+class HeapBase;
class BaseSpace;
// RawHeap is responsible for space management.
@@ -47,7 +47,7 @@ class V8_EXPORT_PRIVATE RawHeap final {
using iterator = Spaces::iterator;
using const_iterator = Spaces::const_iterator;
- explicit RawHeap(Heap* heap, size_t custom_spaces);
+ explicit RawHeap(HeapBase* heap, size_t custom_spaces);
~RawHeap();
// Space iteration support.
@@ -77,8 +77,8 @@ class V8_EXPORT_PRIVATE RawHeap final {
return const_cast<RawHeap&>(*this).CustomSpace(space_index);
}
- Heap* heap() { return main_heap_; }
- const Heap* heap() const { return main_heap_; }
+ HeapBase* heap() { return main_heap_; }
+ const HeapBase* heap() const { return main_heap_; }
private:
size_t SpaceIndexForCustomSpace(CustomSpaceIndex space_index) const {
@@ -96,7 +96,7 @@ class V8_EXPORT_PRIVATE RawHeap final {
return const_cast<RawHeap&>(*this).Space(space_index);
}
- Heap* main_heap_;
+ HeapBase* main_heap_;
Spaces spaces_;
};
diff --git a/chromium/v8/src/heap/cppgc/stats-collector.cc b/chromium/v8/src/heap/cppgc/stats-collector.cc
new file mode 100644
index 00000000000..a92aba021d7
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/stats-collector.cc
@@ -0,0 +1,114 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/stats-collector.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "src/base/logging.h"
+
+namespace cppgc {
+namespace internal {
+
+// static
+constexpr size_t StatsCollector::kAllocationThresholdBytes;
+
+void StatsCollector::RegisterObserver(AllocationObserver* observer) {
+ DCHECK_EQ(allocation_observers_.end(),
+ std::find(allocation_observers_.begin(),
+ allocation_observers_.end(), observer));
+ allocation_observers_.push_back(observer);
+}
+
+void StatsCollector::UnregisterObserver(AllocationObserver* observer) {
+ auto it = std::find(allocation_observers_.begin(),
+ allocation_observers_.end(), observer);
+ DCHECK_NE(allocation_observers_.end(), it);
+ allocation_observers_.erase(it);
+}
+
+void StatsCollector::NotifyAllocation(size_t bytes) {
+ // The current GC may not have been started. This is ok as recording considers
+ // the whole time range between garbage collections.
+ allocated_bytes_since_safepoint_ += bytes;
+}
+
+void StatsCollector::NotifyExplicitFree(size_t bytes) {
+ // See IncreaseAllocatedObjectSize for lifetime of the counter.
+ explicitly_freed_bytes_since_safepoint_ += bytes;
+}
+
+void StatsCollector::NotifySafePointForConservativeCollection() {
+ if (std::abs(allocated_bytes_since_safepoint_ -
+ explicitly_freed_bytes_since_safepoint_) >=
+ static_cast<int64_t>(kAllocationThresholdBytes)) {
+ AllocatedObjectSizeSafepointImpl();
+ }
+}
+
+void StatsCollector::AllocatedObjectSizeSafepointImpl() {
+ allocated_bytes_since_end_of_marking_ +=
+ static_cast<int64_t>(allocated_bytes_since_safepoint_) -
+ static_cast<int64_t>(explicitly_freed_bytes_since_safepoint_);
+
+ // These observer methods may start or finalize GC. In case they trigger a
+ // final GC pause, the delta counters are reset there and the following
+ // observer calls are called with '0' updates.
+ ForAllAllocationObservers([this](AllocationObserver* observer) {
+ // Recompute delta here so that a GC finalization is able to clear the
+ // delta for other observer calls.
+ int64_t delta = allocated_bytes_since_safepoint_ -
+ explicitly_freed_bytes_since_safepoint_;
+ if (delta < 0) {
+ observer->AllocatedObjectSizeDecreased(static_cast<size_t>(-delta));
+ } else {
+ observer->AllocatedObjectSizeIncreased(static_cast<size_t>(delta));
+ }
+ });
+ allocated_bytes_since_safepoint_ = 0;
+ explicitly_freed_bytes_since_safepoint_ = 0;
+}
+
+void StatsCollector::NotifyMarkingStarted() {
+ DCHECK_EQ(GarbageCollectionState::kNotRunning, gc_state_);
+ gc_state_ = GarbageCollectionState::kMarking;
+}
+
+void StatsCollector::NotifyMarkingCompleted(size_t marked_bytes) {
+ DCHECK_EQ(GarbageCollectionState::kMarking, gc_state_);
+ gc_state_ = GarbageCollectionState::kSweeping;
+ current_.marked_bytes = marked_bytes;
+ allocated_bytes_since_end_of_marking_ = 0;
+ allocated_bytes_since_safepoint_ = 0;
+ explicitly_freed_bytes_since_safepoint_ = 0;
+
+ ForAllAllocationObservers([marked_bytes](AllocationObserver* observer) {
+ observer->ResetAllocatedObjectSize(marked_bytes);
+ });
+}
+
+const StatsCollector::Event& StatsCollector::NotifySweepingCompleted() {
+ DCHECK_EQ(GarbageCollectionState::kSweeping, gc_state_);
+ gc_state_ = GarbageCollectionState::kNotRunning;
+ previous_ = std::move(current_);
+ current_ = Event();
+ return previous_;
+}
+
+size_t StatsCollector::allocated_object_size() const {
+ // During sweeping we refer to the current Event as that already holds the
+ // correct marking information. In all other phases, the previous event holds
+ // the most up-to-date marking information.
+ const Event& event =
+ gc_state_ == GarbageCollectionState::kSweeping ? current_ : previous_;
+ DCHECK_GE(static_cast<int64_t>(event.marked_bytes) +
+ allocated_bytes_since_end_of_marking_,
+ 0);
+ return static_cast<size_t>(static_cast<int64_t>(event.marked_bytes) +
+ allocated_bytes_since_end_of_marking_);
+}
+
+} // namespace internal
+} // namespace cppgc
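
NotifySafePointForConservativeCollection() above only notifies observers once the net change since the last safepoint reaches kAllocationThresholdBytes, and that net change may be negative. A small sketch of just the threshold decision:

#include <cstdint>
#include <cstdlib>

// Mirrors only the threshold decision; the real StatsCollector also resets
// the counters and dispatches the (possibly negative) delta to observers.
constexpr int64_t kAllocationThresholdBytes = 1024;

bool ShouldReportAtSafepoint(int64_t allocated_since_safepoint,
                             int64_t freed_since_safepoint) {
  return std::abs(allocated_since_safepoint - freed_since_safepoint) >=
         kAllocationThresholdBytes;
}

// ShouldReportAtSafepoint(1500, 900) is false: the net change is +600 bytes,
// below the 1024-byte threshold.
// ShouldReportAtSafepoint(200, 1800) is true: the net change is -1600 bytes
// and is reported to observers as a decrease.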
diff --git a/chromium/v8/src/heap/cppgc/stats-collector.h b/chromium/v8/src/heap/cppgc/stats-collector.h
new file mode 100644
index 00000000000..cc122a17dd5
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/stats-collector.h
@@ -0,0 +1,130 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_STATS_COLLECTOR_H_
+#define V8_HEAP_CPPGC_STATS_COLLECTOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+// Sink for various time and memory statistics.
+class V8_EXPORT_PRIVATE StatsCollector final {
+ public:
+ // POD to hold interesting data accumulated during a garbage collection cycle.
+ //
+ // The event is always fully populated when looking at previous events but
+ // may only be partially populated when looking at the current event.
+ struct Event final {
+ // Marked bytes collected during marking.
+ size_t marked_bytes = 0;
+ };
+
+ // Observer for allocated object size. May be used to implement heap growing
+ // heuristics.
+ class AllocationObserver {
+ public:
+ // Called after observing at least
+ // StatsCollector::kAllocationThresholdBytes changed bytes through
+ // allocation or explicit free. Reports both, negative and positive
+ // increments, to allow observer to decide whether absolute values or only
+ // the deltas is interesting.
+ //
+ // May trigger GC.
+ virtual void AllocatedObjectSizeIncreased(size_t) = 0;
+ virtual void AllocatedObjectSizeDecreased(size_t) = 0;
+
+ // Called when the exact allocated object size is known. In practice, this
+ // is after marking, when marked bytes == allocated bytes.
+ //
+ // Must not trigger GC synchronously.
+ virtual void ResetAllocatedObjectSize(size_t) = 0;
+ };
+
+ // Observers are implemented using virtual calls. Avoid notifications below
+ // reasonably interesting sizes.
+ static constexpr size_t kAllocationThresholdBytes = 1024;
+
+ StatsCollector() = default;
+ StatsCollector(const StatsCollector&) = delete;
+ StatsCollector& operator=(const StatsCollector&) = delete;
+
+ void RegisterObserver(AllocationObserver*);
+ void UnregisterObserver(AllocationObserver*);
+
+ void NotifyAllocation(size_t);
+ void NotifyExplicitFree(size_t);
+ // Safepoints should only be invoked when garbage collections are possible.
+ // This is necessary as increments and decrements are reported as close to
+ // their actual allocation/reclamation as possible.
+ void NotifySafePointForConservativeCollection();
+
+ // Indicates a new garbage collection cycle.
+ void NotifyMarkingStarted();
+ // Indicates that marking of the current garbage collection cycle is
+ // completed.
+ void NotifyMarkingCompleted(size_t marked_bytes);
+ // Indicates the end of a garbage collection cycle. This means that sweeping
+ // is finished at this point.
+ const Event& NotifySweepingCompleted();
+
+ // Size of live objects in bytes on the heap. Based on the most recent marked
+ // bytes and the bytes allocated since last marking.
+ size_t allocated_object_size() const;
+
+ private:
+ enum class GarbageCollectionState : uint8_t {
+ kNotRunning,
+ kMarking,
+ kSweeping
+ };
+
+ // Invokes |callback| for all registered observers.
+ template <typename Callback>
+ void ForAllAllocationObservers(Callback callback);
+
+ void AllocatedObjectSizeSafepointImpl();
+
+ // Allocated bytes since the end of marking. This counter is reset after
+ // marking, as those bytes are then accounted for in marked_bytes. May be
+ // negative if an object that was marked live in the previous cycle is
+ // explicitly freed afterwards.
+ int64_t allocated_bytes_since_end_of_marking_ = 0;
+ // Counters for allocations and explicit frees. The individual values are
+ // never negative, but their delta may be, for the same reason that
+ // allocated_bytes_since_end_of_marking_ may be negative. Signed integer
+ // arithmetic is kept for simplicity.
+ int64_t allocated_bytes_since_safepoint_ = 0;
+ int64_t explicitly_freed_bytes_since_safepoint_ = 0;
+
+ // Vector to allow fast iteration over observers. Registering and
+ // unregistering only happens on startup/teardown.
+ std::vector<AllocationObserver*> allocation_observers_;
+
+ GarbageCollectionState gc_state_ = GarbageCollectionState::kNotRunning;
+
+ // The event being filled by the current GC cycle between
+ // NotifyMarkingStarted() and NotifySweepingCompleted().
+ Event current_;
+ // The previous GC event, populated at NotifySweepingCompleted().
+ Event previous_;
+};
+
+template <typename Callback>
+void StatsCollector::ForAllAllocationObservers(Callback callback) {
+ for (AllocationObserver* observer : allocation_observers_) {
+ callback(observer);
+ }
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_STATS_COLLECTOR_H_
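
AllocationObserver is the hook through which heap-growing heuristics see threshold-sized increments and decrements between GCs and the exact live size after marking. A sketch of a trivial observer that asks for a GC once its estimate crosses a limit; the interface is mirrored here so the example is self-contained, and the request_gc callback is an assumption, not a cppgc API:

#include <cstddef>

// Reduced mirror of the observer interface above.
class AllocationObserver {
 public:
  virtual ~AllocationObserver() = default;
  virtual void AllocatedObjectSizeIncreased(size_t) = 0;
  virtual void AllocatedObjectSizeDecreased(size_t) = 0;
  virtual void ResetAllocatedObjectSize(size_t) = 0;
};

// Hypothetical growing heuristic: ask for a GC once the estimated live size
// plus new allocations crosses a limit.
class SketchHeapGrowing final : public AllocationObserver {
 public:
  SketchHeapGrowing(size_t limit, void (*request_gc)())
      : limit_(limit), request_gc_(request_gc) {}

  void AllocatedObjectSizeIncreased(size_t delta) final {
    estimated_size_ += delta;
    if (estimated_size_ > limit_) request_gc_();
  }
  void AllocatedObjectSizeDecreased(size_t delta) final {
    estimated_size_ = delta < estimated_size_ ? estimated_size_ - delta : 0;
  }
  void ResetAllocatedObjectSize(size_t marked_bytes) final {
    // The exact live size is known after marking; rebase the estimate.
    estimated_size_ = marked_bytes;
  }

 private:
  const size_t limit_;
  void (*const request_gc_)();
  size_t estimated_size_ = 0;
};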
diff --git a/chromium/v8/src/heap/cppgc/sweeper.cc b/chromium/v8/src/heap/cppgc/sweeper.cc
index 77d2d3c33e7..98a3117a2d4 100644
--- a/chromium/v8/src/heap/cppgc/sweeper.cc
+++ b/chromium/v8/src/heap/cppgc/sweeper.cc
@@ -4,8 +4,13 @@
#include "src/heap/cppgc/sweeper.h"
+#include <atomic>
+#include <memory>
#include <vector>
+#include "include/cppgc/platform.h"
+#include "src/base/optional.h"
+#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/free-list.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
@@ -17,12 +22,16 @@
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/sanitizers.h"
+#include "src/heap/cppgc/stats-collector.h"
+#include "src/heap/cppgc/task-handle.h"
namespace cppgc {
namespace internal {
namespace {
+using v8::base::Optional;
+
class ObjectStartBitmapVerifier
: private HeapVisitor<ObjectStartBitmapVerifier> {
friend class HeapVisitor<ObjectStartBitmapVerifier>;
@@ -54,15 +63,126 @@ class ObjectStartBitmapVerifier
HeapObjectHeader* prev_ = nullptr;
};
+template <typename T>
+class ThreadSafeStack {
+ public:
+ ThreadSafeStack() = default;
+
+ void Push(T t) {
+ v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
+ vector_.push_back(std::move(t));
+ }
+
+ Optional<T> Pop() {
+ v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
+ if (vector_.empty()) return v8::base::nullopt;
+ T top = std::move(vector_.back());
+ vector_.pop_back();
+ // std::move is redundant but is needed to work around a bug in gcc-7.
+ return std::move(top);
+ }
+
+ template <typename It>
+ void Insert(It begin, It end) {
+ v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
+ vector_.insert(vector_.end(), begin, end);
+ }
+
+ bool IsEmpty() const {
+ v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
+ return vector_.empty();
+ }
+
+ private:
+ std::vector<T> vector_;
+ mutable v8::base::Mutex mutex_;
+};
+
struct SpaceState {
- BaseSpace::Pages unswept_pages;
+ struct SweptPageState {
+ BasePage* page = nullptr;
+ std::vector<HeapObjectHeader*> unfinalized_objects;
+ FreeList cached_free_list;
+ std::vector<FreeList::Block> unfinalized_free_list;
+ bool is_empty = false;
+ };
+
+ ThreadSafeStack<BasePage*> unswept_pages;
+ ThreadSafeStack<SweptPageState> swept_unfinalized_pages;
};
+
using SpaceStates = std::vector<SpaceState>;
-bool SweepNormalPage(NormalPage* page) {
+void StickyUnmark(HeapObjectHeader* header) {
+ // Young generation in Oilpan uses sticky mark bits.
+#if !defined(CPPGC_YOUNG_GENERATION)
+ header->Unmark<HeapObjectHeader::AccessMode::kAtomic>();
+#endif
+}
+
+// Builder that finalizes objects and adds freelist entries right away.
+class InlinedFinalizationBuilder final {
+ public:
+ using ResultType = bool;
+
+ explicit InlinedFinalizationBuilder(BasePage* page) : page_(page) {}
+
+ void AddFinalizer(HeapObjectHeader* header, size_t size) {
+ header->Finalize();
+ SET_MEMORY_INACCESIBLE(header, size);
+ }
+
+ void AddFreeListEntry(Address start, size_t size) {
+ auto* space = NormalPageSpace::From(page_->space());
+ space->free_list().Add({start, size});
+ }
+
+ ResultType GetResult(bool is_empty) { return is_empty; }
+
+ private:
+ BasePage* page_;
+};
+
+// Builder that produces results for deferred processing.
+class DeferredFinalizationBuilder final {
+ public:
+ using ResultType = SpaceState::SweptPageState;
+
+ explicit DeferredFinalizationBuilder(BasePage* page) { result_.page = page; }
+
+ void AddFinalizer(HeapObjectHeader* header, size_t size) {
+ if (header->IsFinalizable()) {
+ result_.unfinalized_objects.push_back({header});
+ found_finalizer_ = true;
+ } else {
+ SET_MEMORY_INACCESIBLE(header, size);
+ }
+ }
+
+ void AddFreeListEntry(Address start, size_t size) {
+ if (found_finalizer_) {
+ result_.unfinalized_free_list.push_back({start, size});
+ } else {
+ result_.cached_free_list.Add({start, size});
+ }
+ found_finalizer_ = false;
+ }
+
+ ResultType&& GetResult(bool is_empty) {
+ result_.is_empty = is_empty;
+ return std::move(result_);
+ }
+
+ private:
+ ResultType result_;
+ bool found_finalizer_ = false;
+};
+
+template <typename FinalizationBuilder>
+typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
constexpr auto kAtomicAccess = HeapObjectHeader::AccessMode::kAtomic;
+ FinalizationBuilder builder(page);
- auto* space = NormalPageSpace::From(page->space());
ObjectStartBitmap& bitmap = page->object_start_bitmap();
bitmap.Clear();
@@ -79,18 +199,18 @@ bool SweepNormalPage(NormalPage* page) {
}
// Check if object is not marked (not reachable).
if (!header->IsMarked<kAtomicAccess>()) {
- header->Finalize();
- SET_MEMORY_INACCESIBLE(header, size);
+ builder.AddFinalizer(header, size);
begin += size;
continue;
}
// The object is alive.
const Address header_address = reinterpret_cast<Address>(header);
if (start_of_gap != header_address) {
- space->AddToFreeList(start_of_gap,
- static_cast<size_t>(header_address - start_of_gap));
+ builder.AddFreeListEntry(
+ start_of_gap, static_cast<size_t>(header_address - start_of_gap));
+ bitmap.SetBit(start_of_gap);
}
- header->Unmark<kAtomicAccess>();
+ StickyUnmark(header);
bitmap.SetBit(begin);
begin += size;
start_of_gap = begin;
@@ -98,56 +218,150 @@ bool SweepNormalPage(NormalPage* page) {
if (start_of_gap != page->PayloadStart() &&
start_of_gap != page->PayloadEnd()) {
- space->AddToFreeList(
+ builder.AddFreeListEntry(
start_of_gap, static_cast<size_t>(page->PayloadEnd() - start_of_gap));
+ bitmap.SetBit(start_of_gap);
}
const bool is_empty = (start_of_gap == page->PayloadStart());
- return is_empty;
+ return builder.GetResult(is_empty);
}
-// This visitor:
-// - resets linear allocation buffers and clears free lists for all spaces;
-// - moves all Heap pages to local Sweeper's state (SpaceStates).
-class PrepareForSweepVisitor final
- : public HeapVisitor<PrepareForSweepVisitor> {
+// SweepFinalizer is responsible for heap/space/page finalization. Finalization
+// is defined as a step following concurrent sweeping which:
+// - calls finalizers;
+// - returns (unmaps) empty pages;
+// - merges freelists into the space's freelist.
+class SweepFinalizer final {
public:
- explicit PrepareForSweepVisitor(SpaceStates* states) : states_(states) {}
+ explicit SweepFinalizer(cppgc::Platform* platform) : platform_(platform) {}
- bool VisitNormalPageSpace(NormalPageSpace* space) {
- space->ResetLinearAllocationBuffer();
- space->free_list().Clear();
- (*states_)[space->index()].unswept_pages = space->RemoveAllPages();
- return true;
+ void FinalizeHeap(SpaceStates* space_states) {
+ for (SpaceState& space_state : *space_states) {
+ FinalizeSpace(&space_state);
+ }
}
- bool VisitLargePageSpace(LargePageSpace* space) {
- (*states_)[space->index()].unswept_pages = space->RemoveAllPages();
+ void FinalizeSpace(SpaceState* space_state) {
+ while (auto page_state = space_state->swept_unfinalized_pages.Pop()) {
+ FinalizePage(&*page_state);
+ }
+ }
+
+ bool FinalizeSpaceWithDeadline(SpaceState* space_state,
+ double deadline_in_seconds) {
+ DCHECK(platform_);
+ static constexpr size_t kDeadlineCheckInterval = 8;
+ size_t page_count = 1;
+
+ while (auto page_state = space_state->swept_unfinalized_pages.Pop()) {
+ FinalizePage(&*page_state);
+
+ if (page_count % kDeadlineCheckInterval == 0 &&
+ deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
+ return false;
+ }
+
+ page_count++;
+ }
return true;
}
+ void FinalizePage(SpaceState::SweptPageState* page_state) {
+ DCHECK(page_state);
+ DCHECK(page_state->page);
+ BasePage* page = page_state->page;
+
+ // Call finalizers.
+ for (HeapObjectHeader* object : page_state->unfinalized_objects) {
+ object->Finalize();
+ }
+
+ // Unmap page if empty.
+ if (page_state->is_empty) {
+ BasePage::Destroy(page);
+ return;
+ }
+
+ DCHECK(!page->is_large());
+
+ // Merge freelists without finalizers.
+ FreeList& space_freelist =
+ NormalPageSpace::From(page->space())->free_list();
+ space_freelist.Append(std::move(page_state->cached_free_list));
+
+ // Merge freelist with finalizers.
+ for (auto entry : page_state->unfinalized_free_list) {
+ space_freelist.Add(std::move(entry));
+ }
+
+ // Add the page to the space.
+ page->space()->AddPage(page);
+ }
+
private:
- SpaceStates* states_;
+ cppgc::Platform* platform_;
};
-class MutatorThreadSweepVisitor final
- : private HeapVisitor<MutatorThreadSweepVisitor> {
- friend class HeapVisitor<MutatorThreadSweepVisitor>;
+class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
+ friend class HeapVisitor<MutatorThreadSweeper>;
public:
- explicit MutatorThreadSweepVisitor(SpaceStates* space_states) {
- for (SpaceState& state : *space_states) {
- for (BasePage* page : state.unswept_pages) {
- Traverse(page);
+ explicit MutatorThreadSweeper(SpaceStates* states, cppgc::Platform* platform)
+ : states_(states), platform_(platform) {}
+
+ void Sweep() {
+ for (SpaceState& state : *states_) {
+ while (auto page = state.unswept_pages.Pop()) {
+ Traverse(*page);
+ }
+ }
+ }
+
+ bool SweepWithDeadline(double deadline_in_seconds) {
+ DCHECK(platform_);
+ static constexpr double kSlackInSeconds = 0.001;
+ for (SpaceState& state : *states_) {
+ // FinalizeSpaceWithDeadline() and SweepSpaceWithDeadline() only check the
+ // deadline once every kDeadlineCheckInterval pages. So we give a small
+ // slack for safety.
+ const double remaining_budget = deadline_in_seconds - kSlackInSeconds -
+ platform_->MonotonicallyIncreasingTime();
+ if (remaining_budget <= 0.) return false;
+
+ // First, prioritize finalization of pages that were swept concurrently.
+ SweepFinalizer finalizer(platform_);
+ if (!finalizer.FinalizeSpaceWithDeadline(&state, deadline_in_seconds)) {
+ return false;
+ }
+
+ // Help out the concurrent sweeper.
+ if (!SweepSpaceWithDeadline(&state, deadline_in_seconds)) {
+ return false;
}
- state.unswept_pages.clear();
}
+ return true;
}
private:
+ bool SweepSpaceWithDeadline(SpaceState* state, double deadline_in_seconds) {
+ static constexpr size_t kDeadlineCheckInterval = 8;
+ size_t page_count = 1;
+ while (auto page = state->unswept_pages.Pop()) {
+ Traverse(*page);
+ if (page_count % kDeadlineCheckInterval == 0 &&
+ deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
+ return false;
+ }
+ page_count++;
+ }
+
+ return true;
+ }
+
bool VisitNormalPage(NormalPage* page) {
- const bool is_empty = SweepNormalPage(page);
+ const bool is_empty = SweepNormalPage<InlinedFinalizationBuilder>(page);
if (is_empty) {
NormalPage::Destroy(page);
} else {
@@ -157,23 +371,119 @@ class MutatorThreadSweepVisitor final
}
bool VisitLargePage(LargePage* page) {
- if (page->ObjectHeader()->IsMarked()) {
+ HeapObjectHeader* header = page->ObjectHeader();
+ if (header->IsMarked()) {
+ StickyUnmark(header);
page->space()->AddPage(page);
} else {
- page->ObjectHeader()->Finalize();
+ header->Finalize();
LargePage::Destroy(page);
}
return true;
}
+
+ SpaceStates* states_;
+ cppgc::Platform* platform_;
+};
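
SweepWithDeadline() above and FinalizeSpaceWithDeadline() in SweepFinalizer share one incremental pattern: process items in a loop and consult the clock only every few items, so that querying the time does not dominate the work. A standalone sketch of that pattern; the function names and the interval value are illustrative.

#include <cstddef>
#include <functional>

// Returns true if all work completed, false if the deadline was hit and the
// caller should reschedule. |do_one_item| performs one unit of work and
// returns false once there is nothing left to do.
bool ProcessWithDeadline(const std::function<bool()>& do_one_item,
                         const std::function<double()>& now_in_seconds,
                         double deadline_in_seconds) {
  static constexpr std::size_t kCheckInterval = 8;
  std::size_t count = 1;
  while (do_one_item()) {
    if (count % kCheckInterval == 0 &&
        now_in_seconds() >= deadline_in_seconds) {
      return false;  // out of budget; the caller reschedules the work
    }
    count++;
  }
  return true;  // all work done within the budget
}
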
+
+class ConcurrentSweepTask final : public v8::JobTask,
+ private HeapVisitor<ConcurrentSweepTask> {
+ friend class HeapVisitor<ConcurrentSweepTask>;
+
+ public:
+ explicit ConcurrentSweepTask(SpaceStates* states) : states_(states) {}
+
+ void Run(v8::JobDelegate* delegate) final {
+ for (SpaceState& state : *states_) {
+ while (auto page = state.unswept_pages.Pop()) {
+ Traverse(*page);
+ if (delegate->ShouldYield()) return;
+ }
+ }
+ is_completed_.store(true, std::memory_order_relaxed);
+ }
+
+ size_t GetMaxConcurrency() const final {
+ return is_completed_.load(std::memory_order_relaxed) ? 0 : 1;
+ }
+
+ private:
+ bool VisitNormalPage(NormalPage* page) {
+ SpaceState::SweptPageState sweep_result =
+ SweepNormalPage<DeferredFinalizationBuilder>(page);
+ const size_t space_index = page->space()->index();
+ DCHECK_GT(states_->size(), space_index);
+ SpaceState& space_state = (*states_)[space_index];
+ space_state.swept_unfinalized_pages.Push(std::move(sweep_result));
+ return true;
+ }
+
+ bool VisitLargePage(LargePage* page) {
+ HeapObjectHeader* header = page->ObjectHeader();
+ if (header->IsMarked()) {
+ StickyUnmark(header);
+ page->space()->AddPage(page);
+ return true;
+ }
+ if (!header->IsFinalizable()) {
+ LargePage::Destroy(page);
+ return true;
+ }
+ const size_t space_index = page->space()->index();
+ DCHECK_GT(states_->size(), space_index);
+ SpaceState& state = (*states_)[space_index];
+ state.swept_unfinalized_pages.Push(
+ {page, {page->ObjectHeader()}, {}, {}, true});
+ return true;
+ }
+
+ SpaceStates* states_;
+ std::atomic_bool is_completed_{false};
+};
+
+// This visitor:
+// - clears free lists for all spaces (linear allocation buffers are expected
+//   to already be empty, see the precondition on Sweeper::Start());
+// - moves all heap pages to the local Sweeper's state (SpaceStates).
+class PrepareForSweepVisitor final
+ : public HeapVisitor<PrepareForSweepVisitor> {
+ public:
+ explicit PrepareForSweepVisitor(SpaceStates* states) : states_(states) {}
+
+ bool VisitNormalPageSpace(NormalPageSpace* space) {
+ DCHECK(!space->linear_allocation_buffer().size());
+ space->free_list().Clear();
+ ExtractPages(space);
+ return true;
+ }
+
+ bool VisitLargePageSpace(LargePageSpace* space) {
+ ExtractPages(space);
+ return true;
+ }
+
+ private:
+ void ExtractPages(BaseSpace* space) {
+ BaseSpace::Pages space_pages = space->RemoveAllPages();
+ (*states_)[space->index()].unswept_pages.Insert(space_pages.begin(),
+ space_pages.end());
+ }
+
+ SpaceStates* states_;
};
} // namespace
class Sweeper::SweeperImpl final {
public:
- explicit SweeperImpl(RawHeap* heap) : heap_(heap) {
- space_states_.resize(heap_->size());
- }
+ SweeperImpl(RawHeap* heap, cppgc::Platform* platform,
+ StatsCollector* stats_collector)
+ : heap_(heap),
+ stats_collector_(stats_collector),
+ space_states_(heap->size()),
+ platform_(platform),
+ foreground_task_runner_(platform_->GetForegroundTaskRunner()) {}
+
+ ~SweeperImpl() { CancelSweepers(); }
void Start(Config config) {
is_in_progress_ = true;
@@ -181,29 +491,114 @@ class Sweeper::SweeperImpl final {
ObjectStartBitmapVerifier().Verify(heap_);
#endif
PrepareForSweepVisitor(&space_states_).Traverse(heap_);
+
if (config == Config::kAtomic) {
Finish();
} else {
DCHECK_EQ(Config::kIncrementalAndConcurrent, config);
- // TODO(chromium:1056170): Schedule concurrent sweeping.
+ ScheduleIncrementalSweeping();
+ ScheduleConcurrentSweeping();
}
}
void Finish() {
if (!is_in_progress_) return;
- MutatorThreadSweepVisitor s(&space_states_);
+ // First, call finalizers on the mutator thread.
+ SweepFinalizer finalizer(platform_);
+ finalizer.FinalizeHeap(&space_states_);
+
+ // Then, help out the concurrent thread.
+ MutatorThreadSweeper sweeper(&space_states_, platform_);
+ sweeper.Sweep();
+
+ // Synchronize with the concurrent sweeper and call remaining finalizers.
+ SynchronizeAndFinalizeConcurrentSweeping();
is_in_progress_ = false;
+
+ stats_collector_->NotifySweepingCompleted();
}
private:
- SpaceStates space_states_;
+ class IncrementalSweepTask : public v8::IdleTask {
+ public:
+ using Handle = SingleThreadedHandle;
+
+ explicit IncrementalSweepTask(SweeperImpl* sweeper)
+ : sweeper_(sweeper), handle_(Handle::NonEmptyTag{}) {}
+
+ static Handle Post(SweeperImpl* sweeper, v8::TaskRunner* runner) {
+ auto task = std::make_unique<IncrementalSweepTask>(sweeper);
+ auto handle = task->GetHandle();
+ runner->PostIdleTask(std::move(task));
+ return handle;
+ }
+
+ private:
+ void Run(double deadline_in_seconds) override {
+ if (handle_.IsCanceled() || !sweeper_->is_in_progress_) return;
+
+ MutatorThreadSweeper sweeper(&sweeper_->space_states_,
+ sweeper_->platform_);
+ const bool sweep_complete =
+ sweeper.SweepWithDeadline(deadline_in_seconds);
+
+ if (sweep_complete) {
+ sweeper_->SynchronizeAndFinalizeConcurrentSweeping();
+ } else {
+ sweeper_->ScheduleIncrementalSweeping();
+ }
+ }
+
+ Handle GetHandle() const { return handle_; }
+
+ SweeperImpl* sweeper_;
+ // TODO(chromium:1056170): Change to CancelableTask.
+ Handle handle_;
+ };
+
+ void ScheduleIncrementalSweeping() {
+ if (!platform_ || !foreground_task_runner_) return;
+
+ incremental_sweeper_handle_ =
+ IncrementalSweepTask::Post(this, foreground_task_runner_.get());
+ }
+
+ void ScheduleConcurrentSweeping() {
+ if (!platform_) return;
+
+ concurrent_sweeper_handle_ = platform_->PostJob(
+ v8::TaskPriority::kUserVisible,
+ std::make_unique<ConcurrentSweepTask>(&space_states_));
+ }
+
+ void CancelSweepers() {
+ if (incremental_sweeper_handle_) incremental_sweeper_handle_.Cancel();
+ if (concurrent_sweeper_handle_) concurrent_sweeper_handle_->Cancel();
+ }
+
+ void SynchronizeAndFinalizeConcurrentSweeping() {
+ CancelSweepers();
+
+ SweepFinalizer finalizer(platform_);
+ finalizer.FinalizeHeap(&space_states_);
+ }
+
RawHeap* heap_;
+ StatsCollector* stats_collector_;
+ SpaceStates space_states_;
+ cppgc::Platform* platform_;
+ std::shared_ptr<v8::TaskRunner> foreground_task_runner_;
+ IncrementalSweepTask::Handle incremental_sweeper_handle_;
+ std::unique_ptr<v8::JobHandle> concurrent_sweeper_handle_;
bool is_in_progress_ = false;
};
-Sweeper::Sweeper(RawHeap* heap) : impl_(std::make_unique<SweeperImpl>(heap)) {}
+Sweeper::Sweeper(RawHeap* heap, cppgc::Platform* platform,
+ StatsCollector* stats_collector)
+ : impl_(std::make_unique<SweeperImpl>(heap, platform, stats_collector)) {}
+
Sweeper::~Sweeper() = default;
void Sweeper::Start(Config config) { impl_->Start(config); }
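
A minimal sketch of how the extended interface is meant to be driven, assuming the caller already owns the heap, the platform and the stats collector; only the Sweeper calls correspond to the interface declared in sweeper.h below, the surrounding function is hypothetical.

// Hypothetical call site.
void SweepAfterMarkingSketch(RawHeap* heap, cppgc::Platform* platform,
                             StatsCollector* stats_collector) {
  Sweeper sweeper(heap, platform, stats_collector);
  // Precondition from sweeper.h: no linear allocation buffers may be live
  // when Start() is called.
  sweeper.Start(Sweeper::Config::kIncrementalAndConcurrent);
  // ... the mutator keeps running; idle tasks and the concurrent job sweep ...
  sweeper.Finish();  // Synchronizes with the concurrent job, runs the
                     // remaining finalizers, and notifies the stats collector.
}
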
diff --git a/chromium/v8/src/heap/cppgc/sweeper.h b/chromium/v8/src/heap/cppgc/sweeper.h
index 3e387731686..6ce17ea8fc8 100644
--- a/chromium/v8/src/heap/cppgc/sweeper.h
+++ b/chromium/v8/src/heap/cppgc/sweeper.h
@@ -10,20 +10,25 @@
#include "src/base/macros.h"
namespace cppgc {
+
+class Platform;
+
namespace internal {
+class StatsCollector;
class RawHeap;
class V8_EXPORT_PRIVATE Sweeper final {
public:
enum class Config { kAtomic, kIncrementalAndConcurrent };
- explicit Sweeper(RawHeap*);
+ Sweeper(RawHeap*, cppgc::Platform*, StatsCollector*);
~Sweeper();
Sweeper(const Sweeper&) = delete;
Sweeper& operator=(const Sweeper&) = delete;
+ // Sweeper::Start assumes the heap holds no linear allocation buffers.
void Start(Config);
void Finish();
diff --git a/chromium/v8/src/heap/cppgc/task-handle.h b/chromium/v8/src/heap/cppgc/task-handle.h
new file mode 100644
index 00000000000..cbd8cc4a61f
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/task-handle.h
@@ -0,0 +1,47 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_TASK_HANDLE_H_
+#define V8_HEAP_CPPGC_TASK_HANDLE_H_
+
+#include <memory>
+
+#include "src/base/logging.h"
+
+namespace cppgc {
+namespace internal {
+
+// A handle that is used for cancelling individual tasks.
+struct SingleThreadedHandle {
+ struct NonEmptyTag {};
+
+ // Default construction results in an empty handle.
+ SingleThreadedHandle() = default;
+
+ explicit SingleThreadedHandle(NonEmptyTag)
+ : is_cancelled_(std::make_shared<bool>(false)) {}
+
+ void Cancel() {
+ DCHECK(is_cancelled_);
+ *is_cancelled_ = true;
+ }
+
+ bool IsCanceled() const {
+ DCHECK(is_cancelled_);
+ return *is_cancelled_;
+ }
+
+ // A handle is active if it is non-empty and not cancelled.
+ explicit operator bool() const {
+ return is_cancelled_.get() && !*is_cancelled_.get();
+ }
+
+ private:
+ std::shared_ptr<bool> is_cancelled_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_TASK_HANDLE_H_
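
The handle is used the way IncrementalSweepTask in sweeper.cc above uses it: the posted task and its owner hold copies that share one cancellation flag. A condensed sketch; ExampleTask and the scheduling around it are hypothetical.

#include "src/heap/cppgc/task-handle.h"

namespace {

class ExampleTask {
 public:
  ExampleTask()
      : handle_(cppgc::internal::SingleThreadedHandle::NonEmptyTag{}) {}

  // Copies share the underlying flag, so the owner can cancel later.
  cppgc::internal::SingleThreadedHandle GetHandle() const { return handle_; }

  void Run() {
    if (handle_.IsCanceled()) return;  // bail out if cancelled after posting
    // ... do the actual work ...
  }

 private:
  cppgc::internal::SingleThreadedHandle handle_;
};

}  // namespace

// Owner side (single-threaded, as the name implies):
//   ExampleTask task;
//   auto handle = task.GetHandle();
//   ...
//   if (handle) handle.Cancel();  // a later task.Run() returns immediately
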
diff --git a/chromium/v8/src/heap/cppgc/virtual-memory.cc b/chromium/v8/src/heap/cppgc/virtual-memory.cc
new file mode 100644
index 00000000000..070baa71192
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/virtual-memory.cc
@@ -0,0 +1,56 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/virtual-memory.h"
+
+#include "include/cppgc/platform.h"
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+VirtualMemory::VirtualMemory(PageAllocator* page_allocator, size_t size,
+ size_t alignment, void* hint)
+ : page_allocator_(page_allocator) {
+ DCHECK_NOT_NULL(page_allocator);
+ DCHECK(IsAligned(size, page_allocator->CommitPageSize()));
+
+ const size_t page_size = page_allocator_->AllocatePageSize();
+ start_ = page_allocator->AllocatePages(hint, RoundUp(size, page_size),
+ RoundUp(alignment, page_size),
+ PageAllocator::kNoAccess);
+ if (start_) {
+ size_ = RoundUp(size, page_size);
+ }
+}
+
+VirtualMemory::~VirtualMemory() V8_NOEXCEPT {
+ if (IsReserved()) {
+ page_allocator_->FreePages(start_, size_);
+ }
+}
+
+VirtualMemory::VirtualMemory(VirtualMemory&& other) V8_NOEXCEPT
+ : page_allocator_(std::move(other.page_allocator_)),
+ start_(std::move(other.start_)),
+ size_(std::move(other.size_)) {
+ other.Reset();
+}
+
+VirtualMemory& VirtualMemory::operator=(VirtualMemory&& other) V8_NOEXCEPT {
+ DCHECK(!IsReserved());
+ page_allocator_ = std::move(other.page_allocator_);
+ start_ = std::move(other.start_);
+ size_ = std::move(other.size_);
+ other.Reset();
+ return *this;
+}
+
+void VirtualMemory::Reset() {
+ start_ = nullptr;
+ size_ = 0;
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/virtual-memory.h b/chromium/v8/src/heap/cppgc/virtual-memory.h
new file mode 100644
index 00000000000..1489abb9dea
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/virtual-memory.h
@@ -0,0 +1,60 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_VIRTUAL_MEMORY_H_
+#define V8_HEAP_CPPGC_VIRTUAL_MEMORY_H_
+
+#include <cstdint>
+
+#include "include/cppgc/platform.h"
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+// Represents and controls an area of reserved memory.
+class V8_EXPORT_PRIVATE VirtualMemory {
+ public:
+ // Empty VirtualMemory object, controlling no reserved memory.
+ VirtualMemory() = default;
+
+ // Reserves virtual memory of the given |size|, aligned to |alignment|; both
+ // values are rounded up to the |page_allocator|'s allocate page size. |size|
+ // itself must be a multiple of the |page_allocator|'s commit page size.
+ VirtualMemory(PageAllocator*, size_t size, size_t alignment,
+ void* hint = nullptr);
+
+ // Releases the reserved memory, if any, controlled by this VirtualMemory
+ // object.
+ ~VirtualMemory() V8_NOEXCEPT;
+
+ VirtualMemory(VirtualMemory&&) V8_NOEXCEPT;
+ VirtualMemory& operator=(VirtualMemory&&) V8_NOEXCEPT;
+
+ // Returns whether the memory has been reserved.
+ bool IsReserved() const { return start_ != nullptr; }
+
+ void* address() const {
+ DCHECK(IsReserved());
+ return start_;
+ }
+
+ size_t size() const {
+ DCHECK(IsReserved());
+ return size_;
+ }
+
+ private:
+ // Resets to the default state.
+ void Reset();
+
+ PageAllocator* page_allocator_ = nullptr;
+ void* start_ = nullptr;
+ size_t size_ = 0;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_VIRTUAL_MEMORY_H_
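
A short usage sketch, assuming the PageAllocator is supplied by the embedder; everything around the VirtualMemory calls is hypothetical.

#include "src/heap/cppgc/virtual-memory.h"

namespace cppgc {
namespace internal {

// Hypothetical caller of the class above.
void ReserveRegionSketch(PageAllocator* allocator) {
  // |size| has to be a multiple of the allocator's commit page size.
  const size_t size = allocator->CommitPageSize() * 4;
  VirtualMemory reservation(allocator, size, /*alignment=*/size);
  if (!reservation.IsReserved()) return;  // the reservation can fail

  void* base = reservation.address();    // start of the reserved region
  size_t reserved = reservation.size();  // rounded up to allocate page size
  (void)base;
  (void)reserved;
  // The pages are returned to |allocator| when |reservation| is destroyed.
}

}  // namespace internal
}  // namespace cppgc
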
diff --git a/chromium/v8/src/heap/cppgc/visitor.cc b/chromium/v8/src/heap/cppgc/visitor.cc
new file mode 100644
index 00000000000..74cab257b6e
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/visitor.cc
@@ -0,0 +1,76 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/visitor.h"
+
+#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/page-memory-inl.h"
+#include "src/heap/cppgc/sanitizers.h"
+
+namespace cppgc {
+
+#ifdef V8_ENABLE_CHECKS
+void Visitor::CheckObjectNotInConstruction(const void* address) {
+ // TODO(chromium:1056170): |address| is an inner pointer of an object. Check
+ // that the object is not in construction.
+}
+#endif // V8_ENABLE_CHECKS
+
+namespace internal {
+
+ConservativeTracingVisitor::ConservativeTracingVisitor(
+ HeapBase& heap, PageBackend& page_backend)
+ : heap_(heap), page_backend_(page_backend) {}
+
+namespace {
+
+void TraceConservatively(ConservativeTracingVisitor* visitor,
+ const HeapObjectHeader& header) {
+ Address* payload = reinterpret_cast<Address*>(header.Payload());
+ const size_t payload_size = header.GetSize();
+ for (size_t i = 0; i < (payload_size / sizeof(Address)); ++i) {
+ Address maybe_ptr = payload[i];
+#if defined(MEMORY_SANITIZER)
+ // |payload| may be uninitialized by design or just contain padding bytes.
+ // Copy the word into an unpoisoned local before inspecting it, so that
+ // conservative marking can read it without altering the original MSAN
+ // state of |payload|.
+ MSAN_UNPOISON(&maybe_ptr, sizeof(maybe_ptr));
+#endif
+ if (maybe_ptr) {
+ visitor->TraceConservativelyIfNeeded(maybe_ptr);
+ }
+ }
+}
+
+} // namespace
+
+void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
+ const void* address) {
+ // TODO(chromium:1056170): Add page bloom filter
+
+ const BasePage* page = reinterpret_cast<const BasePage*>(
+ page_backend_.Lookup(static_cast<ConstAddress>(address)));
+
+ if (!page) return;
+
+ DCHECK_EQ(&heap_, page->heap());
+
+ auto* header = page->TryObjectHeaderFromInnerAddress(
+ const_cast<Address>(reinterpret_cast<ConstAddress>(address)));
+
+ if (!header) return;
+
+ if (!header->IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>()) {
+ Visit(header->Payload(),
+ {header->Payload(),
+ GlobalGCInfoTable::GCInfoFromIndex(header->GetGCInfoIndex()).trace});
+ } else {
+ VisitConservatively(*header, TraceConservatively);
+ }
+}
+
+} // namespace internal
+} // namespace cppgc
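
TraceConservativelyIfNeeded() is the filter that turns arbitrary machine words (typically stack or register contents) into traced objects; words that do not map to a heap page or to a valid object header are simply dropped. A hypothetical driver that feeds it a word range could look as follows (the real stack scanning code is not part of this hunk):

// Hypothetical; the real iteration over stack words lives elsewhere.
void ScanWordRangeSketch(cppgc::internal::ConservativeTracingVisitor& visitor,
                         const void* const* begin, const void* const* end) {
  for (const void* const* current = begin; current != end; ++current) {
    // Every word is treated as a potential pointer; non-pointers are rejected
    // inside TraceConservativelyIfNeeded() by the page and header lookups.
    visitor.TraceConservativelyIfNeeded(*current);
  }
}
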
diff --git a/chromium/v8/src/heap/cppgc/visitor.h b/chromium/v8/src/heap/cppgc/visitor.h
index caa840b4dc3..5003e31f8f4 100644
--- a/chromium/v8/src/heap/cppgc/visitor.h
+++ b/chromium/v8/src/heap/cppgc/visitor.h
@@ -5,16 +5,50 @@
#ifndef V8_HEAP_CPPGC_VISITOR_H_
#define V8_HEAP_CPPGC_VISITOR_H_
+#include "include/cppgc/persistent.h"
#include "include/cppgc/visitor.h"
+#include "src/heap/cppgc/heap-object-header.h"
namespace cppgc {
namespace internal {
+class HeapBase;
+class HeapObjectHeader;
+class PageBackend;
+
// Base visitor that is allowed to create a public cppgc::Visitor object and
// use its internals.
class VisitorBase : public cppgc::Visitor {
public:
VisitorBase() = default;
+
+ template <typename T>
+ void TraceRootForTesting(const Persistent<T>& p, const SourceLocation& loc) {
+ TraceRoot(p, loc);
+ }
+
+ template <typename T>
+ void TraceRootForTesting(const WeakPersistent<T>& p,
+ const SourceLocation& loc) {
+ TraceRoot(p, loc);
+ }
+};
+
+// Regular visitor that additionally allows for conservative tracing.
+class ConservativeTracingVisitor : public VisitorBase {
+ public:
+ ConservativeTracingVisitor(HeapBase&, PageBackend&);
+
+ void TraceConservativelyIfNeeded(const void*);
+
+ protected:
+ using TraceConservativelyCallback = void(ConservativeTracingVisitor*,
+ const HeapObjectHeader&);
+ virtual void VisitConservatively(HeapObjectHeader&,
+ TraceConservativelyCallback) {}
+
+ HeapBase& heap_;
+ PageBackend& page_backend_;
};
} // namespace internal
diff --git a/chromium/v8/src/heap/cppgc/write-barrier.cc b/chromium/v8/src/heap/cppgc/write-barrier.cc
new file mode 100644
index 00000000000..683a3fc091f
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/write-barrier.cc
@@ -0,0 +1,84 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/write-barrier.h"
+
+#include "include/cppgc/internal/pointer-policies.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page-inl.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/marking-visitor.h"
+
+#if defined(CPPGC_CAGED_HEAP)
+#include "include/cppgc/internal/caged-heap-local-data.h"
+#endif
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+void MarkValue(const BasePage* page, Marker* marker, const void* value) {
+#if defined(CPPGC_CAGED_HEAP)
+ DCHECK(reinterpret_cast<CagedHeapLocalData*>(
+ reinterpret_cast<uintptr_t>(value) &
+ ~(kCagedHeapReservationAlignment - 1))
+ ->is_marking_in_progress);
+#endif
+ auto& header =
+ const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
+ if (!header.TryMarkAtomic()) return;
+
+ DCHECK(marker);
+
+ if (V8_UNLIKELY(MutatorThreadMarkingVisitor::IsInConstruction(header))) {
+ // It is assumed that objects on not_fully_constructed_worklist_ are not
+ // marked.
+ header.Unmark();
+ Marker::NotFullyConstructedWorklist::View not_fully_constructed_worklist(
+ marker->not_fully_constructed_worklist(), Marker::kMutatorThreadId);
+ not_fully_constructed_worklist.Push(header.Payload());
+ return;
+ }
+
+ Marker::WriteBarrierWorklist::View write_barrier_worklist(
+ marker->write_barrier_worklist(), Marker::kMutatorThreadId);
+ write_barrier_worklist.Push(&header);
+}
+
+} // namespace
+
+void WriteBarrier::MarkingBarrierSlowWithSentinelCheck(const void* value) {
+ if (!value || value == kSentinelPointer) return;
+
+ MarkingBarrierSlow(value);
+}
+
+void WriteBarrier::MarkingBarrierSlow(const void* value) {
+ const BasePage* page = BasePage::FromPayload(value);
+ const auto* heap = page->heap();
+
+ // If the marker is not set up, no incremental/concurrent marking is in
+ // progress.
+ if (!heap->marker()) return;
+
+ MarkValue(page, heap->marker(), value);
+}
+
+#if defined(CPPGC_YOUNG_GENERATION)
+void WriteBarrier::GenerationalBarrierSlow(CagedHeapLocalData* local_data,
+ const AgeTable& age_table,
+ const void* slot,
+ uintptr_t value_offset) {
+ if (age_table[value_offset] == AgeTable::Age::kOld) return;
+ // Record slot.
+ local_data->heap_base->remembered_slots().insert(const_cast<void*>(slot));
+}
+#endif
+
+} // namespace internal
+} // namespace cppgc
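
The caged-heap DCHECK in MarkValue() recovers the CagedHeapLocalData at the start of the cage from an arbitrary interior pointer by masking, which only works because the reservation alignment is a power of two. A standalone arithmetic sketch; the alignment value below is illustrative, the real kCagedHeapReservationAlignment is defined elsewhere in cppgc.

#include <cstdint>

// Illustrative stand-in for kCagedHeapReservationAlignment; must be a power
// of two for the masking trick to work. (Caged heaps are a 64-bit feature.)
constexpr std::uint64_t kExampleCageAlignment =
    std::uint64_t{4} * 1024 * 1024 * 1024;  // 4 GiB, for example

// Rounds an interior pointer value down to the cage base, where the local
// data (e.g. the is_marking_in_progress flag checked above) lives.
constexpr std::uint64_t CageBaseFromInnerPointer(std::uint64_t ptr) {
  return ptr & ~(kExampleCageAlignment - 1);
}

static_assert(CageBaseFromInnerPointer(kExampleCageAlignment + 0x1234) ==
                  kExampleCageAlignment,
              "masking recovers the reservation base");
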
diff --git a/chromium/v8/src/heap/factory-base.cc b/chromium/v8/src/heap/factory-base.cc
index 028949e861d..5dd88f9fa55 100644
--- a/chromium/v8/src/heap/factory-base.cc
+++ b/chromium/v8/src/heap/factory-base.cc
@@ -722,7 +722,7 @@ HeapObject FactoryBase<Impl>::AllocateRawArray(int size,
AllocationType allocation) {
HeapObject result = AllocateRaw(size, allocation);
if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
}
return result;
diff --git a/chromium/v8/src/heap/factory.cc b/chromium/v8/src/heap/factory.cc
index 25825f35f79..a9e11e51041 100644
--- a/chromium/v8/src/heap/factory.cc
+++ b/chromium/v8/src/heap/factory.cc
@@ -15,8 +15,10 @@
#include "src/builtins/constants-table-builder.h"
#include "src/codegen/compiler.h"
#include "src/common/globals.h"
+#include "src/diagnostics/basic-block-profiler.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/protectors-inl.h"
+#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
@@ -118,6 +120,22 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
data_container->set_kind_specific_flags(kind_specific_flags_);
}
+ // Basic block profiling data for builtins is stored in the JS heap rather
+ // than in separately-allocated C++ objects. Allocate that data now if
+ // appropriate.
+ Handle<OnHeapBasicBlockProfilerData> on_heap_profiler_data;
+ if (profiler_data_ && isolate_->IsGeneratingEmbeddedBuiltins()) {
+ on_heap_profiler_data = profiler_data_->CopyToJSHeap(isolate_);
+
+ // Add the on-heap data to a global list, which keeps it alive and allows
+ // iteration.
+ Handle<ArrayList> list(isolate_->heap()->basic_block_profiling_data(),
+ isolate_);
+ Handle<ArrayList> new_list =
+ ArrayList::Add(isolate_, list, on_heap_profiler_data);
+ isolate_->heap()->SetBasicBlockProfilingData(new_list);
+ }
+
Handle<Code> code;
{
int object_size = ComputeCodeObjectSize(code_desc_);
@@ -189,6 +207,14 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
*(self_reference.location()) = code->ptr();
}
+ // Likewise, any references to the basic block counters marker need to be
+ // updated to point to the newly-allocated counters array.
+ if (!on_heap_profiler_data.is_null()) {
+ isolate_->builtins_constants_table_builder()
+ ->PatchBasicBlockCountersReference(
+ handle(on_heap_profiler_data->counts(), isolate_));
+ }
+
// Migrate generated code.
// The generated code can contain embedded objects (typically from handles)
// in a pointer-to-tagged-value format (i.e. with indirection like a handle)
@@ -211,6 +237,21 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
code->FlushICache();
}
+ if (profiler_data_ && FLAG_turbo_profiling_verbose) {
+#ifdef ENABLE_DISASSEMBLER
+ std::ostringstream os;
+ code->Disassemble(nullptr, os, isolate_);
+ if (!on_heap_profiler_data.is_null()) {
+ Handle<String> disassembly =
+ isolate_->factory()->NewStringFromAsciiChecked(os.str().c_str(),
+ AllocationType::kOld);
+ on_heap_profiler_data->set_code(*disassembly);
+ } else {
+ profiler_data_->SetCode(os);
+ }
+#endif // ENABLE_DISASSEMBLER
+ }
+
return code;
}
@@ -325,6 +366,13 @@ Handle<Oddball> Factory::NewSelfReferenceMarker() {
Oddball::kSelfReferenceMarker);
}
+Handle<Oddball> Factory::NewBasicBlockCountersMarker() {
+ return NewOddball(basic_block_counters_marker_map(),
+ "basic_block_counters_marker",
+ handle(Smi::FromInt(-1), isolate()), "undefined",
+ Oddball::kBasicBlockCountersMarker);
+}
+
Handle<PropertyArray> Factory::NewPropertyArray(int length) {
DCHECK_LE(0, length);
if (length == 0) return empty_property_array();
@@ -347,7 +395,7 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(
HeapObject result;
if (!allocation.To(&result)) return MaybeHandle<FixedArray>();
if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
}
result.set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
@@ -1136,8 +1184,8 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
Handle<ScopeInfo> scope_info,
Handle<JSReceiver> extension,
Handle<Context> wrapped,
- Handle<StringSet> blacklist) {
- STATIC_ASSERT(Context::BLACK_LIST_INDEX ==
+ Handle<StringSet> blocklist) {
+ STATIC_ASSERT(Context::BLOCK_LIST_INDEX ==
Context::MIN_CONTEXT_EXTENDED_SLOTS + 1);
DCHECK(scope_info->IsDebugEvaluateScope());
Handle<HeapObject> ext = extension.is_null()
@@ -1152,7 +1200,7 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
c->set_previous(*previous);
c->set_extension(*ext);
if (!wrapped.is_null()) c->set(Context::WRAPPED_CONTEXT_INDEX, *wrapped);
- if (!blacklist.is_null()) c->set(Context::BLACK_LIST_INDEX, *blacklist);
+ if (!blocklist.is_null()) c->set(Context::BLOCK_LIST_INDEX, *blocklist);
return c;
}
@@ -2772,8 +2820,12 @@ Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy(int size) {
map->set_is_access_check_needed(true);
map->set_may_have_interesting_symbols(true);
LOG(isolate(), MapDetails(*map));
- return Handle<JSGlobalProxy>::cast(
+ Handle<JSGlobalProxy> proxy = Handle<JSGlobalProxy>::cast(
NewJSObjectFromMap(map, AllocationType::kYoung));
+ // Create the identity hash early, in case any JS collection containing a
+ // global proxy key needs to be rehashed after deserialization.
+ proxy->GetOrCreateIdentityHash(isolate());
+ return proxy;
}
void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
@@ -3074,9 +3126,7 @@ Handle<StackTraceFrame> Factory::NewStackTraceFrame(
frame->set_frame_index(index);
frame->set_frame_info(*undefined_value());
- int id = isolate()->last_stack_frame_info_id() + 1;
- isolate()->set_last_stack_frame_info_id(id);
- frame->set_id(id);
+ frame->set_id(isolate()->GetNextStackFrameInfoId());
return frame;
}
@@ -3100,7 +3150,7 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
// TODO(szuend): Adjust this, once it is decided what name to use in both
// "simple" and "detailed" stack traces. This code is for
// backwards compatibility to fulfill test expectations.
- auto function_name = frame->GetFunctionName();
+ Handle<PrimitiveHeapObject> function_name = frame->GetFunctionName();
bool is_user_java_script = false;
if (!is_wasm) {
Handle<Object> function = frame->GetFunction();
@@ -3111,11 +3161,11 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
}
}
- Handle<Object> method_name = undefined_value();
- Handle<Object> type_name = undefined_value();
- Handle<Object> eval_origin = frame->GetEvalOrigin();
- Handle<Object> wasm_module_name = frame->GetWasmModuleName();
- Handle<Object> wasm_instance = frame->GetWasmInstance();
+ Handle<PrimitiveHeapObject> method_name = undefined_value();
+ Handle<PrimitiveHeapObject> type_name = undefined_value();
+ Handle<PrimitiveHeapObject> eval_origin = frame->GetEvalOrigin();
+ Handle<PrimitiveHeapObject> wasm_module_name = frame->GetWasmModuleName();
+ Handle<HeapObject> wasm_instance = frame->GetWasmInstance();
// MethodName and TypeName are expensive to look up, so they are only
// included when they are strictly needed by the stack trace
@@ -3159,7 +3209,8 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
info->set_is_toplevel(is_toplevel);
info->set_is_async(frame->IsAsync());
info->set_is_promise_all(frame->IsPromiseAll());
- info->set_promise_all_index(frame->GetPromiseIndex());
+ info->set_is_promise_any(frame->IsPromiseAny());
+ info->set_promise_combinator_index(frame->GetPromiseIndex());
return info;
}
diff --git a/chromium/v8/src/heap/factory.h b/chromium/v8/src/heap/factory.h
index 2840c711cdf..bd1453bb441 100644
--- a/chromium/v8/src/heap/factory.h
+++ b/chromium/v8/src/heap/factory.h
@@ -26,6 +26,7 @@ namespace internal {
// Forward declarations.
class AliasedArgumentsEntry;
class ObjectBoilerplateDescription;
+class BasicBlockProfilerData;
class BreakPoint;
class BreakPointInfo;
class CallableTask;
@@ -119,6 +120,10 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
// Marks self references within code generation.
Handle<Oddball> NewSelfReferenceMarker();
+ // Marks references to a function's basic-block usage counters array during
+ // code generation.
+ Handle<Oddball> NewBasicBlockCountersMarker();
+
// Allocates a property array initialized with undefined values.
Handle<PropertyArray> NewPropertyArray(int length);
// Tries allocating a fixed array initialized with undefined values.
@@ -342,7 +347,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<ScopeInfo> scope_info,
Handle<JSReceiver> extension,
Handle<Context> wrapped,
- Handle<StringSet> whitelist);
+ Handle<StringSet> blocklist);
// Create a block context.
Handle<Context> NewBlockContext(Handle<Context> previous,
@@ -861,6 +866,11 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return *this;
}
+ CodeBuilder& set_profiler_data(BasicBlockProfilerData* profiler_data) {
+ profiler_data_ = profiler_data;
+ return *this;
+ }
+
private:
MaybeHandle<Code> BuildInternal(bool retry_allocation_or_fail);
@@ -875,6 +885,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<ByteArray> source_position_table_;
Handle<DeoptimizationData> deoptimization_data_ =
DeoptimizationData::Empty(isolate_);
+ BasicBlockProfilerData* profiler_data_ = nullptr;
bool is_executable_ = true;
bool read_only_data_container_ = false;
bool is_movable_ = true;
diff --git a/chromium/v8/src/heap/finalization-registry-cleanup-task.h b/chromium/v8/src/heap/finalization-registry-cleanup-task.h
index bb25c1abec9..e05c5afa957 100644
--- a/chromium/v8/src/heap/finalization-registry-cleanup-task.h
+++ b/chromium/v8/src/heap/finalization-registry-cleanup-task.h
@@ -18,12 +18,11 @@ class FinalizationRegistryCleanupTask : public CancelableTask {
public:
explicit FinalizationRegistryCleanupTask(Heap* heap);
~FinalizationRegistryCleanupTask() override = default;
-
- private:
FinalizationRegistryCleanupTask(const FinalizationRegistryCleanupTask&) =
delete;
void operator=(const FinalizationRegistryCleanupTask&) = delete;
+ private:
void RunInternal() override;
void SlowAssertNoActiveJavaScript();
diff --git a/chromium/v8/src/heap/free-list-inl.h b/chromium/v8/src/heap/free-list-inl.h
new file mode 100644
index 00000000000..bf60485fa8a
--- /dev/null
+++ b/chromium/v8/src/heap/free-list-inl.h
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_FREE_LIST_INL_H_
+#define V8_HEAP_FREE_LIST_INL_H_
+
+#include "src/heap/free-list.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+bool FreeListCategory::is_linked(FreeList* owner) const {
+ return prev_ != nullptr || next_ != nullptr ||
+ owner->categories_[type_] == this;
+}
+
+void FreeListCategory::UpdateCountersAfterAllocation(size_t allocation_size) {
+ available_ -= allocation_size;
+}
+
+Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
+ FreeListCategory* category_top = top(type);
+ if (category_top != nullptr) {
+ DCHECK(!category_top->top().is_null());
+ return Page::FromHeapObject(category_top->top());
+ } else {
+ return nullptr;
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_FREE_LIST_INL_H_
diff --git a/chromium/v8/src/heap/free-list.cc b/chromium/v8/src/heap/free-list.cc
new file mode 100644
index 00000000000..e9bf77d1711
--- /dev/null
+++ b/chromium/v8/src/heap/free-list.cc
@@ -0,0 +1,596 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/free-list.h"
+
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+#include "src/heap/free-list-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/memory-chunk-inl.h"
+#include "src/objects/free-space-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Free lists for old object spaces implementation
+
+void FreeListCategory::Reset(FreeList* owner) {
+ if (is_linked(owner) && !top().is_null()) {
+ owner->DecreaseAvailableBytes(available_);
+ }
+ set_top(FreeSpace());
+ set_prev(nullptr);
+ set_next(nullptr);
+ available_ = 0;
+}
+
+FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
+ size_t* node_size) {
+ FreeSpace node = top();
+ DCHECK(!node.is_null());
+ DCHECK(Page::FromHeapObject(node)->CanAllocate());
+ if (static_cast<size_t>(node.Size()) < minimum_size) {
+ *node_size = 0;
+ return FreeSpace();
+ }
+ set_top(node.next());
+ *node_size = node.Size();
+ UpdateCountersAfterAllocation(*node_size);
+ return node;
+}
+
+FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
+ size_t* node_size) {
+ FreeSpace prev_non_evac_node;
+ for (FreeSpace cur_node = top(); !cur_node.is_null();
+ cur_node = cur_node.next()) {
+ DCHECK(Page::FromHeapObject(cur_node)->CanAllocate());
+ size_t size = cur_node.size();
+ if (size >= minimum_size) {
+ DCHECK_GE(available_, size);
+ UpdateCountersAfterAllocation(size);
+ if (cur_node == top()) {
+ set_top(cur_node.next());
+ }
+ if (!prev_non_evac_node.is_null()) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
+ if (chunk->owner_identity() == CODE_SPACE) {
+ chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
+ }
+ prev_non_evac_node.set_next(cur_node.next());
+ }
+ *node_size = size;
+ return cur_node;
+ }
+
+ prev_non_evac_node = cur_node;
+ }
+ return FreeSpace();
+}
+
+void FreeListCategory::Free(Address start, size_t size_in_bytes, FreeMode mode,
+ FreeList* owner) {
+ FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
+ free_space.set_next(top());
+ set_top(free_space);
+ available_ += size_in_bytes;
+ if (mode == kLinkCategory) {
+ if (is_linked(owner)) {
+ owner->IncreaseAvailableBytes(size_in_bytes);
+ } else {
+ owner->AddCategory(this);
+ }
+ }
+}
+
+void FreeListCategory::RepairFreeList(Heap* heap) {
+ Map free_space_map = ReadOnlyRoots(heap).free_space_map();
+ FreeSpace n = top();
+ while (!n.is_null()) {
+ ObjectSlot map_slot = n.map_slot();
+ if (map_slot.contains_value(kNullAddress)) {
+ map_slot.store(free_space_map);
+ } else {
+ DCHECK(map_slot.contains_value(free_space_map.ptr()));
+ }
+ n = n.next();
+ }
+}
+
+void FreeListCategory::Relink(FreeList* owner) {
+ DCHECK(!is_linked(owner));
+ owner->AddCategory(this);
+}
+
+// ------------------------------------------------
+// Generic FreeList methods (alloc/free related)
+
+FreeList* FreeList::CreateFreeList() { return new FreeListManyCachedOrigin(); }
+
+FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
+ size_t minimum_size, size_t* node_size) {
+ FreeListCategory* category = categories_[type];
+ if (category == nullptr) return FreeSpace();
+ FreeSpace node = category->PickNodeFromList(minimum_size, node_size);
+ if (!node.is_null()) {
+ DecreaseAvailableBytes(*node_size);
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ }
+ if (category->is_empty()) {
+ RemoveCategory(category);
+ }
+ return node;
+}
+
+FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
+ size_t minimum_size,
+ size_t* node_size) {
+ FreeListCategoryIterator it(this, type);
+ FreeSpace node;
+ while (it.HasNext()) {
+ FreeListCategory* current = it.Next();
+ node = current->SearchForNodeInList(minimum_size, node_size);
+ if (!node.is_null()) {
+ DecreaseAvailableBytes(*node_size);
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ if (current->is_empty()) {
+ RemoveCategory(current);
+ }
+ return node;
+ }
+ }
+ return node;
+}
+
+size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
+ Page* page = Page::FromAddress(start);
+ page->DecreaseAllocatedBytes(size_in_bytes);
+
+ // Blocks have to be a minimum size to hold free list items.
+ if (size_in_bytes < min_block_size_) {
+ page->add_wasted_memory(size_in_bytes);
+ wasted_bytes_ += size_in_bytes;
+ return size_in_bytes;
+ }
+
+ // Insert other blocks at the head of a free list of the appropriate
+ // magnitude.
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ page->free_list_category(type)->Free(start, size_in_bytes, mode, this);
+ DCHECK_EQ(page->AvailableInFreeList(),
+ page->AvailableInFreeListFromAllocatedBytes());
+ return 0;
+}
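
Free()'s return value is easy to misread: it reports the bytes that could not be turned into a free-list entry (and are therefore accounted as wasted on the page), not the bytes freed. A tiny standalone sketch of that decision, using a hypothetical minimum block size; the real min_block_size_ is set by each FreeList subclass.

#include <cstddef>

// Mirrors the size check at the top of FreeList::Free(); the constant is a
// hypothetical example value.
constexpr std::size_t kExampleMinBlockSize = 24;

// Returns the wasted byte count, i.e. what FreeList::Free() would return.
constexpr std::size_t WastedBytesForFreedBlock(std::size_t size_in_bytes) {
  return size_in_bytes < kExampleMinBlockSize ? size_in_bytes : 0;
}

static_assert(WastedBytesForFreedBlock(16) == 16, "too small: fully wasted");
static_assert(WastedBytesForFreedBlock(64) == 0, "large enough: reusable");
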
+
+// ------------------------------------------------
+// FreeListMany implementation
+
+constexpr unsigned int FreeListMany::categories_min[kNumberOfCategories];
+
+FreeListMany::FreeListMany() {
+ // Initializing base (FreeList) fields
+ number_of_categories_ = kNumberOfCategories;
+ last_category_ = number_of_categories_ - 1;
+ min_block_size_ = kMinBlockSize;
+ categories_ = new FreeListCategory*[number_of_categories_]();
+
+ Reset();
+}
+
+FreeListMany::~FreeListMany() { delete[] categories_; }
+
+size_t FreeListMany::GuaranteedAllocatable(size_t maximum_freed) {
+ if (maximum_freed < categories_min[0]) {
+ return 0;
+ }
+ for (int cat = kFirstCategory + 1; cat <= last_category_; cat++) {
+ if (maximum_freed < categories_min[cat]) {
+ return categories_min[cat - 1];
+ }
+ }
+ return maximum_freed;
+}
+
+Page* FreeListMany::GetPageForSize(size_t size_in_bytes) {
+ FreeListCategoryType minimum_category =
+ SelectFreeListCategoryType(size_in_bytes);
+ Page* page = nullptr;
+ for (int cat = minimum_category + 1; !page && cat <= last_category_; cat++) {
+ page = GetPageForCategoryType(cat);
+ }
+ if (!page) {
+ // Might return a page in which |size_in_bytes| will not fit.
+ page = GetPageForCategoryType(minimum_category);
+ }
+ return page;
+}
+
+FreeSpace FreeListMany::Allocate(size_t size_in_bytes, size_t* node_size,
+ AllocationOrigin origin) {
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+ FreeSpace node;
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ for (int i = type; i < last_category_ && node.is_null(); i++) {
+ node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
+ node_size);
+ }
+
+ if (node.is_null()) {
+ // Searching each element of the last category.
+ node = SearchForNodeInList(last_category_, size_in_bytes, node_size);
+ }
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+// ------------------------------------------------
+// FreeListManyCached implementation
+
+FreeListManyCached::FreeListManyCached() { ResetCache(); }
+
+void FreeListManyCached::Reset() {
+ ResetCache();
+ FreeListMany::Reset();
+}
+
+bool FreeListManyCached::AddCategory(FreeListCategory* category) {
+ bool was_added = FreeList::AddCategory(category);
+
+ // Updating cache
+ if (was_added) {
+ UpdateCacheAfterAddition(category->type_);
+ }
+
+#ifdef DEBUG
+ CheckCacheIntegrity();
+#endif
+
+ return was_added;
+}
+
+void FreeListManyCached::RemoveCategory(FreeListCategory* category) {
+ FreeList::RemoveCategory(category);
+
+ // Updating cache
+ int type = category->type_;
+ if (categories_[type] == nullptr) {
+ UpdateCacheAfterRemoval(type);
+ }
+
+#ifdef DEBUG
+ CheckCacheIntegrity();
+#endif
+}
+
+size_t FreeListManyCached::Free(Address start, size_t size_in_bytes,
+ FreeMode mode) {
+ Page* page = Page::FromAddress(start);
+ page->DecreaseAllocatedBytes(size_in_bytes);
+
+ // Blocks have to be a minimum size to hold free list items.
+ if (size_in_bytes < min_block_size_) {
+ page->add_wasted_memory(size_in_bytes);
+ wasted_bytes_ += size_in_bytes;
+ return size_in_bytes;
+ }
+
+ // Insert other blocks at the head of a free list of the appropriate
+ // magnitude.
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ page->free_list_category(type)->Free(start, size_in_bytes, mode, this);
+
+ // Updating cache
+ if (mode == kLinkCategory) {
+ UpdateCacheAfterAddition(type);
+
+#ifdef DEBUG
+ CheckCacheIntegrity();
+#endif
+ }
+
+ DCHECK_EQ(page->AvailableInFreeList(),
+ page->AvailableInFreeListFromAllocatedBytes());
+ return 0;
+}
+
+FreeSpace FreeListManyCached::Allocate(size_t size_in_bytes, size_t* node_size,
+ AllocationOrigin origin) {
+ USE(origin);
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+
+ FreeSpace node;
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ type = next_nonempty_category[type];
+ for (; type < last_category_; type = next_nonempty_category[type + 1]) {
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
+ if (!node.is_null()) break;
+ }
+
+ if (node.is_null()) {
+ // Searching each element of the last category.
+ type = last_category_;
+ node = SearchForNodeInList(type, size_in_bytes, node_size);
+ }
+
+ // Updating cache
+ if (!node.is_null() && categories_[type] == nullptr) {
+ UpdateCacheAfterRemoval(type);
+ }
+
+#ifdef DEBUG
+ CheckCacheIntegrity();
+#endif
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+// ------------------------------------------------
+// FreeListManyCachedFastPath implementation
+
+FreeSpace FreeListManyCachedFastPath::Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) {
+ USE(origin);
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+ FreeSpace node;
+
+ // Fast path part 1: searching the last categories
+ FreeListCategoryType first_category =
+ SelectFastAllocationFreeListCategoryType(size_in_bytes);
+ FreeListCategoryType type = first_category;
+ for (type = next_nonempty_category[type]; type <= last_category_;
+ type = next_nonempty_category[type + 1]) {
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
+ if (!node.is_null()) break;
+ }
+
+ // Fast path part 2: searching the medium categories for tiny objects
+ if (node.is_null()) {
+ if (size_in_bytes <= kTinyObjectMaxSize) {
+ for (type = next_nonempty_category[kFastPathFallBackTiny];
+ type < kFastPathFirstCategory;
+ type = next_nonempty_category[type + 1]) {
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
+ if (!node.is_null()) break;
+ }
+ }
+ }
+
+ // Searching the last category
+ if (node.is_null()) {
+ // Searching each element of the last category.
+ type = last_category_;
+ node = SearchForNodeInList(type, size_in_bytes, node_size);
+ }
+
+ // Finally, search the most precise category
+ if (node.is_null()) {
+ type = SelectFreeListCategoryType(size_in_bytes);
+ for (type = next_nonempty_category[type]; type < first_category;
+ type = next_nonempty_category[type + 1]) {
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
+ if (!node.is_null()) break;
+ }
+ }
+
+ // Updating cache
+ if (!node.is_null() && categories_[type] == nullptr) {
+ UpdateCacheAfterRemoval(type);
+ }
+
+#ifdef DEBUG
+ CheckCacheIntegrity();
+#endif
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+// ------------------------------------------------
+// FreeListManyCachedOrigin implementation
+
+FreeSpace FreeListManyCachedOrigin::Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) {
+ if (origin == AllocationOrigin::kGC) {
+ return FreeListManyCached::Allocate(size_in_bytes, node_size, origin);
+ } else {
+ return FreeListManyCachedFastPath::Allocate(size_in_bytes, node_size,
+ origin);
+ }
+}
+
+// ------------------------------------------------
+// FreeListMap implementation
+
+FreeListMap::FreeListMap() {
+ // Initializing base (FreeList) fields
+ number_of_categories_ = 1;
+ last_category_ = kOnlyCategory;
+ min_block_size_ = kMinBlockSize;
+ categories_ = new FreeListCategory*[number_of_categories_]();
+
+ Reset();
+}
+
+size_t FreeListMap::GuaranteedAllocatable(size_t maximum_freed) {
+ return maximum_freed;
+}
+
+Page* FreeListMap::GetPageForSize(size_t size_in_bytes) {
+ return GetPageForCategoryType(kOnlyCategory);
+}
+
+FreeListMap::~FreeListMap() { delete[] categories_; }
+
+FreeSpace FreeListMap::Allocate(size_t size_in_bytes, size_t* node_size,
+ AllocationOrigin origin) {
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+
+ // The following DCHECK ensures that maps are allocated one by one (i.e.,
+ // without folding). This assumption currently holds; should it become untrue
+ // in the future, this DCHECK will fire. The fix is to remove the DCHECK and
+ // to replace TryFindNodeIn below with SearchForNodeInList.
+ DCHECK_EQ(size_in_bytes, Map::kSize);
+
+ FreeSpace node = TryFindNodeIn(kOnlyCategory, size_in_bytes, node_size);
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK_IMPLIES(node.is_null(), IsEmpty());
+ return node;
+}
+
+// ------------------------------------------------
+// Generic FreeList methods (non alloc/free related)
+
+void FreeList::Reset() {
+ ForAllFreeListCategories(
+ [this](FreeListCategory* category) { category->Reset(this); });
+ for (int i = kFirstCategory; i < number_of_categories_; i++) {
+ categories_[i] = nullptr;
+ }
+ wasted_bytes_ = 0;
+ available_ = 0;
+}
+
+size_t FreeList::EvictFreeListItems(Page* page) {
+ size_t sum = 0;
+ page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
+ sum += category->available();
+ RemoveCategory(category);
+ category->Reset(this);
+ });
+ return sum;
+}
+
+void FreeList::RepairLists(Heap* heap) {
+ ForAllFreeListCategories(
+ [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
+}
+
+bool FreeList::AddCategory(FreeListCategory* category) {
+ FreeListCategoryType type = category->type_;
+ DCHECK_LT(type, number_of_categories_);
+ FreeListCategory* top = categories_[type];
+
+ if (category->is_empty()) return false;
+ DCHECK_NE(top, category);
+
+ // Common double-linked list insertion.
+ if (top != nullptr) {
+ top->set_prev(category);
+ }
+ category->set_next(top);
+ categories_[type] = category;
+
+ IncreaseAvailableBytes(category->available());
+ return true;
+}
+
+void FreeList::RemoveCategory(FreeListCategory* category) {
+ FreeListCategoryType type = category->type_;
+ DCHECK_LT(type, number_of_categories_);
+ FreeListCategory* top = categories_[type];
+
+ if (category->is_linked(this)) {
+ DecreaseAvailableBytes(category->available());
+ }
+
+ // Common double-linked list removal.
+ if (top == category) {
+ categories_[type] = category->next();
+ }
+ if (category->prev() != nullptr) {
+ category->prev()->set_next(category->next());
+ }
+ if (category->next() != nullptr) {
+ category->next()->set_prev(category->prev());
+ }
+ category->set_next(nullptr);
+ category->set_prev(nullptr);
+}
+
+void FreeList::PrintCategories(FreeListCategoryType type) {
+ FreeListCategoryIterator it(this, type);
+ PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
+ static_cast<void*>(categories_[type]), type);
+ while (it.HasNext()) {
+ FreeListCategory* current = it.Next();
+ PrintF("%p -> ", static_cast<void*>(current));
+ }
+ PrintF("null\n");
+}
+
+size_t FreeListCategory::SumFreeList() {
+ size_t sum = 0;
+ FreeSpace cur = top();
+ while (!cur.is_null()) {
+ // We can't use "cur->map()" here because both cur's map and the
+ // root can be null during bootstrapping.
+ DCHECK(cur.map_slot().contains_value(Page::FromHeapObject(cur)
+ ->heap()
+ ->isolate()
+ ->root(RootIndex::kFreeSpaceMap)
+ .ptr()));
+ sum += cur.relaxed_read_size();
+ cur = cur.next();
+ }
+ return sum;
+}
+int FreeListCategory::FreeListLength() {
+ int length = 0;
+ FreeSpace cur = top();
+ while (!cur.is_null()) {
+ length++;
+ cur = cur.next();
+ }
+ return length;
+}
+
+#ifdef DEBUG
+bool FreeList::IsVeryLong() {
+ int len = 0;
+ for (int i = kFirstCategory; i < number_of_categories_; i++) {
+ FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
+ while (it.HasNext()) {
+ len += it.Next()->FreeListLength();
+ if (len >= FreeListCategory::kVeryLongFreeList) return true;
+ }
+ }
+ return false;
+}
+
+// This can take a very long time because it is linear in the number of entries
+// on the free list, so it should not be called if FreeListLength returns
+// kVeryLongFreeList.
+size_t FreeList::SumFreeLists() {
+ size_t sum = 0;
+ ForAllFreeListCategories(
+ [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
+ return sum;
+}
+#endif
+
+} // namespace internal
+} // namespace v8
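
FreeListManyCached::Allocate() above walks categories via next_nonempty_category so that empty categories are skipped in constant time; the table itself and UpdateCacheAfterAddition()/UpdateCacheAfterRemoval() are declared outside this hunk. The self-contained sketch below only illustrates the invariant such a cache maintains; the array layout and update rules are illustrative, not the V8 code.

// Illustrative cache over N free-list categories. cache_[t] is the smallest
// non-empty category >= t, or N if every category from t upwards is empty.
// This is the invariant Allocate() relies on when it does
// "type = next_nonempty_category[type]".
template <int N>
class NextNonEmptyCategoryCache {
 public:
  NextNonEmptyCategoryCache() { Reset(); }

  void Reset() {
    for (int i = 0; i <= N; i++) cache_[i] = N;  // everything empty
  }

  int NextNonEmpty(int type) const { return cache_[type]; }

  // Category |t| just became non-empty: it is now the answer for every
  // starting point whose previous answer lay beyond |t|.
  void OnCategoryAdded(int t) {
    for (int i = t; i >= 0 && cache_[i] > t; i--) cache_[i] = t;
  }

  // Category |t| just became empty: starting points that answered |t| fall
  // through to the next non-empty category after it.
  void OnCategoryRemoved(int t) {
    const int next = cache_[t + 1];
    for (int i = t; i >= 0 && cache_[i] == t; i--) cache_[i] = next;
  }

 private:
  int cache_[N + 1];
};
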
diff --git a/chromium/v8/src/heap/free-list.h b/chromium/v8/src/heap/free-list.h
new file mode 100644
index 00000000000..e2cd193905d
--- /dev/null
+++ b/chromium/v8/src/heap/free-list.h
@@ -0,0 +1,520 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_FREE_LIST_H_
+#define V8_HEAP_FREE_LIST_H_
+
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+#include "src/heap/memory-chunk.h"
+#include "src/objects/free-space.h"
+#include "src/objects/map.h"
+#include "src/utils/utils.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
+
+namespace v8 {
+namespace internal {
+
+namespace heap {
+class HeapTester;
+class TestCodePageAllocatorScope;
+} // namespace heap
+
+class AllocationObserver;
+class FreeList;
+class Isolate;
+class LargeObjectSpace;
+class LargePage;
+class LinearAllocationArea;
+class LocalArrayBufferTracker;
+class Page;
+class PagedSpace;
+class SemiSpace;
+
+using FreeListCategoryType = int32_t;
+
+static const FreeListCategoryType kFirstCategory = 0;
+static const FreeListCategoryType kInvalidCategory = -1;
+
+enum FreeMode { kLinkCategory, kDoNotLinkCategory };
+
+enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
+
+// A free list category maintains a linked list of free memory blocks.
+class FreeListCategory {
+ public:
+ void Initialize(FreeListCategoryType type) {
+ type_ = type;
+ available_ = 0;
+ prev_ = nullptr;
+ next_ = nullptr;
+ }
+
+ void Reset(FreeList* owner);
+
+ void RepairFreeList(Heap* heap);
+
+ // Relinks the category into the currently owning free list. Requires that the
+ // category is currently unlinked.
+ void Relink(FreeList* owner);
+
+ void Free(Address address, size_t size_in_bytes, FreeMode mode,
+ FreeList* owner);
+
+ // Performs a single try to pick a node of at least |minimum_size| from the
+ // category. Stores the actual size in |node_size|. Returns an empty
+ // FreeSpace if no suitable node is found.
+ FreeSpace PickNodeFromList(size_t minimum_size, size_t* node_size);
+
+ // Picks a node of at least |minimum_size| from the category. Stores the
+ // actual size in |node_size|. Returns an empty FreeSpace if no node is found.
+ FreeSpace SearchForNodeInList(size_t minimum_size, size_t* node_size);
+
+ inline bool is_linked(FreeList* owner) const;
+ bool is_empty() { return top().is_null(); }
+ uint32_t available() const { return available_; }
+
+ size_t SumFreeList();
+ int FreeListLength();
+
+ private:
+ // For debug builds we accurately compute free list lengths up until
+ // {kVeryLongFreeList} by manually walking the list.
+ static const int kVeryLongFreeList = 500;
+
+ // Updates |available_| after an allocation of size |allocation_size|.
+ inline void UpdateCountersAfterAllocation(size_t allocation_size);
+
+ FreeSpace top() { return top_; }
+ void set_top(FreeSpace top) { top_ = top; }
+ FreeListCategory* prev() { return prev_; }
+ void set_prev(FreeListCategory* prev) { prev_ = prev; }
+ FreeListCategory* next() { return next_; }
+ void set_next(FreeListCategory* next) { next_ = next; }
+
+ // |type_|: The type of this free list category.
+ FreeListCategoryType type_ = kInvalidCategory;
+
+ // |available_|: Total available bytes in all blocks of this free list
+ // category.
+ uint32_t available_ = 0;
+
+ // |top_|: Points to the top FreeSpace in the free list category.
+ FreeSpace top_;
+
+ FreeListCategory* prev_ = nullptr;
+ FreeListCategory* next_ = nullptr;
+
+ friend class FreeList;
+ friend class FreeListManyCached;
+ friend class PagedSpace;
+ friend class MapSpace;
+};
+
+// A free list maintains free blocks of memory. The free list is organized in
+// a way to encourage objects allocated around the same time to be near each
+// other. The normal way to allocate is intended to be by bumping a 'top'
+// pointer until it hits a 'limit' pointer. When the limit is hit we need to
+// find a new space to allocate from. This is done with the free list, which is
+// divided up into rough categories to cut down on waste. Having finer
+// categories would scatter allocation more.
+class FreeList {
+ public:
+ // Creates a FreeList of the default class (FreeListManyCachedOrigin for now).
+ V8_EXPORT_PRIVATE static FreeList* CreateFreeList();
+
+ virtual ~FreeList() = default;
+
+ // Returns how much memory can be allocated after freeing maximum_freed
+ // memory.
+ virtual size_t GuaranteedAllocatable(size_t maximum_freed) = 0;
+
+ // Adds a node on the free list. The block of size {size_in_bytes} starting
+ // at {start} is placed on the free list. The return value is the number of
+ // bytes that were not added to the free list, because the freed memory block
+ // was too small. Bookkeeping information will be written to the block, i.e.,
+ // its contents will be destroyed. The start address should be word aligned,
+ // and the size should be a non-zero multiple of the word size.
+ virtual size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
+
+ // Allocates a free space node from the free list of at least size_in_bytes
+ // bytes. Returns the actual node size in node_size, which can be bigger than
+ // size_in_bytes. This method returns a null FreeSpace if the allocation
+ // request cannot be handled by the free list.
+ virtual V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) = 0;
+
+  // Returns a page containing a free-list entry big enough for
+  // |size_in_bytes|, or nullptr otherwise.
+ V8_EXPORT_PRIVATE virtual Page* GetPageForSize(size_t size_in_bytes) = 0;
+
+ virtual void Reset();
+
+ // Return the number of bytes available on the free list.
+ size_t Available() {
+ DCHECK(available_ == SumFreeLists());
+ return available_;
+ }
+
+  // Updates the number of available bytes on the free lists.
+ void IncreaseAvailableBytes(size_t bytes) { available_ += bytes; }
+ void DecreaseAvailableBytes(size_t bytes) { available_ -= bytes; }
+
+ bool IsEmpty() {
+ bool empty = true;
+ ForAllFreeListCategories([&empty](FreeListCategory* category) {
+ if (!category->is_empty()) empty = false;
+ });
+ return empty;
+ }
+
+ // Used after booting the VM.
+ void RepairLists(Heap* heap);
+
+ V8_EXPORT_PRIVATE size_t EvictFreeListItems(Page* page);
+
+ int number_of_categories() { return number_of_categories_; }
+ FreeListCategoryType last_category() { return last_category_; }
+
+ size_t wasted_bytes() { return wasted_bytes_; }
+
+ template <typename Callback>
+ void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
+ FreeListCategory* current = categories_[type];
+ while (current != nullptr) {
+ FreeListCategory* next = current->next();
+ callback(current);
+ current = next;
+ }
+ }
+
+ template <typename Callback>
+ void ForAllFreeListCategories(Callback callback) {
+ for (int i = kFirstCategory; i < number_of_categories(); i++) {
+ ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
+ }
+ }
+
+ virtual bool AddCategory(FreeListCategory* category);
+ virtual V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
+ void PrintCategories(FreeListCategoryType type);
+
+ protected:
+ class FreeListCategoryIterator final {
+ public:
+ FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
+ : current_(free_list->categories_[type]) {}
+
+ bool HasNext() const { return current_ != nullptr; }
+
+ FreeListCategory* Next() {
+ DCHECK(HasNext());
+ FreeListCategory* tmp = current_;
+ current_ = current_->next();
+ return tmp;
+ }
+
+ private:
+ FreeListCategory* current_;
+ };
+
+#ifdef DEBUG
+ V8_EXPORT_PRIVATE size_t SumFreeLists();
+ bool IsVeryLong();
+#endif
+
+ // Tries to retrieve a node from the first category in a given |type|.
+ // Returns nullptr if the category is empty or the top entry is smaller
+ // than minimum_size.
+ FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size);
+
+ // Searches a given |type| for a node of at least |minimum_size|.
+ FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size);
+
+ // Returns the smallest category in which an object of |size_in_bytes| could
+ // fit.
+ virtual FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) = 0;
+
+ FreeListCategory* top(FreeListCategoryType type) const {
+ return categories_[type];
+ }
+
+ inline Page* GetPageForCategoryType(FreeListCategoryType type);
+
+ int number_of_categories_ = 0;
+ FreeListCategoryType last_category_ = 0;
+ size_t min_block_size_ = 0;
+
+ std::atomic<size_t> wasted_bytes_{0};
+ FreeListCategory** categories_ = nullptr;
+
+ // |available_|: The number of bytes in this freelist.
+ size_t available_ = 0;
+
+ friend class FreeListCategory;
+ friend class Page;
+ friend class MemoryChunk;
+ friend class ReadOnlyPage;
+ friend class MapSpace;
+};
+
+// FreeList used for spaces that don't have freelists
+// (only the LargeObject space for now).
+class NoFreeList final : public FreeList {
+ public:
+ size_t GuaranteedAllocatable(size_t maximum_freed) final {
+ FATAL("NoFreeList can't be used as a standard FreeList. ");
+ }
+ size_t Free(Address start, size_t size_in_bytes, FreeMode mode) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+ Page* GetPageForSize(size_t size_in_bytes) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+
+ private:
+ FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+};
+
+// Uses 24 free lists: one per 16 bytes between 24 and 256 bytes, and then a
+// few more for larger sizes. See the variable |categories_min| for the lower
+// bound of each free list. Allocation is done using a best-fit strategy
+// (considering only the first element of each category though).
+// Performance is expected to be worse than FreeListLegacy, but memory
+// consumption should be lower (since fragmentation should be lower).
+class V8_EXPORT_PRIVATE FreeListMany : public FreeList {
+ public:
+ size_t GuaranteedAllocatable(size_t maximum_freed) override;
+
+ Page* GetPageForSize(size_t size_in_bytes) override;
+
+ FreeListMany();
+ ~FreeListMany() override;
+
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) override;
+
+ protected:
+ static const size_t kMinBlockSize = 3 * kTaggedSize;
+
+ // This is a conservative upper bound. The actual maximum block size takes
+ // padding and alignment of data and code pages into account.
+ static const size_t kMaxBlockSize = MemoryChunk::kPageSize;
+ // Largest size for which categories are still precise, and for which we can
+ // therefore compute the category in constant time.
+ static const size_t kPreciseCategoryMaxSize = 256;
+
+ // Categories boundaries generated with:
+ // perl -E '
+ // @cat = (24, map {$_*16} 2..16, 48, 64);
+ // while ($cat[-1] <= 32768) {
+ // push @cat, $cat[-1]*2
+ // }
+ // say join ", ", @cat;
+ // say "\n", scalar @cat'
+ static const int kNumberOfCategories = 24;
+ static constexpr unsigned int categories_min[kNumberOfCategories] = {
+ 24, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192,
+ 208, 224, 240, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};
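+  // I.e. 16 precise categories (24 bytes, then 32 to 256 bytes in 16-byte
+  // steps) followed by 8 power-of-two categories (512 to 65536 bytes).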
+
+ // Return the smallest category that could hold |size_in_bytes| bytes.
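+  // For example, 100 bytes maps to category 5 (which holds blocks of 96 to
+  // 111 bytes), and 300 bytes maps to category 15 (blocks of 256 to 511
+  // bytes).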
+ FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) override {
+ if (size_in_bytes <= kPreciseCategoryMaxSize) {
+ if (size_in_bytes < categories_min[1]) return 0;
+ return static_cast<FreeListCategoryType>(size_in_bytes >> 4) - 1;
+ }
+ for (int cat = (kPreciseCategoryMaxSize >> 4) - 1; cat < last_category_;
+ cat++) {
+ if (size_in_bytes < categories_min[cat + 1]) {
+ return cat;
+ }
+ }
+ return last_category_;
+ }
+
+ FRIEND_TEST(SpacesTest, FreeListManySelectFreeListCategoryType);
+ FRIEND_TEST(SpacesTest, FreeListManyGuaranteedAllocatable);
+};
+
+// Same as FreeListMany but uses a cache to know which categories are empty.
+// The cache (|next_nonempty_category|) is maintained such that for each
+// category c, next_nonempty_category[c] contains the first non-empty category
+// greater than or equal to c that may hold an object of size c.
+// Allocation is done using the same strategy as FreeListMany (i.e. best fit).
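+// For example, if only categories 3 and 7 are non-empty, then
+// next_nonempty_category[0..3] == 3, next_nonempty_category[4..7] == 7, and
+// all later entries are kNumberOfCategories ("no such category").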
+class V8_EXPORT_PRIVATE FreeListManyCached : public FreeListMany {
+ public:
+ FreeListManyCached();
+
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) override;
+
+ size_t Free(Address start, size_t size_in_bytes, FreeMode mode) override;
+
+ void Reset() override;
+
+ bool AddCategory(FreeListCategory* category) override;
+ void RemoveCategory(FreeListCategory* category) override;
+
+ protected:
+ // Updates the cache after adding something in the category |cat|.
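+  // Example: starting from the state above (non-empty categories {3, 7}),
+  // adding to category 1 rewrites entries 1 and 0 from 3 down to 1; the loop
+  // stops as soon as an entry is already <= |cat|.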
+ void UpdateCacheAfterAddition(FreeListCategoryType cat) {
+ for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] > cat;
+ i--) {
+ next_nonempty_category[i] = cat;
+ }
+ }
+
+ // Updates the cache after emptying category |cat|.
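+  // Example: with non-empty categories {3, 7}, emptying category 3 copies
+  // next_nonempty_category[4] (== 7) into entries 3, 2, 1 and 0, so searches
+  // starting below category 4 now skip straight to category 7.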
+ void UpdateCacheAfterRemoval(FreeListCategoryType cat) {
+ for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] == cat;
+ i--) {
+ next_nonempty_category[i] = next_nonempty_category[cat + 1];
+ }
+ }
+
+#ifdef DEBUG
+ void CheckCacheIntegrity() {
+ for (int i = 0; i <= last_category_; i++) {
+ DCHECK(next_nonempty_category[i] == last_category_ + 1 ||
+ categories_[next_nonempty_category[i]] != nullptr);
+ for (int j = i; j < next_nonempty_category[i]; j++) {
+ DCHECK(categories_[j] == nullptr);
+ }
+ }
+ }
+#endif
+
+ // The cache is overallocated by one so that the last element is always
+ // defined, and when updating the cache, we can always use cache[i+1] as long
+ // as i is < kNumberOfCategories.
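+  // With kNumberOfCategories == 24 the cache thus has 25 entries (0..24).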
+ int next_nonempty_category[kNumberOfCategories + 1];
+
+ private:
+ void ResetCache() {
+ for (int i = 0; i < kNumberOfCategories; i++) {
+ next_nonempty_category[i] = kNumberOfCategories;
+ }
+ // Setting the after-last element as well, as explained in the cache's
+ // declaration.
+ next_nonempty_category[kNumberOfCategories] = kNumberOfCategories;
+ }
+};
+
+// Same as FreeListManyCached but uses a fast path.
+// The fast path overallocates by at least 1.85k bytes. The idea of this 1.85k
+// is: we want the fast path to always overallocate, even for larger
+// categories. Therefore, we have two choices: either overallocate by
+// "size_in_bytes * something" or overallocate by "size_in_bytes +
+// something". We choose the latter, as the former tends to overallocate too
+// much for larger objects. The 1.85k (= 2048 - 128) has been chosen such that
+// for tiny objects (size <= 128 bytes), the first category considered is the
+// 18th (which holds objects of 2k to 4k), while for larger objects, the first
+// category considered will be one that guarantees a 1.85k+ bytes
+// overallocation. Using 2k rather than 1.85k would have resulted in either a
+// more complex logic for SelectFastAllocationFreeListCategoryType, or the 18th
+// category (2k to 4k) not being used; both of which are undesirable.
+// A secondary fast path is used for tiny objects (size <= 128), in order to
+// consider categories from 256 to 2048 bytes for them.
+// Note that this class uses a precise GetPageForSize (inherited from
+// FreeListMany), which makes its fast path less fast in the Scavenger. This is
+// done on purpose, since this class's only purpose is to be used by
+// FreeListManyCachedOrigin, which is precise for the scavenger.
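+// For example, a 128-byte request is bumped to 128 + 1920 = 2048 bytes and
+// therefore starts its fast-path search at the 2048-byte category (the 18th),
+// while a 129-byte request already starts at the 4096-byte category.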
+class V8_EXPORT_PRIVATE FreeListManyCachedFastPath : public FreeListManyCached {
+ public:
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) override;
+
+ protected:
+ // Objects in the 18th category are at least 2048 bytes
+ static const FreeListCategoryType kFastPathFirstCategory = 18;
+ static const size_t kFastPathStart = 2048;
+ static const size_t kTinyObjectMaxSize = 128;
+ static const size_t kFastPathOffset = kFastPathStart - kTinyObjectMaxSize;
+ // Objects in the 15th category are at least 256 bytes
+ static const FreeListCategoryType kFastPathFallBackTiny = 15;
+
+ STATIC_ASSERT(categories_min[kFastPathFirstCategory] == kFastPathStart);
+ STATIC_ASSERT(categories_min[kFastPathFallBackTiny] ==
+ kTinyObjectMaxSize * 2);
+
+ FreeListCategoryType SelectFastAllocationFreeListCategoryType(
+ size_t size_in_bytes) {
+ DCHECK(size_in_bytes < kMaxBlockSize);
+
+ if (size_in_bytes >= categories_min[last_category_]) return last_category_;
+
+ size_in_bytes += kFastPathOffset;
+ for (int cat = kFastPathFirstCategory; cat < last_category_; cat++) {
+ if (size_in_bytes <= categories_min[cat]) {
+ return cat;
+ }
+ }
+ return last_category_;
+ }
+
+ FRIEND_TEST(
+ SpacesTest,
+ FreeListManyCachedFastPathSelectFastAllocationFreeListCategoryType);
+};
+
+// Uses FreeListManyCached if in the GC; FreeListManyCachedFastPath otherwise.
+// The reasoning behind this FreeList is the following: the GC runs in
+// parallel, and therefore more expensive allocations there are less
+// noticeable. On the other hand, the generated code and runtime need to be
+// very fast. Therefore, the strategy for the former is one that is not very
+// efficient, but reduces fragmentation (FreeListManyCached), while the
+// strategy for the latter is one that is very efficient, but introduces some
+// fragmentation (FreeListManyCachedFastPath).
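+// Concretely, Allocate() dispatches on |origin|: allocations coming from the
+// GC take the cached (compact but slower) path, while all other origins take
+// the fast path.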
+class V8_EXPORT_PRIVATE FreeListManyCachedOrigin
+ : public FreeListManyCachedFastPath {
+ public:
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) override;
+};
+
+// FreeList for maps: since maps are all the same size, it uses a single free
+// list.
+class V8_EXPORT_PRIVATE FreeListMap : public FreeList {
+ public:
+ size_t GuaranteedAllocatable(size_t maximum_freed) override;
+
+ Page* GetPageForSize(size_t size_in_bytes) override;
+
+ FreeListMap();
+ ~FreeListMap() override;
+
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) override;
+
+ private:
+ static const size_t kMinBlockSize = Map::kSize;
+ static const size_t kMaxBlockSize = MemoryChunk::kPageSize;
+ static const FreeListCategoryType kOnlyCategory = 0;
+
+ FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) override {
+ return kOnlyCategory;
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_FREE_LIST_H_
diff --git a/chromium/v8/src/heap/heap-inl.h b/chromium/v8/src/heap/heap-inl.h
index 39f5ec6c66e..6e42cf74527 100644
--- a/chromium/v8/src/heap/heap-inl.h
+++ b/chromium/v8/src/heap/heap-inl.h
@@ -23,7 +23,11 @@
// leak heap internals to users of this interface!
#include "src/execution/isolate-data.h"
#include "src/execution/isolate.h"
+#include "src/heap/code-object-registry.h"
+#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
+#include "src/heap/new-spaces-inl.h"
+#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/allocation-site-inl.h"
@@ -237,8 +241,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
DCHECK(!large_object);
DCHECK(CanAllocateInReadOnlySpace());
DCHECK_EQ(AllocationOrigin::kRuntime, origin);
- allocation =
- read_only_space_->AllocateRaw(size_in_bytes, alignment, origin);
+ allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
} else {
UNREACHABLE();
}
@@ -397,7 +400,8 @@ bool Heap::InYoungGeneration(MaybeObject object) {
// static
bool Heap::InYoungGeneration(HeapObject heap_object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return false;
- bool result = MemoryChunk::FromHeapObject(heap_object)->InYoungGeneration();
+ bool result =
+ BasicMemoryChunk::FromHeapObject(heap_object)->InYoungGeneration();
#ifdef DEBUG
// If in the young generation, then check we're either not in the middle of
// GC or the object is in to-space.
@@ -425,7 +429,7 @@ bool Heap::InFromPage(MaybeObject object) {
// static
bool Heap::InFromPage(HeapObject heap_object) {
- return MemoryChunk::FromHeapObject(heap_object)->IsFromPage();
+ return BasicMemoryChunk::FromHeapObject(heap_object)->IsFromPage();
}
// static
@@ -442,7 +446,7 @@ bool Heap::InToPage(MaybeObject object) {
// static
bool Heap::InToPage(HeapObject heap_object) {
- return MemoryChunk::FromHeapObject(heap_object)->IsToPage();
+ return BasicMemoryChunk::FromHeapObject(heap_object)->IsToPage();
}
bool Heap::InOldSpace(Object object) { return old_space_->Contains(object); }
@@ -452,7 +456,7 @@ Heap* Heap::FromWritableHeapObject(HeapObject obj) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return Heap::GetIsolateFromWritableObject(obj)->heap();
}
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(obj);
// RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
// find a heap. The exception is when the ReadOnlySpace is writeable, during
// bootstrapping, so explicitly allow this case.
@@ -540,7 +544,7 @@ void Heap::UpdateAllocationSite(Map map, HeapObject object,
PretenuringFeedbackMap* pretenuring_feedback) {
DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
#ifdef DEBUG
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
DCHECK_IMPLIES(chunk->IsToPage(),
chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
DCHECK_IMPLIES(!chunk->InYoungGeneration(),
@@ -709,24 +713,24 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(Code code)
: chunk_(nullptr), scope_active_(false) {}
#else
CodePageMemoryModificationScope::CodePageMemoryModificationScope(Code code)
- : CodePageMemoryModificationScope(MemoryChunk::FromHeapObject(code)) {}
+ : CodePageMemoryModificationScope(BasicMemoryChunk::FromHeapObject(code)) {}
#endif
CodePageMemoryModificationScope::CodePageMemoryModificationScope(
- MemoryChunk* chunk)
+ BasicMemoryChunk* chunk)
: chunk_(chunk),
scope_active_(chunk_->heap()->write_protect_code_memory() &&
chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
if (scope_active_) {
- DCHECK(chunk_->owner_identity() == CODE_SPACE ||
- (chunk_->owner_identity() == CODE_LO_SPACE));
- chunk_->SetReadAndWritable();
+ DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
+ (chunk_->owner()->identity() == CODE_LO_SPACE));
+ MemoryChunk::cast(chunk_)->SetReadAndWritable();
}
}
CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
if (scope_active_) {
- chunk_->SetDefaultCodePermissions();
+ MemoryChunk::cast(chunk_)->SetDefaultCodePermissions();
}
}
diff --git a/chromium/v8/src/heap/heap.cc b/chromium/v8/src/heap/heap.cc
index 606ba0fe65f..4d23e084b95 100644
--- a/chromium/v8/src/heap/heap.cc
+++ b/chromium/v8/src/heap/heap.cc
@@ -6,6 +6,7 @@
#include <cinttypes>
#include <iomanip>
+#include <memory>
#include <unordered_map>
#include <unordered_set>
@@ -30,8 +31,10 @@
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/barrier.h"
+#include "src/heap/code-object-registry.h"
#include "src/heap/code-stats.h"
#include "src/heap/combined-heap.h"
+#include "src/heap/concurrent-allocator.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/finalization-registry-cleanup-task.h"
@@ -51,8 +54,9 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
-#include "src/heap/remembered-set-inl.h"
+#include "src/heap/remembered-set.h"
#include "src/heap/safepoint.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
@@ -174,6 +178,10 @@ void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) {
set_serialized_global_proxy_sizes(sizes);
}
+void Heap::SetBasicBlockProfilingData(Handle<ArrayList> list) {
+ set_basic_block_profiling_data(*list);
+}
+
bool Heap::GCCallbackTuple::operator==(
const Heap::GCCallbackTuple& other) const {
return other.callback == callback && other.data == data;
@@ -415,7 +423,12 @@ bool Heap::CanExpandOldGeneration(size_t size) {
return memory_allocator()->Size() + size <= MaxReserved();
}
-bool Heap::HasBeenSetUp() {
+bool Heap::CanExpandOldGenerationBackground(size_t size) {
+ if (force_oom_) return false;
+ return memory_allocator()->Size() + size <= MaxReserved();
+}
+
+bool Heap::HasBeenSetUp() const {
// We will always have a new space when the heap is set up.
return new_space_ != nullptr;
}
@@ -470,8 +483,7 @@ void Heap::PrintShortHeapStatistics() {
"Read-only space, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
- read_only_space_->Size() / KB,
- read_only_space_->Available() / KB,
+ read_only_space_->Size() / KB, size_t{0},
read_only_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
"New space, used: %6zu KB"
@@ -522,8 +534,8 @@ void Heap::PrintShortHeapStatistics() {
"All spaces, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
- (this->SizeOfObjects() + ro_space->SizeOfObjects()) / KB,
- (this->Available() + ro_space->Available()) / KB,
+ (this->SizeOfObjects() + ro_space->Size()) / KB,
+ (this->Available()) / KB,
(this->CommittedMemory() + ro_space->CommittedMemory()) / KB);
PrintIsolate(isolate_,
"Unmapper buffering %zu chunks of committed: %6zu KB\n",
@@ -631,7 +643,8 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
std::stringstream stream;
stream << DICT(
MEMBER("name")
- << ESCAPE(GetSpaceName(static_cast<AllocationSpace>(space_index)))
+ << ESCAPE(BaseSpace::GetSpaceName(
+ static_cast<AllocationSpace>(space_index)))
<< ","
MEMBER("size") << space_stats.space_size() << ","
MEMBER("used_size") << space_stats.space_used_size() << ","
@@ -849,7 +862,6 @@ void Heap::GarbageCollectionPrologue() {
} else {
maximum_size_scavenges_ = 0;
}
- CheckNewSpaceExpansionCriteria();
UpdateNewSpaceAllocationCounter();
if (FLAG_track_retaining_path) {
retainer_.clear();
@@ -859,6 +871,10 @@ void Heap::GarbageCollectionPrologue() {
memory_allocator()->unmapper()->PrepareForGC();
}
+void Heap::GarbageCollectionPrologueInSafepoint() {
+ CheckNewSpaceExpansionCriteria();
+}
+
size_t Heap::SizeOfObjects() {
size_t total = 0;
@@ -876,29 +892,6 @@ size_t Heap::UsedGlobalHandlesSize() {
return isolate_->global_handles()->UsedSize();
}
-// static
-const char* Heap::GetSpaceName(AllocationSpace space) {
- switch (space) {
- case NEW_SPACE:
- return "new_space";
- case OLD_SPACE:
- return "old_space";
- case MAP_SPACE:
- return "map_space";
- case CODE_SPACE:
- return "code_space";
- case LO_SPACE:
- return "large_object_space";
- case NEW_LO_SPACE:
- return "new_large_object_space";
- case CODE_LO_SPACE:
- return "code_large_object_space";
- case RO_SPACE:
- return "read_only_space";
- }
- UNREACHABLE();
-}
-
void Heap::MergeAllocationSitePretenuringFeedback(
const PretenuringFeedbackMap& local_pretenuring_feedback) {
AllocationSite site;
@@ -1651,8 +1644,12 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
if (collector == MARK_COMPACTOR) {
- size_t committed_memory_after = CommittedOldGenerationMemory();
+    // Calculate used memory first, then committed memory. The following
+    // code assumes that committed >= used, which might not hold when this is
+    // calculated in the wrong order and background threads allocate
+    // in-between.
size_t used_memory_after = OldGenerationSizeOfObjects();
+ size_t committed_memory_after = CommittedOldGenerationMemory();
MemoryReducer::Event event;
event.type = MemoryReducer::kMarkCompact;
event.time_ms = MonotonicallyIncreasingTimeInMs();
@@ -1681,7 +1678,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
isolate()->CountUsage(v8::Isolate::kForcedGC);
}
- collection_barrier_.Increment();
+ collection_barrier_.CollectionPerformed();
// Start incremental marking for the next cycle. We do this only for scavenger
// to avoid a loop where mark-compact causes another mark-compact.
@@ -1750,10 +1747,12 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReachedBackground() {
}
const size_t old_generation_space_available = OldGenerationSpaceAvailable();
- const size_t global_memory_available = GlobalMemoryAvailable();
+ const base::Optional<size_t> global_memory_available =
+ GlobalMemoryAvailable();
if (old_generation_space_available < new_space_->Capacity() ||
- global_memory_available < new_space_->Capacity()) {
+ (global_memory_available &&
+ *global_memory_available < new_space_->Capacity())) {
incremental_marking()->incremental_marking_job()->ScheduleTask(this);
}
}
@@ -1955,6 +1954,9 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
#else
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRawUnaligned(size);
+ } else if (space == RO_SPACE) {
+ allocation = read_only_space()->AllocateRaw(
+ size, AllocationAlignment::kWordAligned);
} else {
// The deserializer will update the skip list.
allocation = paged_space(space)->AllocateRawUnaligned(size);
@@ -2013,21 +2015,29 @@ void Heap::EnsureFromSpaceIsCommitted() {
FatalProcessOutOfMemory("Committing semi space failed.");
}
-void Heap::CollectionBarrier::Increment() {
+void Heap::CollectionBarrier::CollectionPerformed() {
base::MutexGuard guard(&mutex_);
- requested_ = false;
+ gc_requested_ = false;
+ cond_.NotifyAll();
+}
+
+void Heap::CollectionBarrier::ShutdownRequested() {
+ base::MutexGuard guard(&mutex_);
+ shutdown_requested_ = true;
cond_.NotifyAll();
}
void Heap::CollectionBarrier::Wait() {
base::MutexGuard guard(&mutex_);
- if (!requested_) {
+ if (shutdown_requested_) return;
+
+ if (!gc_requested_) {
heap_->MemoryPressureNotification(MemoryPressureLevel::kCritical, false);
- requested_ = true;
+ gc_requested_ = true;
}
- while (requested_) {
+ while (gc_requested_ && !shutdown_requested_) {
cond_.Wait(&mutex_);
}
}
@@ -2062,9 +2072,6 @@ size_t Heap::PerformGarbageCollection(
base::Optional<SafepointScope> optional_safepoint_scope;
if (FLAG_local_heaps) {
optional_safepoint_scope.emplace(this);
- // Fill and reset all LABs
- safepoint()->IterateLocalHeaps(
- [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -2073,6 +2080,8 @@ size_t Heap::PerformGarbageCollection(
#endif
tracer()->StartInSafepoint();
+ GarbageCollectionPrologueInSafepoint();
+
EnsureFromSpaceIsCommitted();
size_t start_young_generation_size =
@@ -3000,10 +3009,12 @@ HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,
#ifdef DEBUG
void VerifyNoNeedToClearSlots(Address start, Address end) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(start);
+ BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromAddress(start);
+ if (basic_chunk->InReadOnlySpace()) return;
+ MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
// TODO(ulan): Support verification of large pages.
if (chunk->InYoungGeneration() || chunk->IsLargePage()) return;
- Space* space = chunk->owner();
+ BaseSpace* space = chunk->owner();
if (static_cast<PagedSpace*>(space)->is_off_thread_space()) return;
space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
}
@@ -3024,6 +3035,13 @@ HeapObject Heap::CreateFillerObjectAt(ReadOnlyRoots roots, Address addr,
return filler;
}
+void Heap::CreateFillerObjectAtBackground(
+ Address addr, int size, ClearFreedMemoryMode clear_memory_mode) {
+ CreateFillerObjectAtImpl(ReadOnlyRoots(this), addr, size, clear_memory_mode);
+ // Do not verify whether slots are cleared here: the concurrent sweeper is not
+ // allowed to access the main thread's remembered set.
+}
+
HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
ClearRecordedSlots clear_slots_mode) {
if (size == 0) return HeapObject();
@@ -3059,7 +3077,7 @@ bool Heap::InOffThreadSpace(HeapObject heap_object) {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
return false; // currently unsupported
#else
- Space* owner = MemoryChunk::FromHeapObject(heap_object)->owner();
+ BaseSpace* owner = BasicMemoryChunk::FromHeapObject(heap_object)->owner();
if (owner->identity() == OLD_SPACE) {
// TODO(leszeks): Should we exclude compaction spaces here?
return static_cast<PagedSpace*>(owner)->is_off_thread_space();
@@ -3078,12 +3096,12 @@ bool Heap::IsImmovable(HeapObject object) {
return true;
}
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
return chunk->NeverEvacuate() || IsLargeObject(object);
}
bool Heap::IsLargeObject(HeapObject object) {
- return MemoryChunk::FromHeapObject(object)->IsLargePage();
+ return BasicMemoryChunk::FromHeapObject(object)->IsLargePage();
}
#ifdef ENABLE_SLOW_DCHECKS
@@ -3112,8 +3130,9 @@ class LeftTrimmerVerifierRootVisitor : public RootVisitor {
namespace {
bool MayContainRecordedSlots(HeapObject object) {
// New space object do not have recorded slots.
- if (MemoryChunk::FromHeapObject(object)->InYoungGeneration()) return false;
- // Whitelist objects that definitely do not have pointers.
+ if (BasicMemoryChunk::FromHeapObject(object)->InYoungGeneration())
+ return false;
+ // Allowlist objects that definitely do not have pointers.
if (object.IsByteArray() || object.IsFixedDoubleArray()) return false;
// Conservatively return true for other objects.
return true;
@@ -3225,6 +3244,7 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
if (FLAG_enable_slow_asserts) {
// Make sure the stack or other roots (e.g., Handles) don't contain pointers
// to the original FixedArray (which is now the filler object).
+ SafepointScope scope(this);
LeftTrimmerVerifierRootVisitor root_visitor(object);
ReadOnlyRoots(this).Iterate(&root_visitor);
IterateRoots(&root_visitor, {});
@@ -3335,6 +3355,15 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
void Heap::MakeHeapIterable() {
mark_compact_collector()->EnsureSweepingCompleted();
+
+ MakeLocalHeapLabsIterable();
+}
+
+void Heap::MakeLocalHeapLabsIterable() {
+ if (!FLAG_local_heaps) return;
+ safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+ local_heap->MakeLinearAllocationAreaIterable();
+ });
}
namespace {
@@ -4079,7 +4108,7 @@ const char* Heap::GarbageCollectionReasonToString(
UNREACHABLE();
}
-bool Heap::Contains(HeapObject value) {
+bool Heap::Contains(HeapObject value) const {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return true;
}
@@ -4096,7 +4125,7 @@ bool Heap::Contains(HeapObject value) {
new_lo_space_->Contains(value));
}
-bool Heap::InSpace(HeapObject value, AllocationSpace space) {
+bool Heap::InSpace(HeapObject value, AllocationSpace space) const {
if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
return false;
}
@@ -4123,7 +4152,7 @@ bool Heap::InSpace(HeapObject value, AllocationSpace space) {
UNREACHABLE();
}
-bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
+bool Heap::InSpaceSlow(Address addr, AllocationSpace space) const {
if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
return false;
}
@@ -4167,32 +4196,13 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
}
#ifdef VERIFY_HEAP
-class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
- public:
- explicit VerifyReadOnlyPointersVisitor(Heap* heap)
- : VerifyPointersVisitor(heap) {}
-
- protected:
- void VerifyPointers(HeapObject host, MaybeObjectSlot start,
- MaybeObjectSlot end) override {
- if (!host.is_null()) {
- CHECK(ReadOnlyHeap::Contains(host.map()));
- }
- VerifyPointersVisitor::VerifyPointers(host, start, end);
-
- for (MaybeObjectSlot current = start; current < end; ++current) {
- HeapObject heap_object;
- if ((*current)->GetHeapObject(&heap_object)) {
- CHECK(ReadOnlyHeap::Contains(heap_object));
- }
- }
- }
-};
-
void Heap::Verify() {
CHECK(HasBeenSetUp());
+ SafepointScope safepoint_scope(this);
HandleScope scope(isolate());
+ MakeLocalHeapLabsIterable();
+
// We have to wait here for the sweeper threads to have an iterable heap.
mark_compact_collector()->EnsureSweepingCompleted();
array_buffer_sweeper()->EnsureFinished();
@@ -4225,8 +4235,7 @@ void Heap::Verify() {
void Heap::VerifyReadOnlyHeap() {
CHECK(!read_only_space_->writable());
- VerifyReadOnlyPointersVisitor read_only_visitor(this);
- read_only_space_->Verify(isolate(), &read_only_visitor);
+ read_only_space_->Verify(isolate());
}
class SlotVerifyingVisitor : public ObjectVisitor {
@@ -4311,21 +4320,20 @@ class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
ObjectSlot target) override {
VisitPointer(host, target);
- if (FLAG_minor_mc) {
- VisitPointer(host, target);
- } else {
- // Keys are handled separately and should never appear in this set.
- CHECK(!InUntypedSet(key));
- Object k = *key;
- if (!ObjectInYoungGeneration(host) && ObjectInYoungGeneration(k)) {
- EphemeronHashTable table = EphemeronHashTable::cast(host);
- auto it = ephemeron_remembered_set_->find(table);
- CHECK(it != ephemeron_remembered_set_->end());
- int slot_index =
- EphemeronHashTable::SlotToIndex(table.address(), key.address());
- InternalIndex entry = EphemeronHashTable::IndexToEntry(slot_index);
- CHECK(it->second.find(entry.as_int()) != it->second.end());
- }
+#ifdef ENABLE_MINOR_MC
+ if (FLAG_minor_mc) return VisitPointer(host, target);
+#endif
+ // Keys are handled separately and should never appear in this set.
+ CHECK(!InUntypedSet(key));
+ Object k = *key;
+ if (!ObjectInYoungGeneration(host) && ObjectInYoungGeneration(k)) {
+ EphemeronHashTable table = EphemeronHashTable::cast(host);
+ auto it = ephemeron_remembered_set_->find(table);
+ CHECK(it != ephemeron_remembered_set_->end());
+ int slot_index =
+ EphemeronHashTable::SlotToIndex(table.address(), key.address());
+ InternalIndex entry = EphemeronHashTable::IndexToEntry(slot_index);
+ CHECK(it->second.find(entry.as_int()) != it->second.end());
}
}
@@ -4390,12 +4398,7 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
#ifdef DEBUG
void Heap::VerifyCountersAfterSweeping() {
- if (FLAG_local_heaps) {
- // Ensure heap is iterable
- safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
- local_heap->MakeLinearAllocationAreaIterable();
- });
- }
+ MakeLocalHeapLabsIterable();
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
@@ -4768,6 +4771,10 @@ void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
RoundDown<Page::kPageSize>(initial_semispace_size_);
}
+ if (FLAG_lazy_new_space_shrinking) {
+ initial_semispace_size_ = max_semi_space_size_;
+ }
+
// Initialize initial_old_space_size_.
{
initial_old_generation_size_ = kMaxInitialOldGenerationSize;
@@ -4993,6 +5000,11 @@ bool Heap::IsRetryOfFailedAllocation(LocalHeap* local_heap) {
return local_heap->allocation_failed_;
}
+void Heap::AlwaysAllocateAfterTearDownStarted() {
+ always_allocate_scope_count_++;
+ collection_barrier_.ShutdownRequested();
+}
+
Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
if (ShouldReduceMemory() || FLAG_stress_compaction) {
return Heap::HeapGrowingMode::kMinimal;
@@ -5009,12 +5021,15 @@ Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
return Heap::HeapGrowingMode::kDefault;
}
-size_t Heap::GlobalMemoryAvailable() {
- return UseGlobalMemoryScheduling()
- ? GlobalSizeOfObjects() < global_allocation_limit_
- ? global_allocation_limit_ - GlobalSizeOfObjects()
- : 0
- : new_space_->Capacity() + 1;
+base::Optional<size_t> Heap::GlobalMemoryAvailable() {
+ if (!UseGlobalMemoryScheduling()) return {};
+
+ size_t global_size = GlobalSizeOfObjects();
+
+ if (global_size < global_allocation_limit_)
+ return global_allocation_limit_ - global_size;
+
+ return 0;
}
double Heap::PercentToOldGenerationLimit() {
@@ -5097,10 +5112,12 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
}
size_t old_generation_space_available = OldGenerationSpaceAvailable();
- const size_t global_memory_available = GlobalMemoryAvailable();
+ const base::Optional<size_t> global_memory_available =
+ GlobalMemoryAvailable();
if (old_generation_space_available > new_space_->Capacity() &&
- (global_memory_available > new_space_->Capacity())) {
+ (!global_memory_available ||
+ global_memory_available > new_space_->Capacity())) {
return IncrementalMarkingLimit::kNoLimit;
}
if (ShouldOptimizeForMemoryUsage()) {
@@ -5112,7 +5129,7 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (old_generation_space_available == 0) {
return IncrementalMarkingLimit::kHardLimit;
}
- if (global_memory_available == 0) {
+ if (global_memory_available && *global_memory_available == 0) {
return IncrementalMarkingLimit::kHardLimit;
}
return IncrementalMarkingLimit::kSoftLimit;
@@ -5155,7 +5172,7 @@ HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
if (!Heap::IsImmovable(heap_object)) {
if (isolate()->serializer_enabled() ||
code_space_->first_page()->Contains(heap_object.address())) {
- MemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
+ BasicMemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
} else {
// Discard the first code allocation, which was on a page where it could
// be moved.
@@ -5298,13 +5315,15 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
DCHECK_NOT_NULL(ro_heap);
DCHECK_IMPLIES(read_only_space_ != nullptr,
read_only_space_ == ro_heap->read_only_space());
- space_[RO_SPACE] = read_only_space_ = ro_heap->read_only_space();
+ space_[RO_SPACE] = nullptr;
+ read_only_space_ = ro_heap->read_only_space();
}
void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
CHECK(V8_SHARED_RO_HEAP_BOOL);
delete read_only_space_;
- space_[RO_SPACE] = read_only_space_ = space;
+
+ read_only_space_ = space;
}
void Heap::SetUpSpaces() {
@@ -5432,6 +5451,10 @@ void Heap::NotifyDeserializationComplete() {
#endif // DEBUG
}
+ if (FLAG_stress_concurrent_allocation) {
+ StressConcurrentAllocatorTask::Schedule(isolate());
+ }
+
deserialization_complete_ = true;
}
@@ -5443,7 +5466,15 @@ void Heap::NotifyBootstrapComplete() {
}
}
-void Heap::NotifyOldGenerationExpansion() {
+void Heap::NotifyOldGenerationExpansion(AllocationSpace space,
+ MemoryChunk* chunk) {
+ // Pages created during bootstrapping may contain immortal immovable objects.
+ if (!deserialization_complete()) {
+ chunk->MarkNeverEvacuate();
+ }
+ if (space == CODE_SPACE || space == CODE_LO_SPACE) {
+ isolate()->AddCodeMemoryChunk(chunk);
+ }
const size_t kMemoryReducerActivationThreshold = 1 * MB;
if (old_generation_capacity_after_bootstrap_ && ms_count_ == 0 &&
OldGenerationCapacity() >= old_generation_capacity_after_bootstrap_ +
@@ -5494,6 +5525,14 @@ void Heap::RegisterExternallyReferencedObject(Address* location) {
void Heap::StartTearDown() {
SetGCState(TEAR_DOWN);
+
+ // Background threads may allocate and block until GC is performed. However
+ // this might never happen when the main thread tries to quit and doesn't
+ // process the event queue anymore. Avoid this deadlock by allowing all
+ // allocations after tear down was requested to make sure all background
+ // threads finish.
+ AlwaysAllocateAfterTearDownStarted();
+
#ifdef VERIFY_HEAP
// {StartTearDown} is called fairly early during Isolate teardown, so it's
// a good time to run heap verification (if requested), before starting to
@@ -5585,7 +5624,7 @@ void Heap::TearDown() {
tracer_.reset();
isolate()->read_only_heap()->OnHeapTearDown();
- space_[RO_SPACE] = read_only_space_ = nullptr;
+ read_only_space_ = nullptr;
for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
delete space_[i];
space_[i] = nullptr;
@@ -5919,14 +5958,14 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
bool SkipObject(HeapObject object) override {
if (object.IsFreeSpaceOrFiller()) return true;
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
if (reachable_.count(chunk) == 0) return true;
return reachable_[chunk]->count(object) == 0;
}
private:
bool MarkAsReachable(HeapObject object) {
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
if (reachable_.count(chunk) == 0) {
reachable_[chunk] = new std::unordered_set<HeapObject, Object::Hasher>();
}
@@ -6008,7 +6047,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
Heap* heap_;
DisallowHeapAllocation no_allocation_;
- std::unordered_map<MemoryChunk*,
+ std::unordered_map<BasicMemoryChunk*,
std::unordered_set<HeapObject, Object::Hasher>*>
reachable_;
};
@@ -6016,6 +6055,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
HeapObjectIterator::HeapObjectIterator(
Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering)
: heap_(heap),
+ safepoint_scope_(std::make_unique<SafepointScope>(heap)),
filtering_(filtering),
filter_(nullptr),
space_iterator_(nullptr),
@@ -6794,7 +6834,7 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return true;
}
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
heap_internals::MemoryChunk* slim_chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
@@ -6803,7 +6843,7 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
CHECK_EQ(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING),
slim_chunk->IsMarking());
- AllocationSpace identity = chunk->owner_identity();
+ AllocationSpace identity = chunk->owner()->identity();
// Generation consistency.
CHECK_EQ(identity == NEW_SPACE || identity == NEW_LO_SPACE,
diff --git a/chromium/v8/src/heap/heap.h b/chromium/v8/src/heap/heap.h
index 888d174c02f..91214f40398 100644
--- a/chromium/v8/src/heap/heap.h
+++ b/chromium/v8/src/heap/heap.h
@@ -63,6 +63,7 @@ using v8::MemoryPressureLevel;
class AllocationObserver;
class ArrayBufferCollector;
class ArrayBufferSweeper;
+class BasicMemoryChunk;
class CodeLargeObjectSpace;
class ConcurrentMarking;
class GCIdleTimeHandler;
@@ -88,6 +89,7 @@ class Page;
class PagedSpace;
class ReadOnlyHeap;
class RootVisitor;
+class SafepointScope;
class ScavengeJob;
class Scavenger;
class ScavengerCollector;
@@ -449,7 +451,7 @@ class Heap {
void NotifyBootstrapComplete();
- void NotifyOldGenerationExpansion();
+ void NotifyOldGenerationExpansion(AllocationSpace space, MemoryChunk* chunk);
inline Address* NewSpaceAllocationTopAddress();
inline Address* NewSpaceAllocationLimitAddress();
@@ -458,8 +460,9 @@ class Heap {
// Move len non-weak tagged elements from src_slot to dst_slot of dst_object.
// The source and destination memory ranges can overlap.
- void MoveRange(HeapObject dst_object, ObjectSlot dst_slot,
- ObjectSlot src_slot, int len, WriteBarrierMode mode);
+ V8_EXPORT_PRIVATE void MoveRange(HeapObject dst_object, ObjectSlot dst_slot,
+ ObjectSlot src_slot, int len,
+ WriteBarrierMode mode);
// Copy len non-weak tagged elements from src_slot to dst_slot of dst_object.
// The source and destination memory ranges must not overlap.
@@ -474,6 +477,9 @@ class Heap {
V8_EXPORT_PRIVATE HeapObject CreateFillerObjectAt(
Address addr, int size, ClearRecordedSlots clear_slots_mode);
+ void CreateFillerObjectAtBackground(Address addr, int size,
+ ClearFreedMemoryMode clear_memory_mode);
+
template <typename T>
void CreateFillerForArray(T object, int elements_to_trim, int bytes_to_trim);
@@ -663,6 +669,8 @@ class Heap {
void SetSerializedObjects(FixedArray objects);
void SetSerializedGlobalProxySizes(FixedArray sizes);
+ void SetBasicBlockProfilingData(Handle<ArrayList> list);
+
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
@@ -749,7 +757,7 @@ class Heap {
void TearDown();
// Returns whether SetUp has been called.
- bool HasBeenSetUp();
+ bool HasBeenSetUp() const;
// ===========================================================================
// Getters for spaces. =======================================================
@@ -769,9 +777,6 @@ class Heap {
inline PagedSpace* paged_space(int idx);
inline Space* space(int idx);
- // Returns name of the space.
- V8_EXPORT_PRIVATE static const char* GetSpaceName(AllocationSpace space);
-
// ===========================================================================
// Getters to other components. ==============================================
// ===========================================================================
@@ -779,6 +784,9 @@ class Heap {
GCTracer* tracer() { return tracer_.get(); }
MemoryAllocator* memory_allocator() { return memory_allocator_.get(); }
+ const MemoryAllocator* memory_allocator() const {
+ return memory_allocator_.get();
+ }
inline Isolate* isolate();
@@ -1056,7 +1064,7 @@ class Heap {
return local_embedder_heap_tracer_.get();
}
- void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
+ V8_EXPORT_PRIVATE void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
EmbedderHeapTracer* GetEmbedderHeapTracer() const;
void RegisterExternallyReferencedObject(Address* location);
@@ -1107,15 +1115,15 @@ class Heap {
// Checks whether an address/object is in the non-read-only heap (including
// auxiliary area and unused area). Use IsValidHeapObject if checking both
// heaps is required.
- V8_EXPORT_PRIVATE bool Contains(HeapObject value);
+ V8_EXPORT_PRIVATE bool Contains(HeapObject value) const;
// Checks whether an address/object in a space.
// Currently used by tests, serialization and heap verification only.
- V8_EXPORT_PRIVATE bool InSpace(HeapObject value, AllocationSpace space);
+ V8_EXPORT_PRIVATE bool InSpace(HeapObject value, AllocationSpace space) const;
// Slow methods that can be used for verification as they can also be used
// with off-heap Addresses.
- V8_EXPORT_PRIVATE bool InSpaceSlow(Address addr, AllocationSpace space);
+ V8_EXPORT_PRIVATE bool InSpaceSlow(Address addr, AllocationSpace space) const;
static inline Heap* FromWritableHeapObject(HeapObject obj);
@@ -1539,12 +1547,15 @@ class Heap {
Heap* heap_;
base::Mutex mutex_;
base::ConditionVariable cond_;
- bool requested_;
+ bool gc_requested_;
+ bool shutdown_requested_;
public:
- explicit CollectionBarrier(Heap* heap) : heap_(heap), requested_(false) {}
+ explicit CollectionBarrier(Heap* heap)
+ : heap_(heap), gc_requested_(false), shutdown_requested_(false) {}
- void Increment();
+ void CollectionPerformed();
+ void ShutdownRequested();
void Wait();
};
@@ -1635,6 +1646,9 @@ class Heap {
// over all objects. May cause a GC.
void MakeHeapIterable();
+ // Ensure that LABs of local heaps are iterable.
+ void MakeLocalHeapLabsIterable();
+
// Performs garbage collection in a safepoint.
// Returns the number of freed global handles.
size_t PerformGarbageCollection(
@@ -1771,6 +1785,7 @@ class Heap {
// Code that should be run before and after each GC. Includes some
// reporting/verification activities when compiled with DEBUG set.
void GarbageCollectionPrologue();
+ void GarbageCollectionPrologueInSafepoint();
void GarbageCollectionEpilogue();
void GarbageCollectionEpilogueInSafepoint();
@@ -1851,11 +1866,14 @@ class Heap {
bool always_allocate() { return always_allocate_scope_count_ != 0; }
V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
+ V8_EXPORT_PRIVATE bool CanExpandOldGenerationBackground(size_t size);
bool ShouldExpandOldGenerationOnSlowAllocation(
LocalHeap* local_heap = nullptr);
bool IsRetryOfFailedAllocation(LocalHeap* local_heap);
+ void AlwaysAllocateAfterTearDownStarted();
+
HeapGrowingMode CurrentHeapGrowingMode();
double PercentToOldGenerationLimit();
@@ -1867,7 +1885,7 @@ class Heap {
return FLAG_global_gc_scheduling && local_embedder_heap_tracer();
}
- size_t GlobalMemoryAvailable();
+ base::Optional<size_t> GlobalMemoryAvailable();
void RecomputeLimits(GarbageCollector collector);
@@ -2269,6 +2287,7 @@ class Heap {
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
friend class OffThreadHeap;
+ friend class OffThreadSpace;
friend class OldLargeObjectSpace;
template <typename ConcreteVisitor, typename MarkingState>
friend class MarkingVisitorBase;
@@ -2389,12 +2408,12 @@ class CodePageCollectionMemoryModificationScope {
// was registered to be executable. It can be used by concurrent threads.
class CodePageMemoryModificationScope {
public:
- explicit inline CodePageMemoryModificationScope(MemoryChunk* chunk);
+ explicit inline CodePageMemoryModificationScope(BasicMemoryChunk* chunk);
explicit inline CodePageMemoryModificationScope(Code object);
inline ~CodePageMemoryModificationScope();
private:
- MemoryChunk* chunk_;
+ BasicMemoryChunk* chunk_;
bool scope_active_;
// Disallow any GCs inside this scope, as a relocation of the underlying
@@ -2497,6 +2516,7 @@ class V8_EXPORT_PRIVATE HeapObjectIterator {
DISALLOW_HEAP_ALLOCATION(no_heap_allocation_)
Heap* heap_;
+ std::unique_ptr<SafepointScope> safepoint_scope_;
HeapObjectsFiltering filtering_;
HeapObjectsFilter* filter_;
// Space iterator for iterating all the spaces.
diff --git a/chromium/v8/src/heap/incremental-marking.cc b/chromium/v8/src/heap/incremental-marking.cc
index 8fb1492fe16..cb1eff27b27 100644
--- a/chromium/v8/src/heap/incremental-marking.cc
+++ b/chromium/v8/src/heap/incremental-marking.cc
@@ -93,6 +93,13 @@ void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
collector_->VisitObject(obj);
}
+void IncrementalMarking::MarkBlackBackground(HeapObject obj, int object_size) {
+ MarkBit mark_bit = atomic_marking_state()->MarkBitFrom(obj);
+ Marking::MarkBlack<AccessMode::ATOMIC>(mark_bit);
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
+ IncrementLiveBytesBackground(chunk, static_cast<intptr_t>(object_size));
+}
+
void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
DCHECK(IsMarking());
DCHECK(MemoryChunk::FromHeapObject(from)->SweepingDone());
@@ -367,6 +374,11 @@ void IncrementalMarking::StartBlackAllocation() {
heap()->old_space()->MarkLinearAllocationAreaBlack();
heap()->map_space()->MarkLinearAllocationAreaBlack();
heap()->code_space()->MarkLinearAllocationAreaBlack();
+ if (FLAG_local_heaps) {
+ heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+ local_heap->MarkLinearAllocationAreaBlack();
+ });
+ }
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation started\n");
@@ -378,6 +390,11 @@ void IncrementalMarking::PauseBlackAllocation() {
heap()->old_space()->UnmarkLinearAllocationArea();
heap()->map_space()->UnmarkLinearAllocationArea();
heap()->code_space()->UnmarkLinearAllocationArea();
+ if (FLAG_local_heaps) {
+ heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+ local_heap->UnmarkLinearAllocationArea();
+ });
+ }
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation paused\n");
@@ -728,10 +745,13 @@ StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
}
}
}
+ // |deadline - heap_->MonotonicallyIncreasingTimeInMs()| could be negative,
+ // which means |local_tracer| won't do any actual tracing, so there is no
+ // need to check for |deadline <= heap_->MonotonicallyIncreasingTimeInMs()|.
bool remote_tracing_done =
local_tracer->Trace(deadline - heap_->MonotonicallyIncreasingTimeInMs());
double current = heap_->MonotonicallyIncreasingTimeInMs();
- local_tracer->SetEmbedderWorklistEmpty(true);
+ local_tracer->SetEmbedderWorklistEmpty(empty_worklist);
*duration_ms = current - start;
return (empty_worklist && remote_tracing_done)
? StepResult::kNoImmediateWork
@@ -790,6 +810,20 @@ void IncrementalMarking::Stop() {
SetState(STOPPED);
is_compacting_ = false;
FinishBlackAllocation();
+
+ if (FLAG_local_heaps) {
+ // Merge live bytes counters of background threads
+ for (auto pair : background_live_bytes_) {
+ MemoryChunk* memory_chunk = pair.first;
+ intptr_t live_bytes = pair.second;
+
+ if (live_bytes) {
+ marking_state()->IncrementLiveBytes(memory_chunk, live_bytes);
+ }
+ }
+
+ background_live_bytes_.clear();
+ }
}
@@ -958,24 +992,32 @@ StepResult IncrementalMarking::AdvanceWithDeadline(
void IncrementalMarking::FinalizeSweeping() {
DCHECK(state_ == SWEEPING);
-#ifdef DEBUG
- // Enforce safepoint here such that background threads cannot allocate between
- // completing sweeping and VerifyCountersAfterSweeping().
- SafepointScope scope(heap());
-#endif
- if (collector_->sweeping_in_progress() &&
- (!FLAG_concurrent_sweeping ||
- !collector_->sweeper()->AreSweeperTasksRunning())) {
- collector_->EnsureSweepingCompleted();
+ if (ContinueConcurrentSweeping()) {
+ if (FLAG_stress_incremental_marking) {
+ // To start concurrent marking a bit earlier, support concurrent sweepers
+ // from main thread by sweeping some pages.
+ SupportConcurrentSweeping();
+ }
+ return;
}
- if (!collector_->sweeping_in_progress()) {
+
+ SafepointScope scope(heap());
+ collector_->EnsureSweepingCompleted();
+ DCHECK(!collector_->sweeping_in_progress());
#ifdef DEBUG
- heap_->VerifyCountersAfterSweeping();
-#else
- SafepointScope scope(heap());
+ heap_->VerifyCountersAfterSweeping();
#endif
- StartMarking();
- }
+ StartMarking();
+}
+
+bool IncrementalMarking::ContinueConcurrentSweeping() {
+ if (!collector_->sweeping_in_progress()) return false;
+ return FLAG_concurrent_sweeping &&
+ collector_->sweeper()->AreSweeperTasksRunning();
+}
+
+void IncrementalMarking::SupportConcurrentSweeping() {
+ collector_->sweeper()->SupportConcurrentSweeping();
}
size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
diff --git a/chromium/v8/src/heap/incremental-marking.h b/chromium/v8/src/heap/incremental-marking.h
index 7d06c086499..c507c022a70 100644
--- a/chromium/v8/src/heap/incremental-marking.h
+++ b/chromium/v8/src/heap/incremental-marking.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
#define V8_HEAP_INCREMENTAL_MARKING_H_
+#include "src/base/platform/mutex.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-job.h"
#include "src/heap/mark-compact.h"
@@ -168,6 +169,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
StepOrigin step_origin);
void FinalizeSweeping();
+ bool ContinueConcurrentSweeping();
+ void SupportConcurrentSweeping();
StepResult Step(double max_step_size_in_ms, CompletionAction action,
StepOrigin step_origin);
@@ -205,6 +208,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
// the concurrent marker.
void MarkBlackAndVisitObjectDueToLayoutChange(HeapObject obj);
+ void MarkBlackBackground(HeapObject obj, int object_size);
+
bool IsCompacting() { return IsMarking() && is_compacting_; }
void ProcessBlackAllocatedObject(HeapObject obj);
@@ -235,6 +240,11 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
bool IsBelowActivationThresholds() const;
+ void IncrementLiveBytesBackground(MemoryChunk* chunk, intptr_t by) {
+ base::MutexGuard guard(&background_live_bytes_mutex_);
+ background_live_bytes_[chunk] += by;
+ }
+
private:
class Observer : public AllocationObserver {
public:
@@ -337,6 +347,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
AtomicMarkingState atomic_marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
+ base::Mutex background_live_bytes_mutex_;
+ std::unordered_map<MemoryChunk*, intptr_t> background_live_bytes_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
} // namespace internal
diff --git a/chromium/v8/src/heap/large-spaces.cc b/chromium/v8/src/heap/large-spaces.cc
index 40363919497..0becaec35a5 100644
--- a/chromium/v8/src/heap/large-spaces.cc
+++ b/chromium/v8/src/heap/large-spaces.cc
@@ -9,8 +9,9 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/list.h"
#include "src/heap/marking.h"
+#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
-#include "src/heap/remembered-set-inl.h"
+#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces-inl.h"
#include "src/logging/log.h"
@@ -134,7 +135,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
heap()->incremental_marking()->black_allocation(),
heap()->incremental_marking()->marking_state()->IsBlack(object));
page->InitializationMemoryFence();
- heap()->NotifyOldGenerationExpansion();
+ heap()->NotifyOldGenerationExpansion(identity(), page);
AllocationStep(object_size, object.address(), object_size);
return object;
}
@@ -163,7 +164,7 @@ size_t LargeObjectSpace::CommittedPhysicalMemory() {
}
LargePage* CodeLargeObjectSpace::FindPage(Address a) {
- const Address key = MemoryChunk::FromAddress(a)->address();
+ const Address key = BasicMemoryChunk::FromAddress(a)->address();
auto it = chunk_map_.find(key);
if (it != chunk_map_.end()) {
LargePage* page = it->second;
@@ -223,7 +224,8 @@ void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
page_count_++;
memory_chunk_list_.PushBack(page);
page->set_owner(this);
- page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+ page->SetOldGenerationPageFlags(!is_off_thread() &&
+ heap()->incremental_marking()->IsMarking());
}
void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
@@ -273,7 +275,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
}
bool LargeObjectSpace::Contains(HeapObject object) {
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
bool owned = (chunk->owner() == this);
@@ -514,7 +516,6 @@ AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
OldLargeObjectSpace::AddPage(page, object_size);
InsertChunkMapEntries(page);
- heap()->isolate()->AddCodeMemoryChunk(page);
}
void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
diff --git a/chromium/v8/src/heap/list.h b/chromium/v8/src/heap/list.h
index 5ab9a03610f..a8a75045074 100644
--- a/chromium/v8/src/heap/list.h
+++ b/chromium/v8/src/heap/list.h
@@ -68,8 +68,8 @@ class List {
element->list_node().set_next(nullptr);
}
- bool Contains(T* element) {
- T* it = front_;
+ bool Contains(T* element) const {
+ const T* it = front_;
while (it) {
if (it == element) return true;
it = it->list_node().next();
@@ -77,11 +77,14 @@ class List {
return false;
}
- bool Empty() { return !front_ && !back_; }
+ bool Empty() const { return !front_ && !back_; }
T* front() { return front_; }
T* back() { return back_; }
+ const T* front() const { return front_; }
+ const T* back() const { return back_; }
+
private:
void AddFirstElement(T* element) {
DCHECK(!back_);
@@ -129,6 +132,9 @@ class ListNode {
T* next() { return next_; }
T* prev() { return prev_; }
+ const T* next() const { return next_; }
+ const T* prev() const { return prev_; }
+
void Initialize() {
next_ = nullptr;
prev_ = nullptr;
diff --git a/chromium/v8/src/heap/local-allocator.h b/chromium/v8/src/heap/local-allocator.h
index ba8cd2e610b..9e4d5f688cb 100644
--- a/chromium/v8/src/heap/local-allocator.h
+++ b/chromium/v8/src/heap/local-allocator.h
@@ -7,6 +7,8 @@
#include "src/common/globals.h"
#include "src/heap/heap.h"
+#include "src/heap/new-spaces.h"
+#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"
namespace v8 {
diff --git a/chromium/v8/src/heap/local-heap.cc b/chromium/v8/src/heap/local-heap.cc
index 3aea67411dd..55076bee25d 100644
--- a/chromium/v8/src/heap/local-heap.cc
+++ b/chromium/v8/src/heap/local-heap.cc
@@ -107,5 +107,13 @@ void LocalHeap::MakeLinearAllocationAreaIterable() {
old_space_allocator_.MakeLinearAllocationAreaIterable();
}
+void LocalHeap::MarkLinearAllocationAreaBlack() {
+ old_space_allocator_.MarkLinearAllocationAreaBlack();
+}
+
+void LocalHeap::UnmarkLinearAllocationArea() {
+ old_space_allocator_.UnmarkLinearAllocationArea();
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/local-heap.h b/chromium/v8/src/heap/local-heap.h
index 31c66bc2be5..8406c39042d 100644
--- a/chromium/v8/src/heap/local-heap.h
+++ b/chromium/v8/src/heap/local-heap.h
@@ -48,6 +48,17 @@ class LocalHeap {
ConcurrentAllocator* old_space_allocator() { return &old_space_allocator_; }
+ // Mark/Unmark linear allocation areas black. Used for black allocation.
+ void MarkLinearAllocationAreaBlack();
+ void UnmarkLinearAllocationArea();
+
+ // Give up linear allocation areas. Used for mark-compact GC.
+ void FreeLinearAllocationArea();
+
+ // Create filler object in linear allocation areas. Verifying requires
+ // iterable heap.
+ void MakeLinearAllocationAreaIterable();
+
private:
enum class ThreadState {
// Threads in this state need to be stopped in a safepoint.
@@ -68,9 +79,6 @@ class LocalHeap {
void EnterSafepoint();
- void FreeLinearAllocationArea();
- void MakeLinearAllocationAreaIterable();
-
Heap* heap_;
base::Mutex state_mutex_;
@@ -107,6 +115,19 @@ class ParkedScope {
LocalHeap* local_heap_;
};
+class ParkedMutexGuard {
+ base::Mutex* guard_;
+
+ public:
+ explicit ParkedMutexGuard(LocalHeap* local_heap, base::Mutex* guard)
+ : guard_(guard) {
+ ParkedScope scope(local_heap);
+ guard_->Lock();
+ }
+
+ ~ParkedMutexGuard() { guard_->Unlock(); }
+};
+
} // namespace internal
} // namespace v8
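ParkedMutexGuard above parks the local heap only while the lock is being acquired, so a safepoint never has to wait for a background thread that is merely blocked on a mutex. A rough self-contained illustration of the same idea, using std::mutex and an atomic flag in place of the V8 types (assumed names, not the real API):

    #include <atomic>
    #include <mutex>

    struct ThreadState {
      std::atomic<bool> parked{false};  // stand-in for LocalHeap's thread state
    };

    class ParkedLockGuard {
     public:
      ParkedLockGuard(ThreadState* state, std::mutex* mutex)
          : state_(state), mutex_(mutex) {
        state_->parked.store(true);   // safepoints can proceed without this thread
        mutex_->lock();               // may block for a while
        state_->parked.store(false);  // running again, now holding the lock
      }
      ~ParkedLockGuard() { mutex_->unlock(); }

     private:
      ThreadState* state_;
      std::mutex* mutex_;
    };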
diff --git a/chromium/v8/src/heap/mark-compact-inl.h b/chromium/v8/src/heap/mark-compact-inl.h
index 7c06286f97a..e554601b4a4 100644
--- a/chromium/v8/src/heap/mark-compact-inl.h
+++ b/chromium/v8/src/heap/mark-compact-inl.h
@@ -65,7 +65,7 @@ void MarkCompactCollector::RecordSlot(HeapObject object, ObjectSlot slot,
void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
HeapObject target) {
- MemoryChunk* target_page = MemoryChunk::FromHeapObject(target);
+ BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(target);
MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
@@ -76,7 +76,7 @@ void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
void MarkCompactCollector::RecordSlot(MemoryChunk* source_page,
HeapObjectSlot slot, HeapObject target) {
- MemoryChunk* target_page = MemoryChunk::FromHeapObject(target);
+ BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(target);
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>()) {
RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(source_page,
slot.address());
@@ -215,7 +215,7 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
// Note that we know that we are at a one word filler when
// object_start + object_size - kTaggedSize == object_start.
if (addr != end) {
- DCHECK_EQ(chunk_, MemoryChunk::FromAddress(end));
+ DCHECK_EQ(chunk_, BasicMemoryChunk::FromAddress(end));
uint32_t end_mark_bit_index = chunk_->AddressToMarkbitIndex(end);
unsigned int end_cell_index =
end_mark_bit_index >> Bitmap::kBitsPerCellLog2;
diff --git a/chromium/v8/src/heap/mark-compact.cc b/chromium/v8/src/heap/mark-compact.cc
index 7b609ab22a4..4e594c7f5d1 100644
--- a/chromium/v8/src/heap/mark-compact.cc
+++ b/chromium/v8/src/heap/mark-compact.cc
@@ -16,6 +16,7 @@
#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
+#include "src/heap/code-object-registry.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
@@ -31,6 +32,7 @@
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/read-only-spaces.h"
+#include "src/heap/safepoint.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
@@ -868,6 +870,13 @@ void MarkCompactCollector::Prepare() {
space = spaces.Next()) {
space->PrepareForMarkCompact();
}
+
+ if (FLAG_local_heaps) {
+ // Fill and reset all background thread LABs
+ heap_->safepoint()->IterateLocalHeaps(
+ [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
+ }
+
heap()->account_external_memory_concurrently_freed();
}
@@ -1223,7 +1232,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
inline virtual void RecordMigratedSlot(HeapObject host, MaybeObject value,
Address slot) {
if (value->IsStrongOrWeak()) {
- MemoryChunk* p = MemoryChunk::FromAddress(value.ptr());
+ BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
if (p->InYoungGeneration()) {
DCHECK_IMPLIES(
p->IsToPage(),
@@ -2713,8 +2722,6 @@ static inline SlotCallbackResult UpdateStrongSlot(TSlot slot) {
// It does not expect to encounter pointers to dead objects.
class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
public:
- PointersUpdatingVisitor() {}
-
void VisitPointer(HeapObject host, ObjectSlot p) override {
UpdateStrongSlotInternal(p);
}
@@ -4410,7 +4417,7 @@ class YoungGenerationRecordMigratedSlotVisitor final
inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
Address slot) final {
if (value->IsStrongOrWeak()) {
- MemoryChunk* p = MemoryChunk::FromAddress(value.ptr());
+ BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
if (p->InYoungGeneration()) {
DCHECK_IMPLIES(
p->IsToPage(),
@@ -4712,6 +4719,7 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
PageRange(new_space->first_allocatable_address(), new_space->top())) {
new_space_evacuation_pages_.push_back(p);
}
+
new_space->Flip();
new_space->ResetLinearAllocationArea();
@@ -4984,6 +4992,10 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
&root_visitor, &IsUnmarkedObjectForYoungGeneration);
DrainMarkingWorklist();
}
+
+ if (FLAG_minor_mc_trace_fragmentation) {
+ TraceFragmentation();
+ }
}
void MinorMarkCompactCollector::DrainMarkingWorklist() {
@@ -4999,6 +5011,57 @@ void MinorMarkCompactCollector::DrainMarkingWorklist() {
DCHECK(marking_worklist.IsLocalEmpty());
}
+void MinorMarkCompactCollector::TraceFragmentation() {
+ NewSpace* new_space = heap()->new_space();
+ const std::array<size_t, 4> free_size_class_limits = {0, 1024, 2048, 4096};
+ size_t free_bytes_of_class[free_size_class_limits.size()] = {0};
+ size_t live_bytes = 0;
+ size_t allocatable_bytes = 0;
+ for (Page* p :
+ PageRange(new_space->first_allocatable_address(), new_space->top())) {
+ Address free_start = p->area_start();
+ for (auto object_and_size : LiveObjectRange<kGreyObjects>(
+ p, non_atomic_marking_state()->bitmap(p))) {
+ HeapObject const object = object_and_size.first;
+ Address free_end = object.address();
+ if (free_end != free_start) {
+ size_t free_bytes = free_end - free_start;
+ int free_bytes_index = 0;
+ for (auto free_size_class_limit : free_size_class_limits) {
+ if (free_bytes >= free_size_class_limit) {
+ free_bytes_of_class[free_bytes_index] += free_bytes;
+ }
+ free_bytes_index++;
+ }
+ }
+ Map map = object.synchronized_map();
+ int size = object.SizeFromMap(map);
+ live_bytes += size;
+ free_start = free_end + size;
+ }
+ size_t area_end =
+ p->Contains(new_space->top()) ? new_space->top() : p->area_end();
+ if (free_start != area_end) {
+ size_t free_bytes = area_end - free_start;
+ int free_bytes_index = 0;
+ for (auto free_size_class_limit : free_size_class_limits) {
+ if (free_bytes >= free_size_class_limit) {
+ free_bytes_of_class[free_bytes_index] += free_bytes;
+ }
+ free_bytes_index++;
+ }
+ }
+ allocatable_bytes += area_end - p->area_start();
+ CHECK_EQ(allocatable_bytes, live_bytes + free_bytes_of_class[0]);
+ }
+ PrintIsolate(
+ isolate(),
+ "Minor Mark-Compact Fragmentation: allocatable_bytes=%zu live_bytes=%zu "
+ "free_bytes=%zu free_bytes_1K=%zu free_bytes_2K=%zu free_bytes_4K=%zu\n",
+ allocatable_bytes, live_bytes, free_bytes_of_class[0],
+ free_bytes_of_class[1], free_bytes_of_class[2], free_bytes_of_class[3]);
+}
+
void MinorMarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
base::MutexGuard guard(heap()->relocation_mutex());
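The size-class accounting in TraceFragmentation above is cumulative: a free span is added to every class whose limit it reaches, so free_bytes_of_class[0] always equals the total free bytes. A small standalone sketch of that bucketing with made-up free spans:

    #include <array>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::array<size_t, 4> limits = {0, 1024, 2048, 4096};
      size_t free_bytes_of_class[limits.size()] = {0};
      const size_t free_spans[] = {512, 1500, 8192};  // hypothetical free spans
      for (size_t span : free_spans) {
        size_t index = 0;
        for (size_t limit : limits) {
          if (span >= limit) free_bytes_of_class[index] += span;
          ++index;
        }
      }
      // Prints 10204, 9692, 8192, 8192: each class counts spans >= its limit.
      for (size_t i = 0; i < limits.size(); ++i) {
        std::printf("free_bytes >= %zu: %zu\n", limits[i], free_bytes_of_class[i]);
      }
      return 0;
    }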
diff --git a/chromium/v8/src/heap/mark-compact.h b/chromium/v8/src/heap/mark-compact.h
index 30723ede385..35a5a85e91f 100644
--- a/chromium/v8/src/heap/mark-compact.h
+++ b/chromium/v8/src/heap/mark-compact.h
@@ -247,8 +247,10 @@ class MarkCompactCollectorBase {
class MinorMarkingState final
: public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
public:
- ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
- return chunk->young_generation_bitmap<AccessMode::ATOMIC>();
+ ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
+ const BasicMemoryChunk* chunk) const {
+ return MemoryChunk::cast(chunk)
+ ->young_generation_bitmap<AccessMode::ATOMIC>();
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -269,8 +271,9 @@ class MinorNonAtomicMarkingState final
AccessMode::NON_ATOMIC> {
public:
ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
- const MemoryChunk* chunk) const {
- return chunk->young_generation_bitmap<AccessMode::NON_ATOMIC>();
+ const BasicMemoryChunk* chunk) const {
+ return MemoryChunk::cast(chunk)
+ ->young_generation_bitmap<AccessMode::NON_ATOMIC>();
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -293,25 +296,26 @@ class MinorNonAtomicMarkingState final
class MajorMarkingState final
: public MarkingStateBase<MajorMarkingState, AccessMode::ATOMIC> {
public:
- ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
+ ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
+ const BasicMemoryChunk* chunk) const {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
- MemoryChunk::kMarkBitmapOffset);
+ BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::ATOMIC>();
}
// Concurrent marking uses local live bytes so we may do these accesses
// non-atomically.
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
- chunk->live_byte_count_ += by;
+ chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
}
intptr_t live_bytes(MemoryChunk* chunk) const {
- return chunk->live_byte_count_;
+ return chunk->live_byte_count_.load(std::memory_order_relaxed);
}
void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
- chunk->live_byte_count_ = value;
+ chunk->live_byte_count_.store(value, std::memory_order_relaxed);
}
};
@@ -320,16 +324,16 @@ class MajorMarkingState final
class MajorAtomicMarkingState final
: public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
public:
- ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
+ ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
+ const BasicMemoryChunk* chunk) const {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
- MemoryChunk::kMarkBitmapOffset);
+ BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::ATOMIC>();
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
- std::atomic_fetch_add(
- reinterpret_cast<std::atomic<intptr_t>*>(&chunk->live_byte_count_), by);
+ chunk->live_byte_count_.fetch_add(by);
}
};
@@ -338,23 +342,23 @@ class MajorNonAtomicMarkingState final
AccessMode::NON_ATOMIC> {
public:
ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
- const MemoryChunk* chunk) const {
+ const BasicMemoryChunk* chunk) const {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
- MemoryChunk::kMarkBitmapOffset);
+ BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
- chunk->live_byte_count_ += by;
+ chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
}
intptr_t live_bytes(MemoryChunk* chunk) const {
- return chunk->live_byte_count_;
+ return chunk->live_byte_count_.load(std::memory_order_relaxed);
}
void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
- chunk->live_byte_count_ = value;
+ chunk->live_byte_count_.store(value, std::memory_order_relaxed);
}
};
@@ -515,9 +519,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
HeapObjectSlot slot, HeapObject target);
void RecordLiveSlotsOnPage(Page* page);
- void UpdateSlots(SlotsBuffer* buffer);
- void UpdateSlotsRecordedIn(SlotsBuffer* buffer);
-
bool is_compacting() const { return compacting_; }
// Ensures that sweeping is finished.
@@ -567,7 +568,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void VerifyMarking();
#ifdef VERIFY_HEAP
- void VerifyValidStoreAndSlotsBufferEntries();
void VerifyMarkbitsAreClean();
void VerifyMarkbitsAreDirty(ReadOnlySpace* space);
void VerifyMarkbitsAreClean(PagedSpace* space);
@@ -856,6 +856,7 @@ class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
void MarkRootSetInParallel(RootMarkingVisitor* root_visitor);
V8_INLINE void MarkRootObject(HeapObject obj);
void DrainMarkingWorklist() override;
+ void TraceFragmentation();
void ClearNonLiveReferences() override;
void EvacuatePrologue() override;
diff --git a/chromium/v8/src/heap/marking-visitor.h b/chromium/v8/src/heap/marking-visitor.h
index a4c2a9f522c..3010445eefa 100644
--- a/chromium/v8/src/heap/marking-visitor.h
+++ b/chromium/v8/src/heap/marking-visitor.h
@@ -73,11 +73,11 @@ template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
public:
V8_INLINE MarkBit MarkBitFrom(HeapObject obj) {
- return MarkBitFrom(MemoryChunk::FromHeapObject(obj), obj.ptr());
+ return MarkBitFrom(BasicMemoryChunk::FromHeapObject(obj), obj.ptr());
}
// {addr} may be tagged or aligned.
- V8_INLINE MarkBit MarkBitFrom(MemoryChunk* p, Address addr) {
+ V8_INLINE MarkBit MarkBitFrom(BasicMemoryChunk* p, Address addr) {
return static_cast<ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
p->AddressToMarkbitIndex(addr));
}
@@ -115,10 +115,11 @@ class MarkingStateBase {
}
V8_INLINE bool GreyToBlack(HeapObject obj) {
- MemoryChunk* p = MemoryChunk::FromHeapObject(obj);
- MarkBit markbit = MarkBitFrom(p, obj.address());
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(obj);
+ MarkBit markbit = MarkBitFrom(chunk, obj.address());
if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
- static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, obj.Size());
+ static_cast<ConcreteState*>(this)->IncrementLiveBytes(
+ MemoryChunk::cast(chunk), obj.Size());
return true;
}
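GreyToBlack above relies on the atomic grey-to-black transition succeeding for exactly one thread, so the object's size is added to the live-byte counter exactly once. A much-simplified standalone sketch of that invariant, with a single atomic flag standing in for the mark bitmap (illustrative only):

    #include <atomic>
    #include <cstddef>

    struct ObjectMark {
      std::atomic<bool> black{false};  // stand-in for the grey/black mark bits
    };

    // Returns true only for the thread that performed the transition; that
    // thread alone accounts the object's size.
    inline bool GreyToBlack(ObjectMark* mark, size_t object_size,
                            std::atomic<size_t>* live_bytes) {
      bool expected = false;
      if (!mark->black.compare_exchange_strong(expected, true)) return false;
      live_bytes->fetch_add(object_size, std::memory_order_relaxed);
      return true;
    }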
diff --git a/chromium/v8/src/heap/memory-allocator.cc b/chromium/v8/src/heap/memory-allocator.cc
new file mode 100644
index 00000000000..f1047e2248f
--- /dev/null
+++ b/chromium/v8/src/heap/memory-allocator.cc
@@ -0,0 +1,778 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/memory-allocator.h"
+
+#include <cinttypes>
+
+#include "src/base/address-region.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
+#include "src/flags/flags.h"
+#include "src/heap/gc-tracer.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/memory-chunk.h"
+#include "src/heap/read-only-spaces.h"
+#include "src/logging/log.h"
+
+namespace v8 {
+namespace internal {
+
+static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
+ LAZY_INSTANCE_INITIALIZER;
+
+Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
+ base::MutexGuard guard(&mutex_);
+ auto it = recently_freed_.find(code_range_size);
+ if (it == recently_freed_.end() || it->second.empty()) {
+ return reinterpret_cast<Address>(GetRandomMmapAddr());
+ }
+ Address result = it->second.back();
+ it->second.pop_back();
+ return result;
+}
+
+void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
+ size_t code_range_size) {
+ base::MutexGuard guard(&mutex_);
+ recently_freed_[code_range_size].push_back(code_range_start);
+}
+
+// -----------------------------------------------------------------------------
+// MemoryAllocator
+//
+
+MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
+ size_t code_range_size)
+ : isolate_(isolate),
+ data_page_allocator_(isolate->page_allocator()),
+ code_page_allocator_(nullptr),
+ capacity_(RoundUp(capacity, Page::kPageSize)),
+ size_(0),
+ size_executable_(0),
+ lowest_ever_allocated_(static_cast<Address>(-1ll)),
+ highest_ever_allocated_(kNullAddress),
+ unmapper_(isolate->heap(), this) {
+ InitializeCodePageAllocator(data_page_allocator_, code_range_size);
+}
+
+void MemoryAllocator::InitializeCodePageAllocator(
+ v8::PageAllocator* page_allocator, size_t requested) {
+ DCHECK_NULL(code_page_allocator_instance_.get());
+
+ code_page_allocator_ = page_allocator;
+
+ if (requested == 0) {
+ if (!isolate_->RequiresCodeRange()) return;
+ // When a target requires the code range feature, we put all code objects
+ // in a kMaximalCodeRangeSize range of virtual address space, so that
+ // they can call each other with near calls.
+ requested = kMaximalCodeRangeSize;
+ } else if (requested <= kMinimumCodeRangeSize) {
+ requested = kMinimumCodeRangeSize;
+ }
+
+ const size_t reserved_area =
+ kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
+ if (requested < (kMaximalCodeRangeSize - reserved_area)) {
+ requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
+    // Fulfilling both the reserved pages requirement and huge code area
+    // alignments is not supported (requires re-implementation).
+ DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
+ }
+ DCHECK(!isolate_->RequiresCodeRange() || requested <= kMaximalCodeRangeSize);
+
+ Address hint =
+ RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
+ page_allocator->AllocatePageSize());
+ VirtualMemory reservation(
+ page_allocator, requested, reinterpret_cast<void*>(hint),
+ Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
+ if (!reservation.IsReserved()) {
+ V8::FatalProcessOutOfMemory(isolate_,
+ "CodeRange setup: allocate virtual memory");
+ }
+ code_range_ = reservation.region();
+ isolate_->AddCodeRange(code_range_.begin(), code_range_.size());
+
+ // We are sure that we have mapped a block of requested addresses.
+ DCHECK_GE(reservation.size(), requested);
+ Address base = reservation.address();
+
+ // On some platforms, specifically Win64, we need to reserve some pages at
+ // the beginning of an executable space. See
+ // https://cs.chromium.org/chromium/src/components/crash/content/
+ // app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
+ // for details.
+ if (reserved_area > 0) {
+ if (!reservation.SetPermissions(base, reserved_area,
+ PageAllocator::kReadWrite))
+ V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");
+
+ base += reserved_area;
+ }
+ Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
+ size_t size =
+ RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
+ MemoryChunk::kPageSize);
+ DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));
+
+ LOG(isolate_,
+ NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
+ requested));
+
+ code_reservation_ = std::move(reservation);
+ code_page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
+ page_allocator, aligned_base, size,
+ static_cast<size_t>(MemoryChunk::kAlignment));
+ code_page_allocator_ = code_page_allocator_instance_.get();
+}
+
+void MemoryAllocator::TearDown() {
+ unmapper()->TearDown();
+
+ // Check that spaces were torn down before MemoryAllocator.
+ DCHECK_EQ(size_, 0u);
+ // TODO(gc) this will be true again when we fix FreeMemory.
+ // DCHECK_EQ(0, size_executable_);
+ capacity_ = 0;
+
+ if (last_chunk_.IsReserved()) {
+ last_chunk_.Free();
+ }
+
+ if (code_page_allocator_instance_.get()) {
+ DCHECK(!code_range_.is_empty());
+ code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
+ code_range_.size());
+ code_range_ = base::AddressRegion();
+ code_page_allocator_instance_.reset();
+ }
+ code_page_allocator_ = nullptr;
+ data_page_allocator_ = nullptr;
+}
+
+class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
+ public:
+ explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
+ : CancelableTask(isolate),
+ unmapper_(unmapper),
+ tracer_(isolate->heap()->tracer()) {}
+
+ private:
+ void RunInternal() override {
+ TRACE_BACKGROUND_GC(tracer_,
+ GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
+ unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
+ unmapper_->active_unmapping_tasks_--;
+ unmapper_->pending_unmapping_tasks_semaphore_.Signal();
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(unmapper_->heap_->isolate(),
+ "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
+ }
+ }
+
+ Unmapper* const unmapper_;
+ GCTracer* const tracer_;
+ DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
+};
+
+void MemoryAllocator::Unmapper::FreeQueuedChunks() {
+ if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
+ if (!MakeRoomForNewTasks()) {
+ // kMaxUnmapperTasks are already running. Avoid creating any more.
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(heap_->isolate(),
+ "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
+ kMaxUnmapperTasks);
+ }
+ return;
+ }
+ auto task = std::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(heap_->isolate(),
+ "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
+ task->id());
+ }
+ DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
+ DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
+ DCHECK_GE(active_unmapping_tasks_, 0);
+ active_unmapping_tasks_++;
+ task_ids_[pending_unmapping_tasks_++] = task->id();
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
+ } else {
+ PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
+ }
+}
+
+void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
+ for (int i = 0; i < pending_unmapping_tasks_; i++) {
+ if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
+ TryAbortResult::kTaskAborted) {
+ pending_unmapping_tasks_semaphore_.Wait();
+ }
+ }
+ pending_unmapping_tasks_ = 0;
+ active_unmapping_tasks_ = 0;
+
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(
+ heap_->isolate(),
+ "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
+ }
+}
+
+void MemoryAllocator::Unmapper::PrepareForGC() {
+ // Free non-regular chunks because they cannot be re-used.
+ PerformFreeMemoryOnQueuedNonRegularChunks();
+}
+
+void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
+ CancelAndWaitForPendingTasks();
+ PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
+}
+
+bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
+ DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
+
+ if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
+ // All previous unmapping tasks have been run to completion.
+ // Finalize those tasks to make room for new ones.
+ CancelAndWaitForPendingTasks();
+ }
+ return pending_unmapping_tasks_ != kMaxUnmapperTasks;
+}
+
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
+ MemoryChunk* chunk = nullptr;
+ while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
+ allocator_->PerformFreeMemory(chunk);
+ }
+}
+
+template <MemoryAllocator::Unmapper::FreeMode mode>
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
+ MemoryChunk* chunk = nullptr;
+ if (FLAG_trace_unmapper) {
+ PrintIsolate(
+ heap_->isolate(),
+ "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
+ NumberOfChunks());
+ }
+ // Regular chunks.
+ while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
+ bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
+ allocator_->PerformFreeMemory(chunk);
+ if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
+ }
+ if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
+ // The previous loop uncommitted any pages marked as pooled and added them
+ // to the pooled list. In case of kReleasePooled we need to free them
+ // though.
+ while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
+ allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
+ }
+ }
+ PerformFreeMemoryOnQueuedNonRegularChunks();
+}
+
+void MemoryAllocator::Unmapper::TearDown() {
+ CHECK_EQ(0, pending_unmapping_tasks_);
+ PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
+ for (int i = 0; i < kNumberOfChunkQueues; i++) {
+ DCHECK(chunks_[i].empty());
+ }
+}
+
+size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
+ base::MutexGuard guard(&mutex_);
+ return chunks_[kRegular].size() + chunks_[kNonRegular].size();
+}
+
+int MemoryAllocator::Unmapper::NumberOfChunks() {
+ base::MutexGuard guard(&mutex_);
+ size_t result = 0;
+ for (int i = 0; i < kNumberOfChunkQueues; i++) {
+ result += chunks_[i].size();
+ }
+ return static_cast<int>(result);
+}
+
+size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
+ base::MutexGuard guard(&mutex_);
+
+ size_t sum = 0;
+  // kPooled chunks are already uncommitted. We only have to account for
+ // kRegular and kNonRegular chunks.
+ for (auto& chunk : chunks_[kRegular]) {
+ sum += chunk->size();
+ }
+ for (auto& chunk : chunks_[kNonRegular]) {
+ sum += chunk->size();
+ }
+ return sum;
+}
+
+bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
+ Address base = reservation->address();
+ size_t size = reservation->size();
+ if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
+ return false;
+ }
+ UpdateAllocatedSpaceLimits(base, base + size);
+ return true;
+}
+
+bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
+ size_t size = reservation->size();
+ if (!reservation->SetPermissions(reservation->address(), size,
+ PageAllocator::kNoAccess)) {
+ return false;
+ }
+ return true;
+}
+
+void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
+ Address base, size_t size) {
+ CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
+}
+
+Address MemoryAllocator::AllocateAlignedMemory(
+ size_t reserve_size, size_t commit_size, size_t alignment,
+ Executability executable, void* hint, VirtualMemory* controller) {
+ v8::PageAllocator* page_allocator = this->page_allocator(executable);
+ DCHECK(commit_size <= reserve_size);
+ VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
+ if (!reservation.IsReserved()) return kNullAddress;
+ Address base = reservation.address();
+ size_ += reservation.size();
+
+ if (executable == EXECUTABLE) {
+ if (!CommitExecutableMemory(&reservation, base, commit_size,
+ reserve_size)) {
+ base = kNullAddress;
+ }
+ } else {
+ if (reservation.SetPermissions(base, commit_size,
+ PageAllocator::kReadWrite)) {
+ UpdateAllocatedSpaceLimits(base, base + commit_size);
+ } else {
+ base = kNullAddress;
+ }
+ }
+
+ if (base == kNullAddress) {
+ // Failed to commit the body. Free the mapping and any partially committed
+ // regions inside it.
+ reservation.Free();
+ size_ -= reserve_size;
+ return kNullAddress;
+ }
+
+ *controller = std::move(reservation);
+ return base;
+}
+
+V8_EXPORT_PRIVATE BasicMemoryChunk* MemoryAllocator::AllocateBasicChunk(
+ size_t reserve_area_size, size_t commit_area_size, Executability executable,
+ BaseSpace* owner) {
+ DCHECK_LE(commit_area_size, reserve_area_size);
+
+ size_t chunk_size;
+ Heap* heap = isolate_->heap();
+ Address base = kNullAddress;
+ VirtualMemory reservation;
+ Address area_start = kNullAddress;
+ Address area_end = kNullAddress;
+ void* address_hint =
+ AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
+
+ //
+ // MemoryChunk layout:
+ //
+ // Executable
+ // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+ // | Header |
+ // +----------------------------+<- base + CodePageGuardStartOffset
+ // | Guard |
+ // +----------------------------+<- area_start_
+ // | Area |
+ // +----------------------------+<- area_end_ (area_start + commit_area_size)
+ // | Committed but not used |
+ // +----------------------------+<- aligned at OS page boundary
+ // | Reserved but not committed |
+ // +----------------------------+<- aligned at OS page boundary
+ // | Guard |
+ // +----------------------------+<- base + chunk_size
+ //
+ // Non-executable
+ // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+ // | Header |
+ // +----------------------------+<- area_start_ (base + area_start_)
+ // | Area |
+ // +----------------------------+<- area_end_ (area_start + commit_area_size)
+ // | Committed but not used |
+ // +----------------------------+<- aligned at OS page boundary
+ // | Reserved but not committed |
+ // +----------------------------+<- base + chunk_size
+ //
+
+ if (executable == EXECUTABLE) {
+ chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
+ reserve_area_size +
+ MemoryChunkLayout::CodePageGuardSize(),
+ GetCommitPageSize());
+
+ // Size of header (not executable) plus area (executable).
+ size_t commit_size = ::RoundUp(
+ MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
+ GetCommitPageSize());
+ base =
+ AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
+ executable, address_hint, &reservation);
+ if (base == kNullAddress) return nullptr;
+ // Update executable memory size.
+ size_executable_ += reservation.size();
+
+ if (Heap::ShouldZapGarbage()) {
+ ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
+ ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
+ commit_area_size, kZapValue);
+ }
+
+ area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
+ area_end = area_start + commit_area_size;
+ } else {
+ chunk_size = ::RoundUp(
+ MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
+ GetCommitPageSize());
+ size_t commit_size = ::RoundUp(
+ MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
+ GetCommitPageSize());
+ base =
+ AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
+ executable, address_hint, &reservation);
+
+ if (base == kNullAddress) return nullptr;
+
+ if (Heap::ShouldZapGarbage()) {
+ ZapBlock(
+ base,
+ MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
+ kZapValue);
+ }
+
+ area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
+ area_end = area_start + commit_area_size;
+ }
+
+  // Use chunk_size for statistics because we treat reserved but not-yet
+  // committed memory regions of chunks as allocated.
+ LOG(isolate_,
+ NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
+
+ // We cannot use the last chunk in the address space because we would
+ // overflow when comparing top and limit if this chunk is used for a
+ // linear allocation area.
+ if ((base + chunk_size) == 0u) {
+ CHECK(!last_chunk_.IsReserved());
+ last_chunk_ = std::move(reservation);
+ UncommitMemory(&last_chunk_);
+ size_ -= chunk_size;
+ if (executable == EXECUTABLE) {
+ size_executable_ -= chunk_size;
+ }
+ CHECK(last_chunk_.IsReserved());
+ return AllocateBasicChunk(reserve_area_size, commit_area_size, executable,
+ owner);
+ }
+
+ BasicMemoryChunk* chunk =
+ BasicMemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
+ owner, std::move(reservation));
+
+ return chunk;
+}
+
+MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
+ size_t commit_area_size,
+ Executability executable,
+ BaseSpace* owner) {
+ BasicMemoryChunk* basic_chunk = AllocateBasicChunk(
+ reserve_area_size, commit_area_size, executable, owner);
+
+ if (basic_chunk == nullptr) return nullptr;
+
+ MemoryChunk* chunk =
+ MemoryChunk::Initialize(basic_chunk, isolate_->heap(), executable);
+
+ if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
+ return chunk;
+}
+
+void MemoryAllocator::PartialFreeMemory(BasicMemoryChunk* chunk,
+ Address start_free,
+ size_t bytes_to_free,
+ Address new_area_end) {
+ VirtualMemory* reservation = chunk->reserved_memory();
+ DCHECK(reservation->IsReserved());
+ chunk->set_size(chunk->size() - bytes_to_free);
+ chunk->set_area_end(new_area_end);
+ if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+ // Add guard page at the end.
+ size_t page_size = GetCommitPageSize();
+ DCHECK_EQ(0, chunk->area_end() % static_cast<Address>(page_size));
+ DCHECK_EQ(chunk->address() + chunk->size(),
+ chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
+ reservation->SetPermissions(chunk->area_end(), page_size,
+ PageAllocator::kNoAccess);
+ }
+  // On some platforms (e.g. Windows), a reservation may be larger than a
+  // page, and releasing it partially starting at |start_free| also releases
+  // the potentially unused part behind the current page.
+ const size_t released_bytes = reservation->Release(start_free);
+ DCHECK_GE(size_, released_bytes);
+ size_ -= released_bytes;
+}
+
+void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
+ Executability executable) {
+ DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
+ VirtualMemory* reservation = chunk->reserved_memory();
+ const size_t size =
+ reservation->IsReserved() ? reservation->size() : chunk->size();
+ DCHECK_GE(size_, static_cast<size_t>(size));
+ size_ -= size;
+ if (executable == EXECUTABLE) {
+ DCHECK_GE(size_executable_, size);
+ size_executable_ -= size;
+ UnregisterExecutableMemoryChunk(static_cast<MemoryChunk*>(chunk));
+ }
+ chunk->SetFlag(MemoryChunk::UNREGISTERED);
+}
+
+void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
+ UnregisterMemory(chunk, chunk->executable());
+}
+
+void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
+ DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
+ LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
+ UnregisterMemory(chunk);
+ chunk->SetFlag(MemoryChunk::PRE_FREED);
+ chunk->ReleaseMarkingBitmap();
+
+ VirtualMemory* reservation = chunk->reserved_memory();
+ if (reservation->IsReserved()) {
+ reservation->Free();
+ } else {
+ // Only read-only pages can have non-initialized reservation object.
+ FreeMemory(page_allocator(NOT_EXECUTABLE), chunk->address(), chunk->size());
+ }
+}
+
+void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
+ DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
+ LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
+ UnregisterMemory(chunk);
+ isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
+ chunk->IsEvacuationCandidate());
+ chunk->SetFlag(MemoryChunk::PRE_FREED);
+}
+
+void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
+ DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
+ DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
+ DCHECK(!chunk->InReadOnlySpace());
+ chunk->ReleaseAllAllocatedMemory();
+
+ VirtualMemory* reservation = chunk->reserved_memory();
+ if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
+ UncommitMemory(reservation);
+ } else {
+ DCHECK(reservation->IsReserved());
+ reservation->Free();
+ }
+}
+
+template <MemoryAllocator::FreeMode mode>
+void MemoryAllocator::Free(MemoryChunk* chunk) {
+ switch (mode) {
+ case kFull:
+ PreFreeMemory(chunk);
+ PerformFreeMemory(chunk);
+ break;
+ case kAlreadyPooled:
+ // Pooled pages cannot be touched anymore as their memory is uncommitted.
+ // Pooled pages are not-executable.
+ FreeMemory(data_page_allocator(), chunk->address(),
+ static_cast<size_t>(MemoryChunk::kPageSize));
+ break;
+ case kPooledAndQueue:
+ DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
+ DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
+ chunk->SetFlag(MemoryChunk::POOLED);
+ V8_FALLTHROUGH;
+ case kPreFreeAndQueue:
+ PreFreeMemory(chunk);
+ // The chunks added to this queue will be freed by a concurrent thread.
+ unmapper()->AddMemoryChunkSafe(chunk);
+ break;
+ }
+}
+
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
+ MemoryAllocator::kFull>(MemoryChunk* chunk);
+
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
+ MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
+
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
+ MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
+
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
+ MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
+
+template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
+Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
+ Executability executable) {
+ MemoryChunk* chunk = nullptr;
+ if (alloc_mode == kPooled) {
+ DCHECK_EQ(size, static_cast<size_t>(
+ MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
+ owner->identity())));
+ DCHECK_EQ(executable, NOT_EXECUTABLE);
+ chunk = AllocatePagePooled(owner);
+ }
+ if (chunk == nullptr) {
+ chunk = AllocateChunk(size, size, executable, owner);
+ }
+ if (chunk == nullptr) return nullptr;
+ return owner->InitializePage(chunk);
+}
+
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
+ size_t size, PagedSpace* owner, Executability executable);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
+
+ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(size_t size,
+ ReadOnlySpace* owner) {
+ BasicMemoryChunk* chunk = nullptr;
+ if (chunk == nullptr) {
+ chunk = AllocateBasicChunk(size, size, NOT_EXECUTABLE, owner);
+ }
+ if (chunk == nullptr) return nullptr;
+ return owner->InitializePage(chunk);
+}
+
+LargePage* MemoryAllocator::AllocateLargePage(size_t size,
+ LargeObjectSpace* owner,
+ Executability executable) {
+ MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
+ if (chunk == nullptr) return nullptr;
+ return LargePage::Initialize(isolate_->heap(), chunk, executable);
+}
+
+template <typename SpaceType>
+MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
+ MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
+ if (chunk == nullptr) return nullptr;
+ const int size = MemoryChunk::kPageSize;
+ const Address start = reinterpret_cast<Address>(chunk);
+ const Address area_start =
+ start +
+ MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
+ const Address area_end = start + size;
+ // Pooled pages are always regular data pages.
+ DCHECK_NE(CODE_SPACE, owner->identity());
+ VirtualMemory reservation(data_page_allocator(), start, size);
+ if (!CommitMemory(&reservation)) return nullptr;
+ if (Heap::ShouldZapGarbage()) {
+ ZapBlock(start, size, kZapValue);
+ }
+ BasicMemoryChunk* basic_chunk =
+ BasicMemoryChunk::Initialize(isolate_->heap(), start, size, area_start,
+ area_end, owner, std::move(reservation));
+ MemoryChunk::Initialize(basic_chunk, isolate_->heap(), NOT_EXECUTABLE);
+ size_ += size;
+ return chunk;
+}
+
+void MemoryAllocator::ZapBlock(Address start, size_t size,
+ uintptr_t zap_value) {
+ DCHECK(IsAligned(start, kTaggedSize));
+ DCHECK(IsAligned(size, kTaggedSize));
+ MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(zap_value)),
+ size >> kTaggedSizeLog2);
+}
+
+intptr_t MemoryAllocator::GetCommitPageSize() {
+ if (FLAG_v8_os_page_size != 0) {
+ DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
+ return FLAG_v8_os_page_size * KB;
+ } else {
+ return CommitPageSize();
+ }
+}
+
+base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
+ size_t size) {
+ size_t page_size = MemoryAllocator::GetCommitPageSize();
+ if (size < page_size + FreeSpace::kSize) {
+ return base::AddressRegion(0, 0);
+ }
+ Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
+ Address discardable_end = RoundDown(addr + size, page_size);
+ if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
+ return base::AddressRegion(discardable_start,
+ discardable_end - discardable_start);
+}
+
+bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
+ size_t commit_size,
+ size_t reserved_size) {
+ const size_t page_size = GetCommitPageSize();
+ // All addresses and sizes must be aligned to the commit page size.
+ DCHECK(IsAligned(start, page_size));
+ DCHECK_EQ(0, commit_size % page_size);
+ DCHECK_EQ(0, reserved_size % page_size);
+ const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
+ const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
+ const size_t code_area_offset =
+ MemoryChunkLayout::ObjectStartOffsetInCodePage();
+ // reserved_size includes two guard regions, commit_size does not.
+ DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
+ const Address pre_guard_page = start + pre_guard_offset;
+ const Address code_area = start + code_area_offset;
+ const Address post_guard_page = start + reserved_size - guard_size;
+ // Commit the non-executable header, from start to pre-code guard page.
+ if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
+ // Create the pre-code guard page, following the header.
+ if (vm->SetPermissions(pre_guard_page, page_size,
+ PageAllocator::kNoAccess)) {
+ // Commit the executable code body.
+ if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
+ PageAllocator::kReadWrite)) {
+ // Create the post-code guard page.
+ if (vm->SetPermissions(post_guard_page, page_size,
+ PageAllocator::kNoAccess)) {
+ UpdateAllocatedSpaceLimits(start, code_area + commit_size);
+ return true;
+ }
+ vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
+ }
+ }
+ vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
+ }
+ return false;
+}
+
+} // namespace internal
+} // namespace v8
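ComputeDiscardMemoryArea above trims a free block to the whole commit pages inside it: the start is rounded up past room for a FreeSpace filler, the end is rounded down, and anything that cannot cover a full page yields an empty region. A standalone sketch of that arithmetic, assuming a 4 KiB commit page and a 16-byte filler header (the real values come from GetCommitPageSize() and FreeSpace::kSize):

    #include <cstdint>
    #include <cstdio>

    using Address = uintptr_t;

    struct Region {
      Address start;
      size_t size;
    };

    Region ComputeDiscardArea(Address addr, size_t size, size_t page_size,
                              size_t filler_size) {
      if (size < page_size + filler_size) return {0, 0};
      // Round the usable start up and the end down to page boundaries.
      Address start = ((addr + filler_size + page_size - 1) / page_size) * page_size;
      Address end = ((addr + size) / page_size) * page_size;
      if (start >= end) return {0, 0};
      return {start, end - start};
    }

    int main() {
      // A 10000-byte free block at 0x1040 keeps exactly one discardable page.
      Region r = ComputeDiscardArea(0x1040, 10000, 4096, 16);
      std::printf("discardable start=%#zx size=%zu\n",
                  static_cast<size_t>(r.start), r.size);
      return 0;
    }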
diff --git a/chromium/v8/src/heap/memory-allocator.h b/chromium/v8/src/heap/memory-allocator.h
new file mode 100644
index 00000000000..558e11aa02e
--- /dev/null
+++ b/chromium/v8/src/heap/memory-allocator.h
@@ -0,0 +1,451 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_MEMORY_ALLOCATOR_H_
+#define V8_HEAP_MEMORY_ALLOCATOR_H_
+
+#include <atomic>
+#include <memory>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "src/base/bounded-page-allocator.h"
+#include "src/base/export-template.h"
+#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
+#include "src/heap/heap.h"
+#include "src/heap/memory-chunk.h"
+#include "src/heap/spaces.h"
+#include "src/tasks/cancelable-task.h"
+#include "src/utils/allocation.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class Isolate;
+class ReadOnlyPage;
+
+// The process-wide singleton that keeps track of code range regions with the
+// intention to reuse free code range regions as a workaround for CFG memory
+// leaks (see crbug.com/870054).
+class CodeRangeAddressHint {
+ public:
+ // Returns the most recently freed code range start address for the given
+ // size. If there is no such entry, then a random address is returned.
+ V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
+
+ V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
+ size_t code_range_size);
+
+ private:
+ base::Mutex mutex_;
+ // A map from code range size to an array of recently freed code range
+ // addresses. There should be O(1) different code range sizes.
+ // The length of each array is limited by the peak number of code ranges,
+ // which should be also O(1).
+ std::unordered_map<size_t, std::vector<Address>> recently_freed_;
+};
+
+// ----------------------------------------------------------------------------
+// A space acquires chunks of memory from the operating system. The memory
+// allocator allocates and deallocates pages for the paged heap spaces and large
+// pages for large object space.
+class MemoryAllocator {
+ public:
+ // Unmapper takes care of concurrently unmapping and uncommitting memory
+ // chunks.
+ class Unmapper {
+ public:
+ class UnmapFreeMemoryTask;
+
+ Unmapper(Heap* heap, MemoryAllocator* allocator)
+ : heap_(heap),
+ allocator_(allocator),
+ pending_unmapping_tasks_semaphore_(0),
+ pending_unmapping_tasks_(0),
+ active_unmapping_tasks_(0) {
+ chunks_[kRegular].reserve(kReservedQueueingSlots);
+ chunks_[kPooled].reserve(kReservedQueueingSlots);
+ }
+
+ void AddMemoryChunkSafe(MemoryChunk* chunk) {
+ if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
+ AddMemoryChunkSafe<kRegular>(chunk);
+ } else {
+ AddMemoryChunkSafe<kNonRegular>(chunk);
+ }
+ }
+
+ MemoryChunk* TryGetPooledMemoryChunkSafe() {
+ // Procedure:
+ // (1) Try to get a chunk that was declared as pooled and already has
+ // been uncommitted.
+ // (2) Try to steal any memory chunk of kPageSize that would've been
+ // unmapped.
+ MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
+ if (chunk == nullptr) {
+ chunk = GetMemoryChunkSafe<kRegular>();
+ if (chunk != nullptr) {
+ // For stolen chunks we need to manually free any allocated memory.
+ chunk->ReleaseAllAllocatedMemory();
+ }
+ }
+ return chunk;
+ }
+
+ V8_EXPORT_PRIVATE void FreeQueuedChunks();
+ void CancelAndWaitForPendingTasks();
+ void PrepareForGC();
+ V8_EXPORT_PRIVATE void EnsureUnmappingCompleted();
+ V8_EXPORT_PRIVATE void TearDown();
+ size_t NumberOfCommittedChunks();
+ V8_EXPORT_PRIVATE int NumberOfChunks();
+ size_t CommittedBufferedMemory();
+
+ private:
+ static const int kReservedQueueingSlots = 64;
+ static const int kMaxUnmapperTasks = 4;
+
+ enum ChunkQueueType {
+ kRegular, // Pages of kPageSize that do not live in a CodeRange and
+ // can thus be used for stealing.
+ kNonRegular, // Large chunks and executable chunks.
+      kPooled,      // Pooled chunks, already uncommitted and ready for reuse.
+ kNumberOfChunkQueues,
+ };
+
+ enum class FreeMode {
+ kUncommitPooled,
+ kReleasePooled,
+ };
+
+ template <ChunkQueueType type>
+ void AddMemoryChunkSafe(MemoryChunk* chunk) {
+ base::MutexGuard guard(&mutex_);
+ chunks_[type].push_back(chunk);
+ }
+
+ template <ChunkQueueType type>
+ MemoryChunk* GetMemoryChunkSafe() {
+ base::MutexGuard guard(&mutex_);
+ if (chunks_[type].empty()) return nullptr;
+ MemoryChunk* chunk = chunks_[type].back();
+ chunks_[type].pop_back();
+ return chunk;
+ }
+
+ bool MakeRoomForNewTasks();
+
+ template <FreeMode mode>
+ void PerformFreeMemoryOnQueuedChunks();
+
+ void PerformFreeMemoryOnQueuedNonRegularChunks();
+
+ Heap* const heap_;
+ MemoryAllocator* const allocator_;
+ base::Mutex mutex_;
+ std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
+ CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
+ base::Semaphore pending_unmapping_tasks_semaphore_;
+ intptr_t pending_unmapping_tasks_;
+ std::atomic<intptr_t> active_unmapping_tasks_;
+
+ friend class MemoryAllocator;
+ };
+
+ enum AllocationMode {
+ kRegular,
+ kPooled,
+ };
+
+ enum FreeMode {
+ kFull,
+ kAlreadyPooled,
+ kPreFreeAndQueue,
+ kPooledAndQueue,
+ };
+
+ V8_EXPORT_PRIVATE static intptr_t GetCommitPageSize();
+
+ // Computes the memory area of discardable memory within a given memory area
+ // [addr, addr+size) and returns the result as base::AddressRegion. If the
+  // memory is not discardable, an empty base::AddressRegion is returned.
+ V8_EXPORT_PRIVATE static base::AddressRegion ComputeDiscardMemoryArea(
+ Address addr, size_t size);
+
+ V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate, size_t max_capacity,
+ size_t code_range_size);
+
+ V8_EXPORT_PRIVATE void TearDown();
+
+ // Allocates a Page from the allocator. AllocationMode is used to indicate
+ // whether pooled allocation, which only works for MemoryChunk::kPageSize,
+ // should be tried first.
+ template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
+ typename SpaceType>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
+
+ LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
+ Executability executable);
+
+ ReadOnlyPage* AllocateReadOnlyPage(size_t size, ReadOnlySpace* owner);
+
+ template <MemoryAllocator::FreeMode mode = kFull>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ void Free(MemoryChunk* chunk);
+ void FreeReadOnlyPage(ReadOnlyPage* chunk);
+
+ // Returns allocated spaces in bytes.
+ size_t Size() const { return size_; }
+
+ // Returns allocated executable spaces in bytes.
+ size_t SizeExecutable() const { return size_executable_; }
+
+ // Returns the maximum available bytes of heaps.
+ size_t Available() const {
+ const size_t size = Size();
+ return capacity_ < size ? 0 : capacity_ - size;
+ }
+
+ // Returns an indication of whether a pointer is in a space that has
+ // been allocated by this MemoryAllocator.
+ V8_INLINE bool IsOutsideAllocatedSpace(Address address) const {
+ return address < lowest_ever_allocated_ ||
+ address >= highest_ever_allocated_;
+ }
+
+ // Returns a BasicMemoryChunk in which the memory region from commit_area_size
+  // to reserve_area_size of the chunk area is reserved but not committed; it
+  // can be committed later by calling MemoryChunk::CommitArea.
+ V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
+ size_t reserve_area_size, size_t commit_area_size,
+ Executability executable, BaseSpace* space);
+
+ // Returns a MemoryChunk in which the memory region from commit_area_size to
+  // reserve_area_size of the chunk area is reserved but not committed; it
+  // can be committed later by calling MemoryChunk::CommitArea.
+ V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
+ size_t commit_area_size,
+ Executability executable,
+ BaseSpace* space);
+
+ Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
+ size_t alignment, Executability executable,
+ void* hint, VirtualMemory* controller);
+
+ void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);
+
+ // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
+ // internally memory is freed from |start_free| to the end of the reservation.
+  // Additional memory beyond the page is not accounted for, though, so
+  // |bytes_to_free| is computed by the caller.
+ void PartialFreeMemory(BasicMemoryChunk* chunk, Address start_free,
+ size_t bytes_to_free, Address new_area_end);
+
+ // Checks if an allocated MemoryChunk was intended to be used for executable
+ // memory.
+ bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
+ return executable_memory_.find(chunk) != executable_memory_.end();
+ }
+
+ // Commit memory region owned by given reservation object. Returns true if
+ // it succeeded and false otherwise.
+ bool CommitMemory(VirtualMemory* reservation);
+
+ // Uncommit memory region owned by given reservation object. Returns true if
+ // it succeeded and false otherwise.
+ bool UncommitMemory(VirtualMemory* reservation);
+
+  // Zaps a contiguous block of memory [start..(start+size)) with
+ // a given zap value.
+ void ZapBlock(Address start, size_t size, uintptr_t zap_value);
+
+ V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
+ Address start,
+ size_t commit_size,
+ size_t reserved_size);
+
+ // Page allocator instance for allocating non-executable pages.
+ // Guaranteed to be a valid pointer.
+ v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }
+
+ // Page allocator instance for allocating executable pages.
+ // Guaranteed to be a valid pointer.
+ v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }
+
+ // Returns page allocator suitable for allocating pages with requested
+ // executability.
+ v8::PageAllocator* page_allocator(Executability executable) {
+ return executable == EXECUTABLE ? code_page_allocator_
+ : data_page_allocator_;
+ }
+
+ // A region of memory that may contain executable code including reserved
+ // OS page with read-write access in the beginning.
+ const base::AddressRegion& code_range() const {
+ // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
+ DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
+ DCHECK_IMPLIES(!code_range_.is_empty(),
+ code_range_.contains(code_page_allocator_instance_->begin(),
+ code_page_allocator_instance_->size()));
+ return code_range_;
+ }
+
+ Unmapper* unmapper() { return &unmapper_; }
+
+ // Performs all necessary bookkeeping to free the memory, but does not free
+ // it.
+ void UnregisterMemory(MemoryChunk* chunk);
+ void UnregisterMemory(BasicMemoryChunk* chunk,
+ Executability executable = NOT_EXECUTABLE);
+
+ private:
+ void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
+ size_t requested);
+
+ // PreFreeMemory logically frees the object, i.e., it unregisters the
+ // memory, logs a delete event and adds the chunk to remembered unmapped
+ // pages.
+ void PreFreeMemory(MemoryChunk* chunk);
+
+ // PerformFreeMemory can be called concurrently when PreFree was executed
+ // before.
+ void PerformFreeMemory(MemoryChunk* chunk);
+
+ // See AllocatePage for public interface. Note that currently we only
+ // support pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
+ template <typename SpaceType>
+ MemoryChunk* AllocatePagePooled(SpaceType* owner);
+
+ // Initializes pages in a chunk. Returns the first page address.
+ // This function and GetChunkId() are provided for the mark-compact
+ // collector to rebuild page headers in the from space, which is
+ // used as a marking stack and its page headers are destroyed.
+ Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
+ PagedSpace* owner);
+
+ void UpdateAllocatedSpaceLimits(Address low, Address high) {
+ // The use of atomic primitives does not guarantee correctness (wrt.
+ // desired semantics) by default. The loop here ensures that we update the
+ // values only if they did not change in between.
+ Address ptr = lowest_ever_allocated_.load(std::memory_order_relaxed);
+ while ((low < ptr) && !lowest_ever_allocated_.compare_exchange_weak(
+ ptr, low, std::memory_order_acq_rel)) {
+ }
+ ptr = highest_ever_allocated_.load(std::memory_order_relaxed);
+ while ((high > ptr) && !highest_ever_allocated_.compare_exchange_weak(
+ ptr, high, std::memory_order_acq_rel)) {
+ }
+ }
+
+ void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
+ base::MutexGuard guard(&executable_memory_mutex_);
+ DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+ DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
+ executable_memory_.insert(chunk);
+ }
+
+ void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
+ base::MutexGuard guard(&executable_memory_mutex_);
+ DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
+ executable_memory_.erase(chunk);
+ chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
+ }
+
+ Isolate* isolate_;
+
+ // This object controls virtual space reserved for code on the V8 heap. It
+ // is only valid on 64-bit architectures where kRequiresCodeRange is true.
+ VirtualMemory code_reservation_;
+
+ // Page allocator used for allocating data pages. Depending on the
+ // configuration it may be a page allocator instance provided by
+ // v8::Platform or a BoundedPageAllocator (when pointer compression is
+ // enabled).
+ v8::PageAllocator* data_page_allocator_;
+
+ // Page allocator used for allocating code pages. Depending on the
+ // configuration it may be a page allocator instance provided by
+ // v8::Platform or a BoundedPageAllocator (when pointer compression is
+ // enabled or on those 64-bit architectures where pc-relative 32-bit
+ // displacement can be used for call and jump instructions).
+ v8::PageAllocator* code_page_allocator_;
+
+ // A part of the |code_reservation_| that may contain executable code,
+ // including a reserved page with read-write access at the beginning.
+ // See details below.
+ base::AddressRegion code_range_;
+
+ // This unique pointer owns the instance of bounded code allocator
+ // that controls executable pages allocation. It does not control the
+ // optionally existing page in the beginning of the |code_range_|.
+ // So, summarizing all above, the following conditions hold:
+ // 1) |code_reservation_| >= |code_range_|
+ // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
+ // 3) |code_reservation_| is AllocatePageSize()-aligned
+ // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
+ // 5) |code_range_| is CommitPageSize()-aligned
+ std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;
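The numbered conditions above describe how the code range is carved out of the larger reservation. Below is a rough, self-contained sketch of that layout arithmetic under assumed page sizes and alignments; the constants, struct, and helper names are invented for the example and are not V8 API.

#include <cassert>
#include <cstddef>
#include <cstdint>

using Address = std::uintptr_t;

constexpr size_t kAllocatePageSize = 64 * 1024;  // OS allocation granularity.
constexpr size_t kCommitPageSize = 4 * 1024;     // OS commit granularity.
constexpr size_t kChunkAlignment = 256 * 1024;   // MemoryChunk-style alignment.

inline Address RoundUpTo(Address a, size_t alignment) {
  return (a + alignment - 1) & ~static_cast<Address>(alignment - 1);
}

inline Address RoundDownTo(Address a, size_t alignment) {
  return a & ~static_cast<Address>(alignment - 1);
}

struct CodeRangeLayout {
  Address reservation_start, reservation_end;  // |code_reservation_|
  Address range_start, range_end;              // |code_range_|
  Address allocatable_start;                   // handed to the code allocator
};

CodeRangeLayout CarveCodeRange(Address base, size_t size, size_t rw_header) {
  // 3) The reservation itself is expected to be AllocatePageSize()-aligned.
  assert(base % kAllocatePageSize == 0);
  CodeRangeLayout l;
  l.reservation_start = base;
  l.reservation_end = base + size;
  // 1) and 5): the code range lies inside the reservation and is
  //            CommitPageSize()-aligned.
  l.range_start = RoundUpTo(base, kCommitPageSize);
  l.range_end = RoundDownTo(l.reservation_end, kCommitPageSize);
  // 2) and 4): the bounded allocator begins after the optional RW header
  //            pages and is chunk-aligned.
  l.allocatable_start = RoundUpTo(l.range_start + rw_header, kChunkAlignment);
  assert(l.range_start >= l.reservation_start);
  assert(l.range_end <= l.reservation_end);
  assert(l.allocatable_start >= l.range_start + rw_header);
  assert(l.allocatable_start % kChunkAlignment == 0);
  return l;
}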
+
+ // Maximum space size in bytes.
+ size_t capacity_;
+
+ // Allocated space size in bytes.
+ std::atomic<size_t> size_;
+ // Allocated executable space size in bytes.
+ std::atomic<size_t> size_executable_;
+
+ // We keep the lowest and highest addresses allocated as a quick way
+ // of determining that pointers are outside the heap. The estimate is
+ // conservative, i.e. not all addresses in 'allocated' space are allocated
+ // to our heap. The range is [lowest, highest[, inclusive on the low end
+ // and exclusive on the high end.
+ std::atomic<Address> lowest_ever_allocated_;
+ std::atomic<Address> highest_ever_allocated_;
+
+ VirtualMemory last_chunk_;
+ Unmapper unmapper_;
+
+ // Data structure to remember allocated executable memory chunks.
+ std::unordered_set<MemoryChunk*> executable_memory_;
+ base::Mutex executable_memory_mutex_;
+
+ friend class heap::TestCodePageAllocatorScope;
+ friend class heap::TestMemoryAllocatorScope;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
+};
+
+extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
+ size_t size, PagedSpace* owner, Executability executable);
+extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
+extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
+
+extern template EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) void MemoryAllocator::
+ Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
+extern template EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) void MemoryAllocator::
+ Free<MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
+extern template EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) void MemoryAllocator::
+ Free<MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
+extern template EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) void MemoryAllocator::
+ Free<MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_MEMORY_ALLOCATOR_H_
diff --git a/chromium/v8/src/heap/memory-chunk.cc b/chromium/v8/src/heap/memory-chunk.cc
index 865e6f1a72b..4e10719fc3c 100644
--- a/chromium/v8/src/heap/memory-chunk.cc
+++ b/chromium/v8/src/heap/memory-chunk.cc
@@ -4,8 +4,13 @@
#include "src/heap/memory-chunk.h"
+#include "src/base/platform/platform.h"
+#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/code-object-registry.h"
+#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/spaces.h"
+#include "src/objects/heap-object.h"
namespace v8 {
namespace internal {
@@ -77,14 +82,6 @@ size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
return AllocatableMemoryInDataPage();
}
-#ifdef THREAD_SANITIZER
-void MemoryChunk::SynchronizedHeapLoad() {
- CHECK(reinterpret_cast<Heap*>(base::Acquire_Load(
- reinterpret_cast<base::AtomicWord*>(&heap_))) != nullptr ||
- InReadOnlySpace());
-}
-#endif
-
void MemoryChunk::InitializationMemoryFence() {
base::SeqCst_MemoryFence();
#ifdef THREAD_SANITIZER
@@ -153,5 +150,299 @@ void MemoryChunk::SetReadAndWritable() {
}
}
+namespace {
+
+PageAllocator::Permission DefaultWritableCodePermissions() {
+ return FLAG_jitless ? PageAllocator::kReadWrite
+ : PageAllocator::kReadWriteExecute;
+}
+
+} // namespace
+
+MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
+ Executability executable) {
+ MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
+
+ base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
+ base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
+ base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr);
+ base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
+ nullptr);
+ base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
+ nullptr);
+ chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
+ chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
+ chunk->progress_bar_ = 0;
+ chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
+ chunk->page_protection_change_mutex_ = new base::Mutex();
+ chunk->write_unprotect_counter_ = 0;
+ chunk->mutex_ = new base::Mutex();
+ chunk->young_generation_bitmap_ = nullptr;
+ chunk->local_tracker_ = nullptr;
+
+ chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
+ 0;
+ chunk->external_backing_store_bytes_
+ [ExternalBackingStoreType::kExternalString] = 0;
+
+ chunk->categories_ = nullptr;
+
+ heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
+ 0);
+ if (executable == EXECUTABLE) {
+ chunk->SetFlag(IS_EXECUTABLE);
+ if (heap->write_protect_code_memory()) {
+ chunk->write_unprotect_counter_ =
+ heap->code_space_memory_modification_scope_depth();
+ } else {
+ size_t page_size = MemoryAllocator::GetCommitPageSize();
+ DCHECK(IsAligned(chunk->area_start(), page_size));
+ size_t area_size =
+ RoundUp(chunk->area_end() - chunk->area_start(), page_size);
+ CHECK(chunk->reservation_.SetPermissions(
+ chunk->area_start(), area_size, DefaultWritableCodePermissions()));
+ }
+ }
+
+ if (chunk->owner()->identity() == CODE_SPACE) {
+ chunk->code_object_registry_ = new CodeObjectRegistry();
+ } else {
+ chunk->code_object_registry_ = nullptr;
+ }
+
+ chunk->possibly_empty_buckets_.Initialize();
+
+ return chunk;
+}
+
+size_t MemoryChunk::CommittedPhysicalMemory() {
+ if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
+ return size();
+ return high_water_mark_;
+}
+
+void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
+ if (is_marking) {
+ SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::INCREMENTAL_MARKING);
+ } else {
+ ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
+ }
+}
+
+void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
+ SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ if (is_marking) {
+ SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ SetFlag(MemoryChunk::INCREMENTAL_MARKING);
+ } else {
+ ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
+ }
+}
+// -----------------------------------------------------------------------------
+// MemoryChunk implementation
+
+void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
+ if (mutex_ != nullptr) {
+ delete mutex_;
+ mutex_ = nullptr;
+ }
+ if (page_protection_change_mutex_ != nullptr) {
+ delete page_protection_change_mutex_;
+ page_protection_change_mutex_ = nullptr;
+ }
+ if (code_object_registry_ != nullptr) {
+ delete code_object_registry_;
+ code_object_registry_ = nullptr;
+ }
+
+ possibly_empty_buckets_.Release();
+ ReleaseSlotSet<OLD_TO_NEW>();
+ ReleaseSweepingSlotSet();
+ ReleaseSlotSet<OLD_TO_OLD>();
+ ReleaseTypedSlotSet<OLD_TO_NEW>();
+ ReleaseTypedSlotSet<OLD_TO_OLD>();
+ ReleaseInvalidatedSlots<OLD_TO_NEW>();
+ ReleaseInvalidatedSlots<OLD_TO_OLD>();
+
+ if (local_tracker_ != nullptr) ReleaseLocalTracker();
+ if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
+
+ if (!IsLargePage()) {
+ Page* page = static_cast<Page*>(this);
+ page->ReleaseFreeListCategories();
+ }
+}
+
+void MemoryChunk::ReleaseAllAllocatedMemory() {
+ ReleaseAllocatedMemoryNeededForWritableChunk();
+ if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
+}
+
+template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
+template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
+
+template <RememberedSetType type>
+SlotSet* MemoryChunk::AllocateSlotSet() {
+ return AllocateSlotSet(&slot_set_[type]);
+}
+
+SlotSet* MemoryChunk::AllocateSweepingSlotSet() {
+ return AllocateSlotSet(&sweeping_slot_set_);
+}
+
+SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
+ SlotSet* new_slot_set = SlotSet::Allocate(buckets());
+ SlotSet* old_slot_set = base::AsAtomicPointer::AcquireRelease_CompareAndSwap(
+ slot_set, nullptr, new_slot_set);
+ if (old_slot_set != nullptr) {
+ SlotSet::Delete(new_slot_set, buckets());
+ new_slot_set = old_slot_set;
+ }
+ DCHECK(new_slot_set);
+ return new_slot_set;
+}
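AllocateSlotSet() above follows the usual lock-free lazy-initialization pattern: allocate a candidate, publish it with a single compare-and-swap, and have the race loser delete its own copy and adopt the winner's. A standalone sketch of that pattern; SlotSetLike and GetOrAllocate are stand-ins for this example, not V8 types.

#include <atomic>

struct SlotSetLike {
  int buckets;
};

SlotSetLike* GetOrAllocate(std::atomic<SlotSetLike*>* slot, int buckets) {
  SlotSetLike* existing = slot->load(std::memory_order_acquire);
  if (existing != nullptr) return existing;
  SlotSetLike* candidate = new SlotSetLike{buckets};
  SlotSetLike* expected = nullptr;
  if (slot->compare_exchange_strong(expected, candidate,
                                    std::memory_order_acq_rel)) {
    return candidate;  // We won the race; our candidate is now published.
  }
  delete candidate;  // Another thread won; free ours and use theirs.
  return expected;   // On failure the CAS stored the winner's pointer here.
}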
+
+template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
+template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
+
+template <RememberedSetType type>
+void MemoryChunk::ReleaseSlotSet() {
+ ReleaseSlotSet(&slot_set_[type]);
+}
+
+void MemoryChunk::ReleaseSweepingSlotSet() {
+ ReleaseSlotSet(&sweeping_slot_set_);
+}
+
+void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
+ if (*slot_set) {
+ SlotSet::Delete(*slot_set, buckets());
+ *slot_set = nullptr;
+ }
+}
+
+template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
+template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
+
+template <RememberedSetType type>
+TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
+ TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
+ TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
+ &typed_slot_set_[type], nullptr, typed_slot_set);
+ if (old_value != nullptr) {
+ delete typed_slot_set;
+ typed_slot_set = old_value;
+ }
+ DCHECK(typed_slot_set);
+ return typed_slot_set;
+}
+
+template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
+template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
+
+template <RememberedSetType type>
+void MemoryChunk::ReleaseTypedSlotSet() {
+ TypedSlotSet* typed_slot_set = typed_slot_set_[type];
+ if (typed_slot_set) {
+ typed_slot_set_[type] = nullptr;
+ delete typed_slot_set;
+ }
+}
+
+template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_NEW>();
+template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_OLD>();
+
+template <RememberedSetType type>
+InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
+ DCHECK_NULL(invalidated_slots_[type]);
+ invalidated_slots_[type] = new InvalidatedSlots();
+ return invalidated_slots_[type];
+}
+
+template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_NEW>();
+template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_OLD>();
+
+template <RememberedSetType type>
+void MemoryChunk::ReleaseInvalidatedSlots() {
+ if (invalidated_slots_[type]) {
+ delete invalidated_slots_[type];
+ invalidated_slots_[type] = nullptr;
+ }
+}
+
+template V8_EXPORT_PRIVATE void
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
+template V8_EXPORT_PRIVATE void
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);
+
+template <RememberedSetType type>
+void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
+ bool skip_slot_recording;
+
+ if (type == OLD_TO_NEW) {
+ skip_slot_recording = InYoungGeneration();
+ } else {
+ skip_slot_recording = ShouldSkipEvacuationSlotRecording();
+ }
+
+ if (skip_slot_recording) {
+ return;
+ }
+
+ if (invalidated_slots<type>() == nullptr) {
+ AllocateInvalidatedSlots<type>();
+ }
+
+ invalidated_slots<type>()->insert(object);
+}
+
+void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
+ if (V8_DISABLE_WRITE_BARRIERS_BOOL) return;
+ if (heap()->incremental_marking()->IsCompacting()) {
+ // We cannot check slot_set_[OLD_TO_OLD] here, since the
+ // concurrent markers might insert slots concurrently.
+ RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
+ }
+
+ if (!FLAG_always_promote_young_mc || slot_set_[OLD_TO_NEW] != nullptr)
+ RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
+}
+
+template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
+ HeapObject object);
+template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(
+ HeapObject object);
+
+template <RememberedSetType type>
+bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
+ if (invalidated_slots<type>() == nullptr) {
+ return false;
+ }
+ return invalidated_slots<type>()->find(object) !=
+ invalidated_slots<type>()->end();
+}
+
+void MemoryChunk::ReleaseLocalTracker() {
+ DCHECK_NOT_NULL(local_tracker_);
+ delete local_tracker_;
+ local_tracker_ = nullptr;
+}
+
+void MemoryChunk::AllocateYoungGenerationBitmap() {
+ DCHECK_NULL(young_generation_bitmap_);
+ young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
+}
+
+void MemoryChunk::ReleaseYoungGenerationBitmap() {
+ DCHECK_NOT_NULL(young_generation_bitmap_);
+ free(young_generation_bitmap_);
+ young_generation_bitmap_ = nullptr;
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/memory-chunk.h b/chromium/v8/src/heap/memory-chunk.h
index 4381a229ab2..3fffbcb7d7b 100644
--- a/chromium/v8/src/heap/memory-chunk.h
+++ b/chromium/v8/src/heap/memory-chunk.h
@@ -5,14 +5,17 @@
#ifndef V8_HEAP_MEMORY_CHUNK_H_
#define V8_HEAP_MEMORY_CHUNK_H_
-#include <set>
-#include <vector>
+#include <atomic>
#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/list.h"
+#include "src/heap/marking.h"
+#include "src/heap/slot-set.h"
namespace v8 {
namespace internal {
@@ -34,36 +37,18 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
};
+enum RememberedSetType {
+ OLD_TO_NEW,
+ OLD_TO_OLD,
+ NUMBER_OF_REMEMBERED_SET_TYPES
+};
+
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk : public BasicMemoryChunk {
public:
- // Use with std data structures.
- struct Hasher {
- size_t operator()(MemoryChunk* const chunk) const {
- return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
- }
- };
-
- using Flags = uintptr_t;
-
- static const Flags kPointersToHereAreInterestingMask =
- POINTERS_TO_HERE_ARE_INTERESTING;
-
- static const Flags kPointersFromHereAreInterestingMask =
- POINTERS_FROM_HERE_ARE_INTERESTING;
-
- static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
-
- static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
-
- static const Flags kIsLargePageMask = LARGE_PAGE;
-
- static const Flags kSkipEvacuationSlotsRecordingMask =
- kEvacuationCandidateMask | kIsInYoungGenerationMask;
-
// |kDone|: The page state when sweeping is complete or sweeping must not be
// performed on that page. Sweeper threads that are done with their work
// will set this value and not touch the page anymore.
@@ -76,17 +61,15 @@ class MemoryChunk : public BasicMemoryChunk {
};
static const size_t kHeaderSize =
- BasicMemoryChunk::kHeaderSize // Parent size.
- + 3 * kSystemPointerSize // VirtualMemory reservation_
- + kSystemPointerSize // Address owner_
- + kSizetSize // size_t progress_bar_
- + kIntptrSize // intptr_t live_byte_count_
- + kSystemPointerSize // SlotSet* sweeping_slot_set_
+ BasicMemoryChunk::kHeaderSize // Parent size.
+ + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ + kSizetSize // size_t progress_bar_
+ + kIntptrSize // intptr_t live_byte_count_
+ + kSystemPointerSize // SlotSet* sweeping_slot_set_
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // InvalidatedSlots* array
- + kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
+ kSystemPointerSize // base::Mutex* mutex_
+ kSystemPointerSize // std::atomic<ConcurrentSweepingState>
// concurrent_sweeping_
@@ -94,8 +77,6 @@ class MemoryChunk : public BasicMemoryChunk {
+ kSystemPointerSize // uintptr_t write_unprotect_counter_
+ kSizetSize * ExternalBackingStoreType::kNumTypes
// std::atomic<size_t> external_backing_store_bytes_
- + kSizetSize // size_t allocated_bytes_
- + kSizetSize // size_t wasted_memory_
+ kSystemPointerSize * 2 // heap::ListNode
+ kSystemPointerSize // FreeListCategory** categories_
+ kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
@@ -104,6 +85,8 @@ class MemoryChunk : public BasicMemoryChunk {
+ kSystemPointerSize // CodeObjectRegistry* code_object_registry_
+ kSystemPointerSize; // PossiblyEmptyBuckets possibly_empty_buckets_
+ static const intptr_t kOldToNewSlotSetOffset = BasicMemoryChunk::kHeaderSize;
+
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
@@ -112,32 +95,30 @@ class MemoryChunk : public BasicMemoryChunk {
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
- DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
- return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
+ return cast(BasicMemoryChunk::FromAddress(a));
}
+
// Only works if the object is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromHeapObject(HeapObject o) {
DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
- return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
+ return cast(BasicMemoryChunk::FromHeapObject(o));
}
- void SetOldGenerationPageFlags(bool is_marking);
- void SetYoungGenerationPageFlags(bool is_marking);
+ static MemoryChunk* cast(BasicMemoryChunk* chunk) {
+ SLOW_DCHECK(!chunk->InReadOnlySpace());
+ return static_cast<MemoryChunk*>(chunk);
+ }
- static inline void UpdateHighWaterMark(Address mark) {
- if (mark == kNullAddress) return;
- // Need to subtract one from the mark because when a chunk is full the
- // top points to the next address after the chunk, which effectively belongs
- // to another chunk. See the comment to Page::FromAllocationAreaAddress.
- MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
- intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
- intptr_t old_mark = chunk->high_water_mark_.load(std::memory_order_relaxed);
- while ((new_mark > old_mark) &&
- !chunk->high_water_mark_.compare_exchange_weak(
- old_mark, new_mark, std::memory_order_acq_rel)) {
- }
+ static const MemoryChunk* cast(const BasicMemoryChunk* chunk) {
+ SLOW_DCHECK(!chunk->InReadOnlySpace());
+ return static_cast<const MemoryChunk*>(chunk);
}
+ size_t buckets() const { return SlotSet::BucketsForSize(size()); }
+
+ void SetOldGenerationPageFlags(bool is_marking);
+ void SetYoungGenerationPageFlags(bool is_marking);
+
static inline void MoveExternalBackingStoreBytes(
ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
size_t amount);
@@ -158,18 +139,6 @@ class MemoryChunk : public BasicMemoryChunk {
return concurrent_sweeping_ == ConcurrentSweepingState::kDone;
}
- inline Heap* heap() const {
- DCHECK_NOT_NULL(heap_);
- return heap_;
- }
-
-#ifdef THREAD_SANITIZER
- // Perform a dummy acquire load to tell TSAN that there is no data race in
- // mark-bit initialization. See MemoryChunk::Initialize for the corresponding
- // release store.
- void SynchronizedHeapLoad();
-#endif
-
template <RememberedSetType type>
bool ContainsSlots() {
return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
@@ -237,8 +206,6 @@ class MemoryChunk : public BasicMemoryChunk {
// Approximate amount of physical memory committed for this chunk.
V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
- Address HighWaterMark() { return address() + high_water_mark_; }
-
size_t ProgressBar() {
DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
return progress_bar_.load(std::memory_order_acquire);
@@ -266,64 +233,8 @@ class MemoryChunk : public BasicMemoryChunk {
return external_backing_store_bytes_[type];
}
- // Some callers rely on the fact that this can operate on both
- // tagged and aligned object addresses.
- inline uint32_t AddressToMarkbitIndex(Address addr) const {
- return static_cast<uint32_t>(addr - this->address()) >> kTaggedSizeLog2;
- }
-
- inline Address MarkbitIndexToAddress(uint32_t index) const {
- return this->address() + (index << kTaggedSizeLog2);
- }
-
- bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
-
- void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
-
- bool CanAllocate() {
- return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
- }
-
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool IsEvacuationCandidate() {
- DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
- IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
- return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
- }
-
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool ShouldSkipEvacuationSlotRecording() {
- uintptr_t flags = GetFlags<access_mode>();
- return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
- ((flags & COMPACTION_WAS_ABORTED) == 0);
- }
-
- Executability executable() {
- return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
- }
-
- bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
- bool IsToPage() const { return IsFlagSet(TO_PAGE); }
- bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
- bool InYoungGeneration() const {
- return (GetFlags() & kIsInYoungGenerationMask) != 0;
- }
- bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
- bool InNewLargeObjectSpace() const {
- return InYoungGeneration() && IsLargePage();
- }
- bool InOldSpace() const;
- V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
-
- // Gets the chunk's owner or null if the space has been detached.
- Space* owner() const { return owner_; }
-
- void set_owner(Space* space) { owner_ = space; }
-
- bool IsWritable() const {
- // If this is a read-only space chunk but heap_ is non-null, it has not yet
- // been sealed and can be written to.
- return !InReadOnlySpace() || heap_ != nullptr;
+ Space* owner() const {
+ return reinterpret_cast<Space*>(BasicMemoryChunk::owner());
}
// Gets the chunk's allocation space, potentially dealing with a null owner_
@@ -347,6 +258,7 @@ class MemoryChunk : public BasicMemoryChunk {
}
heap::ListNode<MemoryChunk>& list_node() { return list_node_; }
+ const heap::ListNode<MemoryChunk>& list_node() const { return list_node_; }
CodeObjectRegistry* GetCodeObjectRegistry() { return code_object_registry_; }
@@ -359,10 +271,8 @@ class MemoryChunk : public BasicMemoryChunk {
void ReleaseAllocatedMemoryNeededForWritableChunk();
protected:
- static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
- Address area_start, Address area_end,
- Executability executable, Space* owner,
- VirtualMemory reservation);
+ static MemoryChunk* Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
+ Executability executable);
// Release all memory allocated by the chunk. Should be called when memory
// chunk is about to be freed.
@@ -373,30 +283,22 @@ class MemoryChunk : public BasicMemoryChunk {
void DecrementWriteUnprotectCounterAndMaybeSetPermissions(
PageAllocator::Permission permission);
- VirtualMemory* reserved_memory() { return &reservation_; }
-
- template <AccessMode mode>
- ConcurrentBitmap<mode>* marking_bitmap() const {
- return reinterpret_cast<ConcurrentBitmap<mode>*>(marking_bitmap_);
- }
-
template <AccessMode mode>
ConcurrentBitmap<mode>* young_generation_bitmap() const {
return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
}
- // If the chunk needs to remember its memory reservation, it is stored here.
- VirtualMemory reservation_;
-
- // The space owning this memory chunk.
- std::atomic<Space*> owner_;
+ // A single slot set for small pages (of size kPageSize) or an array of slot
+ // sets for large pages. In the latter case, the number of entries in the
+ // array is ceil(size() / kPageSize).
+ SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
// Used by the incremental marker to keep track of the scanning progress in
// large objects that have a progress bar and are scanned in increments.
std::atomic<size_t> progress_bar_;
// Count of bytes marked black on page.
- intptr_t live_byte_count_;
+ std::atomic<intptr_t> live_byte_count_;
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
@@ -405,10 +307,6 @@ class MemoryChunk : public BasicMemoryChunk {
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
- // Assuming the initial allocation on a page is sequential,
- // count highest number of bytes ever allocated on the page.
- std::atomic<intptr_t> high_water_mark_;
-
base::Mutex* mutex_;
std::atomic<ConcurrentSweepingState> concurrent_sweeping_;
@@ -429,16 +327,9 @@ class MemoryChunk : public BasicMemoryChunk {
// counter.
uintptr_t write_unprotect_counter_;
- // Byte allocated on the page, which includes all objects on the page
- // and the linear allocation area.
- size_t allocated_bytes_;
-
// Tracks off-heap memory used by this memory chunk.
std::atomic<size_t> external_backing_store_bytes_[kNumTypes];
- // Freed memory that was not added to the free list.
- size_t wasted_memory_;
-
heap::ListNode<MemoryChunk> list_node_;
FreeListCategory** categories_;
@@ -453,8 +344,6 @@ class MemoryChunk : public BasicMemoryChunk {
PossiblyEmptyBuckets possibly_empty_buckets_;
private:
- void InitializeReservedMemory() { reservation_.Reset(); }
-
friend class ConcurrentMarkingState;
friend class MajorMarkingState;
friend class MajorAtomicMarkingState;
diff --git a/chromium/v8/src/heap/memory-measurement.cc b/chromium/v8/src/heap/memory-measurement.cc
index 2a59ac5a4d1..e3661da45ab 100644
--- a/chromium/v8/src/heap/memory-measurement.cc
+++ b/chromium/v8/src/heap/memory-measurement.cc
@@ -95,7 +95,7 @@ class V8_EXPORT_PRIVATE MeasureMemoryDelegate
public:
MeasureMemoryDelegate(Isolate* isolate, Handle<NativeContext> context,
Handle<JSPromise> promise, v8::MeasureMemoryMode mode);
- ~MeasureMemoryDelegate();
+ ~MeasureMemoryDelegate() override;
// v8::MeasureMemoryDelegate overrides:
bool ShouldMeasure(v8::Local<v8::Context> context) override;
@@ -165,7 +165,12 @@ void MeasureMemoryDelegate::MeasurementComplete(
JSPromise::Resolve(promise_, result).ToHandleChecked();
}
-MemoryMeasurement::MemoryMeasurement(Isolate* isolate) : isolate_(isolate) {}
+MemoryMeasurement::MemoryMeasurement(Isolate* isolate)
+ : isolate_(isolate), random_number_generator_() {
+ if (FLAG_random_seed) {
+ random_number_generator_.SetSeed(FLAG_random_seed);
+ }
+}
bool MemoryMeasurement::EnqueueRequest(
std::unique_ptr<v8::MeasureMemoryDelegate> delegate,
@@ -286,10 +291,15 @@ void MemoryMeasurement::ScheduleGCTask(v8::MeasureMemoryExecution execution) {
if (execution == v8::MeasureMemoryExecution::kEager) {
taskrunner->PostTask(std::move(task));
} else {
- taskrunner->PostDelayedTask(std::move(task), kGCTaskDelayInSeconds);
+ taskrunner->PostDelayedTask(std::move(task), NextGCTaskDelayInSeconds());
}
}
+int MemoryMeasurement::NextGCTaskDelayInSeconds() {
+ return kGCTaskDelayInSeconds +
+ random_number_generator_.NextInt(kGCTaskDelayInSeconds);
+}
+
void MemoryMeasurement::ReportResults() {
while (!done_.empty()) {
Request request = std::move(done_.front());
diff --git a/chromium/v8/src/heap/memory-measurement.h b/chromium/v8/src/heap/memory-measurement.h
index d72dd1eba97..e71bdc1cfe8 100644
--- a/chromium/v8/src/heap/memory-measurement.h
+++ b/chromium/v8/src/heap/memory-measurement.h
@@ -9,6 +9,7 @@
#include <unordered_map>
#include "src/base/platform/elapsed-timer.h"
+#include "src/base/utils/random-number-generator.h"
#include "src/common/globals.h"
#include "src/objects/contexts.h"
#include "src/objects/map.h"
@@ -49,6 +50,7 @@ class MemoryMeasurement {
bool IsGCTaskPending(v8::MeasureMemoryExecution execution);
void SetGCTaskPending(v8::MeasureMemoryExecution execution);
void SetGCTaskDone(v8::MeasureMemoryExecution execution);
+ int NextGCTaskDelayInSeconds();
std::list<Request> received_;
std::list<Request> processing_;
@@ -57,6 +59,7 @@ class MemoryMeasurement {
bool reporting_task_pending_ = false;
bool delayed_gc_task_pending_ = false;
bool eager_gc_task_pending_ = false;
+ base::RandomNumberGenerator random_number_generator_;
};
// Infers the native context for some of the heap objects.
diff --git a/chromium/v8/src/heap/new-spaces-inl.h b/chromium/v8/src/heap/new-spaces-inl.h
new file mode 100644
index 00000000000..8020c0dfddb
--- /dev/null
+++ b/chromium/v8/src/heap/new-spaces-inl.h
@@ -0,0 +1,179 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_NEW_SPACES_INL_H_
+#define V8_HEAP_NEW_SPACES_INL_H_
+
+#include "src/heap/new-spaces.h"
+#include "src/heap/spaces-inl.h"
+#include "src/objects/tagged-impl.h"
+#include "src/sanitizer/msan.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// SemiSpace
+
+bool SemiSpace::Contains(HeapObject o) const {
+ BasicMemoryChunk* memory_chunk = BasicMemoryChunk::FromHeapObject(o);
+ if (memory_chunk->IsLargePage()) return false;
+ return id_ == kToSpace ? memory_chunk->IsToPage()
+ : memory_chunk->IsFromPage();
+}
+
+bool SemiSpace::Contains(Object o) const {
+ return o.IsHeapObject() && Contains(HeapObject::cast(o));
+}
+
+bool SemiSpace::ContainsSlow(Address a) const {
+ for (const Page* p : *this) {
+ if (p == BasicMemoryChunk::FromAddress(a)) return true;
+ }
+ return false;
+}
+
+// --------------------------------------------------------------------------
+// NewSpace
+
+bool NewSpace::Contains(Object o) const {
+ return o.IsHeapObject() && Contains(HeapObject::cast(o));
+}
+
+bool NewSpace::Contains(HeapObject o) const {
+ return BasicMemoryChunk::FromHeapObject(o)->InNewSpace();
+}
+
+bool NewSpace::ContainsSlow(Address a) const {
+ return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
+}
+
+bool NewSpace::ToSpaceContainsSlow(Address a) const {
+ return to_space_.ContainsSlow(a);
+}
+
+bool NewSpace::ToSpaceContains(Object o) const { return to_space_.Contains(o); }
+bool NewSpace::FromSpaceContains(Object o) const {
+ return from_space_.Contains(o);
+}
+
+// -----------------------------------------------------------------------------
+// SemiSpaceObjectIterator
+
+HeapObject SemiSpaceObjectIterator::Next() {
+ while (current_ != limit_) {
+ if (Page::IsAlignedToPageSize(current_)) {
+ Page* page = Page::FromAllocationAreaAddress(current_);
+ page = page->next_page();
+ DCHECK(page);
+ current_ = page->area_start();
+ if (current_ == limit_) return HeapObject();
+ }
+ HeapObject object = HeapObject::FromAddress(current_);
+ current_ += object.Size();
+ if (!object.IsFreeSpaceOrFiller()) {
+ return object;
+ }
+ }
+ return HeapObject();
+}
+
+// -----------------------------------------------------------------------------
+// NewSpace
+
+AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ Address top = allocation_info_.top();
+ int filler_size = Heap::GetFillToAlign(top, alignment);
+ int aligned_size_in_bytes = size_in_bytes + filler_size;
+
+ if (allocation_info_.limit() - top <
+ static_cast<uintptr_t>(aligned_size_in_bytes)) {
+ // See if we can create room.
+ if (!EnsureAllocation(size_in_bytes, alignment)) {
+ return AllocationResult::Retry();
+ }
+
+ top = allocation_info_.top();
+ filler_size = Heap::GetFillToAlign(top, alignment);
+ aligned_size_in_bytes = size_in_bytes + filler_size;
+ }
+
+ HeapObject obj = HeapObject::FromAddress(top);
+ allocation_info_.set_top(top + aligned_size_in_bytes);
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+ if (filler_size > 0) {
+ obj = Heap::PrecedeWithFiller(ReadOnlyRoots(heap()), obj, filler_size);
+ }
+
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
+ return obj;
+}
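The aligned fast path above boils down to: compute the filler needed to reach the requested alignment, fall back to the slow path if the linear area is too small, otherwise bump the top and return the aligned address. A simplified, self-contained sketch of that step; LinearArea, FillToAlign, and kNoAddress are assumptions made for the example.

#include <cstdint>

using Address = std::uintptr_t;
constexpr Address kNoAddress = 0;

struct LinearArea {
  Address top;
  Address limit;
};

// |alignment| must be a power of two.
inline int FillToAlign(Address addr, int alignment) {
  int misalignment = static_cast<int>(addr & (alignment - 1));
  return misalignment == 0 ? 0 : alignment - misalignment;
}

// Returns the aligned object address, or kNoAddress when the caller has to
// take the slow path (add a fresh page / grow the space) and retry.
Address AllocateRawAligned(LinearArea* area, int size_in_bytes, int alignment) {
  int filler_size = FillToAlign(area->top, alignment);
  int aligned_size = size_in_bytes + filler_size;
  if (area->limit - area->top < static_cast<Address>(aligned_size)) {
    return kNoAddress;  // Not enough room left in the linear area.
  }
  Address object_address = area->top + filler_size;  // Filler precedes object.
  area->top += aligned_size;
  return object_address;
}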
+
+AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
+ AllocationOrigin origin) {
+ Address top = allocation_info_.top();
+ if (allocation_info_.limit() < top + size_in_bytes) {
+ // See if we can create room.
+ if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
+ return AllocationResult::Retry();
+ }
+
+ top = allocation_info_.top();
+ }
+
+ HeapObject obj = HeapObject::FromAddress(top);
+ allocation_info_.set_top(top + size_in_bytes);
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
+ return obj;
+}
+
+AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ if (top() < top_on_previous_step_) {
+ // Generated code decreased the top() pointer to do folded allocations
+ DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
+ Page::FromAllocationAreaAddress(top_on_previous_step_));
+ top_on_previous_step_ = top();
+ }
+#ifdef V8_HOST_ARCH_32_BIT
+ return alignment != kWordAligned
+ ? AllocateRawAligned(size_in_bytes, alignment, origin)
+ : AllocateRawUnaligned(size_in_bytes, origin);
+#else
+#ifdef V8_COMPRESS_POINTERS
+ // TODO(ishell, v8:8875): Consider using aligned allocations once the
+ // allocation alignment inconsistency is fixed. For now we keep using
+ // unaligned access since both x64 and arm64 architectures (where pointer
+ // compression is supported) allow unaligned access to doubles and full words.
+#endif // V8_COMPRESS_POINTERS
+ return AllocateRawUnaligned(size_in_bytes, origin);
+#endif
+}
+
+V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
+ int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
+ base::MutexGuard guard(&mutex_);
+ return AllocateRaw(size_in_bytes, alignment, origin);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_NEW_SPACES_INL_H_
diff --git a/chromium/v8/src/heap/new-spaces.cc b/chromium/v8/src/heap/new-spaces.cc
new file mode 100644
index 00000000000..4b4b04a1111
--- /dev/null
+++ b/chromium/v8/src/heap/new-spaces.cc
@@ -0,0 +1,653 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/new-spaces.h"
+
+#include "src/heap/array-buffer-sweeper.h"
+#include "src/heap/array-buffer-tracker-inl.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/memory-allocator.h"
+#include "src/heap/paged-spaces.h"
+#include "src/heap/spaces-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
+ bool in_to_space = (id() != kFromSpace);
+ chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
+ Page* page = static_cast<Page*>(chunk);
+ page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+ page->AllocateLocalTracker();
+ page->list_node().Initialize();
+#ifdef ENABLE_MINOR_MC
+ if (FLAG_minor_mc) {
+ page->AllocateYoungGenerationBitmap();
+ heap()
+ ->minor_mark_compact_collector()
+ ->non_atomic_marking_state()
+ ->ClearLiveness(page);
+ }
+#endif // ENABLE_MINOR_MC
+ page->InitializationMemoryFence();
+ return page;
+}
+
+bool SemiSpace::EnsureCurrentCapacity() {
+ if (is_committed()) {
+ const int expected_pages =
+ static_cast<int>(current_capacity_ / Page::kPageSize);
+ MemoryChunk* current_page = first_page();
+ int actual_pages = 0;
+
+ // First iterate through the page list, stopping after |expected_pages|
+ // pages if that many exist.
+ while (current_page != nullptr && actual_pages < expected_pages) {
+ actual_pages++;
+ current_page = current_page->list_node().next();
+ }
+
+ // Free all overallocated pages which are behind current_page.
+ while (current_page) {
+ MemoryChunk* next_current = current_page->list_node().next();
+ memory_chunk_list_.Remove(current_page);
+ // Clear new space flags to avoid this page being treated as a new
+ // space page that is potentially being swept.
+ current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
+ current_page);
+ current_page = next_current;
+ }
+
+ // Add more pages if we have less than expected_pages.
+ IncrementalMarking::NonAtomicMarkingState* marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+ while (actual_pages < expected_pages) {
+ actual_pages++;
+ current_page =
+ heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+ NOT_EXECUTABLE);
+ if (current_page == nullptr) return false;
+ DCHECK_NOT_NULL(current_page);
+ memory_chunk_list_.PushBack(current_page);
+ marking_state->ClearLiveness(current_page);
+ current_page->SetFlags(first_page()->GetFlags(),
+ static_cast<uintptr_t>(Page::kCopyAllFlags));
+ heap()->CreateFillerObjectAt(current_page->area_start(),
+ static_cast<int>(current_page->area_size()),
+ ClearRecordedSlots::kNo);
+ }
+ }
+ return true;
+}
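EnsureCurrentCapacity() above walks the page list up to the expected count, frees any surplus pages, and then allocates replacements until the count matches. A toy sketch of that control flow, with dummy pages instead of MemoryChunks and without the flag/liveness bookkeeping.

#include <list>
#include <new>

struct DummyPage {
  int id;
};

bool EnsureCapacity(std::list<DummyPage*>* pages, int expected) {
  auto it = pages->begin();
  int actual = 0;
  // Walk at most |expected| pages from the front of the list.
  while (it != pages->end() && actual < expected) {
    ++actual;
    ++it;
  }
  // Free every page past the expected count.
  while (it != pages->end()) {
    delete *it;
    it = pages->erase(it);
  }
  // Allocate pages until the expected count is reached; allocation may fail.
  while (actual < expected) {
    DummyPage* page = new (std::nothrow) DummyPage{actual};
    if (page == nullptr) return false;  // Mirrors the nullptr check above.
    pages->push_back(page);
    ++actual;
  }
  return true;
}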
+
+// -----------------------------------------------------------------------------
+// SemiSpace implementation
+
+void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
+ DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
+ minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
+ current_capacity_ = minimum_capacity_;
+ maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
+ committed_ = false;
+}
+
+void SemiSpace::TearDown() {
+ // Properly uncommit memory to keep the allocator counters in sync.
+ if (is_committed()) {
+ Uncommit();
+ }
+ current_capacity_ = maximum_capacity_ = 0;
+}
+
+bool SemiSpace::Commit() {
+ DCHECK(!is_committed());
+ const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
+ for (int pages_added = 0; pages_added < num_pages; pages_added++) {
+ // Pages in the new spaces can be moved to the old space by the full
+ // collector. Therefore, they must be initialized with the same FreeList as
+ // old pages.
+ Page* new_page =
+ heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+ NOT_EXECUTABLE);
+ if (new_page == nullptr) {
+ if (pages_added) RewindPages(pages_added);
+ return false;
+ }
+ memory_chunk_list_.PushBack(new_page);
+ }
+ Reset();
+ AccountCommitted(current_capacity_);
+ if (age_mark_ == kNullAddress) {
+ age_mark_ = first_page()->area_start();
+ }
+ committed_ = true;
+ return true;
+}
+
+bool SemiSpace::Uncommit() {
+ DCHECK(is_committed());
+ while (!memory_chunk_list_.Empty()) {
+ MemoryChunk* chunk = memory_chunk_list_.front();
+ memory_chunk_list_.Remove(chunk);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
+ }
+ current_page_ = nullptr;
+ AccountUncommitted(current_capacity_);
+ committed_ = false;
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+ return true;
+}
+
+size_t SemiSpace::CommittedPhysicalMemory() {
+ if (!is_committed()) return 0;
+ size_t size = 0;
+ for (Page* p : *this) {
+ size += p->CommittedPhysicalMemory();
+ }
+ return size;
+}
+
+bool SemiSpace::GrowTo(size_t new_capacity) {
+ if (!is_committed()) {
+ if (!Commit()) return false;
+ }
+ DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
+ DCHECK_LE(new_capacity, maximum_capacity_);
+ DCHECK_GT(new_capacity, current_capacity_);
+ const size_t delta = new_capacity - current_capacity_;
+ DCHECK(IsAligned(delta, AllocatePageSize()));
+ const int delta_pages = static_cast<int>(delta / Page::kPageSize);
+ DCHECK(last_page());
+ IncrementalMarking::NonAtomicMarkingState* marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+ for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
+ Page* new_page =
+ heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+ NOT_EXECUTABLE);
+ if (new_page == nullptr) {
+ if (pages_added) RewindPages(pages_added);
+ return false;
+ }
+ memory_chunk_list_.PushBack(new_page);
+ marking_state->ClearLiveness(new_page);
+ // Duplicate the flags that were set on the old page.
+ new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
+ }
+ AccountCommitted(delta);
+ current_capacity_ = new_capacity;
+ return true;
+}
+
+void SemiSpace::RewindPages(int num_pages) {
+ DCHECK_GT(num_pages, 0);
+ DCHECK(last_page());
+ while (num_pages > 0) {
+ MemoryChunk* last = last_page();
+ memory_chunk_list_.Remove(last);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
+ num_pages--;
+ }
+}
+
+bool SemiSpace::ShrinkTo(size_t new_capacity) {
+ DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
+ DCHECK_GE(new_capacity, minimum_capacity_);
+ DCHECK_LT(new_capacity, current_capacity_);
+ if (is_committed()) {
+ const size_t delta = current_capacity_ - new_capacity;
+ DCHECK(IsAligned(delta, Page::kPageSize));
+ int delta_pages = static_cast<int>(delta / Page::kPageSize);
+ RewindPages(delta_pages);
+ AccountUncommitted(delta);
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+ }
+ current_capacity_ = new_capacity;
+ return true;
+}
+
+void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
+ for (Page* page : *this) {
+ page->set_owner(this);
+ page->SetFlags(flags, mask);
+ if (id_ == kToSpace) {
+ page->ClearFlag(MemoryChunk::FROM_PAGE);
+ page->SetFlag(MemoryChunk::TO_PAGE);
+ page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+ heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
+ page, 0);
+ } else {
+ page->SetFlag(MemoryChunk::FROM_PAGE);
+ page->ClearFlag(MemoryChunk::TO_PAGE);
+ }
+ DCHECK(page->InYoungGeneration());
+ }
+}
+
+void SemiSpace::Reset() {
+ DCHECK(first_page());
+ DCHECK(last_page());
+ current_page_ = first_page();
+ pages_used_ = 0;
+}
+
+void SemiSpace::RemovePage(Page* page) {
+ if (current_page_ == page) {
+ if (page->prev_page()) {
+ current_page_ = page->prev_page();
+ }
+ }
+ memory_chunk_list_.Remove(page);
+ for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
+ }
+}
+
+void SemiSpace::PrependPage(Page* page) {
+ page->SetFlags(current_page()->GetFlags(),
+ static_cast<uintptr_t>(Page::kCopyAllFlags));
+ page->set_owner(this);
+ memory_chunk_list_.PushFront(page);
+ pages_used_++;
+ for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
+ }
+}
+
+void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
+ // We won't be swapping semispaces without data in them.
+ DCHECK(from->first_page());
+ DCHECK(to->first_page());
+
+ intptr_t saved_to_space_flags = to->current_page()->GetFlags();
+
+ // We swap all properties but id_.
+ std::swap(from->current_capacity_, to->current_capacity_);
+ std::swap(from->maximum_capacity_, to->maximum_capacity_);
+ std::swap(from->minimum_capacity_, to->minimum_capacity_);
+ std::swap(from->age_mark_, to->age_mark_);
+ std::swap(from->committed_, to->committed_);
+ std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
+ std::swap(from->current_page_, to->current_page_);
+ std::swap(from->external_backing_store_bytes_,
+ to->external_backing_store_bytes_);
+
+ to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
+ from->FixPagesFlags(0, 0);
+}
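Swap() above exchanges every field except the semispace identity and then re-tags the pages so their flags match the new owner. A reduced sketch of the idea; FakeSemiSpace, FakePage, and PageTag are illustrative stand-ins for this example.

#include <cstddef>
#include <utility>
#include <vector>

enum class PageTag { kFromPage, kToPage };

struct FakePage {
  PageTag tag;
};

struct FakeSemiSpace {
  bool is_to_space;               // The id_ analogue; deliberately not swapped.
  size_t capacity = 0;
  std::vector<FakePage*> pages;

  void FixPageTags() {
    for (FakePage* p : pages) {
      p->tag = is_to_space ? PageTag::kToPage : PageTag::kFromPage;
    }
  }
};

void SwapSemiSpaces(FakeSemiSpace* from, FakeSemiSpace* to) {
  // Swap contents but not identity, mirroring SemiSpace::Swap above.
  std::swap(from->capacity, to->capacity);
  std::swap(from->pages, to->pages);
  // Pages now owned by |to| must carry to-space tags and pages now owned by
  // |from| must carry from-space tags.
  to->FixPageTags();
  from->FixPageTags();
}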
+
+void SemiSpace::set_age_mark(Address mark) {
+ DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
+ age_mark_ = mark;
+ // Mark all pages up to the one containing mark.
+ for (Page* p : PageRange(space_start(), mark)) {
+ p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+ }
+}
+
+std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator(Heap* heap) {
+ // Use the NewSpace::NewObjectIterator to iterate the ToSpace.
+ UNREACHABLE();
+}
+
+#ifdef DEBUG
+void SemiSpace::Print() {}
+#endif
+
+#ifdef VERIFY_HEAP
+void SemiSpace::Verify() {
+ bool is_from_space = (id_ == kFromSpace);
+ size_t external_backing_store_bytes[kNumTypes];
+
+ for (int i = 0; i < kNumTypes; i++) {
+ external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
+ for (Page* page : *this) {
+ CHECK_EQ(page->owner(), this);
+ CHECK(page->InNewSpace());
+ CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
+ : MemoryChunk::TO_PAGE));
+ CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::TO_PAGE
+ : MemoryChunk::FROM_PAGE));
+ CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
+ if (!is_from_space) {
+ // The pointers-from-here-are-interesting flag isn't updated dynamically
+ // on from-space pages, so it might be out of sync with the marking state.
+ if (page->heap()->incremental_marking()->IsMarking()) {
+ CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+ } else {
+ CHECK(
+ !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+ }
+ }
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
+ }
+
+ CHECK_IMPLIES(page->list_node().prev(),
+ page->list_node().prev()->list_node().next() == page);
+ }
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
+ }
+}
+#endif
+
+#ifdef DEBUG
+void SemiSpace::AssertValidRange(Address start, Address end) {
+ // Addresses belong to the same semi-space.
+ Page* page = Page::FromAllocationAreaAddress(start);
+ Page* end_page = Page::FromAllocationAreaAddress(end);
+ SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
+ DCHECK_EQ(space, end_page->owner());
+ // Start address is before end address, either on same page,
+ // or end address is on a later page in the linked list of
+ // semi-space pages.
+ if (page == end_page) {
+ DCHECK_LE(start, end);
+ } else {
+ while (page != end_page) {
+ page = page->next_page();
+ }
+ DCHECK(page);
+ }
+}
+#endif
+
+// -----------------------------------------------------------------------------
+// SemiSpaceObjectIterator implementation.
+
+SemiSpaceObjectIterator::SemiSpaceObjectIterator(NewSpace* space) {
+ Initialize(space->first_allocatable_address(), space->top());
+}
+
+void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
+ SemiSpace::AssertValidRange(start, end);
+ current_ = start;
+ limit_ = end;
+}
+
+size_t NewSpace::CommittedPhysicalMemory() {
+ if (!base::OS::HasLazyCommits()) return CommittedMemory();
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ size_t size = to_space_.CommittedPhysicalMemory();
+ if (from_space_.is_committed()) {
+ size += from_space_.CommittedPhysicalMemory();
+ }
+ return size;
+}
+
+// -----------------------------------------------------------------------------
+// NewSpace implementation
+
+NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
+ size_t initial_semispace_capacity,
+ size_t max_semispace_capacity)
+ : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList()),
+ to_space_(heap, kToSpace),
+ from_space_(heap, kFromSpace) {
+ DCHECK(initial_semispace_capacity <= max_semispace_capacity);
+
+ to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
+ from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
+ if (!to_space_.Commit()) {
+ V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
+ }
+ DCHECK(!from_space_.is_committed()); // No need to use memory yet.
+ ResetLinearAllocationArea();
+}
+
+void NewSpace::TearDown() {
+ allocation_info_.Reset(kNullAddress, kNullAddress);
+
+ to_space_.TearDown();
+ from_space_.TearDown();
+}
+
+void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
+
+void NewSpace::Grow() {
+ // Double the semispace size but only up to maximum capacity.
+ DCHECK(TotalCapacity() < MaximumCapacity());
+ size_t new_capacity =
+ Min(MaximumCapacity(),
+ static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
+ if (to_space_.GrowTo(new_capacity)) {
+ // Only grow from space if we managed to grow to-space.
+ if (!from_space_.GrowTo(new_capacity)) {
+ // If we managed to grow to-space but couldn't grow from-space,
+ // attempt to shrink to-space.
+ if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
+ // We are in an inconsistent state because we could not
+ // commit/uncommit memory from new space.
+ FATAL("inconsistent state");
+ }
+ }
+ }
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+void NewSpace::Shrink() {
+ size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
+ size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
+ if (rounded_new_capacity < TotalCapacity() &&
+ to_space_.ShrinkTo(rounded_new_capacity)) {
+ // Only shrink from-space if we managed to shrink to-space.
+ from_space_.Reset();
+ if (!from_space_.ShrinkTo(rounded_new_capacity)) {
+ // If we managed to shrink to-space but couldn't shrink from
+ // space, attempt to grow to-space again.
+ if (!to_space_.GrowTo(from_space_.current_capacity())) {
+ // We are in an inconsistent state because we could not
+ // commit/uncommit memory from new space.
+ FATAL("inconsistent state");
+ }
+ }
+ }
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+bool NewSpace::Rebalance() {
+ // Order here is important to make use of the page pool.
+ return to_space_.EnsureCurrentCapacity() &&
+ from_space_.EnsureCurrentCapacity();
+}
+
+void NewSpace::UpdateLinearAllocationArea() {
+ // Make sure there are no unaccounted allocations.
+ DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
+
+ Address new_top = to_space_.page_low();
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.Reset(new_top, to_space_.page_high());
+ // The order of the following two stores is important.
+ // See the corresponding loads in ConcurrentMarking::Run.
+ original_limit_.store(limit(), std::memory_order_relaxed);
+ original_top_.store(top(), std::memory_order_release);
+ StartNextInlineAllocationStep();
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+void NewSpace::ResetLinearAllocationArea() {
+ // Do a step to account for memory allocated so far before resetting.
+ InlineAllocationStep(top(), top(), kNullAddress, 0);
+ to_space_.Reset();
+ UpdateLinearAllocationArea();
+ // Clear all mark-bits in the to-space.
+ IncrementalMarking::NonAtomicMarkingState* marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+ for (Page* p : to_space_) {
+ marking_state->ClearLiveness(p);
+ // Concurrent marking may have local live bytes for this page.
+ heap()->concurrent_marking()->ClearMemoryChunkData(p);
+ }
+}
+
+void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
+ Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
+ allocation_info_.set_limit(new_limit);
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+bool NewSpace::AddFreshPage() {
+ Address top = allocation_info_.top();
+ DCHECK(!OldSpace::IsAtPageStart(top));
+
+ // Do a step to account for memory allocated on previous page.
+ InlineAllocationStep(top, top, kNullAddress, 0);
+
+ if (!to_space_.AdvancePage()) {
+ // No more pages left to advance.
+ return false;
+ }
+
+ // Clear remainder of current page.
+ Address limit = Page::FromAllocationAreaAddress(top)->area_end();
+ int remaining_in_page = static_cast<int>(limit - top);
+ heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
+ UpdateLinearAllocationArea();
+
+ return true;
+}
+
+bool NewSpace::AddFreshPageSynchronized() {
+ base::MutexGuard guard(&mutex_);
+ return AddFreshPage();
+}
+
+bool NewSpace::EnsureAllocation(int size_in_bytes,
+ AllocationAlignment alignment) {
+ Address old_top = allocation_info_.top();
+ Address high = to_space_.page_high();
+ int filler_size = Heap::GetFillToAlign(old_top, alignment);
+ int aligned_size_in_bytes = size_in_bytes + filler_size;
+
+ if (old_top + aligned_size_in_bytes > high) {
+ // Not enough room in the page, try to allocate a new one.
+ if (!AddFreshPage()) {
+ return false;
+ }
+
+ old_top = allocation_info_.top();
+ high = to_space_.page_high();
+ filler_size = Heap::GetFillToAlign(old_top, alignment);
+ }
+
+ DCHECK(old_top + aligned_size_in_bytes <= high);
+
+ if (allocation_info_.limit() < high) {
+ // Either the limit has been lowered because linear allocation was disabled,
+ // because incremental marking wants a chance to do a step, or because the
+ // idle scavenge job wants a chance to post a task. Set the new limit
+ // accordingly.
+ Address new_top = old_top + aligned_size_in_bytes;
+ Address soon_object = old_top + filler_size;
+ InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
+ UpdateInlineAllocationLimit(aligned_size_in_bytes);
+ }
+ return true;
+}
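As the comment above notes, the limit can sit below the real end of the page so that allocation observers (incremental marking, the idle scavenge job) get a callback after a bounded number of bytes. A hedged sketch of how such a limit might be computed; ComputeLimit here is a simplified stand-in for the example, not V8's implementation.

#include <algorithm>
#include <cstddef>
#include <cstdint>

using Address = std::uintptr_t;

Address ComputeLimit(Address top, Address page_high, size_t min_size,
                     size_t next_step_size, bool observers_active) {
  if (!observers_active) return page_high;  // No reason to interrupt early.
  // Always leave room for at least |min_size| bytes, but otherwise clamp the
  // limit at the next observer step so the observer gets a callback.
  Address stepped_limit = top + std::max(min_size, next_step_size);
  return std::min(stepped_limit, page_high);
}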
+
+std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
+ return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
+}
+
+#ifdef VERIFY_HEAP
+// We do not use the SemiSpaceObjectIterator because verification doesn't assume
+// that it works (it depends on the invariants we are checking).
+void NewSpace::Verify(Isolate* isolate) {
+ // The allocation pointer should be in the space or at the very end.
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+ // There should be objects packed in from the low address up to the
+ // allocation pointer.
+ Address current = to_space_.first_page()->area_start();
+ CHECK_EQ(current, to_space_.space_start());
+
+ size_t external_space_bytes[kNumTypes];
+ for (int i = 0; i < kNumTypes; i++) {
+ external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
+ while (current != top()) {
+ if (!Page::IsAlignedToPageSize(current)) {
+ // The allocation pointer should not be in the middle of an object.
+ CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
+ current < top());
+
+ HeapObject object = HeapObject::FromAddress(current);
+
+ // The first word should be a map, and we expect all map pointers to
+ // be in map space or read-only space.
+ Map map = object.map();
+ CHECK(map.IsMap());
+ CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
+
+ // The object should not be code or a map.
+ CHECK(!object.IsMap());
+ CHECK(!object.IsAbstractCode());
+
+ // The object itself should look OK.
+ object.ObjectVerify(isolate);
+
+ // All the interior pointers should be contained in the heap.
+ VerifyPointersVisitor visitor(heap());
+ int size = object.Size();
+ object.IterateBody(map, size, &visitor);
+
+ if (object.IsExternalString()) {
+ ExternalString external_string = ExternalString::cast(object);
+ size_t size = external_string.ExternalPayloadSize();
+ external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
+ } else if (object.IsJSArrayBuffer()) {
+ JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
+ if (ArrayBufferTracker::IsTracked(array_buffer)) {
+ size_t size = ArrayBufferTracker::Lookup(heap(), array_buffer)
+ ->PerIsolateAccountingLength();
+ external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
+ }
+ }
+
+ current += size;
+ } else {
+ // At end of page, switch to next page.
+ Page* page = Page::FromAllocationAreaAddress(current)->next_page();
+ current = page->area_start();
+ }
+ }
+
+ for (int i = 0; i < kNumTypes; i++) {
+ if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
+ i == ExternalBackingStoreType::kArrayBuffer)
+ continue;
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
+ }
+
+ if (V8_ARRAY_BUFFER_EXTENSION_BOOL) {
+ size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
+ CHECK_EQ(bytes,
+ ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
+ }
+
+ // Check semi-spaces.
+ CHECK_EQ(from_space_.id(), kFromSpace);
+ CHECK_EQ(to_space_.id(), kToSpace);
+ from_space_.Verify();
+ to_space_.Verify();
+}
+#endif
+
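+// ---------------------------------------------------------------------------
+// A minimal model of the packing invariant that Verify() walks above: stepping
+// through a page from area_start by object size must land exactly on the
+// allocation top, never past it. ObjectsArePacked is an illustrative name
+// only, not an existing V8 helper.
+namespace {
+inline bool ObjectsArePacked(const size_t* object_sizes, size_t count,
+ Address area_start, Address top) {
+ Address current = area_start;
+ for (size_t i = 0; i < count; i++) {
+ if (current >= top) return false; // Walked past the allocation pointer.
+ current += object_sizes[i];
+ }
+ return current == top; // The walk must land exactly on top.
+}
+} // namespace
+// ---------------------------------------------------------------------------
+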
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/heap/new-spaces.h b/chromium/v8/src/heap/new-spaces.h
new file mode 100644
index 00000000000..73613152fa0
--- /dev/null
+++ b/chromium/v8/src/heap/new-spaces.h
@@ -0,0 +1,501 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_NEW_SPACES_H_
+#define V8_HEAP_NEW_SPACES_H_
+
+#include <atomic>
+#include <memory>
+
+#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "src/heap/heap.h"
+#include "src/heap/spaces.h"
+#include "src/logging/log.h"
+#include "src/objects/heap-object.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class MemoryChunk;
+
+enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
+
+// -----------------------------------------------------------------------------
+// SemiSpace in young generation
+//
+// A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
+// The mark-compact collector uses the memory of the first page in the from
+// space as a marking stack when tracing live objects.
+class SemiSpace : public Space {
+ public:
+ using iterator = PageIterator;
+ using const_iterator = ConstPageIterator;
+
+ static void Swap(SemiSpace* from, SemiSpace* to);
+
+ SemiSpace(Heap* heap, SemiSpaceId semispace)
+ : Space(heap, NEW_SPACE, new NoFreeList()),
+ current_capacity_(0),
+ maximum_capacity_(0),
+ minimum_capacity_(0),
+ age_mark_(kNullAddress),
+ committed_(false),
+ id_(semispace),
+ current_page_(nullptr),
+ pages_used_(0) {}
+
+ inline bool Contains(HeapObject o) const;
+ inline bool Contains(Object o) const;
+ inline bool ContainsSlow(Address a) const;
+
+ void SetUp(size_t initial_capacity, size_t maximum_capacity);
+ void TearDown();
+
+ bool Commit();
+ bool Uncommit();
+ bool is_committed() { return committed_; }
+
+ // Grow the semispace to the new capacity. The new capacity requested must
+ // be larger than the current capacity and less than the maximum capacity.
+ bool GrowTo(size_t new_capacity);
+
+ // Shrinks the semispace to the new capacity. The new capacity requested
+ // must be more than the amount of used memory in the semispace and less
+ // than the current capacity.
+ bool ShrinkTo(size_t new_capacity);
+
+ bool EnsureCurrentCapacity();
+
+ Address space_end() { return memory_chunk_list_.back()->area_end(); }
+
+ // Returns the start address of the first page of the space.
+ Address space_start() {
+ DCHECK_NE(memory_chunk_list_.front(), nullptr);
+ return memory_chunk_list_.front()->area_start();
+ }
+
+ Page* current_page() { return current_page_; }
+ int pages_used() { return pages_used_; }
+
+ // Returns the start address of the current page of the space.
+ Address page_low() { return current_page_->area_start(); }
+
+ // Returns one past the end address of the current page of the space.
+ Address page_high() { return current_page_->area_end(); }
+
+ bool AdvancePage() {
+ Page* next_page = current_page_->next_page();
+ // We cannot expand if we reached the maximum number of pages already. Note
+ // that we need to account for the next page already for this check as we
+ // could potentially fill the whole page after advancing.
+ const bool reached_max_pages = (pages_used_ + 1) == max_pages();
+ if (next_page == nullptr || reached_max_pages) {
+ return false;
+ }
+ current_page_ = next_page;
+ pages_used_++;
+ return true;
+ }
+
+ // Resets the space to using the first page.
+ void Reset();
+
+ void RemovePage(Page* page);
+ void PrependPage(Page* page);
+
+ Page* InitializePage(MemoryChunk* chunk);
+
+ // Age mark accessors.
+ Address age_mark() { return age_mark_; }
+ void set_age_mark(Address mark);
+
+ // Returns the current capacity of the semispace.
+ size_t current_capacity() { return current_capacity_; }
+
+ // Returns the maximum capacity of the semispace.
+ size_t maximum_capacity() { return maximum_capacity_; }
+
+ // Returns the initial capacity of the semispace.
+ size_t minimum_capacity() { return minimum_capacity_; }
+
+ SemiSpaceId id() { return id_; }
+
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory() override;
+
+ // If we don't have these here then SemiSpace will be abstract. However
+ // they should never be called:
+
+ size_t Size() override { UNREACHABLE(); }
+
+ size_t SizeOfObjects() override { return Size(); }
+
+ size_t Available() override { UNREACHABLE(); }
+
+ Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
+ Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
+
+ const Page* first_page() const {
+ return reinterpret_cast<const Page*>(Space::first_page());
+ }
+ const Page* last_page() const {
+ return reinterpret_cast<const Page*>(Space::last_page());
+ }
+
+ iterator begin() { return iterator(first_page()); }
+ iterator end() { return iterator(nullptr); }
+
+ const_iterator begin() const { return const_iterator(first_page()); }
+ const_iterator end() const { return const_iterator(nullptr); }
+
+ std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
+
+#ifdef DEBUG
+ V8_EXPORT_PRIVATE void Print() override;
+ // Validate a range of addresses in a SemiSpace.
+ // The "from" address must be on a page prior to the "to" address,
+ // in the linked page order, or it must be earlier on the same page.
+ static void AssertValidRange(Address from, Address to);
+#else
+ // Do nothing.
+ inline static void AssertValidRange(Address from, Address to) {}
+#endif
+
+#ifdef VERIFY_HEAP
+ virtual void Verify();
+#endif
+
+ private:
+ void RewindPages(int num_pages);
+
+ inline int max_pages() {
+ return static_cast<int>(current_capacity_ / Page::kPageSize);
+ }
+
+ // Copies the flags into the masked positions on all pages in the space.
+ void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
+
+ // The currently committed space capacity.
+ size_t current_capacity_;
+
+ // The maximum capacity that can be used by this space. A space cannot grow
+ // beyond that size.
+ size_t maximum_capacity_;
+
+ // The minimum capacity for the space. A space cannot shrink below this size.
+ size_t minimum_capacity_;
+
+ // Used to govern object promotion during mark-compact collection.
+ Address age_mark_;
+
+ bool committed_;
+ SemiSpaceId id_;
+
+ Page* current_page_;
+
+ int pages_used_;
+
+ friend class NewSpace;
+ friend class SemiSpaceObjectIterator;
+};
+
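+// A worked example of the page accounting above: with current_capacity_ equal
+// to 8 * Page::kPageSize, max_pages() is 8. The space starts on its first page
+// with pages_used_ == 0, so AdvancePage() succeeds seven times and refuses the
+// eighth attempt; at most eight pages are therefore ever in use, matching the
+// committed capacity.
+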
+// A SemiSpaceObjectIterator is an ObjectIterator that iterates over the active
+// semispace of the heap's new space. It iterates over the objects in the
+// semispace from a given start address (defaulting to the bottom of the
+// semispace) to the top of the semispace. New objects allocated after the
+// iterator is created are not iterated.
+class SemiSpaceObjectIterator : public ObjectIterator {
+ public:
+ // Create an iterator over the allocated objects in the given to-space.
+ explicit SemiSpaceObjectIterator(NewSpace* space);
+
+ inline HeapObject Next() override;
+
+ private:
+ void Initialize(Address start, Address end);
+
+ // The current iteration point.
+ Address current_;
+ // The end of iteration.
+ Address limit_;
+};
+
+// -----------------------------------------------------------------------------
+// The young generation space.
+//
+// The new space consists of a contiguous pair of semispaces. It simply
+// forwards most functions to the appropriate semispace.
+
+class V8_EXPORT_PRIVATE NewSpace
+ : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
+ public:
+ using iterator = PageIterator;
+ using const_iterator = ConstPageIterator;
+
+ NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
+ size_t initial_semispace_capacity, size_t max_semispace_capacity);
+
+ ~NewSpace() override { TearDown(); }
+
+ inline bool ContainsSlow(Address a) const;
+ inline bool Contains(Object o) const;
+ inline bool Contains(HeapObject o) const;
+
+ // Tears down the space. Heap memory was not allocated by the space, so it
+ // is not deallocated here.
+ void TearDown();
+
+ // Flip the pair of spaces.
+ void Flip();
+
+ // Grow the capacity of the semispaces. Assumes that they are not at
+ // their maximum capacity.
+ void Grow();
+
+ // Shrink the capacity of the semispaces.
+ void Shrink();
+
+ // Return the allocated bytes in the active semispace.
+ size_t Size() final {
+ DCHECK_GE(top(), to_space_.page_low());
+ return to_space_.pages_used() *
+ MemoryChunkLayout::AllocatableMemoryInDataPage() +
+ static_cast<size_t>(top() - to_space_.page_low());
+ }
+
+ size_t SizeOfObjects() final { return Size(); }
+
+ // Return the allocatable capacity of a semispace.
+ size_t Capacity() {
+ SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
+ return (to_space_.current_capacity() / Page::kPageSize) *
+ MemoryChunkLayout::AllocatableMemoryInDataPage();
+ }
+
+ // Return the current size of a semispace, allocatable and non-allocatable
+ // memory.
+ size_t TotalCapacity() {
+ DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
+ return to_space_.current_capacity();
+ }
+
+ // Committed memory for NewSpace is the committed memory of both semi-spaces
+ // combined.
+ size_t CommittedMemory() final {
+ return from_space_.CommittedMemory() + to_space_.CommittedMemory();
+ }
+
+ size_t MaximumCommittedMemory() final {
+ return from_space_.MaximumCommittedMemory() +
+ to_space_.MaximumCommittedMemory();
+ }
+
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory() final;
+
+ // Return the available bytes without growing.
+ size_t Available() final {
+ DCHECK_GE(Capacity(), Size());
+ return Capacity() - Size();
+ }
+
+ size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
+ if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
+ type == ExternalBackingStoreType::kArrayBuffer)
+ return heap()->YoungArrayBufferBytes();
+ DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
+ return to_space_.ExternalBackingStoreBytes(type);
+ }
+
+ size_t ExternalBackingStoreBytes() {
+ size_t result = 0;
+ for (int i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ result +=
+ ExternalBackingStoreBytes(static_cast<ExternalBackingStoreType>(i));
+ }
+ return result;
+ }
+
+ size_t AllocatedSinceLastGC() {
+ const Address age_mark = to_space_.age_mark();
+ DCHECK_NE(age_mark, kNullAddress);
+ DCHECK_NE(top(), kNullAddress);
+ Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
+ Page* const last_page = Page::FromAllocationAreaAddress(top());
+ Page* current_page = age_mark_page;
+ size_t allocated = 0;
+ if (current_page != last_page) {
+ DCHECK_EQ(current_page, age_mark_page);
+ DCHECK_GE(age_mark_page->area_end(), age_mark);
+ allocated += age_mark_page->area_end() - age_mark;
+ current_page = current_page->next_page();
+ } else {
+ DCHECK_GE(top(), age_mark);
+ return top() - age_mark;
+ }
+ while (current_page != last_page) {
+ DCHECK_NE(current_page, age_mark_page);
+ allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
+ current_page = current_page->next_page();
+ }
+ DCHECK_GE(top(), current_page->area_start());
+ allocated += top() - current_page->area_start();
+ DCHECK_LE(allocated, Size());
+ return allocated;
+ }
+
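+ // A worked example for AllocatedSinceLastGC() above: if the age mark sits
+ // 16 KB before the end of its page, one full page lies strictly between it
+ // and the page holding top(), and top() is 24 KB past that last page's
+ // area_start(), the result is
+ // 16 KB + AllocatableMemoryInDataPage() + 24 KB.
+ // If the age mark and top() share a page, the result is simply
+ // top() - age_mark.
+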
+ void MovePageFromSpaceToSpace(Page* page) {
+ DCHECK(page->IsFromPage());
+ from_space_.RemovePage(page);
+ to_space_.PrependPage(page);
+ }
+
+ bool Rebalance();
+
+ // Return the maximum capacity of a semispace.
+ size_t MaximumCapacity() {
+ DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
+ return to_space_.maximum_capacity();
+ }
+
+ bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
+
+ // Returns the initial capacity of a semispace.
+ size_t InitialTotalCapacity() {
+ DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
+ return to_space_.minimum_capacity();
+ }
+
+ void ResetOriginalTop() {
+ DCHECK_GE(top(), original_top_);
+ DCHECK_LE(top(), original_limit_);
+ original_top_.store(top(), std::memory_order_release);
+ }
+
+ Address original_top_acquire() {
+ return original_top_.load(std::memory_order_acquire);
+ }
+ Address original_limit_relaxed() {
+ return original_limit_.load(std::memory_order_relaxed);
+ }
+
+ // Return the first allocatable address in the active semispace. This may be
+ // the address where the first object resides.
+ Address first_allocatable_address() { return to_space_.space_start(); }
+
+ // Get the age mark of the inactive semispace.
+ Address age_mark() { return from_space_.age_mark(); }
+ // Set the age mark in the active semispace.
+ void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
+
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRawUnaligned(
+ int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
+ AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
+ int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ // Reset the allocation pointer to the beginning of the active semispace.
+ void ResetLinearAllocationArea();
+
+ // When inline allocation stepping is active, either because of incremental
+ // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
+ // inline allocation every once in a while. This is done by setting
+ // allocation_info_.limit to be lower than the actual limit and increasing
+ // it in steps to guarantee that the observers are notified periodically.
+ void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
+
+ inline bool ToSpaceContainsSlow(Address a) const;
+ inline bool ToSpaceContains(Object o) const;
+ inline bool FromSpaceContains(Object o) const;
+
+ // Try to switch the active semispace to a new, empty page.
+ // Returns false if this isn't possible or reasonable (i.e., there
+ // are no pages, or the current page is already empty), or true
+ // if successful.
+ bool AddFreshPage();
+ bool AddFreshPageSynchronized();
+
+#ifdef VERIFY_HEAP
+ // Verify the active semispace.
+ virtual void Verify(Isolate* isolate);
+#endif
+
+#ifdef DEBUG
+ // Print the active semispace.
+ void Print() override { to_space_.Print(); }
+#endif
+
+ // Return whether the operation succeeded.
+ bool CommitFromSpaceIfNeeded() {
+ if (from_space_.is_committed()) return true;
+ return from_space_.Commit();
+ }
+
+ bool UncommitFromSpace() {
+ if (!from_space_.is_committed()) return true;
+ return from_space_.Uncommit();
+ }
+
+ bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
+
+ SemiSpace* active_space() { return &to_space_; }
+
+ Page* first_page() { return to_space_.first_page(); }
+ Page* last_page() { return to_space_.last_page(); }
+
+ iterator begin() { return to_space_.begin(); }
+ iterator end() { return to_space_.end(); }
+
+ const_iterator begin() const { return to_space_.begin(); }
+ const_iterator end() const { return to_space_.end(); }
+
+ std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
+
+ SemiSpace& from_space() { return from_space_; }
+ SemiSpace& to_space() { return to_space_; }
+
+ private:
+ // Update linear allocation area to match the current to-space page.
+ void UpdateLinearAllocationArea();
+
+ base::Mutex mutex_;
+
+ // The top and the limit at the time of setting the linear allocation area.
+ // These values can be accessed by background tasks.
+ std::atomic<Address> original_top_;
+ std::atomic<Address> original_limit_;
+
+ // The semispaces.
+ SemiSpace to_space_;
+ SemiSpace from_space_;
+ VirtualMemory reservation_;
+
+ bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
+ bool SupportsInlineAllocation() override { return true; }
+
+ friend class SemiSpaceObjectIterator;
+};
+
+// For contiguous spaces, top should be in the space (or at the end) and limit
+// should be the end of the space.
+#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
+ SLOW_DCHECK((space).page_low() <= (info).top() && \
+ (info).top() <= (space).page_high() && \
+ (info).limit() <= (space).page_high())
+
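+// Note that (info).limit() may legitimately sit below (space).page_high():
+// UpdateInlineAllocationLimit() lowers the limit so that incremental marking,
+// the idle scavenge job, and allocation statistics gathering regain control
+// periodically. For example, with top() 4 KB into a page the limit might be
+// set only 1 KB further along even though the page extends much further.
+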
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_NEW_SPACES_H_
diff --git a/chromium/v8/src/heap/object-stats.cc b/chromium/v8/src/heap/object-stats.cc
index bd15b50b96a..05929acb973 100644
--- a/chromium/v8/src/heap/object-stats.cc
+++ b/chromium/v8/src/heap/object-stats.cc
@@ -426,7 +426,7 @@ class ObjectStatsCollectorImpl {
bool CanRecordFixedArray(FixedArrayBase array);
bool IsCowArray(FixedArrayBase array);
- // Blacklist for objects that should not be recorded using
+ // Blocklist for objects that should not be recorded using
// VirtualObjectStats and RecordSimpleVirtualObjectStats. For recording those
// objects dispatch to the low level ObjectStats::RecordObjectStats manually.
bool ShouldRecordObject(HeapObject object, CowMode check_cow_array);
@@ -839,7 +839,6 @@ void ObjectStatsCollectorImpl::RecordObjectStats(HeapObject obj,
bool ObjectStatsCollectorImpl::CanRecordFixedArray(FixedArrayBase array) {
ReadOnlyRoots roots(heap_);
return array != roots.empty_fixed_array() &&
- array != roots.empty_sloppy_arguments_elements() &&
array != roots.empty_slow_element_dictionary() &&
array != roots.empty_property_dictionary();
}
diff --git a/chromium/v8/src/heap/off-thread-heap.cc b/chromium/v8/src/heap/off-thread-heap.cc
index fec93f80685..584fe349717 100644
--- a/chromium/v8/src/heap/off-thread-heap.cc
+++ b/chromium/v8/src/heap/off-thread-heap.cc
@@ -4,10 +4,14 @@
#include "src/heap/off-thread-heap.h"
+#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
+#include "src/handles/off-thread-transfer-handle-storage-inl.h"
+#include "src/heap/paged-spaces-inl.h"
#include "src/heap/spaces-inl.h"
-#include "src/heap/spaces.h"
#include "src/objects/objects-body-descriptors-inl.h"
#include "src/roots/roots.h"
+#include "src/snapshot/references.h"
// Has to be the last include (doesn't have include guards)
#include "src/objects/object-macros.h"
@@ -15,7 +19,16 @@
namespace v8 {
namespace internal {
-OffThreadHeap::OffThreadHeap(Heap* heap) : space_(heap), lo_space_(heap) {}
+OffThreadHeap::~OffThreadHeap() = default;
+
+OffThreadHeap::OffThreadHeap(Heap* heap)
+ : space_(heap),
+ lo_space_(heap),
+ off_thread_transfer_handles_head_(nullptr) {}
+
+bool OffThreadHeap::Contains(HeapObject obj) {
+ return space_.Contains(obj) || lo_space_.Contains(obj);
+}
class OffThreadHeap::StringSlotCollectingVisitor : public ObjectVisitor {
public:
@@ -74,6 +87,13 @@ void OffThreadHeap::FinishOffThread() {
string_slots_ = std::move(string_slot_collector.string_slots);
+ OffThreadTransferHandleStorage* storage =
+ off_thread_transfer_handles_head_.get();
+ while (storage != nullptr) {
+ storage->ConvertFromOffThreadHandleOnFinish();
+ storage = storage->next();
+ }
+
is_finished = true;
}
@@ -82,25 +102,70 @@ void OffThreadHeap::Publish(Heap* heap) {
Isolate* isolate = heap->isolate();
ReadOnlyRoots roots(isolate);
+ // Before we do anything else, ensure that the old-space can expand to the
+ // size needed for the off-thread objects. Use capacity rather than size since
+ // we're adding entire pages.
+ size_t off_thread_size = space_.Capacity() + lo_space_.Size();
+ if (!heap->CanExpandOldGeneration(off_thread_size)) {
+ heap->CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
+ if (!heap->CanExpandOldGeneration(off_thread_size)) {
+ heap->FatalProcessOutOfMemory(
+ "Can't expand old-space enough to merge off-thread pages.");
+ }
+ }
+
+ // Merging and transferring handles should be atomic from the point of view
+ // of the GC, since we neither want the GC to walk main-thread handles that
+ // point into off-thread pages, nor do we want the GC to move the raw
+ // pointers we have into off-thread pages before we've had a chance to turn
+ // them into real handles.
+ // TODO(leszeks): This could be a stronger assertion, that we don't GC at
+ // all.
+ DisallowHeapAllocation no_gc;
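+ // (DisallowHeapAllocation is a RAII scope: conceptually it raises a flag in
+ // its constructor that allocation paths assert against, and lowers it again
+ // in its destructor. It stays in effect until no_gc.Release() further down.)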
+
+ // Merge the spaces.
+ {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.OffThreadFinalization.Publish.Merge");
+
+ heap->old_space()->MergeLocalSpace(&space_);
+ heap->lo_space()->MergeOffThreadSpace(&lo_space_);
+
+ DCHECK(heap->CanExpandOldGeneration(0));
+ }
+
+ // Convert all the transfer handles into real handles. Make sure to do this
+ // before creating any handle scopes, to allow these handles to live in the
+ // caller's handle scope.
+ OffThreadTransferHandleStorage* storage =
+ off_thread_transfer_handles_head_.get();
+ while (storage != nullptr) {
+ storage->ConvertToHandleOnPublish(isolate, &no_gc);
+ storage = storage->next();
+ }
+
+ // Create a new handle scope after transferring handles, for the slot holder
+ // handles below.
HandleScope handle_scope(isolate);
- // First, handlify all the string slot holder objects, so that we can keep
- // track of them if they move.
+ // Handlify all the string slot holder objects, so that we can keep track of
+ // them if they move.
//
// TODO(leszeks): We might be able to create a HandleScope-compatible
- // structure off-thread and merge it into the current handle scope all in one
- // go (DeferredHandles maybe?).
- std::vector<Handle<HeapObject>> heap_object_handles;
+ // structure off-thread and merge it into the current handle scope all in
+ // one go (DeferredHandles maybe?).
+ std::vector<std::pair<Handle<HeapObject>, Handle<Map>>> heap_object_handles;
std::vector<Handle<Script>> script_handles;
{
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OffThreadFinalization.Publish.CollectHandles");
heap_object_handles.reserve(string_slots_.size());
for (RelativeSlot relative_slot : string_slots_) {
- // TODO(leszeks): Group slots in the same parent object to avoid creating
- // multiple duplicate handles.
+ // TODO(leszeks): Group slots in the same parent object to avoid
+ // creating multiple duplicate handles.
HeapObject obj = HeapObject::FromAddress(relative_slot.object_address);
- heap_object_handles.push_back(handle(obj, isolate));
+ heap_object_handles.push_back(
+ {handle(obj, isolate), handle(obj.map(), isolate)});
// De-internalize the string so that we can re-internalize it later.
String string =
@@ -116,46 +181,20 @@ void OffThreadHeap::Publish(Heap* heap) {
}
}
- // Then merge the spaces. At this point, we are allowed to point between (no
- // longer) off-thread pages and main-thread heap pages, and objects in the
- // previously off-thread page can move.
- {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.OffThreadFinalization.Publish.Merge");
- Heap* heap = isolate->heap();
+ // After this point, all objects are transferred and all handles are valid,
+ // so we can GC again.
+ no_gc.Release();
- // Ensure that the old-space can expand do the size needed for the
- // off-thread objects. Use capacity rather than size since we're adding
- // entire pages.
- size_t off_thread_size = space_.Capacity() + lo_space_.Size();
- if (!heap->CanExpandOldGeneration(off_thread_size)) {
- heap->InvokeNearHeapLimitCallback();
- if (!heap->CanExpandOldGeneration(off_thread_size)) {
- heap->CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
- if (!heap->CanExpandOldGeneration(off_thread_size)) {
- heap->FatalProcessOutOfMemory(
- "Can't expand old-space enough to merge off-thread pages.");
- }
- }
- }
+ // Possibly trigger a GC if we're close to exhausting the old generation.
+ // TODO(leszeks): Adjust the heuristics here.
+ heap->StartIncrementalMarkingIfAllocationLimitIsReached(
+ heap->GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
- heap->old_space()->MergeLocalSpace(&space_);
- heap->lo_space()->MergeOffThreadSpace(&lo_space_);
-
- DCHECK(heap->CanExpandOldGeneration(0));
- heap->NotifyOldGenerationExpansion();
-
- // Possibly trigger a GC if we're close to exhausting the old generation.
- // TODO(leszeks): Adjust the heuristics here.
- heap->StartIncrementalMarkingIfAllocationLimitIsReached(
- heap->GCFlagsForIncrementalMarking(),
- kGCCallbackScheduleIdleGarbageCollection);
-
- if (!heap->ShouldExpandOldGenerationOnSlowAllocation() ||
- !heap->CanExpandOldGeneration(1 * MB)) {
- heap->CollectGarbage(OLD_SPACE,
- GarbageCollectionReason::kAllocationFailure);
- }
+ if (!heap->ShouldExpandOldGenerationOnSlowAllocation() ||
+ !heap->CanExpandOldGeneration(1 * MB)) {
+ heap->CollectGarbage(OLD_SPACE,
+ GarbageCollectionReason::kAllocationFailure);
}
// Iterate the string slots, as an offset from the holders we have handles to.
@@ -163,12 +202,13 @@ void OffThreadHeap::Publish(Heap* heap) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OffThreadFinalization.Publish.UpdateHandles");
for (size_t i = 0; i < string_slots_.size(); ++i) {
- HeapObject obj = *heap_object_handles[i];
+ HeapObject obj = *heap_object_handles[i].first;
int slot_offset = string_slots_[i].slot_offset;
// There are currently no cases where the holder object could have been
// resized.
- DCHECK_LT(slot_offset, obj.Size());
+ CHECK_EQ(obj.map(), *heap_object_handles[i].second);
+ CHECK_LT(slot_offset, obj.Size());
String string = String::cast(RELAXED_READ_FIELD(obj, slot_offset));
if (string.IsThinString()) {
@@ -188,8 +228,14 @@ void OffThreadHeap::Publish(Heap* heap) {
if (*string_handle != *internalized_string) {
// Re-read the object from the handle in case there was GC during
// internalization and it moved.
- HeapObject obj = *heap_object_handles[i];
+ HeapObject obj = *heap_object_handles[i].first;
String value = *internalized_string;
+
+ // Sanity checks that the object or string slot value hasn't changed.
+ CHECK_EQ(obj.map(), *heap_object_handles[i].second);
+ CHECK_LT(slot_offset, obj.Size());
+ CHECK_EQ(RELAXED_READ_FIELD(obj, slot_offset), *string_handle);
+
RELAXED_WRITE_FIELD(obj, slot_offset, value);
WRITE_BARRIER(obj, slot_offset, value);
}
@@ -223,7 +269,37 @@ HeapObject OffThreadHeap::AllocateRaw(int size, AllocationType allocation,
} else {
result = space_.AllocateRaw(size, alignment);
}
- return result.ToObjectChecked();
+ HeapObject obj = result.ToObjectChecked();
+ OnAllocationEvent(obj, size);
+ return obj;
+}
+
+bool OffThreadHeap::ReserveSpace(Heap::Reservation* reservations) {
+#ifdef DEBUG
+ for (int space = FIRST_SPACE;
+ space < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces); space++) {
+ if (space == OLD_SPACE || space == LO_SPACE) continue;
+ Heap::Reservation* reservation = &reservations[space];
+ DCHECK_EQ(reservation->size(), 1);
+ DCHECK_EQ(reservation->at(0).size, 0);
+ }
+#endif
+
+ for (auto& chunk : reservations[OLD_SPACE]) {
+ int size = chunk.size;
+ AllocationResult allocation = space_.AllocateRawUnaligned(size);
+ HeapObject free_space = allocation.ToObjectChecked();
+
+ // Mark with a free list node, in case we have a GC before
+ // deserializing.
+ Address free_space_address = free_space.address();
+ CreateFillerObjectAt(free_space_address, size,
+ ClearFreedMemoryMode::kDontClearFreedMemory);
+ chunk.start = free_space_address;
+ chunk.end = free_space_address + size;
+ }
+
+ return true;
}
HeapObject OffThreadHeap::CreateFillerObjectAt(
@@ -234,6 +310,17 @@ HeapObject OffThreadHeap::CreateFillerObjectAt(
return filler;
}
+OffThreadTransferHandleStorage* OffThreadHeap::AddTransferHandleStorage(
+ HandleBase handle) {
+ DCHECK_IMPLIES(off_thread_transfer_handles_head_ != nullptr,
+ off_thread_transfer_handles_head_->state() ==
+ OffThreadTransferHandleStorage::kOffThreadHandle);
+ off_thread_transfer_handles_head_ =
+ std::make_unique<OffThreadTransferHandleStorage>(
+ handle.location(), std::move(off_thread_transfer_handles_head_));
+ return off_thread_transfer_handles_head_.get();
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/off-thread-heap.h b/chromium/v8/src/heap/off-thread-heap.h
index de902be52fb..3bb1777df11 100644
--- a/chromium/v8/src/heap/off-thread-heap.h
+++ b/chromium/v8/src/heap/off-thread-heap.h
@@ -6,28 +6,51 @@
#define V8_HEAP_OFF_THREAD_HEAP_H_
#include <vector>
+
#include "src/common/globals.h"
#include "src/heap/large-spaces.h"
+#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"
+#include "src/objects/heap-object.h"
namespace v8 {
namespace internal {
+class OffThreadTransferHandleStorage;
+
class V8_EXPORT_PRIVATE OffThreadHeap {
public:
explicit OffThreadHeap(Heap* heap);
+ ~OffThreadHeap();
HeapObject AllocateRaw(int size, AllocationType allocation,
AllocationAlignment alignment = kWordAligned);
void AddToScriptList(Handle<Script> shared);
+ void OnAllocationEvent(HeapObject obj, int size) {
+ // TODO(leszeks): Do something here.
+ }
+
+ ReadOnlySpace* read_only_space() const {
+ // Access the main-thread heap via the spaces.
+ return space_.heap()->read_only_space();
+ }
+
+ bool Contains(HeapObject obj);
+
+ bool ReserveSpace(Heap::Reservation* reservations);
+
HeapObject CreateFillerObjectAt(Address addr, int size,
ClearFreedMemoryMode clear_memory_mode);
+ OffThreadTransferHandleStorage* AddTransferHandleStorage(HandleBase handle);
+
void FinishOffThread();
void Publish(Heap* heap);
private:
+ friend class DeserializerAllocator;
+
class StringSlotCollectingVisitor;
struct RelativeSlot {
@@ -43,6 +66,8 @@ class V8_EXPORT_PRIVATE OffThreadHeap {
OffThreadLargeObjectSpace lo_space_;
std::vector<RelativeSlot> string_slots_;
std::vector<Script> script_list_;
+ std::unique_ptr<OffThreadTransferHandleStorage>
+ off_thread_transfer_handles_head_;
bool is_finished = false;
};
diff --git a/chromium/v8/src/heap/paged-spaces-inl.h b/chromium/v8/src/heap/paged-spaces-inl.h
new file mode 100644
index 00000000000..6b2e5a848a5
--- /dev/null
+++ b/chromium/v8/src/heap/paged-spaces-inl.h
@@ -0,0 +1,208 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_PAGED_SPACES_INL_H_
+#define V8_HEAP_PAGED_SPACES_INL_H_
+
+#include "src/heap/incremental-marking.h"
+#include "src/heap/paged-spaces.h"
+#include "src/objects/code-inl.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// PagedSpaceObjectIterator
+
+HeapObject PagedSpaceObjectIterator::Next() {
+ do {
+ HeapObject next_obj = FromCurrentPage();
+ if (!next_obj.is_null()) return next_obj;
+ } while (AdvanceToNextPage());
+ return HeapObject();
+}
+
+HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
+ while (cur_addr_ != cur_end_) {
+ HeapObject obj = HeapObject::FromAddress(cur_addr_);
+ const int obj_size = obj.Size();
+ cur_addr_ += obj_size;
+ DCHECK_LE(cur_addr_, cur_end_);
+ if (!obj.IsFreeSpaceOrFiller()) {
+ if (obj.IsCode()) {
+ DCHECK_EQ(space_->identity(), CODE_SPACE);
+ DCHECK_CODEOBJECT_SIZE(obj_size, space_);
+ } else {
+ DCHECK_OBJECT_SIZE(obj_size);
+ }
+ return obj;
+ }
+ }
+ return HeapObject();
+}
+
+bool PagedSpace::Contains(Address addr) const {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ return true;
+ }
+ return Page::FromAddress(addr)->owner() == this;
+}
+
+bool PagedSpace::Contains(Object o) const {
+ if (!o.IsHeapObject()) return false;
+ return Page::FromAddress(o.ptr())->owner() == this;
+}
+
+void PagedSpace::UnlinkFreeListCategories(Page* page) {
+ DCHECK_EQ(this, page->owner());
+ page->ForAllFreeListCategories([this](FreeListCategory* category) {
+ free_list()->RemoveCategory(category);
+ });
+}
+
+size_t PagedSpace::RelinkFreeListCategories(Page* page) {
+ DCHECK_EQ(this, page->owner());
+ size_t added = 0;
+ page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
+ added += category->available();
+ category->Relink(free_list());
+ });
+
+ DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
+ page->AvailableInFreeList() ==
+ page->AvailableInFreeListFromAllocatedBytes());
+ return added;
+}
+
+bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
+ if (allocation_info_.top() != kNullAddress) {
+ const Address object_address = object.address();
+ if ((allocation_info_.top() - object_size) == object_address) {
+ allocation_info_.set_top(object_address);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin) {
+ if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
+ return true;
+ }
+ return SlowRefillLinearAllocationArea(size_in_bytes, origin);
+}
+
+HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
+ Address current_top = allocation_info_.top();
+ Address new_top = current_top + size_in_bytes;
+ DCHECK_LE(new_top, allocation_info_.limit());
+ allocation_info_.set_top(new_top);
+ return HeapObject::FromAddress(current_top);
+}
+
+HeapObject PagedSpace::TryAllocateLinearlyAligned(
+ int* size_in_bytes, AllocationAlignment alignment) {
+ Address current_top = allocation_info_.top();
+ int filler_size = Heap::GetFillToAlign(current_top, alignment);
+
+ Address new_top = current_top + filler_size + *size_in_bytes;
+ if (new_top > allocation_info_.limit()) return HeapObject();
+
+ allocation_info_.set_top(new_top);
+ if (filler_size > 0) {
+ *size_in_bytes += filler_size;
+ return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
+ HeapObject::FromAddress(current_top),
+ filler_size);
+ }
+
+ return HeapObject::FromAddress(current_top);
+}
+
+AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
+ AllocationOrigin origin) {
+ if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
+ return AllocationResult::Retry(identity());
+ }
+ HeapObject object = AllocateLinearly(size_in_bytes);
+ DCHECK(!object.is_null());
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
+ return object;
+}
+
+AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ DCHECK_EQ(identity(), OLD_SPACE);
+ int allocation_size = size_in_bytes;
+ HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
+ if (object.is_null()) {
+ // We don't know exactly how much filler we need to align until space is
+ // allocated, so assume the worst case.
+ int filler_size = Heap::GetMaximumFillToAlign(alignment);
+ allocation_size += filler_size;
+ if (!EnsureLinearAllocationArea(allocation_size, origin)) {
+ return AllocationResult::Retry(identity());
+ }
+ allocation_size = size_in_bytes;
+ object = TryAllocateLinearlyAligned(&allocation_size, alignment);
+ DCHECK(!object.is_null());
+ }
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
+ return object;
+}
+
+AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ if (top_on_previous_step_ && top() < top_on_previous_step_ &&
+ SupportsInlineAllocation()) {
+ // Generated code decreased the top() pointer to do folded allocations.
+ // The top_on_previous_step_ can be one byte beyond the current page.
+ DCHECK_NE(top(), kNullAddress);
+ DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
+ Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
+ top_on_previous_step_ = top();
+ }
+ size_t bytes_since_last =
+ top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
+
+ DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
+#ifdef V8_HOST_ARCH_32_BIT
+ AllocationResult result =
+ alignment != kWordAligned
+ ? AllocateRawAligned(size_in_bytes, alignment, origin)
+ : AllocateRawUnaligned(size_in_bytes, origin);
+#else
+ AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
+#endif
+ HeapObject heap_obj;
+ if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
+ AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
+ heap_obj.address(), size_in_bytes);
+ StartNextInlineAllocationStep();
+ DCHECK_IMPLIES(
+ heap()->incremental_marking()->black_allocation(),
+ heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
+ }
+ return result;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_PAGED_SPACES_INL_H_
diff --git a/chromium/v8/src/heap/paged-spaces.cc b/chromium/v8/src/heap/paged-spaces.cc
new file mode 100644
index 00000000000..dabdf2d5a0e
--- /dev/null
+++ b/chromium/v8/src/heap/paged-spaces.cc
@@ -0,0 +1,1047 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/paged-spaces.h"
+
+#include "src/base/optional.h"
+#include "src/base/platform/mutex.h"
+#include "src/execution/isolate.h"
+#include "src/execution/vm-state-inl.h"
+#include "src/heap/array-buffer-sweeper.h"
+#include "src/heap/array-buffer-tracker-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/memory-allocator.h"
+#include "src/heap/memory-chunk-inl.h"
+#include "src/heap/paged-spaces-inl.h"
+#include "src/heap/read-only-heap.h"
+#include "src/logging/counters.h"
+#include "src/objects/string.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// PagedSpaceObjectIterator
+
+PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
+ PagedSpace* space)
+ : cur_addr_(kNullAddress),
+ cur_end_(kNullAddress),
+ space_(space),
+ page_range_(space->first_page(), nullptr),
+ current_page_(page_range_.begin()) {
+ space_->MakeLinearAllocationAreaIterable();
+ heap->mark_compact_collector()->EnsureSweepingCompleted();
+}
+
+PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
+ PagedSpace* space,
+ Page* page)
+ : cur_addr_(kNullAddress),
+ cur_end_(kNullAddress),
+ space_(space),
+ page_range_(page),
+ current_page_(page_range_.begin()) {
+ space_->MakeLinearAllocationAreaIterable();
+ heap->mark_compact_collector()->EnsureSweepingCompleted();
+#ifdef DEBUG
+ AllocationSpace owner = page->owner_identity();
+ DCHECK(owner == OLD_SPACE || owner == MAP_SPACE || owner == CODE_SPACE);
+#endif // DEBUG
+}
+
+PagedSpaceObjectIterator::PagedSpaceObjectIterator(OffThreadSpace* space)
+ : cur_addr_(kNullAddress),
+ cur_end_(kNullAddress),
+ space_(space),
+ page_range_(space->first_page(), nullptr),
+ current_page_(page_range_.begin()) {
+ space_->MakeLinearAllocationAreaIterable();
+}
+
+// We have hit the end of the current page and should advance to the next page
+// in the iteration range.
+bool PagedSpaceObjectIterator::AdvanceToNextPage() {
+ DCHECK_EQ(cur_addr_, cur_end_);
+ if (current_page_ == page_range_.end()) return false;
+ Page* cur_page = *(current_page_++);
+
+ cur_addr_ = cur_page->area_start();
+ cur_end_ = cur_page->area_end();
+ DCHECK(cur_page->SweepingDone());
+ return true;
+}
+
+Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
+ Page* page = static_cast<Page*>(chunk);
+ DCHECK_EQ(
+ MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
+ page->area_size());
+ // Make sure that categories are initialized before freeing the area.
+ page->ResetAllocationStatistics();
+ page->SetOldGenerationPageFlags(!is_off_thread_space() &&
+ heap()->incremental_marking()->IsMarking());
+ page->AllocateFreeListCategories();
+ page->InitializeFreeListCategories();
+ page->list_node().Initialize();
+ page->InitializationMemoryFence();
+ return page;
+}
+
+PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
+ Executability executable, FreeList* free_list,
+ LocalSpaceKind local_space_kind)
+ : SpaceWithLinearArea(heap, space, free_list),
+ executable_(executable),
+ local_space_kind_(local_space_kind) {
+ area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
+ accounting_stats_.Clear();
+}
+
+void PagedSpace::TearDown() {
+ while (!memory_chunk_list_.Empty()) {
+ MemoryChunk* chunk = memory_chunk_list_.front();
+ memory_chunk_list_.Remove(chunk);
+ heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
+ }
+ accounting_stats_.Clear();
+}
+
+void PagedSpace::RefillFreeList() {
+ // Any PagedSpace might invoke RefillFreeList. We filter all but our old
+ // generation spaces out.
+ if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
+ identity() != MAP_SPACE) {
+ return;
+ }
+ DCHECK_NE(local_space_kind(), LocalSpaceKind::kOffThreadSpace);
+ DCHECK_IMPLIES(is_local_space(), is_compaction_space());
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ size_t added = 0;
+
+ {
+ Page* p = nullptr;
+ while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
+ // We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
+ // entries here to make them unavailable for allocations.
+ if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
+ p->ForAllFreeListCategories([this](FreeListCategory* category) {
+ category->Reset(free_list());
+ });
+ }
+
+ // Also merge old-to-new remembered sets if not scavenging because of
+ // data races: One thread might iterate the remembered set, while another
+ // thread merges them.
+ if (local_space_kind() != LocalSpaceKind::kCompactionSpaceForScavenge) {
+ p->MergeOldToNewRememberedSets();
+ }
+
+ // Only during compaction can pages actually change ownership. This is
+ // safe because there exists no other competing action on the page links
+ // during compaction.
+ if (is_compaction_space()) {
+ DCHECK_NE(this, p->owner());
+ PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
+ base::MutexGuard guard(owner->mutex());
+ owner->RefineAllocatedBytesAfterSweeping(p);
+ owner->RemovePage(p);
+ added += AddPage(p);
+ } else {
+ base::MutexGuard guard(mutex());
+ DCHECK_EQ(this, p->owner());
+ RefineAllocatedBytesAfterSweeping(p);
+ added += RelinkFreeListCategories(p);
+ }
+ added += p->wasted_memory();
+ if (is_compaction_space() && (added > kCompactionMemoryWanted)) break;
+ }
+ }
+}
+
+void OffThreadSpace::RefillFreeList() {
+ // We should never try to refill the free list in off-thread space, because
+ // we know it will always be fully linear.
+ UNREACHABLE();
+}
+
+void PagedSpace::MergeLocalSpace(LocalSpace* other) {
+ base::MutexGuard guard(mutex());
+
+ DCHECK(identity() == other->identity());
+
+ // Unmerged fields:
+ // area_size_
+ other->FreeLinearAllocationArea();
+
+ for (int i = static_cast<int>(AllocationOrigin::kFirstAllocationOrigin);
+ i <= static_cast<int>(AllocationOrigin::kLastAllocationOrigin); i++) {
+ allocations_origins_[i] += other->allocations_origins_[i];
+ }
+
+ // The linear allocation area of {other} should be destroyed now.
+ DCHECK_EQ(kNullAddress, other->top());
+ DCHECK_EQ(kNullAddress, other->limit());
+
+ bool merging_from_off_thread = other->is_off_thread_space();
+
+ // Move over pages.
+ for (auto it = other->begin(); it != other->end();) {
+ Page* p = *(it++);
+
+ if (merging_from_off_thread) {
+ DCHECK_NULL(p->sweeping_slot_set());
+
+ // Make sure the page is entirely white.
+ CHECK(heap()
+ ->incremental_marking()
+ ->non_atomic_marking_state()
+ ->bitmap(p)
+ ->IsClean());
+
+ p->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
+ if (heap()->incremental_marking()->black_allocation()) {
+ p->CreateBlackArea(p->area_start(), p->HighWaterMark());
+ }
+ } else {
+ p->MergeOldToNewRememberedSets();
+ }
+
+ // Ensure that pages are initialized before objects on it are discovered by
+ // concurrent markers.
+ p->InitializationMemoryFence();
+
+ // Relinking requires the category to be unlinked.
+ other->RemovePage(p);
+ AddPage(p);
+ heap()->NotifyOldGenerationExpansion(identity(), p);
+ DCHECK_IMPLIES(
+ !p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
+ p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
+
+ // TODO(leszeks): Here we should do an allocation step, but:
+ // 1. Allocation groups are currently not handled properly by the sampling
+ // allocation profiler, and
+ // 2. Observers might try to take the space lock, which isn't reentrant.
+ // We'll have to come up with a better solution for allocation stepping
+ // before shipping, which will likely be using LocalHeap.
+ }
+
+ DCHECK_EQ(0u, other->Size());
+ DCHECK_EQ(0u, other->Capacity());
+}
+
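+// The InitializationMemoryFence() call above follows the usual publication
+// pattern: the page is fully initialized first, a release fence (conceptually
+// std::atomic_thread_fence(std::memory_order_release)) is issued, and only
+// then is the page linked where concurrent markers, pairing with an acquire,
+// can observe it.
+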
+size_t PagedSpace::CommittedPhysicalMemory() {
+ if (!base::OS::HasLazyCommits()) return CommittedMemory();
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ size_t size = 0;
+ for (Page* page : *this) {
+ size += page->CommittedPhysicalMemory();
+ }
+ return size;
+}
+
+bool PagedSpace::ContainsSlow(Address addr) const {
+ Page* p = Page::FromAddress(addr);
+ for (const Page* page : *this) {
+ if (page == p) return true;
+ }
+ return false;
+}
+
+void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
+ CHECK(page->SweepingDone());
+ auto marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+ // The live bytes on the page were accounted for in the space's
+ // allocated-bytes counter. After sweeping, allocated_bytes() contains the
+ // accurate live byte count on the page.
+ size_t old_counter = marking_state->live_bytes(page);
+ size_t new_counter = page->allocated_bytes();
+ DCHECK_GE(old_counter, new_counter);
+ if (old_counter > new_counter) {
+ DecreaseAllocatedBytes(old_counter - new_counter, page);
+ // Give the heap a chance to adjust counters in response to the
+ // more precise and smaller old generation size.
+ heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
+ }
+ marking_state->SetLiveBytes(page, 0);
+}
+
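+// A worked example for RefineAllocatedBytesAfterSweeping() above: if a swept
+// page was carried in the accounting with 96 KB of live bytes but sweeping
+// finds only 64 KB actually allocated, the space's allocated-bytes counter is
+// decreased by 32 KB, the heap is notified of the 32 KB refinement, and the
+// page's live-bytes counter is reset to zero for the next marking cycle.
+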
+Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
+ base::MutexGuard guard(mutex());
+ Page* page = free_list()->GetPageForSize(size_in_bytes);
+ if (!page) return nullptr;
+ RemovePage(page);
+ return page;
+}
+
+size_t PagedSpace::AddPage(Page* page) {
+ CHECK(page->SweepingDone());
+ page->set_owner(this);
+ memory_chunk_list_.PushBack(page);
+ AccountCommitted(page->size());
+ IncreaseCapacity(page->area_size());
+ IncreaseAllocatedBytes(page->allocated_bytes(), page);
+ for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
+ }
+ return RelinkFreeListCategories(page);
+}
+
+void PagedSpace::RemovePage(Page* page) {
+ CHECK(page->SweepingDone());
+ memory_chunk_list_.Remove(page);
+ UnlinkFreeListCategories(page);
+ DecreaseAllocatedBytes(page->allocated_bytes(), page);
+ DecreaseCapacity(page->area_size());
+ AccountUncommitted(page->size());
+ for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
+ }
+}
+
+size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
+ size_t unused = page->ShrinkToHighWaterMark();
+ accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
+ AccountUncommitted(unused);
+ return unused;
+}
+
+void PagedSpace::ResetFreeList() {
+ for (Page* page : *this) {
+ free_list_->EvictFreeListItems(page);
+ }
+ DCHECK(free_list_->IsEmpty());
+}
+
+void PagedSpace::ShrinkImmortalImmovablePages() {
+ DCHECK(!heap()->deserialization_complete());
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ FreeLinearAllocationArea();
+ ResetFreeList();
+ for (Page* page : *this) {
+ DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
+ ShrinkPageToHighWaterMark(page);
+ }
+}
+
+Page* PagedSpace::AllocatePage() {
+ return heap()->memory_allocator()->AllocatePage(AreaSize(), this,
+ executable());
+}
+
+Page* PagedSpace::Expand() {
+ Page* page = AllocatePage();
+ if (page == nullptr) return nullptr;
+ AddPage(page);
+ Free(page->area_start(), page->area_size(),
+ SpaceAccountingMode::kSpaceAccounted);
+ return page;
+}
+
+Page* PagedSpace::ExpandBackground(LocalHeap* local_heap) {
+ Page* page = AllocatePage();
+ if (page == nullptr) return nullptr;
+ ParkedMutexGuard lock(local_heap, &allocation_mutex_);
+ AddPage(page);
+ Free(page->area_start(), page->area_size(),
+ SpaceAccountingMode::kSpaceAccounted);
+ return page;
+}
+
+int PagedSpace::CountTotalPages() {
+ int count = 0;
+ for (Page* page : *this) {
+ count++;
+ USE(page);
+ }
+ return count;
+}
+
+void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
+ SetTopAndLimit(top, limit);
+ if (top != kNullAddress && top != limit && !is_off_thread_space() &&
+ heap()->incremental_marking()->black_allocation()) {
+ Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
+ }
+}
+
+void PagedSpace::DecreaseLimit(Address new_limit) {
+ Address old_limit = limit();
+ DCHECK_LE(top(), new_limit);
+ DCHECK_GE(old_limit, new_limit);
+ if (new_limit != old_limit) {
+ SetTopAndLimit(top(), new_limit);
+ Free(new_limit, old_limit - new_limit,
+ SpaceAccountingMode::kSpaceAccounted);
+ if (heap()->incremental_marking()->black_allocation()) {
+ Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
+ old_limit);
+ }
+ }
+}
+
+void PagedSpace::MarkLinearAllocationAreaBlack() {
+ DCHECK(heap()->incremental_marking()->black_allocation());
+ Address current_top = top();
+ Address current_limit = limit();
+ if (current_top != kNullAddress && current_top != current_limit) {
+ Page::FromAllocationAreaAddress(current_top)
+ ->CreateBlackArea(current_top, current_limit);
+ }
+}
+
+void PagedSpace::UnmarkLinearAllocationArea() {
+ Address current_top = top();
+ Address current_limit = limit();
+ if (current_top != kNullAddress && current_top != current_limit) {
+ Page::FromAllocationAreaAddress(current_top)
+ ->DestroyBlackArea(current_top, current_limit);
+ }
+}
+
+void PagedSpace::MakeLinearAllocationAreaIterable() {
+ Address current_top = top();
+ Address current_limit = limit();
+ if (current_top != kNullAddress && current_top != current_limit) {
+ base::Optional<CodePageMemoryModificationScope> optional_scope;
+
+ if (identity() == CODE_SPACE) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(current_top);
+ optional_scope.emplace(chunk);
+ }
+
+ heap_->CreateFillerObjectAt(current_top,
+ static_cast<int>(current_limit - current_top),
+ ClearRecordedSlots::kNo);
+ }
+}
+
+void PagedSpace::FreeLinearAllocationArea() {
+ // Mark the old linear allocation area with a free space map so it can be
+ // skipped when scanning the heap.
+ Address current_top = top();
+ Address current_limit = limit();
+ if (current_top == kNullAddress) {
+ DCHECK_EQ(kNullAddress, current_limit);
+ return;
+ }
+
+ if (!is_off_thread_space() &&
+ heap()->incremental_marking()->black_allocation()) {
+ Page* page = Page::FromAllocationAreaAddress(current_top);
+
+ // Clear the bits in the unused black area.
+ if (current_top != current_limit) {
+ IncrementalMarking::MarkingState* marking_state =
+ heap()->incremental_marking()->marking_state();
+ marking_state->bitmap(page)->ClearRange(
+ page->AddressToMarkbitIndex(current_top),
+ page->AddressToMarkbitIndex(current_limit));
+ marking_state->IncrementLiveBytes(
+ page, -static_cast<int>(current_limit - current_top));
+ }
+ }
+
+ if (!is_local_space()) {
+ InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
+ }
+
+ SetTopAndLimit(kNullAddress, kNullAddress);
+ DCHECK_GE(current_limit, current_top);
+
+ // The code page of the linear allocation area needs to be unprotected
+ // because we are going to write a filler into that memory area below.
+ if (identity() == CODE_SPACE) {
+ heap()->UnprotectAndRegisterMemoryChunk(
+ MemoryChunk::FromAddress(current_top));
+ }
+ Free(current_top, current_limit - current_top,
+ SpaceAccountingMode::kSpaceAccounted);
+}
+
+void PagedSpace::ReleasePage(Page* page) {
+ DCHECK_EQ(
+ 0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
+ page));
+ DCHECK_EQ(page->owner(), this);
+
+ free_list_->EvictFreeListItems(page);
+
+ if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
+ DCHECK(!top_on_previous_step_);
+ allocation_info_.Reset(kNullAddress, kNullAddress);
+ }
+
+ heap()->isolate()->RemoveCodeMemoryChunk(page);
+
+ AccountUncommitted(page->size());
+ accounting_stats_.DecreaseCapacity(page->area_size());
+ heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+}
+
+void PagedSpace::SetReadable() {
+ DCHECK(identity() == CODE_SPACE);
+ for (Page* page : *this) {
+ CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
+ page->SetReadable();
+ }
+}
+
+void PagedSpace::SetReadAndExecutable() {
+ DCHECK(identity() == CODE_SPACE);
+ for (Page* page : *this) {
+ CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
+ page->SetReadAndExecutable();
+ }
+}
+
+void PagedSpace::SetReadAndWritable() {
+ DCHECK(identity() == CODE_SPACE);
+ for (Page* page : *this) {
+ CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
+ page->SetReadAndWritable();
+ }
+}
+
+std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator(Heap* heap) {
+ return std::unique_ptr<ObjectIterator>(
+ new PagedSpaceObjectIterator(heap, this));
+}
+
+bool PagedSpace::RefillLinearAllocationAreaFromFreeList(
+ size_t size_in_bytes, AllocationOrigin origin) {
+ DCHECK(IsAligned(size_in_bytes, kTaggedSize));
+ DCHECK_LE(top(), limit());
+#ifdef DEBUG
+ if (top() != limit()) {
+ DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
+ }
+#endif
+ // Don't allocate from the free list if there is linear space available.
+ DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
+
+ // Mark the old linear allocation area with a free space map so it can be
+ // skipped when scanning the heap. This also puts it back in the free list
+ // if it is big enough.
+ FreeLinearAllocationArea();
+
+ if (!is_local_space()) {
+ heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
+ heap()->GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
+ }
+
+ size_t new_node_size = 0;
+ FreeSpace new_node =
+ free_list_->Allocate(size_in_bytes, &new_node_size, origin);
+ if (new_node.is_null()) return false;
+ DCHECK_GE(new_node_size, size_in_bytes);
+
+ // The old-space-step might have finished sweeping and restarted marking.
+ // Verify that it did not turn the page of the new node into an evacuation
+ // candidate.
+ DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
+
+ // Memory in the linear allocation area is counted as allocated. We may free
+ // a little of this again immediately - see below.
+ Page* page = Page::FromHeapObject(new_node);
+ IncreaseAllocatedBytes(new_node_size, page);
+
+ Address start = new_node.address();
+ Address end = new_node.address() + new_node_size;
+ Address limit = ComputeLimit(start, end, size_in_bytes);
+ DCHECK_LE(limit, end);
+ DCHECK_LE(size_in_bytes, limit - start);
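+  // Return the unused tail of the free-list node beyond the computed limit
+  // to the free list; code-space pages must be made writable first.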
+ if (limit != end) {
+ if (identity() == CODE_SPACE) {
+ heap()->UnprotectAndRegisterMemoryChunk(page);
+ }
+ Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
+ }
+ SetLinearAllocationArea(start, limit);
+
+ return true;
+}
+
+base::Optional<std::pair<Address, size_t>>
+PagedSpace::SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
+ size_t min_size_in_bytes,
+ size_t max_size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ DCHECK(!is_local_space() && identity() == OLD_SPACE);
+ DCHECK_EQ(origin, AllocationOrigin::kRuntime);
+
+ auto result = TryAllocationFromFreeListBackground(
+ local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ if (result) return result;
+
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ // Sweeping is still in progress.
+ if (collector->sweeping_in_progress()) {
+ // First try to refill the free-list, concurrent sweeper threads
+ // may have freed some objects in the meantime.
+ {
+ ParkedMutexGuard lock(local_heap, &allocation_mutex_);
+ RefillFreeList();
+ }
+
+ // Retry the free list allocation.
+ auto result = TryAllocationFromFreeListBackground(
+ local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ if (result) return result;
+
+ Sweeper::FreeSpaceMayContainInvalidatedSlots
+ invalidated_slots_in_free_space =
+ Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
+
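+    // Contribute to sweeping, but sweep at most one page; then refill the
+    // free list and retry the allocation if enough memory was freed.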
+ const int kMaxPagesToSweep = 1;
+ int max_freed = collector->sweeper()->ParallelSweepSpace(
+ identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
+ invalidated_slots_in_free_space);
+
+ {
+ ParkedMutexGuard lock(local_heap, &allocation_mutex_);
+ RefillFreeList();
+ }
+
+ if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
+ auto result = TryAllocationFromFreeListBackground(
+ local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ if (result) return result;
+ }
+ }
+
+ if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
+ heap()->CanExpandOldGenerationBackground(AreaSize()) &&
+ ExpandBackground(local_heap)) {
+ DCHECK((CountTotalPages() > 1) ||
+ (min_size_in_bytes <= free_list_->Available()));
+ auto result = TryAllocationFromFreeListBackground(
+ local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ if (result) return result;
+ }
+
+ // TODO(dinfuehr): Complete sweeping here and try allocation again.
+
+ return {};
+}
+
+base::Optional<std::pair<Address, size_t>>
+PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
+ size_t min_size_in_bytes,
+ size_t max_size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
+ ParkedMutexGuard lock(local_heap, &allocation_mutex_);
+ DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
+ DCHECK_EQ(identity(), OLD_SPACE);
+
+ size_t new_node_size = 0;
+ FreeSpace new_node =
+ free_list_->Allocate(min_size_in_bytes, &new_node_size, origin);
+ if (new_node.is_null()) return {};
+ DCHECK_GE(new_node_size, min_size_in_bytes);
+
+ // The old-space-step might have finished sweeping and restarted marking.
+ // Verify that it did not turn the page of the new node into an evacuation
+ // candidate.
+ DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
+
+ // Memory in the linear allocation area is counted as allocated. We may free
+ // a little of this again immediately - see below.
+ Page* page = Page::FromHeapObject(new_node);
+ IncreaseAllocatedBytes(new_node_size, page);
+
+ heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
+
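+  // Hand out as much of the node as the caller can use (up to
+  // max_size_in_bytes); the unused tail is returned to the free list below.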
+ size_t used_size_in_bytes = Min(new_node_size, max_size_in_bytes);
+
+ Address start = new_node.address();
+ Address end = new_node.address() + new_node_size;
+ Address limit = new_node.address() + used_size_in_bytes;
+ DCHECK_LE(limit, end);
+ DCHECK_LE(min_size_in_bytes, limit - start);
+ if (limit != end) {
+ Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
+ }
+
+ return std::make_pair(start, used_size_in_bytes);
+}
+
+#ifdef DEBUG
+void PagedSpace::Print() {}
+#endif
+
+#ifdef VERIFY_HEAP
+void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
+ bool allocation_pointer_found_in_space =
+ (allocation_info_.top() == allocation_info_.limit());
+ size_t external_space_bytes[kNumTypes];
+ size_t external_page_bytes[kNumTypes];
+
+ for (int i = 0; i < kNumTypes; i++) {
+ external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
+ for (Page* page : *this) {
+ CHECK_EQ(page->owner(), this);
+
+ for (int i = 0; i < kNumTypes; i++) {
+ external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
+ }
+
+ if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
+ allocation_pointer_found_in_space = true;
+ }
+ CHECK(page->SweepingDone());
+ PagedSpaceObjectIterator it(isolate->heap(), this, page);
+ Address end_of_previous_object = page->area_start();
+ Address top = page->area_end();
+
+ for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
+ CHECK(end_of_previous_object <= object.address());
+
+ // The first word should be a map, and we expect all map pointers to
+ // be in map space.
+ Map map = object.map();
+ CHECK(map.IsMap());
+ CHECK(ReadOnlyHeap::Contains(map) ||
+ isolate->heap()->map_space()->Contains(map));
+
+ // Perform space-specific object verification.
+ VerifyObject(object);
+
+ // The object itself should look OK.
+ object.ObjectVerify(isolate);
+
+ if (identity() != RO_SPACE && !FLAG_verify_heap_skip_remembered_set) {
+ isolate->heap()->VerifyRememberedSetFor(object);
+ }
+
+ // All the interior pointers should be contained in the heap.
+ int size = object.Size();
+ object.IterateBody(map, size, visitor);
+ CHECK(object.address() + size <= top);
+ end_of_previous_object = object.address() + size;
+
+ if (object.IsExternalString()) {
+ ExternalString external_string = ExternalString::cast(object);
+ size_t size = external_string.ExternalPayloadSize();
+ external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
+ } else if (object.IsJSArrayBuffer()) {
+ JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
+ if (ArrayBufferTracker::IsTracked(array_buffer)) {
+ size_t size =
+ ArrayBufferTracker::Lookup(isolate->heap(), array_buffer)
+ ->PerIsolateAccountingLength();
+ external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
+ }
+ }
+ }
+ for (int i = 0; i < kNumTypes; i++) {
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
+ external_space_bytes[t] += external_page_bytes[t];
+ }
+ }
+ for (int i = 0; i < kNumTypes; i++) {
+ if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
+ i == ExternalBackingStoreType::kArrayBuffer)
+ continue;
+ ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
+ CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
+ }
+ CHECK(allocation_pointer_found_in_space);
+
+ if (identity() == OLD_SPACE && V8_ARRAY_BUFFER_EXTENSION_BOOL) {
+ size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
+ CHECK_EQ(bytes,
+ ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
+ }
+
+#ifdef DEBUG
+ VerifyCountersAfterSweeping(isolate->heap());
+#endif
+}
+
+void PagedSpace::VerifyLiveBytes() {
+ IncrementalMarking::MarkingState* marking_state =
+ heap()->incremental_marking()->marking_state();
+ for (Page* page : *this) {
+ CHECK(page->SweepingDone());
+ PagedSpaceObjectIterator it(heap(), this, page);
+ int black_size = 0;
+ for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
+ // All the interior pointers should be contained in the heap.
+ if (marking_state->IsBlack(object)) {
+ black_size += object.Size();
+ }
+ }
+ CHECK_LE(black_size, marking_state->live_bytes(page));
+ }
+}
+#endif // VERIFY_HEAP
+
+#ifdef DEBUG
+void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) {
+ size_t total_capacity = 0;
+ size_t total_allocated = 0;
+ for (Page* page : *this) {
+ DCHECK(page->SweepingDone());
+ total_capacity += page->area_size();
+ PagedSpaceObjectIterator it(heap, this, page);
+ size_t real_allocated = 0;
+ for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
+ if (!object.IsFreeSpaceOrFiller()) {
+ real_allocated += object.Size();
+ }
+ }
+ total_allocated += page->allocated_bytes();
+    // The real size can be smaller than the accounted size if array trimming
+    // or object slack tracking happened after sweeping.
+ DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
+ DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
+ }
+ DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
+ DCHECK_EQ(total_allocated, accounting_stats_.Size());
+}
+
+void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
+ // We need to refine the counters on pages that are already swept and have
+  // not been moved over to the actual space. Otherwise, the AccountingStats
+  // are just an overapproximation.
+ RefillFreeList();
+
+ size_t total_capacity = 0;
+ size_t total_allocated = 0;
+ auto marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+ for (Page* page : *this) {
+ size_t page_allocated =
+ page->SweepingDone()
+ ? page->allocated_bytes()
+ : static_cast<size_t>(marking_state->live_bytes(page));
+ total_capacity += page->area_size();
+ total_allocated += page_allocated;
+ DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
+ }
+ DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
+ DCHECK_EQ(total_allocated, accounting_stats_.Size());
+}
+#endif
+
+void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
+ Address new_limit = ComputeLimit(top(), limit(), min_size);
+ DCHECK_LE(new_limit, limit());
+ DecreaseLimit(new_limit);
+}
+
+// -----------------------------------------------------------------------------
+// OldSpace implementation
+
+void PagedSpace::PrepareForMarkCompact() {
+ // We don't have a linear allocation area while sweeping. It will be restored
+ // on the first allocation after the sweep.
+ FreeLinearAllocationArea();
+
+ // Clear the free list before a full GC---it will be rebuilt afterward.
+ free_list_->Reset();
+}
+
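+// Size() also counts the unused part of the current linear allocation area,
+// so subtract (limit - top) to get the bytes actually occupied by objects.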
+size_t PagedSpace::SizeOfObjects() {
+ CHECK_GE(limit(), top());
+ DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
+ return Size() - (limit() - top());
+}
+
+bool PagedSpace::EnsureSweptAndRetryAllocation(int size_in_bytes,
+ AllocationOrigin origin) {
+ DCHECK(!is_local_space());
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ if (collector->sweeping_in_progress()) {
+ // Wait for the sweeper threads here and complete the sweeping phase.
+ collector->EnsureSweepingCompleted();
+
+ // After waiting for the sweeper threads, there may be new free-list
+ // entries.
+ return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
+ }
+ return false;
+}
+
+bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin) {
+ VMState<GC> state(heap()->isolate());
+ RuntimeCallTimerScope runtime_timer(
+ heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
+ base::Optional<base::MutexGuard> optional_mutex;
+
+ if (FLAG_concurrent_allocation && origin != AllocationOrigin::kGC &&
+ identity() == OLD_SPACE) {
+ optional_mutex.emplace(&allocation_mutex_);
+ }
+
+ return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
+}
+
+bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin) {
+ return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
+}
+
+bool OffThreadSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin) {
+ if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
+ return true;
+
+ if (heap()->CanExpandOldGenerationBackground(size_in_bytes) && Expand()) {
+ DCHECK((CountTotalPages() > 1) ||
+ (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
+ return RefillLinearAllocationAreaFromFreeList(
+ static_cast<size_t>(size_in_bytes), origin);
+ }
+
+ return false;
+}
+
+bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin) {
+ // Non-compaction local spaces are not supported.
+ DCHECK_IMPLIES(is_local_space(), is_compaction_space());
+
+ // Allocation in this space has failed.
+ DCHECK_GE(size_in_bytes, 0);
+ const int kMaxPagesToSweep = 1;
+
+ if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
+ return true;
+
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ // Sweeping is still in progress.
+ if (collector->sweeping_in_progress()) {
+ if (FLAG_concurrent_sweeping && !is_compaction_space() &&
+ !collector->sweeper()->AreSweeperTasksRunning()) {
+ collector->EnsureSweepingCompleted();
+ }
+
+ // First try to refill the free-list, concurrent sweeper threads
+ // may have freed some objects in the meantime.
+ RefillFreeList();
+
+ // Retry the free list allocation.
+ if (RefillLinearAllocationAreaFromFreeList(
+ static_cast<size_t>(size_in_bytes), origin))
+ return true;
+
+ if (SweepAndRetryAllocation(size_in_bytes, kMaxPagesToSweep, size_in_bytes,
+ origin))
+ return true;
+ }
+
+ if (is_compaction_space()) {
+ // The main thread may have acquired all swept pages. Try to steal from
+ // it. This can only happen during young generation evacuation.
+ PagedSpace* main_space = heap()->paged_space(identity());
+ Page* page = main_space->RemovePageSafe(size_in_bytes);
+ if (page != nullptr) {
+ AddPage(page);
+ if (RefillLinearAllocationAreaFromFreeList(
+ static_cast<size_t>(size_in_bytes), origin))
+ return true;
+ }
+ }
+
+ if (heap()->ShouldExpandOldGenerationOnSlowAllocation() &&
+ heap()->CanExpandOldGeneration(AreaSize())) {
+ Page* page = Expand();
+ if (page) {
+ if (!is_compaction_space()) {
+ heap()->NotifyOldGenerationExpansion(identity(), page);
+ }
+ DCHECK((CountTotalPages() > 1) ||
+ (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
+ return RefillLinearAllocationAreaFromFreeList(
+ static_cast<size_t>(size_in_bytes), origin);
+ }
+ }
+
+ if (is_compaction_space()) {
+ return SweepAndRetryAllocation(0, 0, size_in_bytes, origin);
+
+ } else {
+ // If sweeper threads are active, wait for them at that point and steal
+ // elements from their free-lists. Allocation may still fail here which
+ // would indicate that there is not enough memory for the given allocation.
+ return EnsureSweptAndRetryAllocation(size_in_bytes, origin);
+ }
+}
+
+bool PagedSpace::SweepAndRetryAllocation(int required_freed_bytes,
+ int max_pages, int size_in_bytes,
+ AllocationOrigin origin) {
+  // Clean up invalidated old-to-new refs for the compaction space in the
+  // final atomic pause.
+ Sweeper::FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
+ is_compaction_space() ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
+ : Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
+
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ if (collector->sweeping_in_progress()) {
+ int max_freed = collector->sweeper()->ParallelSweepSpace(
+ identity(), required_freed_bytes, max_pages,
+ invalidated_slots_in_free_space);
+ RefillFreeList();
+ if (max_freed >= size_in_bytes)
+ return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
+ }
+ return false;
+}
+
+// -----------------------------------------------------------------------------
+// MapSpace implementation
+
+// TODO(dmercadier): use a heap instead of sorting like that.
+// Using a heap will have multiple benefits:
+// - for now, SortFreeList is only called after sweeping, which is somewhat
+// late. Using a heap, sorting could be done online: FreeListCategories would
+//   be inserted in a heap (i.e., in a sorted manner).
+// - SortFreeList is a bit fragile: any change to FreeListMap (or to
+// MapSpace::free_list_) could break it.
+void MapSpace::SortFreeList() {
+ using LiveBytesPagePair = std::pair<size_t, Page*>;
+ std::vector<LiveBytesPagePair> pages;
+ pages.reserve(CountTotalPages());
+
+ for (Page* p : *this) {
+ free_list()->RemoveCategory(p->free_list_category(kFirstCategory));
+ pages.push_back(std::make_pair(p->allocated_bytes(), p));
+ }
+
+ // Sorting by least-allocated-bytes first.
+ std::sort(pages.begin(), pages.end(),
+ [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
+ return a.first < b.first;
+ });
+
+ for (LiveBytesPagePair const& p : pages) {
+    // Since AddCategory inserts at the head position, it reverses the order
+    // produced by the sort above: the least-allocated-bytes page is added
+    // first and therefore ends up last, while the most-allocated-bytes page
+    // ends up first.
+ free_list()->AddCategory(p.second->free_list_category(kFirstCategory));
+ }
+}
+
+#ifdef VERIFY_HEAP
+void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
+#endif
+
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/heap/paged-spaces.h b/chromium/v8/src/heap/paged-spaces.h
new file mode 100644
index 00000000000..395ff293433
--- /dev/null
+++ b/chromium/v8/src/heap/paged-spaces.h
@@ -0,0 +1,588 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_PAGED_SPACES_H_
+#define V8_HEAP_PAGED_SPACES_H_
+
+#include <memory>
+#include <utility>
+
+#include "src/base/bounds.h"
+#include "src/base/macros.h"
+#include "src/base/optional.h"
+#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
+#include "src/flags/flags.h"
+#include "src/heap/allocation-stats.h"
+#include "src/heap/memory-chunk.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class HeapObject;
+class Isolate;
+class LocalSpace;
+class OffThreadSpace;
+class ObjectVisitor;
+
+// -----------------------------------------------------------------------------
+// Heap object iterator in old/map spaces.
+//
+// A PagedSpaceObjectIterator iterates objects from the bottom of the given
+// space to its top or from the bottom of the given page to its top.
+//
+// If objects are allocated in the page during iteration, the iterator may
+// or may not iterate over those objects. The caller must create a new
+// iterator in order to be sure to visit these new objects.
+class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
+ public:
+ // Creates a new object iterator in a given space.
+ PagedSpaceObjectIterator(Heap* heap, PagedSpace* space);
+ PagedSpaceObjectIterator(Heap* heap, PagedSpace* space, Page* page);
+
+ // Creates a new object iterator in a given off-thread space.
+ explicit PagedSpaceObjectIterator(OffThreadSpace* space);
+
+ // Advance to the next object, skipping free spaces and other fillers and
+ // skipping the special garbage section of which there is one per space.
+  // Returns a null HeapObject when the iteration has ended.
+ inline HeapObject Next() override;
+
+ private:
+ // Fast (inlined) path of next().
+ inline HeapObject FromCurrentPage();
+
+ // Slow path of next(), goes into the next page. Returns false if the
+ // iteration has ended.
+ bool AdvanceToNextPage();
+
+ Address cur_addr_; // Current iteration point.
+ Address cur_end_; // End iteration point.
+ PagedSpace* space_;
+ PageRange page_range_;
+ PageRange::iterator current_page_;
+};
+
+class V8_EXPORT_PRIVATE PagedSpace
+ : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
+ public:
+ using iterator = PageIterator;
+ using const_iterator = ConstPageIterator;
+
+ static const size_t kCompactionMemoryWanted = 500 * KB;
+
+ // Creates a space with an id.
+ PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
+ FreeList* free_list,
+ LocalSpaceKind local_space_kind = LocalSpaceKind::kNone);
+
+ ~PagedSpace() override { TearDown(); }
+
+ // Checks whether an object/address is in this space.
+ inline bool Contains(Address a) const;
+ inline bool Contains(Object o) const;
+ bool ContainsSlow(Address addr) const;
+
+ // Does the space need executable memory?
+ Executability executable() { return executable_; }
+
+ // Prepares for a mark-compact GC.
+ void PrepareForMarkCompact();
+
+ // Current capacity without growing (Size() + Available()).
+ size_t Capacity() { return accounting_stats_.Capacity(); }
+
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory() override;
+
+ // Sets the capacity, the available space and the wasted space to zero.
+ // The stats are rebuilt during sweeping by adding each page to the
+ // capacity and the size when it is encountered. As free spaces are
+ // discovered during the sweeping they are subtracted from the size and added
+ // to the available and wasted totals. The free list is cleared as well.
+ void ClearAllocatorState() {
+ accounting_stats_.ClearSize();
+ free_list_->Reset();
+ }
+
+ // Available bytes without growing. These are the bytes on the free list.
+ // The bytes in the linear allocation area are not included in this total
+ // because updating the stats would slow down allocation. New pages are
+ // immediately added to the free list so they show up here.
+ size_t Available() override { return free_list_->Available(); }
+
+ // Allocated bytes in this space. Garbage bytes that were not found due to
+ // concurrent sweeping are counted as being allocated! The bytes in the
+ // current linear allocation area (between top and limit) are also counted
+ // here.
+ size_t Size() override { return accounting_stats_.Size(); }
+
+  // Like Size(), but the bytes in lazily swept pages are estimated and the bytes
+ // in the current linear allocation area are not included.
+ size_t SizeOfObjects() override;
+
+ // Wasted bytes in this space. These are just the bytes that were thrown away
+ // due to being too small to use for allocation.
+ virtual size_t Waste() { return free_list_->wasted_bytes(); }
+
+ // Allocate the requested number of bytes in the space if possible, return a
+ // failure object if not.
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
+ int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ // Allocate the requested number of bytes in the space double aligned if
+ // possible, return a failure object if not.
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
+ int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ // Allocate the requested number of bytes in the space and consider allocation
+ // alignment if needed.
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
+ int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
+
+ // Allocate the requested number of bytes in the space from a background
+ // thread.
+ V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
+ SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
+ size_t min_size_in_bytes,
+ size_t max_size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin);
+
+ size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
+ if (size_in_bytes == 0) return 0;
+ heap()->CreateFillerObjectAtBackground(
+ start, static_cast<int>(size_in_bytes),
+ ClearFreedMemoryMode::kDontClearFreedMemory);
+ if (mode == SpaceAccountingMode::kSpaceAccounted) {
+ return AccountedFree(start, size_in_bytes);
+ } else {
+ return UnaccountedFree(start, size_in_bytes);
+ }
+ }
+
+  // Gives a block of memory to the space's free list. It might be added to
+  // the free list or accounted as waste. AccountedFree additionally updates
+  // the space's accounting stats, while UnaccountedFree leaves them untouched.
+  // Both return the number of usable bytes (size minus waste).
+ size_t AccountedFree(Address start, size_t size_in_bytes) {
+ size_t wasted = free_list_->Free(start, size_in_bytes, kLinkCategory);
+ Page* page = Page::FromAddress(start);
+ accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
+ DCHECK_GE(size_in_bytes, wasted);
+ return size_in_bytes - wasted;
+ }
+
+ size_t UnaccountedFree(Address start, size_t size_in_bytes) {
+ size_t wasted = free_list_->Free(start, size_in_bytes, kDoNotLinkCategory);
+ DCHECK_GE(size_in_bytes, wasted);
+ return size_in_bytes - wasted;
+ }
+
+ inline bool TryFreeLast(HeapObject object, int object_size);
+
+ void ResetFreeList();
+
+ // Empty space linear allocation area, returning unused area to free list.
+ void FreeLinearAllocationArea();
+
+ void MakeLinearAllocationAreaIterable();
+
+ void MarkLinearAllocationAreaBlack();
+ void UnmarkLinearAllocationArea();
+
+ void DecreaseAllocatedBytes(size_t bytes, Page* page) {
+ accounting_stats_.DecreaseAllocatedBytes(bytes, page);
+ }
+ void IncreaseAllocatedBytes(size_t bytes, Page* page) {
+ accounting_stats_.IncreaseAllocatedBytes(bytes, page);
+ }
+ void DecreaseCapacity(size_t bytes) {
+ accounting_stats_.DecreaseCapacity(bytes);
+ }
+ void IncreaseCapacity(size_t bytes) {
+ accounting_stats_.IncreaseCapacity(bytes);
+ }
+
+ void RefineAllocatedBytesAfterSweeping(Page* page);
+
+ Page* InitializePage(MemoryChunk* chunk);
+
+ void ReleasePage(Page* page);
+
+ // Adds the page to this space and returns the number of bytes added to the
+ // free list of the space.
+ size_t AddPage(Page* page);
+ void RemovePage(Page* page);
+ // Remove a page if it has at least |size_in_bytes| bytes available that can
+ // be used for allocation.
+ Page* RemovePageSafe(int size_in_bytes);
+
+ void SetReadable();
+ void SetReadAndExecutable();
+ void SetReadAndWritable();
+
+ void SetDefaultCodePermissions() {
+ if (FLAG_jitless) {
+ SetReadable();
+ } else {
+ SetReadAndExecutable();
+ }
+ }
+
+#ifdef VERIFY_HEAP
+ // Verify integrity of this space.
+ virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);
+
+ void VerifyLiveBytes();
+
+ // Overridden by subclasses to verify space-specific object
+ // properties (e.g., only maps or free-list nodes are in map space).
+ virtual void VerifyObject(HeapObject obj) {}
+#endif
+
+#ifdef DEBUG
+ void VerifyCountersAfterSweeping(Heap* heap);
+ void VerifyCountersBeforeConcurrentSweeping();
+ // Print meta info and objects in this space.
+ void Print() override;
+
+ // Report code object related statistics
+ static void ReportCodeStatistics(Isolate* isolate);
+ static void ResetCodeStatistics(Isolate* isolate);
+#endif
+
+ bool CanExpand(size_t size);
+
+  // Returns the total number of pages in this space.
+ int CountTotalPages();
+
+ // Return size of allocatable area on a page in this space.
+ inline int AreaSize() { return static_cast<int>(area_size_); }
+
+ bool is_local_space() { return local_space_kind_ != LocalSpaceKind::kNone; }
+
+ bool is_off_thread_space() {
+ return local_space_kind_ == LocalSpaceKind::kOffThreadSpace;
+ }
+
+ bool is_compaction_space() {
+ return base::IsInRange(local_space_kind_,
+ LocalSpaceKind::kFirstCompactionSpace,
+ LocalSpaceKind::kLastCompactionSpace);
+ }
+
+ LocalSpaceKind local_space_kind() { return local_space_kind_; }
+
+ // Merges {other} into the current space. Note that this modifies {other},
+ // e.g., removes its bump pointer area and resets statistics.
+ void MergeLocalSpace(LocalSpace* other);
+
+ // Refills the free list from the corresponding free list filled by the
+ // sweeper.
+ virtual void RefillFreeList();
+
+ base::Mutex* mutex() { return &space_mutex_; }
+
+ inline void UnlinkFreeListCategories(Page* page);
+ inline size_t RelinkFreeListCategories(Page* page);
+
+ Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
+ const Page* first_page() const {
+ return reinterpret_cast<const Page*>(Space::first_page());
+ }
+
+ iterator begin() { return iterator(first_page()); }
+ iterator end() { return iterator(nullptr); }
+
+ const_iterator begin() const { return const_iterator(first_page()); }
+ const_iterator end() const { return const_iterator(nullptr); }
+
+ // Shrink immortal immovable pages of the space to be exactly the size needed
+ // using the high water mark.
+ void ShrinkImmortalImmovablePages();
+
+ size_t ShrinkPageToHighWaterMark(Page* page);
+
+ std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
+
+ void SetLinearAllocationArea(Address top, Address limit);
+
+ private:
+ // Set space linear allocation area.
+ void SetTopAndLimit(Address top, Address limit) {
+ DCHECK(top == limit ||
+ Page::FromAddress(top) == Page::FromAddress(limit - 1));
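+    // Record the high water mark of the current allocation area before it is
+    // replaced; pages are later shrunk down to this mark.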
+ BasicMemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.Reset(top, limit);
+ }
+ void DecreaseLimit(Address new_limit);
+ void UpdateInlineAllocationLimit(size_t min_size) override;
+ bool SupportsInlineAllocation() override {
+ return identity() == OLD_SPACE && !is_local_space();
+ }
+
+ protected:
+ // PagedSpaces that should be included in snapshots have different, i.e.,
+ // smaller, initial pages.
+ virtual bool snapshotable() { return true; }
+
+ bool HasPages() { return first_page() != nullptr; }
+
+  // Cleans up the space: frees all pages in this space except those belonging
+  // to the initial chunk, and uncommits addresses in the initial chunk.
+ void TearDown();
+
+  // Expands the space by allocating a fixed number of pages. Returns nullptr
+  // if it cannot allocate the requested number of pages from the OS, or if
+  // the hard heap size limit has been hit.
+ Page* Expand();
+ Page* ExpandBackground(LocalHeap* local_heap);
+ Page* AllocatePage();
+
+ // Sets up a linear allocation area that fits the given number of bytes.
+ // Returns false if there is not enough space and the caller has to retry
+ // after collecting garbage.
+ inline bool EnsureLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin);
+ // Allocates an object from the linear allocation area. Assumes that the
+  // linear allocation area is large enough to fit the object.
+ inline HeapObject AllocateLinearly(int size_in_bytes);
+ // Tries to allocate an aligned object from the linear allocation area.
+  // Returns a null HeapObject if the linear allocation area cannot fit the object.
+ // Otherwise, returns the object pointer and writes the allocation size
+ // (object size + alignment filler size) to the size_in_bytes.
+ inline HeapObject TryAllocateLinearlyAligned(int* size_in_bytes,
+ AllocationAlignment alignment);
+
+ V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
+ size_t size_in_bytes, AllocationOrigin origin);
+
+ // If sweeping is still in progress try to sweep unswept pages. If that is
+ // not successful, wait for the sweeper threads and retry free-list
+ // allocation. Returns false if there is not enough space and the caller
+ // has to retry after collecting garbage.
+ V8_WARN_UNUSED_RESULT bool EnsureSweptAndRetryAllocation(
+ int size_in_bytes, AllocationOrigin origin);
+
+ V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(int required_freed_bytes,
+ int max_pages,
+ int size_in_bytes,
+ AllocationOrigin origin);
+
+ // Slow path of AllocateRaw. This function is space-dependent. Returns false
+ // if there is not enough space and the caller has to retry after
+ // collecting garbage.
+ V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
+ int size_in_bytes, AllocationOrigin origin);
+
+ // Implementation of SlowAllocateRaw. Returns false if there is not enough
+ // space and the caller has to retry after collecting garbage.
+ V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
+ int size_in_bytes, AllocationOrigin origin);
+
+ V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
+ TryAllocationFromFreeListBackground(LocalHeap* local_heap,
+ size_t min_size_in_bytes,
+ size_t max_size_in_bytes,
+ AllocationAlignment alignment,
+ AllocationOrigin origin);
+
+ Executability executable_;
+
+ LocalSpaceKind local_space_kind_;
+
+ size_t area_size_;
+
+ // Accounting information for this space.
+ AllocationStats accounting_stats_;
+
+ // Mutex guarding any concurrent access to the space.
+ base::Mutex space_mutex_;
+
+ // Mutex guarding concurrent allocation.
+ base::Mutex allocation_mutex_;
+
+ friend class IncrementalMarking;
+ friend class MarkCompactCollector;
+
+ // Used in cctest.
+ friend class heap::HeapTester;
+};
+
+// -----------------------------------------------------------------------------
+// Base class for compaction space and off-thread space.
+
+class V8_EXPORT_PRIVATE LocalSpace : public PagedSpace {
+ public:
+ LocalSpace(Heap* heap, AllocationSpace id, Executability executable,
+ LocalSpaceKind local_space_kind)
+ : PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
+ local_space_kind) {
+ DCHECK_NE(local_space_kind, LocalSpaceKind::kNone);
+ }
+
+ protected:
+ // The space is temporary and not included in any snapshots.
+ bool snapshotable() override { return false; }
+};
+
+// -----------------------------------------------------------------------------
+// Compaction space that is used temporarily during compaction.
+
+class V8_EXPORT_PRIVATE CompactionSpace : public LocalSpace {
+ public:
+ CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
+ LocalSpaceKind local_space_kind)
+ : LocalSpace(heap, id, executable, local_space_kind) {
+ DCHECK(is_compaction_space());
+ }
+
+ protected:
+ V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
+ int size_in_bytes, AllocationOrigin origin) override;
+};
+
+// A collection of |CompactionSpace|s used by a single compaction task.
+class CompactionSpaceCollection : public Malloced {
+ public:
+ explicit CompactionSpaceCollection(Heap* heap,
+ LocalSpaceKind local_space_kind)
+ : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
+ local_space_kind),
+ code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
+ local_space_kind) {}
+
+ CompactionSpace* Get(AllocationSpace space) {
+ switch (space) {
+ case OLD_SPACE:
+ return &old_space_;
+ case CODE_SPACE:
+ return &code_space_;
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ }
+
+ private:
+ CompactionSpace old_space_;
+ CompactionSpace code_space_;
+};
+
+// -----------------------------------------------------------------------------
+// Old generation regular object space.
+
+class OldSpace : public PagedSpace {
+ public:
+  // Creates an old space object. The constructor does not allocate pages
+  // from the OS.
+ explicit OldSpace(Heap* heap)
+ : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
+ FreeList::CreateFreeList()) {}
+
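+  // An address is at the start of a data page iff its offset within the page
+  // equals the offset at which objects start on a data page (everything
+  // before that is the page header).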
+ static bool IsAtPageStart(Address addr) {
+ return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
+ MemoryChunkLayout::ObjectStartOffsetInDataPage();
+ }
+
+ size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
+ if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
+ type == ExternalBackingStoreType::kArrayBuffer)
+ return heap()->OldArrayBufferBytes();
+ return external_backing_store_bytes_[type];
+ }
+};
+
+// -----------------------------------------------------------------------------
+// Old generation code object space.
+
+class CodeSpace : public PagedSpace {
+ public:
+  // Creates a code space object. The constructor does not allocate pages
+  // from the OS.
+ explicit CodeSpace(Heap* heap)
+ : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
+};
+
+// -----------------------------------------------------------------------------
+// Old space for all map objects
+
+class MapSpace : public PagedSpace {
+ public:
+ // Creates a map space object.
+ explicit MapSpace(Heap* heap)
+ : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, new FreeListMap()) {}
+
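+  // Rounds |size| down to a multiple of the map size so a page cannot end
+  // with a partial map. Illustrative example (hypothetical value): if
+  // Map::kSize were 88 (not a power of two), a size of 200 would be rounded
+  // down to 176.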
+ int RoundSizeDownToObjectAlignment(int size) override {
+ if (base::bits::IsPowerOfTwo(Map::kSize)) {
+ return RoundDown(size, Map::kSize);
+ } else {
+ return (size / Map::kSize) * Map::kSize;
+ }
+ }
+
+ void SortFreeList();
+
+#ifdef VERIFY_HEAP
+ void VerifyObject(HeapObject obj) override;
+#endif
+};
+
+// -----------------------------------------------------------------------------
+// Off-thread space that is used for folded allocation on a different thread.
+
+class V8_EXPORT_PRIVATE OffThreadSpace : public LocalSpace {
+ public:
+ explicit OffThreadSpace(Heap* heap)
+ : LocalSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
+ LocalSpaceKind::kOffThreadSpace) {
+#ifdef V8_ENABLE_THIRD_PARTY_HEAP
+ // OffThreadSpace doesn't work with third-party heap.
+ UNREACHABLE();
+#endif
+ }
+
+ protected:
+ V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
+ int size_in_bytes, AllocationOrigin origin) override;
+
+ void RefillFreeList() override;
+};
+
+// Iterates over the chunks (pages and large object pages) that can contain
+// pointers to new space or to evacuation candidates.
+class OldGenerationMemoryChunkIterator {
+ public:
+ inline explicit OldGenerationMemoryChunkIterator(Heap* heap);
+
+ // Return nullptr when the iterator is done.
+ inline MemoryChunk* next();
+
+ private:
+ enum State {
+ kOldSpaceState,
+ kMapState,
+ kCodeState,
+ kLargeObjectState,
+ kCodeLargeObjectState,
+ kFinishedState
+ };
+ Heap* heap_;
+ State state_;
+ PageIterator old_iterator_;
+ PageIterator code_iterator_;
+ PageIterator map_iterator_;
+ LargePageIterator lo_iterator_;
+ LargePageIterator code_lo_iterator_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_PAGED_SPACES_H_
diff --git a/chromium/v8/src/heap/read-only-heap.cc b/chromium/v8/src/heap/read-only-heap.cc
index e2387984ccd..5bea259e7de 100644
--- a/chromium/v8/src/heap/read-only-heap.cc
+++ b/chromium/v8/src/heap/read-only-heap.cc
@@ -10,6 +10,7 @@
#include "src/base/lazy-instance.h"
#include "src/base/lsan.h"
#include "src/base/platform/mutex.h"
+#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-spaces.h"
@@ -137,7 +138,7 @@ ReadOnlyHeap* ReadOnlyHeap::CreateAndAttachToIsolate(
void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
DCHECK(!init_complete_);
- read_only_space_->ShrinkImmortalImmovablePages();
+ read_only_space_->ShrinkPages();
#ifdef V8_SHARED_RO_HEAP
std::shared_ptr<ReadOnlyArtifacts> artifacts(*read_only_artifacts_.Pointer());
read_only_space()->DetachPagesAndAddToArtifacts(artifacts);
@@ -174,7 +175,7 @@ void ReadOnlyHeap::PopulateReadOnlySpaceStatistics(
if (artifacts) {
auto ro_space = artifacts->shared_read_only_space();
statistics->read_only_space_size_ = ro_space->CommittedMemory();
- statistics->read_only_space_used_size_ = ro_space->SizeOfObjects();
+ statistics->read_only_space_used_size_ = ro_space->Size();
statistics->read_only_space_physical_size_ =
ro_space->CommittedPhysicalMemory();
}
@@ -183,7 +184,7 @@ void ReadOnlyHeap::PopulateReadOnlySpaceStatistics(
// static
bool ReadOnlyHeap::Contains(Address address) {
- return MemoryChunk::FromAddress(address)->InReadOnlySpace();
+ return BasicMemoryChunk::FromAddress(address)->InReadOnlySpace();
}
// static
@@ -191,7 +192,7 @@ bool ReadOnlyHeap::Contains(HeapObject object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return third_party_heap::Heap::InReadOnlySpace(object.address());
} else {
- return MemoryChunk::FromHeapObject(object)->InReadOnlySpace();
+ return BasicMemoryChunk::FromHeapObject(object)->InReadOnlySpace();
}
}
@@ -214,30 +215,33 @@ ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap)
ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space)
: ro_space_(ro_space),
- current_page_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL ? nullptr
- : ro_space->first_page()),
+ current_page_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL
+ ? std::vector<ReadOnlyPage*>::iterator()
+ : ro_space->pages().begin()),
current_addr_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL
? Address()
- : current_page_->area_start()) {}
+ : (*current_page_)->area_start()) {}
HeapObject ReadOnlyHeapObjectIterator::Next() {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
return HeapObject(); // Unsupported
}
- if (current_page_ == nullptr) {
+ if (current_page_ == ro_space_->pages().end()) {
return HeapObject();
}
+ BasicMemoryChunk* current_page = *current_page_;
for (;;) {
- DCHECK_LE(current_addr_, current_page_->area_end());
- if (current_addr_ == current_page_->area_end()) {
+ DCHECK_LE(current_addr_, current_page->area_end());
+ if (current_addr_ == current_page->area_end()) {
// Progress to the next page.
- current_page_ = current_page_->next_page();
- if (current_page_ == nullptr) {
+ ++current_page_;
+ if (current_page_ == ro_space_->pages().end()) {
return HeapObject();
}
- current_addr_ = current_page_->area_start();
+ current_page = *current_page_;
+ current_addr_ = current_page->area_start();
}
if (current_addr_ == ro_space_->top() &&
diff --git a/chromium/v8/src/heap/read-only-heap.h b/chromium/v8/src/heap/read-only-heap.h
index ed105211296..548f73bfbbf 100644
--- a/chromium/v8/src/heap/read-only-heap.h
+++ b/chromium/v8/src/heap/read-only-heap.h
@@ -7,6 +7,7 @@
#include <memory>
#include <utility>
+#include <vector>
#include "src/base/macros.h"
#include "src/base/optional.h"
@@ -20,10 +21,12 @@ class SharedMemoryStatistics;
namespace internal {
+class BasicMemoryChunk;
class Isolate;
class Page;
class ReadOnlyArtifacts;
class ReadOnlyDeserializer;
+class ReadOnlyPage;
class ReadOnlySpace;
// This class transparently manages read-only space, roots and cache creation
@@ -116,7 +119,7 @@ class V8_EXPORT_PRIVATE ReadOnlyHeapObjectIterator {
private:
ReadOnlySpace* const ro_space_;
- Page* current_page_;
+ std::vector<ReadOnlyPage*>::const_iterator current_page_;
Address current_addr_;
};
diff --git a/chromium/v8/src/heap/read-only-spaces.cc b/chromium/v8/src/heap/read-only-spaces.cc
index a2e72952580..a88753edf99 100644
--- a/chromium/v8/src/heap/read-only-spaces.cc
+++ b/chromium/v8/src/heap/read-only-spaces.cc
@@ -4,10 +4,14 @@
#include "src/heap/read-only-spaces.h"
+#include "include/v8-internal.h"
#include "src/base/lsan.h"
+#include "src/common/globals.h"
#include "src/execution/isolate.h"
+#include "src/heap/basic-memory-chunk.h"
#include "src/heap/combined-heap.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/objects/objects-inl.h"
@@ -20,20 +24,29 @@ namespace internal {
// ReadOnlySpace implementation
ReadOnlySpace::ReadOnlySpace(Heap* heap)
- : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList()),
- is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
+ : BaseSpace(heap, RO_SPACE),
+ top_(kNullAddress),
+ limit_(kNullAddress),
+ is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()),
+ capacity_(0),
+ area_size_(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE)) {}
+
+ReadOnlySpace::~ReadOnlySpace() {
+ Unseal();
+ for (ReadOnlyPage* chunk : pages_) {
+ heap()->memory_allocator()->FreeReadOnlyPage(chunk);
+ }
+ pages_.resize(0);
+ accounting_stats_.Clear();
}
ReadOnlyArtifacts::~ReadOnlyArtifacts() {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- MemoryChunk* next_chunk;
- for (MemoryChunk* chunk = pages_.front(); chunk != nullptr;
- chunk = next_chunk) {
+ for (ReadOnlyPage* chunk : pages_) {
void* chunk_address = reinterpret_cast<void*>(chunk->address());
page_allocator->SetPermissions(chunk_address, chunk->size(),
PageAllocator::kReadWrite);
- next_chunk = chunk->list_node().next();
size_t size = RoundUp(chunk->size(), page_allocator->AllocatePageSize());
CHECK(page_allocator->FreePages(chunk_address, size));
}
@@ -45,17 +58,19 @@ void ReadOnlyArtifacts::set_read_only_heap(
}
SharedReadOnlySpace::~SharedReadOnlySpace() {
- // Clear the memory chunk list before the space is deleted, so that the
- // inherited destructors don't try to destroy the MemoryChunks themselves.
- memory_chunk_list_ = heap::List<MemoryChunk>();
+ // Clear the chunk list before the space is deleted, so that the inherited
+ // destructors don't try to destroy the BasicMemoryChunks themselves.
+ pages_.resize(0);
}
SharedReadOnlySpace::SharedReadOnlySpace(
Heap* heap, std::shared_ptr<ReadOnlyArtifacts> artifacts)
: ReadOnlySpace(heap) {
- artifacts->pages().ShallowCopyTo(&memory_chunk_list_);
+ pages_ = artifacts->pages();
is_marked_read_only_ = true;
accounting_stats_ = artifacts->accounting_stats();
+ top_ = kNullAddress;
+ limit_ = kNullAddress;
}
void ReadOnlySpace::DetachPagesAndAddToArtifacts(
@@ -63,14 +78,13 @@ void ReadOnlySpace::DetachPagesAndAddToArtifacts(
Heap* heap = ReadOnlySpace::heap();
Seal(SealMode::kDetachFromHeapAndForget);
artifacts->set_accounting_stats(accounting_stats_);
- artifacts->TransferPages(std::move(memory_chunk_list_));
+ artifacts->TransferPages(std::move(pages_));
artifacts->set_shared_read_only_space(
std::make_unique<SharedReadOnlySpace>(heap, artifacts));
heap->ReplaceReadOnlySpace(artifacts->shared_read_only_space());
}
void ReadOnlyPage::MakeHeaderRelocatable() {
- ReleaseAllocatedMemoryNeededForWritableChunk();
// Detached read-only space needs to have a valid marking bitmap. Instruct
// Lsan to ignore it if required.
LSAN_IGNORE_OBJECT(marking_bitmap_);
@@ -80,12 +94,13 @@ void ReadOnlyPage::MakeHeaderRelocatable() {
void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
PageAllocator::Permission access) {
- for (Page* p : *this) {
+ for (BasicMemoryChunk* chunk : pages_) {
// Read only pages don't have valid reservation object so we get proper
// page allocator manually.
v8::PageAllocator* page_allocator =
- memory_allocator->page_allocator(p->executable());
- CHECK(SetPermissions(page_allocator, p->address(), p->size(), access));
+ memory_allocator->page_allocator(NOT_EXECUTABLE);
+ CHECK(SetPermissions(page_allocator, chunk->address(), chunk->size(),
+ access));
}
}
@@ -93,27 +108,20 @@ void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
// on the heap. If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
// fix them.
-void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
- free_list_->RepairLists(heap());
+void ReadOnlySpace::RepairFreeSpacesAfterDeserialization() {
+ BasicMemoryChunk::UpdateHighWaterMark(top_);
// Each page may have a small free space that is not tracked by a free list.
// Those free spaces still contain null as their map pointer.
// Overwrite them with new fillers.
- for (Page* page : *this) {
- int size = static_cast<int>(page->wasted_memory());
- if (size == 0) {
- // If there is no wasted memory then all free space is in the free list.
- continue;
- }
- Address start = page->HighWaterMark();
- Address end = page->area_end();
- if (start < end - size) {
- // A region at the high watermark is already in free list.
- HeapObject filler = HeapObject::FromAddress(start);
- CHECK(filler.IsFreeSpaceOrFiller());
- start += filler.Size();
+ for (BasicMemoryChunk* chunk : pages_) {
+ Address start = chunk->HighWaterMark();
+ Address end = chunk->area_end();
+ // Put a filler object in the gap between the end of the allocated objects
+ // and the end of the allocatable area.
+ if (start < end) {
+ heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
+ ClearRecordedSlots::kNo);
}
- CHECK_EQ(size, static_cast<int>(end - start));
- heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
}
}
@@ -144,29 +152,374 @@ void ReadOnlySpace::Seal(SealMode ro_mode) {
if (ro_mode == SealMode::kDetachFromHeapAndForget) {
DetachFromHeap();
- for (Page* p : *this) {
- memory_allocator->UnregisterMemory(p);
- static_cast<ReadOnlyPage*>(p)->MakeHeaderRelocatable();
- }
- } else {
- for (Page* p : *this) {
- p->ReleaseAllocatedMemoryNeededForWritableChunk();
+ for (BasicMemoryChunk* chunk : pages_) {
+ memory_allocator->UnregisterMemory(chunk);
+ static_cast<ReadOnlyPage*>(chunk)->MakeHeaderRelocatable();
}
}
- free_list_.reset();
-
SetPermissionsForPages(memory_allocator, PageAllocator::kRead);
}
void ReadOnlySpace::Unseal() {
DCHECK(is_marked_read_only_);
- if (HasPages()) {
+ if (!pages_.empty()) {
SetPermissionsForPages(heap()->memory_allocator(),
PageAllocator::kReadWrite);
}
is_marked_read_only_ = false;
}
+bool ReadOnlySpace::ContainsSlow(Address addr) {
+ BasicMemoryChunk* c = BasicMemoryChunk::FromAddress(addr);
+ for (BasicMemoryChunk* chunk : pages_) {
+ if (chunk == c) return true;
+ }
+ return false;
+}
+
+namespace {
+// Only iterates over a single chunk as the chunk iteration is done externally.
+class ReadOnlySpaceObjectIterator : public ObjectIterator {
+ public:
+ ReadOnlySpaceObjectIterator(Heap* heap, ReadOnlySpace* space,
+ BasicMemoryChunk* chunk)
+ : cur_addr_(kNullAddress), cur_end_(kNullAddress), space_(space) {}
+
+ // Advance to the next object, skipping free spaces and other fillers and
+ // skipping the special garbage section of which there is one per space.
+  // Returns a null HeapObject when the iteration has ended.
+ HeapObject Next() override {
+ HeapObject next_obj = FromCurrentPage();
+ if (!next_obj.is_null()) return next_obj;
+ return HeapObject();
+ }
+
+ private:
+ HeapObject FromCurrentPage() {
+ while (cur_addr_ != cur_end_) {
+ if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
+ cur_addr_ = space_->limit();
+ continue;
+ }
+ HeapObject obj = HeapObject::FromAddress(cur_addr_);
+ const int obj_size = obj.Size();
+ cur_addr_ += obj_size;
+ DCHECK_LE(cur_addr_, cur_end_);
+ if (!obj.IsFreeSpaceOrFiller()) {
+ if (obj.IsCode()) {
+ DCHECK(Code::cast(obj).is_builtin());
+ DCHECK_CODEOBJECT_SIZE(obj_size, space_);
+ } else {
+ DCHECK_OBJECT_SIZE(obj_size);
+ }
+ return obj;
+ }
+ }
+ return HeapObject();
+ }
+
+ Address cur_addr_; // Current iteration point.
+ Address cur_end_; // End iteration point.
+ ReadOnlySpace* space_;
+};
+} // namespace
+
+#ifdef VERIFY_HEAP
+namespace {
+class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
+ public:
+ explicit VerifyReadOnlyPointersVisitor(Heap* heap)
+ : VerifyPointersVisitor(heap) {}
+
+ protected:
+ void VerifyPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
+ if (!host.is_null()) {
+ CHECK(ReadOnlyHeap::Contains(host.map()));
+ }
+ VerifyPointersVisitor::VerifyPointers(host, start, end);
+
+ for (MaybeObjectSlot current = start; current < end; ++current) {
+ HeapObject heap_object;
+ if ((*current)->GetHeapObject(&heap_object)) {
+ CHECK(ReadOnlyHeap::Contains(heap_object));
+ }
+ }
+ }
+};
+} // namespace
+
+void ReadOnlySpace::Verify(Isolate* isolate) {
+ bool allocation_pointer_found_in_space = top_ == limit_;
+ VerifyReadOnlyPointersVisitor visitor(isolate->heap());
+
+ for (BasicMemoryChunk* page : pages_) {
+#ifdef V8_SHARED_RO_HEAP
+ CHECK_NULL(page->owner());
+#else
+ CHECK_EQ(page->owner(), this);
+#endif
+
+ if (page == Page::FromAllocationAreaAddress(top_)) {
+ allocation_pointer_found_in_space = true;
+ }
+ ReadOnlySpaceObjectIterator it(isolate->heap(), this, page);
+ Address end_of_previous_object = page->area_start();
+ Address top = page->area_end();
+
+ for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
+ CHECK(end_of_previous_object <= object.address());
+
+ Map map = object.map();
+ CHECK(map.IsMap());
+
+ // The object itself should look OK.
+ object.ObjectVerify(isolate);
+
+ // All the interior pointers should be contained in the heap.
+ int size = object.Size();
+ object.IterateBody(map, size, &visitor);
+ CHECK(object.address() + size <= top);
+ end_of_previous_object = object.address() + size;
+
+ CHECK(!object.IsExternalString());
+ CHECK(!object.IsJSArrayBuffer());
+ }
+ }
+ CHECK(allocation_pointer_found_in_space);
+
+#ifdef DEBUG
+ VerifyCounters(isolate->heap());
+#endif
+}
+
+#ifdef DEBUG
+void ReadOnlySpace::VerifyCounters(Heap* heap) {
+ size_t total_capacity = 0;
+ size_t total_allocated = 0;
+ for (BasicMemoryChunk* page : pages_) {
+ total_capacity += page->area_size();
+ ReadOnlySpaceObjectIterator it(heap, this, page);
+ size_t real_allocated = 0;
+ for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
+ if (!object.IsFreeSpaceOrFiller()) {
+ real_allocated += object.Size();
+ }
+ }
+ total_allocated += page->allocated_bytes();
+    // The real size can be smaller than the accounted size if array trimming
+    // or object slack tracking happened after sweeping.
+ DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
+ DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
+ }
+ DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
+ DCHECK_EQ(total_allocated, accounting_stats_.Size());
+}
+#endif // DEBUG
+#endif // VERIFY_HEAP
+
+size_t ReadOnlySpace::CommittedPhysicalMemory() {
+ if (!base::OS::HasLazyCommits()) return CommittedMemory();
+ BasicMemoryChunk::UpdateHighWaterMark(top_);
+ size_t size = 0;
+ for (auto* chunk : pages_) {
+ size += chunk->size();
+ }
+
+ return size;
+}
+
+void ReadOnlySpace::FreeLinearAllocationArea() {
+ // Mark the old linear allocation area with a free space map so it can be
+ // skipped when scanning the heap.
+ if (top_ == kNullAddress) {
+ DCHECK_EQ(kNullAddress, limit_);
+ return;
+ }
+
+ // Clear the bits in the unused black area.
+ ReadOnlyPage* page = pages_.back();
+ heap()->incremental_marking()->marking_state()->bitmap(page)->ClearRange(
+ page->AddressToMarkbitIndex(top_), page->AddressToMarkbitIndex(limit_));
+
+ heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_),
+ ClearRecordedSlots::kNo);
+
+ BasicMemoryChunk::UpdateHighWaterMark(top_);
+
+ top_ = kNullAddress;
+ limit_ = kNullAddress;
+}
+
+void ReadOnlySpace::EnsureSpaceForAllocation(int size_in_bytes) {
+ if (top_ + size_in_bytes <= limit_) {
+ return;
+ }
+
+ DCHECK_GE(size_in_bytes, 0);
+
+ FreeLinearAllocationArea();
+
+ BasicMemoryChunk* chunk =
+ heap()->memory_allocator()->AllocateReadOnlyPage(AreaSize(), this);
+ capacity_ += AreaSize();
+
+ accounting_stats_.IncreaseCapacity(chunk->area_size());
+ AccountCommitted(chunk->size());
+ CHECK_NOT_NULL(chunk);
+ pages_.push_back(static_cast<ReadOnlyPage*>(chunk));
+
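+  // Fill the whole new page with a single filler object up front so that it
+  // is always iterable, even before any objects are allocated into it.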
+ heap()->CreateFillerObjectAt(chunk->area_start(),
+ static_cast<int>(chunk->area_size()),
+ ClearRecordedSlots::kNo);
+
+ top_ = chunk->area_start();
+ limit_ = chunk->area_end();
+ return;
+}
+
+HeapObject ReadOnlySpace::TryAllocateLinearlyAligned(
+ int size_in_bytes, AllocationAlignment alignment) {
+ Address current_top = top_;
+ int filler_size = Heap::GetFillToAlign(current_top, alignment);
+
+ Address new_top = current_top + filler_size + size_in_bytes;
+ if (new_top > limit_) return HeapObject();
+
+ top_ = new_top;
+ if (filler_size > 0) {
+ return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
+ HeapObject::FromAddress(current_top),
+ filler_size);
+ }
+
+ // Allocation always occurs in the last chunk for RO_SPACE.
+ BasicMemoryChunk* chunk = pages_.back();
+ int allocated_size = filler_size + size_in_bytes;
+ accounting_stats_.IncreaseAllocatedBytes(allocated_size, chunk);
+ chunk->IncreaseAllocatedBytes(allocated_size);
+
+ return HeapObject::FromAddress(current_top);
+}
+
+AllocationResult ReadOnlySpace::AllocateRawAligned(
+ int size_in_bytes, AllocationAlignment alignment) {
+ DCHECK(!IsDetached());
+ int allocation_size = size_in_bytes;
+
+ HeapObject object = TryAllocateLinearlyAligned(allocation_size, alignment);
+ if (object.is_null()) {
+ // We don't know exactly how much filler we need to align until space is
+ // allocated, so assume the worst case.
+ EnsureSpaceForAllocation(allocation_size +
+ Heap::GetMaximumFillToAlign(alignment));
+ allocation_size = size_in_bytes;
+ object = TryAllocateLinearlyAligned(size_in_bytes, alignment);
+ CHECK(!object.is_null());
+ }
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
+
+ return object;
+}
+
+AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
+ DCHECK(!IsDetached());
+ EnsureSpaceForAllocation(size_in_bytes);
+ Address current_top = top_;
+ Address new_top = current_top + size_in_bytes;
+ DCHECK_LE(new_top, limit_);
+ top_ = new_top;
+ HeapObject object = HeapObject::FromAddress(current_top);
+
+ DCHECK(!object.is_null());
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
+
+ // Allocation always occurs in the last chunk for RO_SPACE.
+ BasicMemoryChunk* chunk = pages_.back();
+ accounting_stats_.IncreaseAllocatedBytes(size_in_bytes, chunk);
+ chunk->IncreaseAllocatedBytes(size_in_bytes);
+
+ return object;
+}
+
+AllocationResult ReadOnlySpace::AllocateRaw(size_t size_in_bytes,
+ AllocationAlignment alignment) {
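+  // The aligned path is only taken on 32-bit hosts; on 64-bit hosts the
+  // alignment argument is ignored and the unaligned path is always used.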
+#ifdef V8_HOST_ARCH_32_BIT
+ AllocationResult result = alignment != kWordAligned
+ ? AllocateRawAligned(size_in_bytes, alignment)
+ : AllocateRawUnaligned(size_in_bytes);
+#else
+ AllocationResult result =
+ AllocateRawUnaligned(static_cast<int>(size_in_bytes));
+#endif
+ HeapObject heap_obj;
+ if (!result.IsRetry() && result.To(&heap_obj)) {
+ DCHECK(heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
+ }
+ return result;
+}
+
+size_t ReadOnlyPage::ShrinkToHighWaterMark() {
+ // Shrink pages to high water mark. The water mark points either to a filler
+ // or the area_end.
+ HeapObject filler = HeapObject::FromAddress(HighWaterMark());
+ if (filler.address() == area_end()) return 0;
+ CHECK(filler.IsFreeSpaceOrFiller());
+ DCHECK_EQ(filler.address() + filler.Size(), area_end());
+
+ size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
+ MemoryAllocator::GetCommitPageSize());
+ if (unused > 0) {
+ DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
+ if (FLAG_trace_gc_verbose) {
+ PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
+ reinterpret_cast<void*>(this),
+ reinterpret_cast<void*>(area_end()),
+ reinterpret_cast<void*>(area_end() - unused));
+ }
+ heap()->CreateFillerObjectAt(
+ filler.address(),
+ static_cast<int>(area_end() - filler.address() - unused),
+ ClearRecordedSlots::kNo);
+ heap()->memory_allocator()->PartialFreeMemory(
+ this, address() + size() - unused, unused, area_end() - unused);
+ if (filler.address() != area_end()) {
+ CHECK(filler.IsFreeSpaceOrFiller());
+ CHECK_EQ(filler.address() + filler.Size(), area_end());
+ }
+ }
+ return unused;
+}
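
ShrinkToHighWaterMark can only hand whole commit pages back to the OS, so the unused tail past the high water mark is rounded down to the commit page size and any sub-page remainder stays covered by a (smaller) filler. A worked, self-contained example of that rounding (sizes are invented):

#include <cassert>
#include <cstddef>
#include <cstdio>

constexpr size_t RoundDown(size_t value, size_t granularity) {
  return value - value % granularity;
}

int main() {
  constexpr size_t kCommitPageSize = 4096;
  size_t area_end = 256 * 1024;    // end of the page's usable area: 262144
  size_t high_water_mark = 50000;  // one past the last byte actually in use

  size_t tail = area_end - high_water_mark;          // 212144 unused bytes
  size_t unused = RoundDown(tail, kCommitPageSize);  // 208896 = 51 whole pages
  assert(unused % kCommitPageSize == 0);

  std::printf("returning %zu of %zu unused bytes to the OS\n", unused, tail);
  return 0;
}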
+
+void ReadOnlySpace::ShrinkPages() {
+ BasicMemoryChunk::UpdateHighWaterMark(top_);
+ heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_),
+ ClearRecordedSlots::kNo);
+
+ for (ReadOnlyPage* chunk : pages_) {
+ DCHECK(chunk->IsFlagSet(Page::NEVER_EVACUATE));
+ size_t unused = chunk->ShrinkToHighWaterMark();
+ capacity_ -= unused;
+ accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
+ AccountUncommitted(unused);
+ }
+ limit_ = pages_.back()->area_end();
+}
+
+ReadOnlyPage* ReadOnlySpace::InitializePage(BasicMemoryChunk* chunk) {
+ ReadOnlyPage* page = reinterpret_cast<ReadOnlyPage*>(chunk);
+ page->allocated_bytes_ = 0;
+ page->SetFlag(BasicMemoryChunk::Flag::NEVER_EVACUATE);
+ heap()
+ ->incremental_marking()
+ ->non_atomic_marking_state()
+ ->bitmap(chunk)
+ ->MarkAllBits();
+ chunk->SetFlag(BasicMemoryChunk::READ_ONLY_HEAP);
+
+ return page;
+}
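
InitializePage sets every bit in the page's marking bitmap, so objects in the read-only space are permanently black and the marker never needs to grey or revisit them. A toy version of that invariant, with an invented bitmap layout:

#include <bitset>
#include <cstddef>

struct ToyPage {
  static constexpr size_t kSlots = 1024;  // one bit per notional object slot
  std::bitset<kSlots> mark_bits;

  void MarkAllBitsBlack() { mark_bits.set(); }
  bool IsBlack(size_t slot) const { return mark_bits.test(slot); }
};

// A marker would consult IsBlack() before pushing an object on its worklist;
// on a pre-marked page the test always succeeds and the object is skipped.
bool ShouldVisit(const ToyPage& page, size_t slot) {
  return !page.IsBlack(slot);
}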
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/read-only-spaces.h b/chromium/v8/src/heap/read-only-spaces.h
index dd82182b7f6..ae2e6859440 100644
--- a/chromium/v8/src/heap/read-only-spaces.h
+++ b/chromium/v8/src/heap/read-only-spaces.h
@@ -10,21 +10,27 @@
#include "include/v8-platform.h"
#include "src/base/macros.h"
+#include "src/common/globals.h"
+#include "src/heap/allocation-stats.h"
+#include "src/heap/base-space.h"
+#include "src/heap/basic-memory-chunk.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk.h"
-#include "src/heap/spaces.h"
namespace v8 {
namespace internal {
+class MemoryAllocator;
class ReadOnlyHeap;
-class ReadOnlyPage : public Page {
+class ReadOnlyPage : public BasicMemoryChunk {
public:
// Clears any pointers in the header that point out of the page that would
// otherwise make the header non-relocatable.
void MakeHeaderRelocatable();
+ size_t ShrinkToHighWaterMark();
+
private:
friend class ReadOnlySpace;
};
@@ -45,8 +51,8 @@ class ReadOnlyArtifacts {
return shared_read_only_space_.get();
}
- heap::List<MemoryChunk>& pages() { return pages_; }
- void TransferPages(heap::List<MemoryChunk>&& pages) {
+ std::vector<ReadOnlyPage*>& pages() { return pages_; }
+ void TransferPages(std::vector<ReadOnlyPage*>&& pages) {
pages_ = std::move(pages);
}
@@ -56,7 +62,7 @@ class ReadOnlyArtifacts {
ReadOnlyHeap* read_only_heap() { return read_only_heap_.get(); }
private:
- heap::List<MemoryChunk> pages_;
+ std::vector<ReadOnlyPage*> pages_;
AllocationStats stats_;
std::unique_ptr<SharedReadOnlySpace> shared_read_only_space_;
std::unique_ptr<ReadOnlyHeap> read_only_heap_;
@@ -64,22 +70,28 @@ class ReadOnlyArtifacts {
// -----------------------------------------------------------------------------
// Read Only space for all Immortal Immovable and Immutable objects
-class ReadOnlySpace : public PagedSpace {
+class ReadOnlySpace : public BaseSpace {
public:
- explicit ReadOnlySpace(Heap* heap);
+ V8_EXPORT_PRIVATE explicit ReadOnlySpace(Heap* heap);
// Detach the pages and add them to the artifacts, for use in creating a
// SharedReadOnlySpace.
void DetachPagesAndAddToArtifacts(
std::shared_ptr<ReadOnlyArtifacts> artifacts);
- ~ReadOnlySpace() override { Unseal(); }
+ V8_EXPORT_PRIVATE ~ReadOnlySpace() override;
+
+ bool IsDetached() const { return heap_ == nullptr; }
bool writable() const { return !is_marked_read_only_; }
bool Contains(Address a) = delete;
bool Contains(Object o) = delete;
+ V8_EXPORT_PRIVATE
+ AllocationResult AllocateRaw(size_t size_in_bytes,
+ AllocationAlignment alignment);
+
V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
@@ -87,13 +99,35 @@ class ReadOnlySpace : public PagedSpace {
// Seal the space by marking it read-only, optionally detaching it
// from the heap and forgetting it for memory bookkeeping purposes (e.g.
// prevent space's memory from registering as leaked).
- void Seal(SealMode ro_mode);
+ V8_EXPORT_PRIVATE void Seal(SealMode ro_mode);
// During boot the free_space_map is created, and afterwards we may need
- // to write it into the free list nodes that were already created.
- void RepairFreeListsAfterDeserialization();
+ // to write it into the free space nodes that were already created.
+ void RepairFreeSpacesAfterDeserialization();
+
+ size_t Size() override { return accounting_stats_.Size(); }
+ V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory() override;
+
+ const std::vector<ReadOnlyPage*>& pages() const { return pages_; }
+ Address top() const { return top_; }
+ Address limit() const { return limit_; }
+ size_t Capacity() const { return capacity_; }
+
+ bool ContainsSlow(Address addr);
+ V8_EXPORT_PRIVATE void ShrinkPages();
+#ifdef VERIFY_HEAP
+ void Verify(Isolate* isolate);
+#ifdef DEBUG
+ void VerifyCounters(Heap* heap);
+#endif // DEBUG
+#endif // VERIFY_HEAP
+
+ // Return size of allocatable area on a page in this space.
+ int AreaSize() { return static_cast<int>(area_size_); }
+
+ ReadOnlyPage* InitializePage(BasicMemoryChunk* chunk);
- size_t Available() override { return 0; }
+ Address FirstPageAddress() const { return pages_.front()->address(); }
protected:
void SetPermissionsForPages(MemoryAllocator* memory_allocator,
@@ -101,16 +135,36 @@ class ReadOnlySpace : public PagedSpace {
bool is_marked_read_only_ = false;
+ // Accounting information for this space.
+ AllocationStats accounting_stats_;
+
+ std::vector<ReadOnlyPage*> pages_;
+
+ Address top_;
+ Address limit_;
+
private:
- // Unseal the space after is has been sealed, by making it writable.
- // TODO(v8:7464): Only possible if the space hasn't been detached.
+ // Unseal the space after it has been sealed, by making it writable.
void Unseal();
- //
- // String padding must be cleared just before serialization and therefore the
- // string padding in the space will already have been cleared if the space was
- // deserialized.
+ void DetachFromHeap() { heap_ = nullptr; }
+
+ AllocationResult AllocateRawUnaligned(int size_in_bytes);
+ AllocationResult AllocateRawAligned(int size_in_bytes,
+ AllocationAlignment alignment);
+
+ HeapObject TryAllocateLinearlyAligned(int size_in_bytes,
+ AllocationAlignment alignment);
+ void EnsureSpaceForAllocation(int size_in_bytes);
+ void FreeLinearAllocationArea();
+
+ // String padding must be cleared just before serialization and therefore
+ // the string padding in the space will already have been cleared if the
+ // space was deserialized.
bool is_string_padding_cleared_;
+
+ size_t capacity_;
+ const size_t area_size_;
};
class SharedReadOnlySpace : public ReadOnlySpace {
diff --git a/chromium/v8/src/heap/remembered-set-inl.h b/chromium/v8/src/heap/remembered-set-inl.h
index 034e98a06fb..3790ed9e712 100644
--- a/chromium/v8/src/heap/remembered-set-inl.h
+++ b/chromium/v8/src/heap/remembered-set-inl.h
@@ -5,437 +5,53 @@
#ifndef V8_HEAP_REMEMBERED_SET_INL_H_
#define V8_HEAP_REMEMBERED_SET_INL_H_
-#include <memory>
-
-#include "src/base/bounds.h"
-#include "src/base/memory.h"
-#include "src/codegen/reloc-info.h"
-#include "src/common/globals.h"
#include "src/common/ptr-compr-inl.h"
-#include "src/heap/heap.h"
-#include "src/heap/memory-chunk.h"
-#include "src/heap/slot-set.h"
-#include "src/heap/spaces.h"
-#include "src/heap/worklist.h"
+#include "src/heap/remembered-set.h"
namespace v8 {
namespace internal {
-enum RememberedSetIterationMode { SYNCHRONIZED, NON_SYNCHRONIZED };
-
-class RememberedSetOperations {
- public:
- // Given a page and a slot in that page, this function adds the slot to the
- // remembered set.
- template <AccessMode access_mode>
- static void Insert(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- uintptr_t offset = slot_addr - chunk->address();
- slot_set->Insert<access_mode>(offset);
- }
-
- template <typename Callback>
- static int Iterate(SlotSet* slot_set, MemoryChunk* chunk, Callback callback,
- SlotSet::EmptyBucketMode mode) {
- int slots = 0;
- if (slot_set != nullptr) {
- slots += slot_set->Iterate(chunk->address(), 0, chunk->buckets(),
- callback, mode);
+template <typename Callback>
+SlotCallbackResult UpdateTypedSlotHelper::UpdateTypedSlot(Heap* heap,
+ SlotType slot_type,
+ Address addr,
+ Callback callback) {
+ switch (slot_type) {
+ case CODE_TARGET_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, Code());
+ return UpdateCodeTarget(&rinfo, callback);
}
- return slots;
- }
-
- static void Remove(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
- if (slot_set != nullptr) {
- uintptr_t offset = slot_addr - chunk->address();
- slot_set->Remove(offset);
- }
- }
-
- static void RemoveRange(SlotSet* slot_set, MemoryChunk* chunk, Address start,
- Address end, SlotSet::EmptyBucketMode mode) {
- if (slot_set != nullptr) {
- uintptr_t start_offset = start - chunk->address();
- uintptr_t end_offset = end - chunk->address();
- DCHECK_LT(start_offset, end_offset);
- slot_set->RemoveRange(static_cast<int>(start_offset),
- static_cast<int>(end_offset), chunk->buckets(),
- mode);
- }
- }
-
- static void CheckNoneInRange(SlotSet* slot_set, MemoryChunk* chunk,
- Address start, Address end) {
- if (slot_set != nullptr) {
- size_t start_bucket = SlotSet::BucketForSlot(start - chunk->address());
- // Both 'end' and 'end_bucket' are exclusive limits, so do some index
- // juggling to make sure we get the right bucket even if the end address
- // is at the start of a bucket.
- size_t end_bucket =
- SlotSet::BucketForSlot(end - chunk->address() - kTaggedSize) + 1;
- slot_set->Iterate(
- chunk->address(), start_bucket, end_bucket,
- [start, end](MaybeObjectSlot slot) {
- CHECK(!base::IsInRange(slot.address(), start, end + 1));
- return KEEP_SLOT;
- },
- SlotSet::KEEP_EMPTY_BUCKETS);
+ case CODE_ENTRY_SLOT: {
+ return UpdateCodeEntry(addr, callback);
}
- }
-};
-
-// TODO(ulan): Investigate performance of de-templatizing this class.
-template <RememberedSetType type>
-class RememberedSet : public AllStatic {
- public:
- // Given a page and a slot in that page, this function adds the slot to the
- // remembered set.
- template <AccessMode access_mode>
- static void Insert(MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->slot_set<type, access_mode>();
- if (slot_set == nullptr) {
- slot_set = chunk->AllocateSlotSet<type>();
+ case COMPRESSED_EMBEDDED_OBJECT_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::COMPRESSED_EMBEDDED_OBJECT, 0, Code());
+ return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
- RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
- }
-
- // Given a page and a slot in that page, this function returns true if
- // the remembered set contains the slot.
- static bool Contains(MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->slot_set<type>();
- if (slot_set == nullptr) {
- return false;
+ case FULL_EMBEDDED_OBJECT_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::FULL_EMBEDDED_OBJECT, 0, Code());
+ return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
- uintptr_t offset = slot_addr - chunk->address();
- return slot_set->Contains(offset);
- }
-
- static void CheckNoneInRange(MemoryChunk* chunk, Address start, Address end) {
- SlotSet* slot_set = chunk->slot_set<type>();
- RememberedSetOperations::CheckNoneInRange(slot_set, chunk, start, end);
- }
-
- // Given a page and a slot in that page, this function removes the slot from
- // the remembered set.
- // If the slot was never added, then the function does nothing.
- static void Remove(MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->slot_set<type>();
- RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
- }
-
- // Given a page and a range of slots in that page, this function removes the
- // slots from the remembered set.
- static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
- SlotSet::EmptyBucketMode mode) {
- SlotSet* slot_set = chunk->slot_set<type>();
- RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
- }
-
- // Iterates and filters the remembered set with the given callback.
- // The callback should take (Address slot) and return SlotCallbackResult.
- template <typename Callback>
- static void Iterate(Heap* heap, RememberedSetIterationMode mode,
- Callback callback) {
- IterateMemoryChunks(heap, [mode, callback](MemoryChunk* chunk) {
- if (mode == SYNCHRONIZED) chunk->mutex()->Lock();
- Iterate(chunk, callback);
- if (mode == SYNCHRONIZED) chunk->mutex()->Unlock();
- });
- }
-
- // Iterates over all memory chunks that contains non-empty slot sets.
- // The callback should take (MemoryChunk* chunk) and return void.
- template <typename Callback>
- static void IterateMemoryChunks(Heap* heap, Callback callback) {
- OldGenerationMemoryChunkIterator it(heap);
- MemoryChunk* chunk;
- while ((chunk = it.next()) != nullptr) {
- SlotSet* slot_set = chunk->slot_set<type>();
- SlotSet* sweeping_slot_set =
- type == OLD_TO_NEW ? chunk->sweeping_slot_set() : nullptr;
- TypedSlotSet* typed_slot_set = chunk->typed_slot_set<type>();
- if (slot_set != nullptr || sweeping_slot_set != nullptr ||
- typed_slot_set != nullptr ||
- chunk->invalidated_slots<type>() != nullptr) {
- callback(chunk);
+ case COMPRESSED_OBJECT_SLOT: {
+ HeapObject old_target = HeapObject::cast(Object(
+ DecompressTaggedAny(heap->isolate(), base::Memory<Tagged_t>(addr))));
+ HeapObject new_target = old_target;
+ SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
+ DCHECK(!HasWeakHeapObjectTag(new_target));
+ if (new_target != old_target) {
+ base::Memory<Tagged_t>(addr) = CompressTagged(new_target.ptr());
}
+ return result;
}
- }
-
- // Iterates and filters the remembered set in the given memory chunk with
- // the given callback. The callback should take (Address slot) and return
- // SlotCallbackResult.
- //
- // Notice that |mode| can only be of FREE* or PREFREE* if there are no other
- // threads concurrently inserting slots.
- template <typename Callback>
- static int Iterate(MemoryChunk* chunk, Callback callback,
- SlotSet::EmptyBucketMode mode) {
- SlotSet* slot_set = chunk->slot_set<type>();
- return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
- }
-
- template <typename Callback>
- static int IterateAndTrackEmptyBuckets(
- MemoryChunk* chunk, Callback callback,
- Worklist<MemoryChunk*, 64>::View empty_chunks) {
- SlotSet* slot_set = chunk->slot_set<type>();
- int slots = 0;
- if (slot_set != nullptr) {
- PossiblyEmptyBuckets* possibly_empty_buckets =
- chunk->possibly_empty_buckets();
- slots += slot_set->IterateAndTrackEmptyBuckets(chunk->address(), 0,
- chunk->buckets(), callback,
- possibly_empty_buckets);
- if (!possibly_empty_buckets->IsEmpty()) empty_chunks.Push(chunk);
- }
- return slots;
- }
-
- static void FreeEmptyBuckets(MemoryChunk* chunk) {
- DCHECK(type == OLD_TO_NEW);
- SlotSet* slot_set = chunk->slot_set<type>();
- if (slot_set != nullptr && slot_set->FreeEmptyBuckets(chunk->buckets())) {
- chunk->ReleaseSlotSet<type>();
- }
- }
-
- static bool CheckPossiblyEmptyBuckets(MemoryChunk* chunk) {
- DCHECK(type == OLD_TO_NEW);
- SlotSet* slot_set = chunk->slot_set<type, AccessMode::NON_ATOMIC>();
- if (slot_set != nullptr &&
- slot_set->CheckPossiblyEmptyBuckets(chunk->buckets(),
- chunk->possibly_empty_buckets())) {
- chunk->ReleaseSlotSet<type>();
- return true;
- }
-
- return false;
- }
-
- // Given a page and a typed slot in that page, this function adds the slot
- // to the remembered set.
- static void InsertTyped(MemoryChunk* memory_chunk, SlotType slot_type,
- uint32_t offset) {
- TypedSlotSet* slot_set = memory_chunk->typed_slot_set<type>();
- if (slot_set == nullptr) {
- slot_set = memory_chunk->AllocateTypedSlotSet<type>();
- }
- slot_set->Insert(slot_type, offset);
- }
-
- static void MergeTyped(MemoryChunk* page, std::unique_ptr<TypedSlots> other) {
- TypedSlotSet* slot_set = page->typed_slot_set<type>();
- if (slot_set == nullptr) {
- slot_set = page->AllocateTypedSlotSet<type>();
+ case FULL_OBJECT_SLOT: {
+ return callback(FullMaybeObjectSlot(addr));
}
- slot_set->Merge(other.get());
- }
-
- // Given a page and a range of typed slots in that page, this function removes
- // the slots from the remembered set.
- static void RemoveRangeTyped(MemoryChunk* page, Address start, Address end) {
- TypedSlotSet* slot_set = page->typed_slot_set<type>();
- if (slot_set != nullptr) {
- slot_set->Iterate(
- [=](SlotType slot_type, Address slot_addr) {
- return start <= slot_addr && slot_addr < end ? REMOVE_SLOT
- : KEEP_SLOT;
- },
- TypedSlotSet::FREE_EMPTY_CHUNKS);
- }
- }
-
- // Iterates and filters the remembered set with the given callback.
- // The callback should take (SlotType slot_type, Address addr) and return
- // SlotCallbackResult.
- template <typename Callback>
- static void IterateTyped(Heap* heap, RememberedSetIterationMode mode,
- Callback callback) {
- IterateMemoryChunks(heap, [mode, callback](MemoryChunk* chunk) {
- if (mode == SYNCHRONIZED) chunk->mutex()->Lock();
- IterateTyped(chunk, callback);
- if (mode == SYNCHRONIZED) chunk->mutex()->Unlock();
- });
- }
-
- // Iterates and filters typed pointers in the given memory chunk with the
- // given callback. The callback should take (SlotType slot_type, Address addr)
- // and return SlotCallbackResult.
- template <typename Callback>
- static void IterateTyped(MemoryChunk* chunk, Callback callback) {
- TypedSlotSet* slot_set = chunk->typed_slot_set<type>();
- if (slot_set != nullptr) {
- int new_count =
- slot_set->Iterate(callback, TypedSlotSet::KEEP_EMPTY_CHUNKS);
- if (new_count == 0) {
- chunk->ReleaseTypedSlotSet<type>();
- }
- }
- }
-
- // Clear all old to old slots from the remembered set.
- static void ClearAll(Heap* heap) {
- STATIC_ASSERT(type == OLD_TO_OLD);
- OldGenerationMemoryChunkIterator it(heap);
- MemoryChunk* chunk;
- while ((chunk = it.next()) != nullptr) {
- chunk->ReleaseSlotSet<OLD_TO_OLD>();
- chunk->ReleaseTypedSlotSet<OLD_TO_OLD>();
- chunk->ReleaseInvalidatedSlots<OLD_TO_OLD>();
- }
- }
-};
-
-class UpdateTypedSlotHelper {
- public:
- // Updates a typed slot using an untyped slot callback where |addr| depending
- // on slot type represents either address for respective RelocInfo or address
- // of the uncompressed constant pool entry.
- // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
- template <typename Callback>
- static SlotCallbackResult UpdateTypedSlot(Heap* heap, SlotType slot_type,
- Address addr, Callback callback) {
- switch (slot_type) {
- case CODE_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, Code());
- return UpdateCodeTarget(&rinfo, callback);
- }
- case CODE_ENTRY_SLOT: {
- return UpdateCodeEntry(addr, callback);
- }
- case COMPRESSED_EMBEDDED_OBJECT_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::COMPRESSED_EMBEDDED_OBJECT, 0, Code());
- return UpdateEmbeddedPointer(heap, &rinfo, callback);
- }
- case FULL_EMBEDDED_OBJECT_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::FULL_EMBEDDED_OBJECT, 0, Code());
- return UpdateEmbeddedPointer(heap, &rinfo, callback);
- }
- case COMPRESSED_OBJECT_SLOT: {
- HeapObject old_target = HeapObject::cast(Object(DecompressTaggedAny(
- heap->isolate(), base::Memory<Tagged_t>(addr))));
- HeapObject new_target = old_target;
- SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
- DCHECK(!HasWeakHeapObjectTag(new_target));
- if (new_target != old_target) {
- base::Memory<Tagged_t>(addr) = CompressTagged(new_target.ptr());
- }
- return result;
- }
- case FULL_OBJECT_SLOT: {
- return callback(FullMaybeObjectSlot(addr));
- }
- case CLEARED_SLOT:
- break;
- }
- UNREACHABLE();
- }
-
- private:
- // Updates a code entry slot using an untyped slot callback.
- // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
- template <typename Callback>
- static SlotCallbackResult UpdateCodeEntry(Address entry_address,
- Callback callback) {
- Code code = Code::GetObjectFromEntryAddress(entry_address);
- Code old_code = code;
- SlotCallbackResult result = callback(FullMaybeObjectSlot(&code));
- DCHECK(!HasWeakHeapObjectTag(code));
- if (code != old_code) {
- base::Memory<Address>(entry_address) = code.entry();
- }
- return result;
- }
-
- // Updates a code target slot using an untyped slot callback.
- // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
- template <typename Callback>
- static SlotCallbackResult UpdateCodeTarget(RelocInfo* rinfo,
- Callback callback) {
- DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
- Code old_target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- Code new_target = old_target;
- SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
- DCHECK(!HasWeakHeapObjectTag(new_target));
- if (new_target != old_target) {
- rinfo->set_target_address(Code::cast(new_target).raw_instruction_start());
- }
- return result;
- }
-
- // Updates an embedded pointer slot using an untyped slot callback.
- // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
- template <typename Callback>
- static SlotCallbackResult UpdateEmbeddedPointer(Heap* heap, RelocInfo* rinfo,
- Callback callback) {
- DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
- HeapObject old_target = rinfo->target_object_no_host(heap->isolate());
- HeapObject new_target = old_target;
- SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
- DCHECK(!HasWeakHeapObjectTag(new_target));
- if (new_target != old_target) {
- rinfo->set_target_object(heap, HeapObject::cast(new_target));
- }
- return result;
- }
-};
-
-class RememberedSetSweeping {
- public:
- template <AccessMode access_mode>
- static void Insert(MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->sweeping_slot_set<access_mode>();
- if (slot_set == nullptr) {
- slot_set = chunk->AllocateSweepingSlotSet();
- }
- RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
- }
-
- static void Remove(MemoryChunk* chunk, Address slot_addr) {
- DCHECK(chunk->Contains(slot_addr));
- SlotSet* slot_set = chunk->sweeping_slot_set<AccessMode::ATOMIC>();
- RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
- }
-
- // Given a page and a range of slots in that page, this function removes the
- // slots from the remembered set.
- static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
- SlotSet::EmptyBucketMode mode) {
- SlotSet* slot_set = chunk->sweeping_slot_set();
- RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
- }
-
- // Iterates and filters the remembered set in the given memory chunk with
- // the given callback. The callback should take (Address slot) and return
- // SlotCallbackResult.
- //
- // Notice that |mode| can only be of FREE* or PREFREE* if there are no other
- // threads concurrently inserting slots.
- template <typename Callback>
- static int Iterate(MemoryChunk* chunk, Callback callback,
- SlotSet::EmptyBucketMode mode) {
- SlotSet* slot_set = chunk->sweeping_slot_set();
- return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
- }
-};
-
-inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
- if (RelocInfo::IsCodeTargetMode(rmode)) {
- return CODE_TARGET_SLOT;
- } else if (RelocInfo::IsFullEmbeddedObject(rmode)) {
- return FULL_EMBEDDED_OBJECT_SLOT;
- } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
- return COMPRESSED_EMBEDDED_OBJECT_SLOT;
+ case CLEARED_SLOT:
+ break;
}
UNREACHABLE();
}
} // namespace internal
} // namespace v8
-
#endif // V8_HEAP_REMEMBERED_SET_INL_H_
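
With the bulk of the file moved out, this header now keeps only the template definition of UpdateTypedSlot, whose job is to decode a slot according to its type, present a uniform full-width view to a single callback, and re-encode the value only if the callback changed it. A self-contained sketch of that dispatch pattern, using invented slot types and a simple base-plus-offset compression rather than V8's scheme:

#include <cstdint>
#include <functional>

enum class ToySlotType { kFullPointer, kCompressedPointer };

using ToyCallback = std::function<void(uint64_t* full_view)>;

void UpdateToySlot(ToySlotType type, void* slot, uint64_t base,
                   const ToyCallback& callback) {
  switch (type) {
    case ToySlotType::kFullPointer: {
      callback(static_cast<uint64_t*>(slot));  // update in place
      return;
    }
    case ToySlotType::kCompressedPointer: {
      auto* raw = static_cast<uint32_t*>(slot);
      uint64_t full = base + *raw;  // decompress to a full-width view
      uint64_t old_full = full;
      callback(&full);
      if (full != old_full) {
        *raw = static_cast<uint32_t>(full - base);  // recompress on change
      }
      return;
    }
  }
}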
diff --git a/chromium/v8/src/heap/remembered-set.h b/chromium/v8/src/heap/remembered-set.h
new file mode 100644
index 00000000000..4ded63de03a
--- /dev/null
+++ b/chromium/v8/src/heap/remembered-set.h
@@ -0,0 +1,406 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_REMEMBERED_SET_H_
+#define V8_HEAP_REMEMBERED_SET_H_
+
+#include <memory>
+
+#include "src/base/bounds.h"
+#include "src/base/memory.h"
+#include "src/codegen/reloc-info.h"
+#include "src/common/globals.h"
+#include "src/heap/heap.h"
+#include "src/heap/memory-chunk.h"
+#include "src/heap/paged-spaces.h"
+#include "src/heap/slot-set.h"
+#include "src/heap/spaces.h"
+#include "src/heap/worklist.h"
+
+namespace v8 {
+namespace internal {
+
+enum RememberedSetIterationMode { SYNCHRONIZED, NON_SYNCHRONIZED };
+
+class RememberedSetOperations {
+ public:
+ // Given a page and a slot in that page, this function adds the slot to the
+ // remembered set.
+ template <AccessMode access_mode>
+ static void Insert(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ uintptr_t offset = slot_addr - chunk->address();
+ slot_set->Insert<access_mode>(offset);
+ }
+
+ template <typename Callback>
+ static int Iterate(SlotSet* slot_set, MemoryChunk* chunk, Callback callback,
+ SlotSet::EmptyBucketMode mode) {
+ int slots = 0;
+ if (slot_set != nullptr) {
+ slots += slot_set->Iterate(chunk->address(), 0, chunk->buckets(),
+ callback, mode);
+ }
+ return slots;
+ }
+
+ static void Remove(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
+ if (slot_set != nullptr) {
+ uintptr_t offset = slot_addr - chunk->address();
+ slot_set->Remove(offset);
+ }
+ }
+
+ static void RemoveRange(SlotSet* slot_set, MemoryChunk* chunk, Address start,
+ Address end, SlotSet::EmptyBucketMode mode) {
+ if (slot_set != nullptr) {
+ uintptr_t start_offset = start - chunk->address();
+ uintptr_t end_offset = end - chunk->address();
+ DCHECK_LT(start_offset, end_offset);
+ slot_set->RemoveRange(static_cast<int>(start_offset),
+ static_cast<int>(end_offset), chunk->buckets(),
+ mode);
+ }
+ }
+
+ static void CheckNoneInRange(SlotSet* slot_set, MemoryChunk* chunk,
+ Address start, Address end) {
+ if (slot_set != nullptr) {
+ size_t start_bucket = SlotSet::BucketForSlot(start - chunk->address());
+ // Both 'end' and 'end_bucket' are exclusive limits, so do some index
+ // juggling to make sure we get the right bucket even if the end address
+ // is at the start of a bucket.
+ size_t end_bucket =
+ SlotSet::BucketForSlot(end - chunk->address() - kTaggedSize) + 1;
+ slot_set->Iterate(
+ chunk->address(), start_bucket, end_bucket,
+ [start, end](MaybeObjectSlot slot) {
+ CHECK(!base::IsInRange(slot.address(), start, end + 1));
+ return KEEP_SLOT;
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ }
+ }
+};
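
RememberedSetOperations records slots as offsets from the owning chunk's base address, so the slot set itself stays position-independent and the base is only added back during iteration. A compact stand-in for that bookkeeping (std::set replaces V8's bucketed SlotSet):

#include <cstdint>
#include <functional>
#include <set>

struct ToyChunk {
  uintptr_t base;                    // chunk start address
  std::set<uintptr_t> slot_offsets;  // remembered slots, stored as offsets

  void Insert(uintptr_t slot_addr) { slot_offsets.insert(slot_addr - base); }
  void Remove(uintptr_t slot_addr) { slot_offsets.erase(slot_addr - base); }

  // Iterate absolute slot addresses, reconstructed from the offsets.
  void Iterate(const std::function<void(uintptr_t)>& cb) const {
    for (uintptr_t offset : slot_offsets) cb(base + offset);
  }
};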
+
+// TODO(ulan): Investigate performance of de-templatizing this class.
+template <RememberedSetType type>
+class RememberedSet : public AllStatic {
+ public:
+ // Given a page and a slot in that page, this function adds the slot to the
+ // remembered set.
+ template <AccessMode access_mode>
+ static void Insert(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->slot_set<type, access_mode>();
+ if (slot_set == nullptr) {
+ slot_set = chunk->AllocateSlotSet<type>();
+ }
+ RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
+ }
+
+ // Given a page and a slot in that page, this function returns true if
+ // the remembered set contains the slot.
+ static bool Contains(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->slot_set<type>();
+ if (slot_set == nullptr) {
+ return false;
+ }
+ uintptr_t offset = slot_addr - chunk->address();
+ return slot_set->Contains(offset);
+ }
+
+ static void CheckNoneInRange(MemoryChunk* chunk, Address start, Address end) {
+ SlotSet* slot_set = chunk->slot_set<type>();
+ RememberedSetOperations::CheckNoneInRange(slot_set, chunk, start, end);
+ }
+
+ // Given a page and a slot in that page, this function removes the slot from
+ // the remembered set.
+ // If the slot was never added, then the function does nothing.
+ static void Remove(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->slot_set<type>();
+ RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
+ }
+
+ // Given a page and a range of slots in that page, this function removes the
+ // slots from the remembered set.
+ static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
+ SlotSet::EmptyBucketMode mode) {
+ SlotSet* slot_set = chunk->slot_set<type>();
+ RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
+ }
+
+ // Iterates and filters the remembered set with the given callback.
+ // The callback should take (Address slot) and return SlotCallbackResult.
+ template <typename Callback>
+ static void Iterate(Heap* heap, RememberedSetIterationMode mode,
+ Callback callback) {
+ IterateMemoryChunks(heap, [mode, callback](MemoryChunk* chunk) {
+ if (mode == SYNCHRONIZED) chunk->mutex()->Lock();
+ Iterate(chunk, callback);
+ if (mode == SYNCHRONIZED) chunk->mutex()->Unlock();
+ });
+ }
+
+ // Iterates over all memory chunks that contain non-empty slot sets.
+ // The callback should take (MemoryChunk* chunk) and return void.
+ template <typename Callback>
+ static void IterateMemoryChunks(Heap* heap, Callback callback) {
+ OldGenerationMemoryChunkIterator it(heap);
+ MemoryChunk* chunk;
+ while ((chunk = it.next()) != nullptr) {
+ SlotSet* slot_set = chunk->slot_set<type>();
+ SlotSet* sweeping_slot_set =
+ type == OLD_TO_NEW ? chunk->sweeping_slot_set() : nullptr;
+ TypedSlotSet* typed_slot_set = chunk->typed_slot_set<type>();
+ if (slot_set != nullptr || sweeping_slot_set != nullptr ||
+ typed_slot_set != nullptr ||
+ chunk->invalidated_slots<type>() != nullptr) {
+ callback(chunk);
+ }
+ }
+ }
+
+ // Iterates and filters the remembered set in the given memory chunk with
+ // the given callback. The callback should take (Address slot) and return
+ // SlotCallbackResult.
+ //
+ // Notice that |mode| can only be of FREE* or PREFREE* if there are no other
+ // threads concurrently inserting slots.
+ template <typename Callback>
+ static int Iterate(MemoryChunk* chunk, Callback callback,
+ SlotSet::EmptyBucketMode mode) {
+ SlotSet* slot_set = chunk->slot_set<type>();
+ return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
+ }
+
+ template <typename Callback>
+ static int IterateAndTrackEmptyBuckets(
+ MemoryChunk* chunk, Callback callback,
+ Worklist<MemoryChunk*, 64>::View empty_chunks) {
+ SlotSet* slot_set = chunk->slot_set<type>();
+ int slots = 0;
+ if (slot_set != nullptr) {
+ PossiblyEmptyBuckets* possibly_empty_buckets =
+ chunk->possibly_empty_buckets();
+ slots += slot_set->IterateAndTrackEmptyBuckets(chunk->address(), 0,
+ chunk->buckets(), callback,
+ possibly_empty_buckets);
+ if (!possibly_empty_buckets->IsEmpty()) empty_chunks.Push(chunk);
+ }
+ return slots;
+ }
+
+ static void FreeEmptyBuckets(MemoryChunk* chunk) {
+ DCHECK(type == OLD_TO_NEW);
+ SlotSet* slot_set = chunk->slot_set<type>();
+ if (slot_set != nullptr && slot_set->FreeEmptyBuckets(chunk->buckets())) {
+ chunk->ReleaseSlotSet<type>();
+ }
+ }
+
+ static bool CheckPossiblyEmptyBuckets(MemoryChunk* chunk) {
+ DCHECK(type == OLD_TO_NEW);
+ SlotSet* slot_set = chunk->slot_set<type, AccessMode::NON_ATOMIC>();
+ if (slot_set != nullptr &&
+ slot_set->CheckPossiblyEmptyBuckets(chunk->buckets(),
+ chunk->possibly_empty_buckets())) {
+ chunk->ReleaseSlotSet<type>();
+ return true;
+ }
+
+ return false;
+ }
+
+ // Given a page and a typed slot in that page, this function adds the slot
+ // to the remembered set.
+ static void InsertTyped(MemoryChunk* memory_chunk, SlotType slot_type,
+ uint32_t offset) {
+ TypedSlotSet* slot_set = memory_chunk->typed_slot_set<type>();
+ if (slot_set == nullptr) {
+ slot_set = memory_chunk->AllocateTypedSlotSet<type>();
+ }
+ slot_set->Insert(slot_type, offset);
+ }
+
+ static void MergeTyped(MemoryChunk* page, std::unique_ptr<TypedSlots> other) {
+ TypedSlotSet* slot_set = page->typed_slot_set<type>();
+ if (slot_set == nullptr) {
+ slot_set = page->AllocateTypedSlotSet<type>();
+ }
+ slot_set->Merge(other.get());
+ }
+
+ // Given a page and a range of typed slots in that page, this function removes
+ // the slots from the remembered set.
+ static void RemoveRangeTyped(MemoryChunk* page, Address start, Address end) {
+ TypedSlotSet* slot_set = page->typed_slot_set<type>();
+ if (slot_set != nullptr) {
+ slot_set->Iterate(
+ [=](SlotType slot_type, Address slot_addr) {
+ return start <= slot_addr && slot_addr < end ? REMOVE_SLOT
+ : KEEP_SLOT;
+ },
+ TypedSlotSet::FREE_EMPTY_CHUNKS);
+ }
+ }
+
+ // Iterates and filters the remembered set with the given callback.
+ // The callback should take (SlotType slot_type, Address addr) and return
+ // SlotCallbackResult.
+ template <typename Callback>
+ static void IterateTyped(Heap* heap, RememberedSetIterationMode mode,
+ Callback callback) {
+ IterateMemoryChunks(heap, [mode, callback](MemoryChunk* chunk) {
+ if (mode == SYNCHRONIZED) chunk->mutex()->Lock();
+ IterateTyped(chunk, callback);
+ if (mode == SYNCHRONIZED) chunk->mutex()->Unlock();
+ });
+ }
+
+ // Iterates and filters typed pointers in the given memory chunk with the
+ // given callback. The callback should take (SlotType slot_type, Address addr)
+ // and return SlotCallbackResult.
+ template <typename Callback>
+ static void IterateTyped(MemoryChunk* chunk, Callback callback) {
+ TypedSlotSet* slot_set = chunk->typed_slot_set<type>();
+ if (slot_set != nullptr) {
+ int new_count =
+ slot_set->Iterate(callback, TypedSlotSet::KEEP_EMPTY_CHUNKS);
+ if (new_count == 0) {
+ chunk->ReleaseTypedSlotSet<type>();
+ }
+ }
+ }
+
+ // Clear all old to old slots from the remembered set.
+ static void ClearAll(Heap* heap) {
+ STATIC_ASSERT(type == OLD_TO_OLD);
+ OldGenerationMemoryChunkIterator it(heap);
+ MemoryChunk* chunk;
+ while ((chunk = it.next()) != nullptr) {
+ chunk->ReleaseSlotSet<OLD_TO_OLD>();
+ chunk->ReleaseTypedSlotSet<OLD_TO_OLD>();
+ chunk->ReleaseInvalidatedSlots<OLD_TO_OLD>();
+ }
+ }
+};
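
The Iterate family above follows an iterate-and-filter contract: the callback inspects each remembered slot and returns KEEP_SLOT or REMOVE_SLOT, and stale entries are pruned during the same pass. A minimal sketch of that contract with invented names:

#include <cstdint>
#include <functional>
#include <vector>

enum class ToySlotVerdict { kKeep, kRemove };

using ToySlotVisitor = std::function<ToySlotVerdict(uintptr_t slot)>;

// Returns the number of surviving slots, mirroring the int return value of
// RememberedSet::Iterate.
int IterateAndFilter(std::vector<uintptr_t>& slots,
                     const ToySlotVisitor& visitor) {
  std::vector<uintptr_t> kept;
  kept.reserve(slots.size());
  for (uintptr_t slot : slots) {
    if (visitor(slot) == ToySlotVerdict::kKeep) kept.push_back(slot);
  }
  slots.swap(kept);
  return static_cast<int>(slots.size());
}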
+
+class UpdateTypedSlotHelper {
+ public:
+ // Updates a typed slot using an untyped slot callback, where |addr| is,
+ // depending on the slot type, either the address of the respective
+ // RelocInfo or the address of the uncompressed constant pool entry.
+ // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
+ template <typename Callback>
+ static SlotCallbackResult UpdateTypedSlot(Heap* heap, SlotType slot_type,
+ Address addr, Callback callback);
+
+ private:
+ // Updates a code entry slot using an untyped slot callback.
+ // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
+ template <typename Callback>
+ static SlotCallbackResult UpdateCodeEntry(Address entry_address,
+ Callback callback) {
+ Code code = Code::GetObjectFromEntryAddress(entry_address);
+ Code old_code = code;
+ SlotCallbackResult result = callback(FullMaybeObjectSlot(&code));
+ DCHECK(!HasWeakHeapObjectTag(code));
+ if (code != old_code) {
+ base::Memory<Address>(entry_address) = code.entry();
+ }
+ return result;
+ }
+
+ // Updates a code target slot using an untyped slot callback.
+ // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
+ template <typename Callback>
+ static SlotCallbackResult UpdateCodeTarget(RelocInfo* rinfo,
+ Callback callback) {
+ DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
+ Code old_target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Code new_target = old_target;
+ SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
+ DCHECK(!HasWeakHeapObjectTag(new_target));
+ if (new_target != old_target) {
+ rinfo->set_target_address(Code::cast(new_target).raw_instruction_start());
+ }
+ return result;
+ }
+
+ // Updates an embedded pointer slot using an untyped slot callback.
+ // The callback accepts FullMaybeObjectSlot and returns SlotCallbackResult.
+ template <typename Callback>
+ static SlotCallbackResult UpdateEmbeddedPointer(Heap* heap, RelocInfo* rinfo,
+ Callback callback) {
+ DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
+ HeapObject old_target = rinfo->target_object_no_host(heap->isolate());
+ HeapObject new_target = old_target;
+ SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
+ DCHECK(!HasWeakHeapObjectTag(new_target));
+ if (new_target != old_target) {
+ rinfo->set_target_object(heap, HeapObject::cast(new_target));
+ }
+ return result;
+ }
+};
+
+class RememberedSetSweeping {
+ public:
+ template <AccessMode access_mode>
+ static void Insert(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->sweeping_slot_set<access_mode>();
+ if (slot_set == nullptr) {
+ slot_set = chunk->AllocateSweepingSlotSet();
+ }
+ RememberedSetOperations::Insert<access_mode>(slot_set, chunk, slot_addr);
+ }
+
+ static void Remove(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = chunk->sweeping_slot_set<AccessMode::ATOMIC>();
+ RememberedSetOperations::Remove(slot_set, chunk, slot_addr);
+ }
+
+ // Given a page and a range of slots in that page, this function removes the
+ // slots from the remembered set.
+ static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
+ SlotSet::EmptyBucketMode mode) {
+ SlotSet* slot_set = chunk->sweeping_slot_set();
+ RememberedSetOperations::RemoveRange(slot_set, chunk, start, end, mode);
+ }
+
+ // Iterates and filters the remembered set in the given memory chunk with
+ // the given callback. The callback should take (Address slot) and return
+ // SlotCallbackResult.
+ //
+ // Notice that |mode| can only be of FREE* or PREFREE* if there are no other
+ // threads concurrently inserting slots.
+ template <typename Callback>
+ static int Iterate(MemoryChunk* chunk, Callback callback,
+ SlotSet::EmptyBucketMode mode) {
+ SlotSet* slot_set = chunk->sweeping_slot_set();
+ return RememberedSetOperations::Iterate(slot_set, chunk, callback, mode);
+ }
+};
+
+inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
+ if (RelocInfo::IsCodeTargetMode(rmode)) {
+ return CODE_TARGET_SLOT;
+ } else if (RelocInfo::IsFullEmbeddedObject(rmode)) {
+ return FULL_EMBEDDED_OBJECT_SLOT;
+ } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
+ return COMPRESSED_EMBEDDED_OBJECT_SLOT;
+ }
+ UNREACHABLE();
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_REMEMBERED_SET_H_
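
The new header keeps the class definitions but only declares UpdateTypedSlotHelper::UpdateTypedSlot; the template body stays in remembered-set-inl.h, so most includers no longer pull in the heavier inline dependencies. A generic miniature of that declaration/definition split, using an invented Widget class and shown here as a single translation unit:

// What stays in the main header: the declaration only.
class Widget {
 public:
  template <typename Callback>
  static int ForEach(Callback callback);
};

// What moves to the -inl.h: the template definition, included only by the
// few translation units that actually instantiate it.
template <typename Callback>
int Widget::ForEach(Callback callback) {
  int count = 0;
  for (int i = 0; i < 3; ++i) {
    callback(i);
    ++count;
  }
  return count;
}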
diff --git a/chromium/v8/src/heap/safepoint.cc b/chromium/v8/src/heap/safepoint.cc
index e6ccf642c09..3012413f48c 100644
--- a/chromium/v8/src/heap/safepoint.cc
+++ b/chromium/v8/src/heap/safepoint.cc
@@ -13,13 +13,16 @@ namespace v8 {
namespace internal {
GlobalSafepoint::GlobalSafepoint(Heap* heap)
- : heap_(heap), local_heaps_head_(nullptr), is_active_(false) {}
+ : heap_(heap), local_heaps_head_(nullptr), active_safepoint_scopes_(0) {}
-void GlobalSafepoint::Start() { StopThreads(); }
+void GlobalSafepoint::Start() { EnterSafepointScope(); }
-void GlobalSafepoint::End() { ResumeThreads(); }
+void GlobalSafepoint::End() { LeaveSafepointScope(); }
-void GlobalSafepoint::StopThreads() {
+void GlobalSafepoint::EnterSafepointScope() {
+ if (!FLAG_local_heaps) return;
+
+ if (++active_safepoint_scopes_ > 1) return;
local_heaps_mutex_.Lock();
barrier_.Arm();
@@ -37,12 +40,13 @@ void GlobalSafepoint::StopThreads() {
current->state_change_.Wait(&current->state_mutex_);
}
}
-
- is_active_ = true;
}
-void GlobalSafepoint::ResumeThreads() {
- is_active_ = false;
+void GlobalSafepoint::LeaveSafepointScope() {
+ if (!FLAG_local_heaps) return;
+
+ DCHECK_GT(active_safepoint_scopes_, 0);
+ if (--active_safepoint_scopes_ > 0) return;
for (LocalHeap* current = local_heaps_head_; current;
current = current->next_) {
@@ -90,12 +94,10 @@ void GlobalSafepoint::Barrier::Wait() {
}
SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
- if (FLAG_local_heaps) safepoint_->StopThreads();
+ safepoint_->EnterSafepointScope();
}
-SafepointScope::~SafepointScope() {
- if (FLAG_local_heaps) safepoint_->ResumeThreads();
-}
+SafepointScope::~SafepointScope() { safepoint_->LeaveSafepointScope(); }
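
The safepoint now counts nested scopes instead of tracking a single boolean, so only the outermost EnterSafepointScope stops the other threads and only the matching outermost LeaveSafepointScope resumes them. A self-contained sketch of that counting scheme with stubbed stop/resume bodies:

#include <cassert>

class ToySafepoint {
 public:
  void EnterScope() {
    if (++active_scopes_ > 1) return;  // already inside a safepoint
    StopThreads();
  }

  void LeaveScope() {
    assert(active_scopes_ > 0);
    if (--active_scopes_ > 0) return;  // an outer scope is still active
    ResumeThreads();
  }

  bool IsActive() const { return active_scopes_ > 0; }

 private:
  void StopThreads() { /* park all background threads */ }
  void ResumeThreads() { /* wake them up again */ }

  int active_scopes_ = 0;
};

// RAII wrapper mirroring SafepointScope.
class ToySafepointScope {
 public:
  explicit ToySafepointScope(ToySafepoint* s) : s_(s) { s_->EnterScope(); }
  ~ToySafepointScope() { s_->LeaveScope(); }

 private:
  ToySafepoint* s_;
};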
void GlobalSafepoint::AddLocalHeap(LocalHeap* local_heap) {
base::MutexGuard guard(&local_heaps_mutex_);
diff --git a/chromium/v8/src/heap/safepoint.h b/chromium/v8/src/heap/safepoint.h
index 3ba96e11d59..0d397c9adaf 100644
--- a/chromium/v8/src/heap/safepoint.h
+++ b/chromium/v8/src/heap/safepoint.h
@@ -47,7 +47,7 @@ class GlobalSafepoint {
void Start();
void End();
- bool IsActive() { return is_active_; }
+ bool IsActive() { return active_safepoint_scopes_ > 0; }
private:
class Barrier {
@@ -63,8 +63,8 @@ class GlobalSafepoint {
void Wait();
};
- void StopThreads();
- void ResumeThreads();
+ void EnterSafepointScope();
+ void LeaveSafepointScope();
void AddLocalHeap(LocalHeap* local_heap);
void RemoveLocalHeap(LocalHeap* local_heap);
@@ -75,7 +75,7 @@ class GlobalSafepoint {
base::Mutex local_heaps_mutex_;
LocalHeap* local_heaps_head_;
- bool is_active_;
+ int active_safepoint_scopes_;
friend class SafepointScope;
friend class LocalHeap;
diff --git a/chromium/v8/src/heap/scavenger-inl.h b/chromium/v8/src/heap/scavenger-inl.h
index 3b3cc77b312..18933a5ac78 100644
--- a/chromium/v8/src/heap/scavenger-inl.h
+++ b/chromium/v8/src/heap/scavenger-inl.h
@@ -97,7 +97,7 @@ void Scavenger::PageMemoryFence(MaybeObject object) {
// with page initialization.
HeapObject heap_object;
if (object->GetHeapObject(&heap_object)) {
- MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
+ BasicMemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
}
#endif
}
@@ -211,7 +211,7 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
// object_size > kMaxRegularHeapObjectSize
if (V8_UNLIKELY(
FLAG_young_generation_large_objects &&
- MemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
+ BasicMemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
MemoryChunk::FromHeapObject(object)->owner_identity());
if (object.synchronized_compare_and_swap_map_word(
diff --git a/chromium/v8/src/heap/scavenger.cc b/chromium/v8/src/heap/scavenger.cc
index d0d0a30fb13..06d3af4c0ac 100644
--- a/chromium/v8/src/heap/scavenger.cc
+++ b/chromium/v8/src/heap/scavenger.cc
@@ -14,6 +14,7 @@
#include "src/heap/mark-compact-inl.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/remembered-set-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/sweeper.h"
#include "src/objects/data-handler-inl.h"
@@ -524,7 +525,7 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
target.IterateBodyFast(map, size, &visitor);
if (map.IsJSArrayBufferMap()) {
- DCHECK(!MemoryChunk::FromHeapObject(target)->IsLargePage());
+ DCHECK(!BasicMemoryChunk::FromHeapObject(target)->IsLargePage());
JSArrayBuffer::cast(target).YoungMarkExtensionPromoted();
}
}
diff --git a/chromium/v8/src/heap/setup-heap-internal.cc b/chromium/v8/src/heap/setup-heap-internal.cc
index b62dd5c7fd1..a4d14649d6b 100644
--- a/chromium/v8/src/heap/setup-heap-internal.cc
+++ b/chromium/v8/src/heap/setup-heap-internal.cc
@@ -393,6 +393,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, stale_register);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, self_reference_marker);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, basic_block_counters_marker);
ALLOCATE_VARSIZE_MAP(BIGINT_TYPE, bigint);
for (unsigned i = 0; i < arraysize(string_type_table); i++) {
@@ -420,16 +421,14 @@ bool Heap::CreateInitialMaps() {
#define TORQUE_ALLOCATE_MAP(NAME, Name, name) \
ALLOCATE_MAP(NAME, Name::kSize, name)
- TORQUE_INTERNAL_FIXED_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_MAP);
+ TORQUE_DEFINED_FIXED_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_MAP);
#undef TORQUE_ALLOCATE_MAP
#define TORQUE_ALLOCATE_VARSIZE_MAP(NAME, Name, name) \
ALLOCATE_VARSIZE_MAP(NAME, name)
- TORQUE_INTERNAL_VARSIZE_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_VARSIZE_MAP);
+ TORQUE_DEFINED_VARSIZE_INSTANCE_TYPE_LIST(TORQUE_ALLOCATE_VARSIZE_MAP);
#undef TORQUE_ALLOCATE_VARSIZE_MAP
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
-
ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell);
@@ -717,8 +716,9 @@ void Heap::CreateInitialObjects() {
handle(Smi::FromInt(-7), isolate()), "undefined",
Oddball::kStaleRegister));
- // Initialize the self-reference marker.
+ // Initialize marker objects used during compilation.
set_self_reference_marker(*factory->NewSelfReferenceMarker());
+ set_basic_block_counters_marker(*factory->NewBasicBlockCountersMarker());
set_interpreter_entry_trampoline_for_profiling(roots.undefined_value());
@@ -769,6 +769,8 @@ void Heap::CreateInitialObjects() {
set_number_string_cache(*factory->NewFixedArray(
kInitialNumberStringCacheSize * 2, AllocationType::kOld));
+ set_basic_block_profiling_data(ArrayList::cast(roots.empty_fixed_array()));
+
// Allocate cache for string split and regexp-multiple.
set_string_split_cache(*factory->NewFixedArray(
RegExpResultsCache::kRegExpResultsCacheSize, AllocationType::kOld));
@@ -780,14 +782,6 @@ void Heap::CreateInitialObjects() {
factory->NewManyClosuresCell(factory->undefined_value());
set_many_closures_cell(*many_closures_cell);
- {
- Handle<FixedArray> empty_sloppy_arguments_elements =
- factory->NewFixedArray(2, AllocationType::kReadOnly);
- empty_sloppy_arguments_elements->set_map_after_allocation(
- roots.sloppy_arguments_elements_map(), SKIP_WRITE_BARRIER);
- set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
- }
-
set_detached_contexts(roots.empty_weak_array_list());
set_retaining_path_targets(roots.empty_weak_array_list());
diff --git a/chromium/v8/src/heap/spaces-inl.h b/chromium/v8/src/heap/spaces-inl.h
index cb8b0a54d74..b54b6ac1150 100644
--- a/chromium/v8/src/heap/spaces-inl.h
+++ b/chromium/v8/src/heap/spaces-inl.h
@@ -6,15 +6,15 @@
#define V8_HEAP_SPACES_INL_H_
#include "src/base/atomic-utils.h"
-#include "src/base/bounded-page-allocator.h"
#include "src/base/v8-fallthrough.h"
#include "src/common/globals.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/memory-chunk-inl.h"
+#include "src/heap/new-spaces.h"
+#include "src/heap/paged-spaces.h"
#include "src/heap/spaces.h"
#include "src/objects/code-inl.h"
-#include "src/sanitizer/msan.h"
namespace v8 {
namespace internal {
@@ -42,63 +42,6 @@ PageRange::PageRange(Address start, Address limit)
#endif // DEBUG
}
-// -----------------------------------------------------------------------------
-// SemiSpaceObjectIterator
-
-HeapObject SemiSpaceObjectIterator::Next() {
- while (current_ != limit_) {
- if (Page::IsAlignedToPageSize(current_)) {
- Page* page = Page::FromAllocationAreaAddress(current_);
- page = page->next_page();
- DCHECK(page);
- current_ = page->area_start();
- if (current_ == limit_) return HeapObject();
- }
- HeapObject object = HeapObject::FromAddress(current_);
- current_ += object.Size();
- if (!object.IsFreeSpaceOrFiller()) {
- return object;
- }
- }
- return HeapObject();
-}
-
-// -----------------------------------------------------------------------------
-// PagedSpaceObjectIterator
-
-HeapObject PagedSpaceObjectIterator::Next() {
- do {
- HeapObject next_obj = FromCurrentPage();
- if (!next_obj.is_null()) return next_obj;
- } while (AdvanceToNextPage());
- return HeapObject();
-}
-
-HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
- while (cur_addr_ != cur_end_) {
- if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
- cur_addr_ = space_->limit();
- continue;
- }
- HeapObject obj = HeapObject::FromAddress(cur_addr_);
- const int obj_size = obj.Size();
- cur_addr_ += obj_size;
- DCHECK_LE(cur_addr_, cur_end_);
- if (!obj.IsFreeSpaceOrFiller()) {
- if (obj.IsCode()) {
- DCHECK_IMPLIES(
- space_->identity() != CODE_SPACE,
- space_->identity() == RO_SPACE && Code::cast(obj).is_builtin());
- DCHECK_CODEOBJECT_SIZE(obj_size, space_);
- } else {
- DCHECK_OBJECT_SIZE(obj_size);
- }
- return obj;
- }
- }
- return HeapObject();
-}
-
void Space::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount) {
base::CheckedIncrement(&external_backing_store_bytes_[type], amount);
@@ -120,93 +63,6 @@ void Space::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
}
-// -----------------------------------------------------------------------------
-// SemiSpace
-
-bool SemiSpace::Contains(HeapObject o) {
- MemoryChunk* memory_chunk = MemoryChunk::FromHeapObject(o);
- if (memory_chunk->IsLargePage()) return false;
- return id_ == kToSpace ? memory_chunk->IsToPage()
- : memory_chunk->IsFromPage();
-}
-
-bool SemiSpace::Contains(Object o) {
- return o.IsHeapObject() && Contains(HeapObject::cast(o));
-}
-
-bool SemiSpace::ContainsSlow(Address a) {
- for (Page* p : *this) {
- if (p == MemoryChunk::FromAddress(a)) return true;
- }
- return false;
-}
-
-// --------------------------------------------------------------------------
-// NewSpace
-
-bool NewSpace::Contains(Object o) {
- return o.IsHeapObject() && Contains(HeapObject::cast(o));
-}
-
-bool NewSpace::Contains(HeapObject o) {
- return MemoryChunk::FromHeapObject(o)->InNewSpace();
-}
-
-bool NewSpace::ContainsSlow(Address a) {
- return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
-}
-
-bool NewSpace::ToSpaceContainsSlow(Address a) {
- return to_space_.ContainsSlow(a);
-}
-
-bool NewSpace::ToSpaceContains(Object o) { return to_space_.Contains(o); }
-bool NewSpace::FromSpaceContains(Object o) { return from_space_.Contains(o); }
-
-bool PagedSpace::Contains(Address addr) {
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- return true;
- }
- return Page::FromAddress(addr)->owner() == this;
-}
-
-bool PagedSpace::Contains(Object o) {
- if (!o.IsHeapObject()) return false;
- return Page::FromAddress(o.ptr())->owner() == this;
-}
-
-void PagedSpace::UnlinkFreeListCategories(Page* page) {
- DCHECK_EQ(this, page->owner());
- page->ForAllFreeListCategories([this](FreeListCategory* category) {
- free_list()->RemoveCategory(category);
- });
-}
-
-size_t PagedSpace::RelinkFreeListCategories(Page* page) {
- DCHECK_EQ(this, page->owner());
- size_t added = 0;
- page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
- added += category->available();
- category->Relink(free_list());
- });
-
- DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
- page->AvailableInFreeList() ==
- page->AvailableInFreeListFromAllocatedBytes());
- return added;
-}
-
-bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
- if (allocation_info_.top() != kNullAddress) {
- const Address object_address = object.address();
- if ((allocation_info_.top() - object_size) == object_address) {
- allocation_info_.set_top(object_address);
- return true;
- }
- }
- return false;
-}
-
void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner_identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
@@ -278,53 +134,6 @@ MemoryChunk* OldGenerationMemoryChunkIterator::next() {
UNREACHABLE();
}
-bool FreeListCategory::is_linked(FreeList* owner) const {
- return prev_ != nullptr || next_ != nullptr ||
- owner->categories_[type_] == this;
-}
-
-void FreeListCategory::UpdateCountersAfterAllocation(size_t allocation_size) {
- available_ -= allocation_size;
-}
-
-Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
- FreeListCategory* category_top = top(type);
- if (category_top != nullptr) {
- DCHECK(!category_top->top().is_null());
- return Page::FromHeapObject(category_top->top());
- } else {
- return nullptr;
- }
-}
-
-Page* FreeListLegacy::GetPageForSize(size_t size_in_bytes) {
- const int minimum_category =
- static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
- Page* page = GetPageForCategoryType(kHuge);
- if (!page && static_cast<int>(kLarge) >= minimum_category)
- page = GetPageForCategoryType(kLarge);
- if (!page && static_cast<int>(kMedium) >= minimum_category)
- page = GetPageForCategoryType(kMedium);
- if (!page && static_cast<int>(kSmall) >= minimum_category)
- page = GetPageForCategoryType(kSmall);
- if (!page && static_cast<int>(kTiny) >= minimum_category)
- page = GetPageForCategoryType(kTiny);
- if (!page && static_cast<int>(kTiniest) >= minimum_category)
- page = GetPageForCategoryType(kTiniest);
- return page;
-}
-
-Page* FreeListFastAlloc::GetPageForSize(size_t size_in_bytes) {
- const int minimum_category =
- static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
- Page* page = GetPageForCategoryType(kHuge);
- if (!page && static_cast<int>(kLarge) >= minimum_category)
- page = GetPageForCategoryType(kLarge);
- if (!page && static_cast<int>(kMedium) >= minimum_category)
- page = GetPageForCategoryType(kMedium);
- return page;
-}
-
AllocationResult LocalAllocationBuffer::AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
@@ -343,216 +152,6 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
return AllocationResult(HeapObject::FromAddress(current_top));
}
-bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin) {
- if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
- return true;
- }
- return SlowRefillLinearAllocationArea(size_in_bytes, origin);
-}
-
-HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
- Address current_top = allocation_info_.top();
- Address new_top = current_top + size_in_bytes;
- DCHECK_LE(new_top, allocation_info_.limit());
- allocation_info_.set_top(new_top);
- return HeapObject::FromAddress(current_top);
-}
-
-HeapObject PagedSpace::TryAllocateLinearlyAligned(
- int* size_in_bytes, AllocationAlignment alignment) {
- Address current_top = allocation_info_.top();
- int filler_size = Heap::GetFillToAlign(current_top, alignment);
-
- Address new_top = current_top + filler_size + *size_in_bytes;
- if (new_top > allocation_info_.limit()) return HeapObject();
-
- allocation_info_.set_top(new_top);
- if (filler_size > 0) {
- *size_in_bytes += filler_size;
- return Heap::PrecedeWithFiller(ReadOnlyRoots(heap()),
- HeapObject::FromAddress(current_top),
- filler_size);
- }
-
- return HeapObject::FromAddress(current_top);
-}
-
-AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
- AllocationOrigin origin) {
- DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
- if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
- return AllocationResult::Retry(identity());
- }
- HeapObject object = AllocateLinearly(size_in_bytes);
- DCHECK(!object.is_null());
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- return object;
-}
-
-AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
- DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
- int allocation_size = size_in_bytes;
- HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
- if (object.is_null()) {
-    // We don't know exactly how much filler we need for alignment until the
-    // space is allocated, so assume the worst case.
- int filler_size = Heap::GetMaximumFillToAlign(alignment);
- allocation_size += filler_size;
- if (!EnsureLinearAllocationArea(allocation_size, origin)) {
- return AllocationResult::Retry(identity());
- }
- allocation_size = size_in_bytes;
- object = TryAllocateLinearlyAligned(&allocation_size, alignment);
- DCHECK(!object.is_null());
- }
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- return object;
-}
-
-AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- if (top_on_previous_step_ && top() < top_on_previous_step_ &&
- SupportsInlineAllocation()) {
- // Generated code decreased the top() pointer to do folded allocations.
- // The top_on_previous_step_ can be one byte beyond the current page.
- DCHECK_NE(top(), kNullAddress);
- DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
- Page::FromAllocationAreaAddress(top_on_previous_step_ - 1));
- top_on_previous_step_ = top();
- }
- size_t bytes_since_last =
- top_on_previous_step_ ? top() - top_on_previous_step_ : 0;
-
- DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
-#ifdef V8_HOST_ARCH_32_BIT
- AllocationResult result =
- alignment != kWordAligned
- ? AllocateRawAligned(size_in_bytes, alignment, origin)
- : AllocateRawUnaligned(size_in_bytes, origin);
-#else
- AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
-#endif
- HeapObject heap_obj;
- if (!result.IsRetry() && result.To(&heap_obj) && !is_local_space()) {
- AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
- heap_obj.address(), size_in_bytes);
- StartNextInlineAllocationStep();
- DCHECK_IMPLIES(
- heap()->incremental_marking()->black_allocation(),
- heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
- }
- return result;
-}
-
-// -----------------------------------------------------------------------------
-// NewSpace
-
-AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- Address top = allocation_info_.top();
- int filler_size = Heap::GetFillToAlign(top, alignment);
- int aligned_size_in_bytes = size_in_bytes + filler_size;
-
- if (allocation_info_.limit() - top <
- static_cast<uintptr_t>(aligned_size_in_bytes)) {
- // See if we can create room.
- if (!EnsureAllocation(size_in_bytes, alignment)) {
- return AllocationResult::Retry();
- }
-
- top = allocation_info_.top();
- filler_size = Heap::GetFillToAlign(top, alignment);
- aligned_size_in_bytes = size_in_bytes + filler_size;
- }
-
- HeapObject obj = HeapObject::FromAddress(top);
- allocation_info_.set_top(top + aligned_size_in_bytes);
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
- if (filler_size > 0) {
- obj = Heap::PrecedeWithFiller(ReadOnlyRoots(heap()), obj, filler_size);
- }
-
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- return obj;
-}
-
-AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
- AllocationOrigin origin) {
- Address top = allocation_info_.top();
- if (allocation_info_.limit() < top + size_in_bytes) {
- // See if we can create room.
- if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
- return AllocationResult::Retry();
- }
-
- top = allocation_info_.top();
- }
-
- HeapObject obj = HeapObject::FromAddress(top);
- allocation_info_.set_top(top + size_in_bytes);
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
-
- if (FLAG_trace_allocations_origins) {
- UpdateAllocationOrigins(origin);
- }
-
- return obj;
-}
-
-AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- if (top() < top_on_previous_step_) {
- // Generated code decreased the top() pointer to do folded allocations
- DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
- Page::FromAllocationAreaAddress(top_on_previous_step_));
- top_on_previous_step_ = top();
- }
-#ifdef V8_HOST_ARCH_32_BIT
- return alignment != kWordAligned
- ? AllocateRawAligned(size_in_bytes, alignment, origin)
- : AllocateRawUnaligned(size_in_bytes, origin);
-#else
-#ifdef V8_COMPRESS_POINTERS
- // TODO(ishell, v8:8875): Consider using aligned allocations once the
- // allocation alignment inconsistency is fixed. For now we keep using
- // unaligned access since both x64 and arm64 architectures (where pointer
- // compression is supported) allow unaligned access to doubles and full words.
-#endif // V8_COMPRESS_POINTERS
- return AllocateRawUnaligned(size_in_bytes, origin);
-#endif
-}
-
-V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
- int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
- base::MutexGuard guard(&mutex_);
- return AllocateRaw(size_in_bytes, alignment, origin);
-}
-
LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
AllocationResult result,
intptr_t size) {
diff --git a/chromium/v8/src/heap/spaces.cc b/chromium/v8/src/heap/spaces.cc
index 5e8874fafde..45c1de44c20 100644
--- a/chromium/v8/src/heap/spaces.cc
+++ b/chromium/v8/src/heap/spaces.cc
@@ -9,12 +9,9 @@
#include <utility>
#include "src/base/bits.h"
+#include "src/base/bounded-page-allocator.h"
#include "src/base/macros.h"
-#include "src/base/optional.h"
-#include "src/base/platform/semaphore.h"
#include "src/common/globals.h"
-#include "src/execution/vm-state-inl.h"
-#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
@@ -26,9 +23,8 @@
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact.h"
#include "src/heap/read-only-heap.h"
-#include "src/heap/remembered-set-inl.h"
+#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
-#include "src/heap/sweeper.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/objects/free-space-inl.h"
@@ -49,55 +45,6 @@ namespace internal {
STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
-// ----------------------------------------------------------------------------
-// PagedSpaceObjectIterator
-
-PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
- PagedSpace* space)
- : cur_addr_(kNullAddress),
- cur_end_(kNullAddress),
- space_(space),
- page_range_(space->first_page(), nullptr),
- current_page_(page_range_.begin()) {
- heap->mark_compact_collector()->EnsureSweepingCompleted();
-}
-
-PagedSpaceObjectIterator::PagedSpaceObjectIterator(Heap* heap,
- PagedSpace* space,
- Page* page)
- : cur_addr_(kNullAddress),
- cur_end_(kNullAddress),
- space_(space),
- page_range_(page),
- current_page_(page_range_.begin()) {
- heap->mark_compact_collector()->EnsureSweepingCompleted();
-#ifdef DEBUG
- AllocationSpace owner = page->owner_identity();
- DCHECK(owner == RO_SPACE || owner == OLD_SPACE || owner == MAP_SPACE ||
- owner == CODE_SPACE);
-#endif // DEBUG
-}
-
-PagedSpaceObjectIterator::PagedSpaceObjectIterator(OffThreadSpace* space)
- : cur_addr_(kNullAddress),
- cur_end_(kNullAddress),
- space_(space),
- page_range_(space->first_page(), nullptr),
- current_page_(page_range_.begin()) {}
-
-// We have hit the end of the current page and should advance to the next
-// page's block of objects.
-bool PagedSpaceObjectIterator::AdvanceToNextPage() {
- DCHECK_EQ(cur_addr_, cur_end_);
- if (current_page_ == page_range_.end()) return false;
- Page* cur_page = *(current_page_++);
-
- cur_addr_ = cur_page->area_start();
- cur_end_ = cur_page->area_end();
- DCHECK(cur_page->SweepingDone());
- return true;
-}
-
PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
: heap_(heap) {
DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
@@ -113,541 +60,6 @@ PauseAllocationObserversScope::~PauseAllocationObserversScope() {
}
}
-static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
- LAZY_INSTANCE_INITIALIZER;
-
-Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
- base::MutexGuard guard(&mutex_);
- auto it = recently_freed_.find(code_range_size);
- if (it == recently_freed_.end() || it->second.empty()) {
- return reinterpret_cast<Address>(GetRandomMmapAddr());
- }
- Address result = it->second.back();
- it->second.pop_back();
- return result;
-}
-
-void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
- size_t code_range_size) {
- base::MutexGuard guard(&mutex_);
- recently_freed_[code_range_size].push_back(code_range_start);
-}
-
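// Editor's note: CodeRangeAddressHint above is a small, mutex-protected cache
// that remembers recently freed code-range start addresses per requested size,
// so a later reservation of the same size can be retried at the same spot. A
// self-contained sketch of that pattern (illustrative only; the fallback hint
// is simply 0 here instead of a randomized mmap address):
#include <cstddef>
#include <cstdint>
#include <map>
#include <mutex>
#include <vector>

class AddressHintCache {
 public:
  uintptr_t GetHint(size_t size) {
    std::lock_guard<std::mutex> guard(mutex_);
    auto it = recently_freed_.find(size);
    if (it == recently_freed_.end() || it->second.empty()) return 0;
    uintptr_t hint = it->second.back();
    it->second.pop_back();
    return hint;
  }
  void NotifyFreed(uintptr_t start, size_t size) {
    std::lock_guard<std::mutex> guard(mutex_);
    recently_freed_[size].push_back(start);
  }

 private:
  std::mutex mutex_;
  std::map<size_t, std::vector<uintptr_t>> recently_freed_;
};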
-// -----------------------------------------------------------------------------
-// MemoryAllocator
-//
-
-MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
- size_t code_range_size)
- : isolate_(isolate),
- data_page_allocator_(isolate->page_allocator()),
- code_page_allocator_(nullptr),
- capacity_(RoundUp(capacity, Page::kPageSize)),
- size_(0),
- size_executable_(0),
- lowest_ever_allocated_(static_cast<Address>(-1ll)),
- highest_ever_allocated_(kNullAddress),
- unmapper_(isolate->heap(), this) {
- InitializeCodePageAllocator(data_page_allocator_, code_range_size);
-}
-
-void MemoryAllocator::InitializeCodePageAllocator(
- v8::PageAllocator* page_allocator, size_t requested) {
- DCHECK_NULL(code_page_allocator_instance_.get());
-
- code_page_allocator_ = page_allocator;
-
- if (requested == 0) {
- if (!isolate_->RequiresCodeRange()) return;
- // When a target requires the code range feature, we put all code objects
- // in a kMaximalCodeRangeSize range of virtual address space, so that
- // they can call each other with near calls.
- requested = kMaximalCodeRangeSize;
- } else if (requested <= kMinimumCodeRangeSize) {
- requested = kMinimumCodeRangeSize;
- }
-
- const size_t reserved_area =
- kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
- if (requested < (kMaximalCodeRangeSize - reserved_area)) {
- requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
-    // Fulfilling both the reserved pages requirement and huge code area
-    // alignment is not supported (requires re-implementation).
- DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
- }
- DCHECK(!isolate_->RequiresCodeRange() || requested <= kMaximalCodeRangeSize);
-
- Address hint =
- RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
- page_allocator->AllocatePageSize());
- VirtualMemory reservation(
- page_allocator, requested, reinterpret_cast<void*>(hint),
- Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
- if (!reservation.IsReserved()) {
- V8::FatalProcessOutOfMemory(isolate_,
- "CodeRange setup: allocate virtual memory");
- }
- code_range_ = reservation.region();
- isolate_->AddCodeRange(code_range_.begin(), code_range_.size());
-
- // We are sure that we have mapped a block of requested addresses.
- DCHECK_GE(reservation.size(), requested);
- Address base = reservation.address();
-
- // On some platforms, specifically Win64, we need to reserve some pages at
- // the beginning of an executable space. See
- // https://cs.chromium.org/chromium/src/components/crash/content/
- // app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
- // for details.
- if (reserved_area > 0) {
- if (!reservation.SetPermissions(base, reserved_area,
- PageAllocator::kReadWrite))
- V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");
-
- base += reserved_area;
- }
- Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
- size_t size =
- RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
- MemoryChunk::kPageSize);
- DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));
-
- LOG(isolate_,
- NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
- requested));
-
- code_reservation_ = std::move(reservation);
- code_page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
- page_allocator, aligned_base, size,
- static_cast<size_t>(MemoryChunk::kAlignment));
- code_page_allocator_ = code_page_allocator_instance_.get();
-}
-
-void MemoryAllocator::TearDown() {
- unmapper()->TearDown();
-
- // Check that spaces were torn down before MemoryAllocator.
- DCHECK_EQ(size_, 0u);
- // TODO(gc) this will be true again when we fix FreeMemory.
- // DCHECK_EQ(0, size_executable_);
- capacity_ = 0;
-
- if (last_chunk_.IsReserved()) {
- last_chunk_.Free();
- }
-
- if (code_page_allocator_instance_.get()) {
- DCHECK(!code_range_.is_empty());
- code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
- code_range_.size());
- code_range_ = base::AddressRegion();
- code_page_allocator_instance_.reset();
- }
- code_page_allocator_ = nullptr;
- data_page_allocator_ = nullptr;
-}
-
-class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
- public:
- explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
- : CancelableTask(isolate),
- unmapper_(unmapper),
- tracer_(isolate->heap()->tracer()) {}
-
- private:
- void RunInternal() override {
- TRACE_BACKGROUND_GC(tracer_,
- GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
- unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
- unmapper_->active_unmapping_tasks_--;
- unmapper_->pending_unmapping_tasks_semaphore_.Signal();
- if (FLAG_trace_unmapper) {
- PrintIsolate(unmapper_->heap_->isolate(),
- "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
- }
- }
-
- Unmapper* const unmapper_;
- GCTracer* const tracer_;
- DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
-};
-
-void MemoryAllocator::Unmapper::FreeQueuedChunks() {
- if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
- if (!MakeRoomForNewTasks()) {
- // kMaxUnmapperTasks are already running. Avoid creating any more.
- if (FLAG_trace_unmapper) {
- PrintIsolate(heap_->isolate(),
- "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
- kMaxUnmapperTasks);
- }
- return;
- }
- auto task = std::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
- if (FLAG_trace_unmapper) {
- PrintIsolate(heap_->isolate(),
- "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
- task->id());
- }
- DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
- DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
- DCHECK_GE(active_unmapping_tasks_, 0);
- active_unmapping_tasks_++;
- task_ids_[pending_unmapping_tasks_++] = task->id();
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
- } else {
- PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
- }
-}
-
-void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
- for (int i = 0; i < pending_unmapping_tasks_; i++) {
- if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
- TryAbortResult::kTaskAborted) {
- pending_unmapping_tasks_semaphore_.Wait();
- }
- }
- pending_unmapping_tasks_ = 0;
- active_unmapping_tasks_ = 0;
-
- if (FLAG_trace_unmapper) {
- PrintIsolate(
- heap_->isolate(),
- "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
- }
-}
-
-void MemoryAllocator::Unmapper::PrepareForGC() {
- // Free non-regular chunks because they cannot be re-used.
- PerformFreeMemoryOnQueuedNonRegularChunks();
-}
-
-void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
- CancelAndWaitForPendingTasks();
- PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
-}
-
-bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
- DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);
-
- if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
- // All previous unmapping tasks have been run to completion.
- // Finalize those tasks to make room for new ones.
- CancelAndWaitForPendingTasks();
- }
- return pending_unmapping_tasks_ != kMaxUnmapperTasks;
-}
-
-void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
- MemoryChunk* chunk = nullptr;
- while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
- allocator_->PerformFreeMemory(chunk);
- }
-}
-
-template <MemoryAllocator::Unmapper::FreeMode mode>
-void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
- MemoryChunk* chunk = nullptr;
- if (FLAG_trace_unmapper) {
- PrintIsolate(
- heap_->isolate(),
- "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
- NumberOfChunks());
- }
- // Regular chunks.
- while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
- bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
- allocator_->PerformFreeMemory(chunk);
- if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
- }
- if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
- // The previous loop uncommitted any pages marked as pooled and added them
- // to the pooled list. In case of kReleasePooled we need to free them
- // though.
- while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
- allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
- }
- }
- PerformFreeMemoryOnQueuedNonRegularChunks();
-}
-
-void MemoryAllocator::Unmapper::TearDown() {
- CHECK_EQ(0, pending_unmapping_tasks_);
- PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
- for (int i = 0; i < kNumberOfChunkQueues; i++) {
- DCHECK(chunks_[i].empty());
- }
-}
-
-size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
- base::MutexGuard guard(&mutex_);
- return chunks_[kRegular].size() + chunks_[kNonRegular].size();
-}
-
-int MemoryAllocator::Unmapper::NumberOfChunks() {
- base::MutexGuard guard(&mutex_);
- size_t result = 0;
- for (int i = 0; i < kNumberOfChunkQueues; i++) {
- result += chunks_[i].size();
- }
- return static_cast<int>(result);
-}
-
-size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
- base::MutexGuard guard(&mutex_);
-
- size_t sum = 0;
-  // kPooled chunks are already uncommitted. We only have to account for
- // kRegular and kNonRegular chunks.
- for (auto& chunk : chunks_[kRegular]) {
- sum += chunk->size();
- }
- for (auto& chunk : chunks_[kNonRegular]) {
- sum += chunk->size();
- }
- return sum;
-}
-
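// Editor's note: the removed Unmapper above queues no-longer-needed chunks and
// frees them on a background task, bounded by a task limit. A minimal sketch
// of just the queue-and-drain part of that pattern, using a plain std::thread
// instead of the V8 platform's worker-thread API (illustrative only;
// ChunkHandle and ReleaseChunk are hypothetical stand-ins):
#include <cstddef>
#include <mutex>
#include <thread>
#include <vector>

struct ChunkHandle { void* base; size_t size; };
void ReleaseChunk(const ChunkHandle&) { /* unmap / decommit here */ }

class BackgroundUnmapper {
 public:
  void AddChunkSafe(ChunkHandle chunk) {
    std::lock_guard<std::mutex> guard(mutex_);
    queued_.push_back(chunk);
  }
  // Runs one drain pass on a background thread and waits for it to finish.
  void FreeQueuedChunks() {
    std::thread worker([this] { Drain(); });
    worker.join();
  }

 private:
  void Drain() {
    std::vector<ChunkHandle> local;
    {
      std::lock_guard<std::mutex> guard(mutex_);
      local.swap(queued_);
    }
    for (const ChunkHandle& chunk : local) ReleaseChunk(chunk);
  }
  std::mutex mutex_;
  std::vector<ChunkHandle> queued_;
};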
-bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
- Address base = reservation->address();
- size_t size = reservation->size();
- if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
- return false;
- }
- UpdateAllocatedSpaceLimits(base, base + size);
- return true;
-}
-
-bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
- size_t size = reservation->size();
- if (!reservation->SetPermissions(reservation->address(), size,
- PageAllocator::kNoAccess)) {
- return false;
- }
- return true;
-}
-
-void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
- Address base, size_t size) {
- CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
-}
-
-Address MemoryAllocator::AllocateAlignedMemory(
- size_t reserve_size, size_t commit_size, size_t alignment,
- Executability executable, void* hint, VirtualMemory* controller) {
- v8::PageAllocator* page_allocator = this->page_allocator(executable);
- DCHECK(commit_size <= reserve_size);
- VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
- if (!reservation.IsReserved()) return kNullAddress;
- Address base = reservation.address();
- size_ += reservation.size();
-
- if (executable == EXECUTABLE) {
- if (!CommitExecutableMemory(&reservation, base, commit_size,
- reserve_size)) {
- base = kNullAddress;
- }
- } else {
- if (reservation.SetPermissions(base, commit_size,
- PageAllocator::kReadWrite)) {
- UpdateAllocatedSpaceLimits(base, base + commit_size);
- } else {
- base = kNullAddress;
- }
- }
-
- if (base == kNullAddress) {
- // Failed to commit the body. Free the mapping and any partially committed
- // regions inside it.
- reservation.Free();
- size_ -= reserve_size;
- return kNullAddress;
- }
-
- *controller = std::move(reservation);
- return base;
-}
-
-void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
- auto result = code_object_registry_newly_allocated_.insert(code);
- USE(result);
- DCHECK(result.second);
-}
-
-void CodeObjectRegistry::RegisterAlreadyExistingCodeObject(Address code) {
- code_object_registry_already_existing_.push_back(code);
-}
-
-void CodeObjectRegistry::Clear() {
- code_object_registry_already_existing_.clear();
- code_object_registry_newly_allocated_.clear();
-}
-
-void CodeObjectRegistry::Finalize() {
- code_object_registry_already_existing_.shrink_to_fit();
-}
-
-bool CodeObjectRegistry::Contains(Address object) const {
- return (code_object_registry_newly_allocated_.find(object) !=
- code_object_registry_newly_allocated_.end()) ||
- (std::binary_search(code_object_registry_already_existing_.begin(),
- code_object_registry_already_existing_.end(),
- object));
-}
-
-Address CodeObjectRegistry::GetCodeObjectStartFromInnerAddress(
- Address address) const {
- // Let's first find the object which comes right before address in the vector
- // of already existing code objects.
- Address already_existing_set_ = 0;
- Address newly_allocated_set_ = 0;
- if (!code_object_registry_already_existing_.empty()) {
- auto it =
- std::upper_bound(code_object_registry_already_existing_.begin(),
- code_object_registry_already_existing_.end(), address);
- if (it != code_object_registry_already_existing_.begin()) {
- already_existing_set_ = *(--it);
- }
- }
-
- // Next, let's find the object which comes right before address in the set
- // of newly allocated code objects.
- if (!code_object_registry_newly_allocated_.empty()) {
- auto it = code_object_registry_newly_allocated_.upper_bound(address);
- if (it != code_object_registry_newly_allocated_.begin()) {
- newly_allocated_set_ = *(--it);
- }
- }
-
-  // The code object which contains the address has to be in one of the two
-  // data structures.
- DCHECK(already_existing_set_ != 0 || newly_allocated_set_ != 0);
-
- // The address which is closest to the given address is the code object.
- return already_existing_set_ > newly_allocated_set_ ? already_existing_set_
- : newly_allocated_set_;
-}
-
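// Editor's note: GetCodeObjectStartFromInnerAddress() above reduces to "find
// the greatest registered start address that is <= the inner address", which
// std::upper_bound answers directly on a sorted vector. A self-contained
// sketch of that lookup (illustrative only, not the V8 registry):
#include <algorithm>
#include <cstdint>
#include <vector>

// Returns the greatest element of `sorted_starts` that is <= `address`,
// or 0 if there is none. `sorted_starts` must be sorted ascending.
inline uintptr_t PrecedingStart(const std::vector<uintptr_t>& sorted_starts,
                                uintptr_t address) {
  auto it = std::upper_bound(sorted_starts.begin(), sorted_starts.end(),
                             address);
  if (it == sorted_starts.begin()) return 0;
  return *(--it);
}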
-namespace {
-
-PageAllocator::Permission DefaultWritableCodePermissions() {
- return FLAG_jitless ? PageAllocator::kReadWrite
- : PageAllocator::kReadWriteExecute;
-}
-
-} // namespace
-
-MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
- Address area_start, Address area_end,
- Executability executable, Space* owner,
- VirtualMemory reservation) {
- MemoryChunk* chunk = FromAddress(base);
- DCHECK_EQ(base, chunk->address());
- new (chunk) BasicMemoryChunk(size, area_start, area_end);
-
- chunk->heap_ = heap;
- chunk->set_owner(owner);
- chunk->InitializeReservedMemory();
- base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
- nullptr);
- base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
- nullptr);
- chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
- chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
- chunk->progress_bar_ = 0;
- chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
- chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
- chunk->page_protection_change_mutex_ = new base::Mutex();
- chunk->write_unprotect_counter_ = 0;
- chunk->mutex_ = new base::Mutex();
- chunk->allocated_bytes_ = chunk->area_size();
- chunk->wasted_memory_ = 0;
- chunk->young_generation_bitmap_ = nullptr;
- chunk->local_tracker_ = nullptr;
-
- chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
- 0;
- chunk->external_backing_store_bytes_
- [ExternalBackingStoreType::kExternalString] = 0;
-
- chunk->categories_ = nullptr;
-
- heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
- 0);
- if (owner->identity() == RO_SPACE) {
- heap->incremental_marking()
- ->non_atomic_marking_state()
- ->bitmap(chunk)
- ->MarkAllBits();
- chunk->SetFlag(READ_ONLY_HEAP);
- }
-
- if (executable == EXECUTABLE) {
- chunk->SetFlag(IS_EXECUTABLE);
- if (heap->write_protect_code_memory()) {
- chunk->write_unprotect_counter_ =
- heap->code_space_memory_modification_scope_depth();
- } else {
- size_t page_size = MemoryAllocator::GetCommitPageSize();
- DCHECK(IsAligned(area_start, page_size));
- size_t area_size = RoundUp(area_end - area_start, page_size);
- CHECK(reservation.SetPermissions(area_start, area_size,
- DefaultWritableCodePermissions()));
- }
- }
-
- chunk->reservation_ = std::move(reservation);
-
- if (owner->identity() == CODE_SPACE) {
- chunk->code_object_registry_ = new CodeObjectRegistry();
- } else {
- chunk->code_object_registry_ = nullptr;
- }
-
- chunk->possibly_empty_buckets_.Initialize();
-
- return chunk;
-}
-
-Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
- Page* page = static_cast<Page*>(chunk);
- DCHECK_EQ(
- MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
- page->area_size());
- // Make sure that categories are initialized before freeing the area.
- page->ResetAllocationStatistics();
- page->SetOldGenerationPageFlags(!is_off_thread_space() &&
- heap()->incremental_marking()->IsMarking());
- page->AllocateFreeListCategories();
- page->InitializeFreeListCategories();
- page->list_node().Initialize();
- page->InitializationMemoryFence();
- return page;
-}
-
-Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
- bool in_to_space = (id() != kFromSpace);
- chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
- Page* page = static_cast<Page*>(chunk);
- page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
- page->AllocateLocalTracker();
- page->list_node().Initialize();
-#ifdef ENABLE_MINOR_MC
- if (FLAG_minor_mc) {
- page->AllocateYoungGenerationBitmap();
- heap()
- ->minor_mark_compact_collector()
- ->non_atomic_marking_state()
- ->ClearLiveness(page);
- }
-#endif // ENABLE_MINOR_MC
- page->InitializationMemoryFence();
- return page;
-}
-
void Page::AllocateFreeListCategories() {
DCHECK_NULL(categories_);
categories_ =
@@ -718,169 +130,6 @@ void Page::MergeOldToNewRememberedSets() {
sweeping_slot_set_ = nullptr;
}
-size_t MemoryChunk::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
- return size();
- return high_water_mark_;
-}
-
-bool MemoryChunk::InOldSpace() const { return owner_identity() == OLD_SPACE; }
-
-bool MemoryChunk::InLargeObjectSpace() const {
- return owner_identity() == LO_SPACE;
-}
-
-MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
- size_t commit_area_size,
- Executability executable,
- Space* owner) {
- DCHECK_LE(commit_area_size, reserve_area_size);
-
- size_t chunk_size;
- Heap* heap = isolate_->heap();
- Address base = kNullAddress;
- VirtualMemory reservation;
- Address area_start = kNullAddress;
- Address area_end = kNullAddress;
- void* address_hint =
- AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
-
- //
- // MemoryChunk layout:
- //
- // Executable
- // +----------------------------+<- base aligned with MemoryChunk::kAlignment
- // | Header |
- // +----------------------------+<- base + CodePageGuardStartOffset
- // | Guard |
- // +----------------------------+<- area_start_
- // | Area |
- // +----------------------------+<- area_end_ (area_start + commit_area_size)
- // | Committed but not used |
- // +----------------------------+<- aligned at OS page boundary
- // | Reserved but not committed |
- // +----------------------------+<- aligned at OS page boundary
- // | Guard |
- // +----------------------------+<- base + chunk_size
- //
- // Non-executable
- // +----------------------------+<- base aligned with MemoryChunk::kAlignment
- // | Header |
- // +----------------------------+<- area_start_ (base + area_start_)
- // | Area |
- // +----------------------------+<- area_end_ (area_start + commit_area_size)
- // | Committed but not used |
- // +----------------------------+<- aligned at OS page boundary
- // | Reserved but not committed |
- // +----------------------------+<- base + chunk_size
- //
-
- if (executable == EXECUTABLE) {
- chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
- reserve_area_size +
- MemoryChunkLayout::CodePageGuardSize(),
- GetCommitPageSize());
-
- // Size of header (not executable) plus area (executable).
- size_t commit_size = ::RoundUp(
- MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
- GetCommitPageSize());
- base =
- AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
- executable, address_hint, &reservation);
- if (base == kNullAddress) return nullptr;
- // Update executable memory size.
- size_executable_ += reservation.size();
-
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
- ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
- commit_area_size, kZapValue);
- }
-
- area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
- area_end = area_start + commit_area_size;
- } else {
- chunk_size = ::RoundUp(
- MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
- GetCommitPageSize());
- size_t commit_size = ::RoundUp(
- MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
- GetCommitPageSize());
- base =
- AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
- executable, address_hint, &reservation);
-
- if (base == kNullAddress) return nullptr;
-
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(
- base,
- MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
- kZapValue);
- }
-
- area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
- area_end = area_start + commit_area_size;
- }
-
-  // Use chunk_size for statistics because we treat reserved but not-yet
-  // committed memory regions of chunks as allocated.
- LOG(isolate_,
- NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
-
- // We cannot use the last chunk in the address space because we would
- // overflow when comparing top and limit if this chunk is used for a
- // linear allocation area.
- if ((base + chunk_size) == 0u) {
- CHECK(!last_chunk_.IsReserved());
- last_chunk_ = std::move(reservation);
- UncommitMemory(&last_chunk_);
- size_ -= chunk_size;
- if (executable == EXECUTABLE) {
- size_executable_ -= chunk_size;
- }
- CHECK(last_chunk_.IsReserved());
- return AllocateChunk(reserve_area_size, commit_area_size, executable,
- owner);
- }
-
- MemoryChunk* chunk =
- MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
- executable, owner, std::move(reservation));
-
- if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
- return chunk;
-}
-
-void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
- if (is_marking) {
- SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- SetFlag(MemoryChunk::INCREMENTAL_MARKING);
- } else {
- ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
- }
-}
-
-void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
- SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
- if (is_marking) {
- SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- SetFlag(MemoryChunk::INCREMENTAL_MARKING);
- } else {
- ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
- }
-}
-
-void Page::ResetAllocationStatistics() {
- allocated_bytes_ = area_size();
- wasted_memory_ = 0;
-}
-
void Page::AllocateLocalTracker() {
DCHECK_NULL(local_tracker_);
local_tracker_ = new LocalArrayBufferTracker(this);
@@ -972,6 +221,19 @@ void Page::CreateBlackArea(Address start, Address end) {
marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
}
+void Page::CreateBlackAreaBackground(Address start, Address end) {
+ DCHECK(heap()->incremental_marking()->black_allocation());
+ DCHECK_EQ(Page::FromAddress(start), this);
+ DCHECK_LT(start, end);
+ DCHECK_EQ(Page::FromAddress(end - 1), this);
+ IncrementalMarking::AtomicMarkingState* marking_state =
+ heap()->incremental_marking()->atomic_marking_state();
+ marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
+ AddressToMarkbitIndex(end));
+ heap()->incremental_marking()->IncrementLiveBytesBackground(
+ this, static_cast<intptr_t>(end - start));
+}
+
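// Editor's note: the added CreateBlackAreaBackground() marks every markbit in
// [start, end) black via SetRange() on the page's marking bitmap, and the
// matching DestroyBlackAreaBackground() later in this hunk clears the same
// range. A minimal sketch of what a word-based bit-range setter does
// (illustrative only: bit-by-bit, and without the atomic bitmap accesses a
// concurrent marker would need):
#include <cstddef>
#include <cstdint>
#include <vector>

class SimpleBitmap {
 public:
  explicit SimpleBitmap(size_t bits) : words_((bits + 63) / 64, 0) {}
  void SetRange(size_t begin, size_t end) {  // sets bits in [begin, end)
    for (size_t i = begin; i < end; ++i) {
      words_[i / 64] |= uint64_t{1} << (i % 64);
    }
  }
  void ClearRange(size_t begin, size_t end) {  // clears bits in [begin, end)
    for (size_t i = begin; i < end; ++i) {
      words_[i / 64] &= ~(uint64_t{1} << (i % 64));
    }
  }

 private:
  std::vector<uint64_t> words_;
};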
void Page::DestroyBlackArea(Address start, Address end) {
DCHECK(heap()->incremental_marking()->black_allocation());
DCHECK_EQ(Page::FromAddress(start), this);
@@ -984,441 +246,17 @@ void Page::DestroyBlackArea(Address start, Address end) {
marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
}
-void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
- size_t bytes_to_free,
- Address new_area_end) {
- VirtualMemory* reservation = chunk->reserved_memory();
- DCHECK(reservation->IsReserved());
- chunk->set_size(chunk->size() - bytes_to_free);
- chunk->set_area_end(new_area_end);
- if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
- // Add guard page at the end.
- size_t page_size = GetCommitPageSize();
- DCHECK_EQ(0, chunk->area_end() % static_cast<Address>(page_size));
- DCHECK_EQ(chunk->address() + chunk->size(),
- chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
- reservation->SetPermissions(chunk->area_end(), page_size,
- PageAllocator::kNoAccess);
- }
-  // On e.g. Windows, a reservation may be larger than a page, and a partial
-  // release starting at |start_free| will also release the potentially unused
-  // part behind the current page.
- const size_t released_bytes = reservation->Release(start_free);
- DCHECK_GE(size_, released_bytes);
- size_ -= released_bytes;
-}
-
-void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
- DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
- VirtualMemory* reservation = chunk->reserved_memory();
- const size_t size =
- reservation->IsReserved() ? reservation->size() : chunk->size();
- DCHECK_GE(size_, static_cast<size_t>(size));
- size_ -= size;
- if (chunk->executable() == EXECUTABLE) {
- DCHECK_GE(size_executable_, size);
- size_executable_ -= size;
- }
-
- if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
- chunk->SetFlag(MemoryChunk::UNREGISTERED);
-}
-
-void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
- DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
- LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
- UnregisterMemory(chunk);
- isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
- chunk->IsEvacuationCandidate());
- chunk->SetFlag(MemoryChunk::PRE_FREED);
-}
-
-void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
- DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
- DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
- chunk->ReleaseAllAllocatedMemory();
-
- VirtualMemory* reservation = chunk->reserved_memory();
- if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
- UncommitMemory(reservation);
- } else {
- if (reservation->IsReserved()) {
- reservation->Free();
- } else {
-      // Only read-only pages can have a non-initialized reservation object.
- DCHECK_EQ(RO_SPACE, chunk->owner_identity());
- FreeMemory(page_allocator(chunk->executable()), chunk->address(),
- chunk->size());
- }
- }
-}
-
-template <MemoryAllocator::FreeMode mode>
-void MemoryAllocator::Free(MemoryChunk* chunk) {
- switch (mode) {
- case kFull:
- PreFreeMemory(chunk);
- PerformFreeMemory(chunk);
- break;
- case kAlreadyPooled:
- // Pooled pages cannot be touched anymore as their memory is uncommitted.
-      // Pooled pages are not executable.
- FreeMemory(data_page_allocator(), chunk->address(),
- static_cast<size_t>(MemoryChunk::kPageSize));
- break;
- case kPooledAndQueue:
- DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
- DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
- chunk->SetFlag(MemoryChunk::POOLED);
- V8_FALLTHROUGH;
- case kPreFreeAndQueue:
- PreFreeMemory(chunk);
- // The chunks added to this queue will be freed by a concurrent thread.
- unmapper()->AddMemoryChunkSafe(chunk);
- break;
- }
-}
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kFull>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
- MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
-
-template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
-Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
- Executability executable) {
- MemoryChunk* chunk = nullptr;
- if (alloc_mode == kPooled) {
- DCHECK_EQ(size, static_cast<size_t>(
- MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
- owner->identity())));
- DCHECK_EQ(executable, NOT_EXECUTABLE);
- chunk = AllocatePagePooled(owner);
- }
- if (chunk == nullptr) {
- chunk = AllocateChunk(size, size, executable, owner);
- }
- if (chunk == nullptr) return nullptr;
- return owner->InitializePage(chunk);
-}
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
- size_t size, PagedSpace* owner, Executability executable);
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-
-LargePage* MemoryAllocator::AllocateLargePage(size_t size,
- LargeObjectSpace* owner,
- Executability executable) {
- MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
- if (chunk == nullptr) return nullptr;
- return LargePage::Initialize(isolate_->heap(), chunk, executable);
-}
-
-template <typename SpaceType>
-MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
- MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
- if (chunk == nullptr) return nullptr;
- const int size = MemoryChunk::kPageSize;
- const Address start = reinterpret_cast<Address>(chunk);
- const Address area_start =
- start +
- MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
- const Address area_end = start + size;
- // Pooled pages are always regular data pages.
- DCHECK_NE(CODE_SPACE, owner->identity());
- VirtualMemory reservation(data_page_allocator(), start, size);
- if (!CommitMemory(&reservation)) return nullptr;
- if (Heap::ShouldZapGarbage()) {
- ZapBlock(start, size, kZapValue);
- }
- MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
- NOT_EXECUTABLE, owner, std::move(reservation));
- size_ += size;
- return chunk;
-}
-
-void MemoryAllocator::ZapBlock(Address start, size_t size,
- uintptr_t zap_value) {
- DCHECK(IsAligned(start, kTaggedSize));
- DCHECK(IsAligned(size, kTaggedSize));
- MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(zap_value)),
- size >> kTaggedSizeLog2);
-}
-
-intptr_t MemoryAllocator::GetCommitPageSize() {
- if (FLAG_v8_os_page_size != 0) {
- DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
- return FLAG_v8_os_page_size * KB;
- } else {
- return CommitPageSize();
- }
-}
-
-base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
- size_t size) {
- size_t page_size = MemoryAllocator::GetCommitPageSize();
- if (size < page_size + FreeSpace::kSize) {
- return base::AddressRegion(0, 0);
- }
- Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
- Address discardable_end = RoundDown(addr + size, page_size);
- if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
- return base::AddressRegion(discardable_start,
- discardable_end - discardable_start);
-}
-
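// Editor's note: ComputeDiscardMemoryArea() above carves the largest
// OS-page-aligned interior out of a free region (keeping the FreeSpace header
// at the front) by rounding the start up and the end down to page boundaries.
// A minimal sketch of that arithmetic, with a hypothetical 16-byte header and
// assuming a power-of-two page size (illustrative only):
#include <cstddef>
#include <cstdint>
#include <utility>

constexpr uintptr_t kHeaderSize = 16;  // hypothetical stand-in for FreeSpace::kSize

inline uintptr_t RoundUpTo(uintptr_t x, uintptr_t align) {
  return (x + align - 1) & ~(align - 1);
}
inline uintptr_t RoundDownTo(uintptr_t x, uintptr_t align) {
  return x & ~(align - 1);
}

// Returns {start, length} of the discardable sub-region, or {0, 0} if the
// region is too small to contain a full page past the header.
inline std::pair<uintptr_t, size_t> DiscardableArea(uintptr_t addr, size_t size,
                                                    uintptr_t page_size) {
  if (size < page_size + kHeaderSize) return {0, 0};
  uintptr_t start = RoundUpTo(addr + kHeaderSize, page_size);
  uintptr_t end = RoundDownTo(addr + size, page_size);
  if (start >= end) return {0, 0};
  return {start, static_cast<size_t>(end - start)};
}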
-bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
- size_t commit_size,
- size_t reserved_size) {
- const size_t page_size = GetCommitPageSize();
- // All addresses and sizes must be aligned to the commit page size.
- DCHECK(IsAligned(start, page_size));
- DCHECK_EQ(0, commit_size % page_size);
- DCHECK_EQ(0, reserved_size % page_size);
- const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
- const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
- const size_t code_area_offset =
- MemoryChunkLayout::ObjectStartOffsetInCodePage();
- // reserved_size includes two guard regions, commit_size does not.
- DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
- const Address pre_guard_page = start + pre_guard_offset;
- const Address code_area = start + code_area_offset;
- const Address post_guard_page = start + reserved_size - guard_size;
- // Commit the non-executable header, from start to pre-code guard page.
- if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
- // Create the pre-code guard page, following the header.
- if (vm->SetPermissions(pre_guard_page, page_size,
- PageAllocator::kNoAccess)) {
- // Commit the executable code body.
- if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
- PageAllocator::kReadWrite)) {
- // Create the post-code guard page.
- if (vm->SetPermissions(post_guard_page, page_size,
- PageAllocator::kNoAccess)) {
- UpdateAllocatedSpaceLimits(start, code_area + commit_size);
- return true;
- }
- vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
- }
- }
- vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
- }
- return false;
-}
-
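// Editor's note: CommitExecutableMemory() above commits an executable chunk in
// four slices -- read-write header, no-access pre-guard page, executable body,
// no-access post-guard page at the very end of the reservation -- and rolls
// earlier permission changes back if a later step fails. A sketch of that
// commit-or-roll-back shape, where SetPerms() is a hypothetical stand-in for
// VirtualMemory::SetPermissions() and the slice sizes are parameters rather
// than the exact V8 layout offsets:
#include <cstddef>
#include <cstdint>

enum class Perms { kNoAccess, kReadWrite };
bool SetPerms(uintptr_t /*start*/, size_t /*size*/, Perms /*p*/) { return true; }

bool CommitWithGuards(uintptr_t start, size_t header_size, size_t page_size,
                      size_t body_size, size_t reserved_size) {
  const uintptr_t pre_guard = start + header_size;
  const uintptr_t body = pre_guard + page_size;
  const uintptr_t post_guard = start + reserved_size - page_size;
  if (SetPerms(start, header_size, Perms::kReadWrite)) {        // header
    if (SetPerms(pre_guard, page_size, Perms::kNoAccess)) {     // pre-guard
      if (SetPerms(body, body_size, Perms::kReadWrite)) {       // code body
        if (SetPerms(post_guard, page_size, Perms::kNoAccess))  // post-guard
          return true;
        SetPerms(body, body_size, Perms::kNoAccess);            // roll back body
      }
    }
    SetPerms(start, header_size, Perms::kNoAccess);             // roll back header
  }
  return false;
}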
-
-// -----------------------------------------------------------------------------
-// MemoryChunk implementation
-
-void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
- if (mutex_ != nullptr) {
- delete mutex_;
- mutex_ = nullptr;
- }
- if (page_protection_change_mutex_ != nullptr) {
- delete page_protection_change_mutex_;
- page_protection_change_mutex_ = nullptr;
- }
- if (code_object_registry_ != nullptr) {
- delete code_object_registry_;
- code_object_registry_ = nullptr;
- }
-
- possibly_empty_buckets_.Release();
- ReleaseSlotSet<OLD_TO_NEW>();
- ReleaseSweepingSlotSet();
- ReleaseSlotSet<OLD_TO_OLD>();
- ReleaseTypedSlotSet<OLD_TO_NEW>();
- ReleaseTypedSlotSet<OLD_TO_OLD>();
- ReleaseInvalidatedSlots<OLD_TO_NEW>();
- ReleaseInvalidatedSlots<OLD_TO_OLD>();
-
- if (local_tracker_ != nullptr) ReleaseLocalTracker();
- if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
-
- if (!IsLargePage()) {
- Page* page = static_cast<Page*>(this);
- page->ReleaseFreeListCategories();
- }
-}
-
-void MemoryChunk::ReleaseAllAllocatedMemory() {
- ReleaseAllocatedMemoryNeededForWritableChunk();
- if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
-}
-
-template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
-template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-SlotSet* MemoryChunk::AllocateSlotSet() {
- return AllocateSlotSet(&slot_set_[type]);
-}
-
-SlotSet* MemoryChunk::AllocateSweepingSlotSet() {
- return AllocateSlotSet(&sweeping_slot_set_);
-}
-
-SlotSet* MemoryChunk::AllocateSlotSet(SlotSet** slot_set) {
- SlotSet* new_slot_set = SlotSet::Allocate(buckets());
- SlotSet* old_slot_set = base::AsAtomicPointer::AcquireRelease_CompareAndSwap(
- slot_set, nullptr, new_slot_set);
- if (old_slot_set != nullptr) {
- SlotSet::Delete(new_slot_set, buckets());
- new_slot_set = old_slot_set;
- }
- DCHECK(new_slot_set);
- return new_slot_set;
-}
-
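// Editor's note: AllocateSlotSet() above lazily creates the slot set and
// installs it with an acquire/release compare-and-swap, so concurrent callers
// race safely: the loser deletes its freshly allocated set and uses the
// winner's. A self-contained sketch of that pattern with std::atomic
// (illustrative only; SlotSetLike is a hypothetical payload type):
#include <atomic>

struct SlotSetLike { /* ... bucket storage ... */ };

inline SlotSetLike* GetOrCreate(std::atomic<SlotSetLike*>* slot) {
  SlotSetLike* existing = slot->load(std::memory_order_acquire);
  if (existing != nullptr) return existing;
  SlotSetLike* fresh = new SlotSetLike();
  SlotSetLike* expected = nullptr;
  if (slot->compare_exchange_strong(expected, fresh,
                                    std::memory_order_acq_rel,
                                    std::memory_order_acquire)) {
    return fresh;   // We won the race and installed our set.
  }
  delete fresh;     // Another thread won; use its set.
  return expected;  // compare_exchange stored the winner's pointer here.
}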
-template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
-template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-void MemoryChunk::ReleaseSlotSet() {
- ReleaseSlotSet(&slot_set_[type]);
-}
-
-void MemoryChunk::ReleaseSweepingSlotSet() {
- ReleaseSlotSet(&sweeping_slot_set_);
-}
-
-void MemoryChunk::ReleaseSlotSet(SlotSet** slot_set) {
- if (*slot_set) {
- SlotSet::Delete(*slot_set, buckets());
- *slot_set = nullptr;
- }
-}
-
-template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
-template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
- TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
- TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
- &typed_slot_set_[type], nullptr, typed_slot_set);
- if (old_value != nullptr) {
- delete typed_slot_set;
- typed_slot_set = old_value;
- }
- DCHECK(typed_slot_set);
- return typed_slot_set;
-}
-
-template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
-template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-void MemoryChunk::ReleaseTypedSlotSet() {
- TypedSlotSet* typed_slot_set = typed_slot_set_[type];
- if (typed_slot_set) {
- typed_slot_set_[type] = nullptr;
- delete typed_slot_set;
- }
-}
-
-template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_NEW>();
-template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
- DCHECK_NULL(invalidated_slots_[type]);
- invalidated_slots_[type] = new InvalidatedSlots();
- return invalidated_slots_[type];
-}
-
-template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_NEW>();
-template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_OLD>();
-
-template <RememberedSetType type>
-void MemoryChunk::ReleaseInvalidatedSlots() {
- if (invalidated_slots_[type]) {
- delete invalidated_slots_[type];
- invalidated_slots_[type] = nullptr;
- }
-}
-
-template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object);
-template V8_EXPORT_PRIVATE void
-MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object);
-
-template <RememberedSetType type>
-void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object) {
- bool skip_slot_recording;
-
- if (type == OLD_TO_NEW) {
- skip_slot_recording = InYoungGeneration();
- } else {
- skip_slot_recording = ShouldSkipEvacuationSlotRecording();
- }
-
- if (skip_slot_recording) {
- return;
- }
-
- if (invalidated_slots<type>() == nullptr) {
- AllocateInvalidatedSlots<type>();
- }
-
- invalidated_slots<type>()->insert(object);
-}
-
-void MemoryChunk::InvalidateRecordedSlots(HeapObject object) {
- if (V8_DISABLE_WRITE_BARRIERS_BOOL) return;
- if (heap()->incremental_marking()->IsCompacting()) {
- // We cannot check slot_set_[OLD_TO_OLD] here, since the
- // concurrent markers might insert slots concurrently.
- RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object);
- }
-
- if (!FLAG_always_promote_young_mc || slot_set_[OLD_TO_NEW] != nullptr)
- RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(object);
-}
-
-template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
- HeapObject object);
-template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(
- HeapObject object);
-
-template <RememberedSetType type>
-bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
- if (invalidated_slots<type>() == nullptr) {
- return false;
- }
- return invalidated_slots<type>()->find(object) !=
- invalidated_slots<type>()->end();
-}
-
-void MemoryChunk::ReleaseLocalTracker() {
- DCHECK_NOT_NULL(local_tracker_);
- delete local_tracker_;
- local_tracker_ = nullptr;
-}
-
-void MemoryChunk::AllocateYoungGenerationBitmap() {
- DCHECK_NULL(young_generation_bitmap_);
- young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
-}
-
-void MemoryChunk::ReleaseYoungGenerationBitmap() {
- DCHECK_NOT_NULL(young_generation_bitmap_);
- free(young_generation_bitmap_);
- young_generation_bitmap_ = nullptr;
+void Page::DestroyBlackAreaBackground(Address start, Address end) {
+ DCHECK(heap()->incremental_marking()->black_allocation());
+ DCHECK_EQ(Page::FromAddress(start), this);
+ DCHECK_LT(start, end);
+ DCHECK_EQ(Page::FromAddress(end - 1), this);
+ IncrementalMarking::AtomicMarkingState* marking_state =
+ heap()->incremental_marking()->atomic_marking_state();
+ marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
+ AddressToMarkbitIndex(end));
+ heap()->incremental_marking()->IncrementLiveBytesBackground(
+ this, -static_cast<intptr_t>(end - start));
}
// -----------------------------------------------------------------------------
@@ -1481,293 +319,6 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
return next_step;
}
-PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
- Executability executable, FreeList* free_list,
- LocalSpaceKind local_space_kind)
- : SpaceWithLinearArea(heap, space, free_list),
- executable_(executable),
- local_space_kind_(local_space_kind) {
- area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
- accounting_stats_.Clear();
-}
-
-void PagedSpace::TearDown() {
- while (!memory_chunk_list_.Empty()) {
- MemoryChunk* chunk = memory_chunk_list_.front();
- memory_chunk_list_.Remove(chunk);
- heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
- }
- accounting_stats_.Clear();
-}
-
-void PagedSpace::RefillFreeList() {
- // Any PagedSpace might invoke RefillFreeList. We filter all but our old
- // generation spaces out.
- if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
- identity() != MAP_SPACE && identity() != RO_SPACE) {
- return;
- }
- DCHECK_NE(local_space_kind(), LocalSpaceKind::kOffThreadSpace);
- DCHECK_IMPLIES(is_local_space(), is_compaction_space());
- DCHECK(!IsDetached());
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- size_t added = 0;
-
- {
- Page* p = nullptr;
- while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
- // We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
- // entries here to make them unavailable for allocations.
- if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
- p->ForAllFreeListCategories([this](FreeListCategory* category) {
- category->Reset(free_list());
- });
- }
-
- // Also merge old-to-new remembered sets if not scavenging because of
- // data races: One thread might iterate remembered set, while another
- // thread merges them.
- if (local_space_kind() != LocalSpaceKind::kCompactionSpaceForScavenge) {
- p->MergeOldToNewRememberedSets();
- }
-
-      // Only during compaction can pages actually change ownership. This is
- // safe because there exists no other competing action on the page links
- // during compaction.
- if (is_compaction_space()) {
- DCHECK_NE(this, p->owner());
- PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
- base::MutexGuard guard(owner->mutex());
- owner->RefineAllocatedBytesAfterSweeping(p);
- owner->RemovePage(p);
- added += AddPage(p);
- } else {
- base::MutexGuard guard(mutex());
- DCHECK_EQ(this, p->owner());
- RefineAllocatedBytesAfterSweeping(p);
- added += RelinkFreeListCategories(p);
- }
- added += p->wasted_memory();
- if (is_compaction_space() && (added > kCompactionMemoryWanted)) break;
- }
- }
-}
-
-void OffThreadSpace::RefillFreeList() {
- // We should never try to refill the free list in off-thread space, because
- // we know it will always be fully linear.
- UNREACHABLE();
-}
-
-void PagedSpace::MergeLocalSpace(LocalSpace* other) {
- base::MutexGuard guard(mutex());
-
- DCHECK(identity() == other->identity());
-
- // Unmerged fields:
- // area_size_
- other->FreeLinearAllocationArea();
-
- for (int i = static_cast<int>(AllocationOrigin::kFirstAllocationOrigin);
- i <= static_cast<int>(AllocationOrigin::kLastAllocationOrigin); i++) {
- allocations_origins_[i] += other->allocations_origins_[i];
- }
-
- // The linear allocation area of {other} should be destroyed now.
- DCHECK_EQ(kNullAddress, other->top());
- DCHECK_EQ(kNullAddress, other->limit());
-
- bool merging_from_off_thread = other->is_off_thread_space();
-
- // Move over pages.
- for (auto it = other->begin(); it != other->end();) {
- Page* p = *(it++);
-
- if (merging_from_off_thread) {
- DCHECK_NULL(p->sweeping_slot_set());
- p->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
- if (heap()->incremental_marking()->black_allocation()) {
- p->CreateBlackArea(p->area_start(), p->HighWaterMark());
- }
- } else {
- p->MergeOldToNewRememberedSets();
- }
-
- // Relinking requires the category to be unlinked.
- other->RemovePage(p);
- AddPage(p);
- // These code pages were allocated by the CompactionSpace.
- if (identity() == CODE_SPACE) heap()->isolate()->AddCodeMemoryChunk(p);
- DCHECK_IMPLIES(
- !p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
- p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
-
-    // TODO(leszeks): Here we should do an allocation step, but:
- // 1. Allocation groups are currently not handled properly by the sampling
- // allocation profiler, and
- // 2. Observers might try to take the space lock, which isn't reentrant.
- // We'll have to come up with a better solution for allocation stepping
- // before shipping, which will likely be using LocalHeap.
- }
-
- DCHECK_EQ(0u, other->Size());
- DCHECK_EQ(0u, other->Capacity());
-}
-
-size_t PagedSpace::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- size_t size = 0;
- for (Page* page : *this) {
- size += page->CommittedPhysicalMemory();
- }
- return size;
-}
-
-bool PagedSpace::ContainsSlow(Address addr) {
- Page* p = Page::FromAddress(addr);
- for (Page* page : *this) {
- if (page == p) return true;
- }
- return false;
-}
-
-void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
- CHECK(page->SweepingDone());
- auto marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
-  // The live byte count on the page was accounted for in the space's
-  // allocated bytes counter. After sweeping, allocated_bytes() contains the
-  // accurate live byte count on the page.
- size_t old_counter = marking_state->live_bytes(page);
- size_t new_counter = page->allocated_bytes();
- DCHECK_GE(old_counter, new_counter);
- if (old_counter > new_counter) {
- DecreaseAllocatedBytes(old_counter - new_counter, page);
- // Give the heap a chance to adjust counters in response to the
- // more precise and smaller old generation size.
- heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
- }
- marking_state->SetLiveBytes(page, 0);
-}
-
-Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
- base::MutexGuard guard(mutex());
- Page* page = free_list()->GetPageForSize(size_in_bytes);
- if (!page) return nullptr;
- RemovePage(page);
- return page;
-}
-
-size_t PagedSpace::AddPage(Page* page) {
- CHECK(page->SweepingDone());
- page->set_owner(this);
- memory_chunk_list_.PushBack(page);
- AccountCommitted(page->size());
- IncreaseCapacity(page->area_size());
- IncreaseAllocatedBytes(page->allocated_bytes(), page);
- for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
- }
- return RelinkFreeListCategories(page);
-}
-
-void PagedSpace::RemovePage(Page* page) {
- CHECK(page->SweepingDone());
- memory_chunk_list_.Remove(page);
- UnlinkFreeListCategories(page);
- DecreaseAllocatedBytes(page->allocated_bytes(), page);
- DecreaseCapacity(page->area_size());
- AccountUncommitted(page->size());
- for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
- }
-}
-
-size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
- size_t unused = page->ShrinkToHighWaterMark();
- accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
- AccountUncommitted(unused);
- return unused;
-}
-
-void PagedSpace::ResetFreeList() {
- for (Page* page : *this) {
- free_list_->EvictFreeListItems(page);
- }
- DCHECK(free_list_->IsEmpty());
-}
-
-void PagedSpace::ShrinkImmortalImmovablePages() {
- DCHECK(!heap()->deserialization_complete());
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- FreeLinearAllocationArea();
- ResetFreeList();
- for (Page* page : *this) {
- DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
- ShrinkPageToHighWaterMark(page);
- }
-}
-
-bool PagedSpace::Expand() {
- // Always lock against the main space as we can only adjust capacity and
- // pages concurrently for the main paged space.
- base::MutexGuard guard(heap()->paged_space(identity())->mutex());
-
- const int size = AreaSize();
-
- if (!heap()->CanExpandOldGeneration(size)) return false;
-
- Page* page =
- heap()->memory_allocator()->AllocatePage(size, this, executable());
- if (page == nullptr) return false;
- // Pages created during bootstrapping may contain immortal immovable objects.
- if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
- AddPage(page);
- // If this is a non-compaction code space, this page has not been seen before.
- if (identity() == CODE_SPACE && !is_compaction_space()) {
- heap()->isolate()->AddCodeMemoryChunk(page);
- }
- Free(page->area_start(), page->area_size(),
- SpaceAccountingMode::kSpaceAccounted);
- heap()->NotifyOldGenerationExpansion();
- return true;
-}
-
-int PagedSpace::CountTotalPages() {
- int count = 0;
- for (Page* page : *this) {
- count++;
- USE(page);
- }
- return count;
-}
-
-void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
- SetTopAndLimit(top, limit);
- if (top != kNullAddress && top != limit && !is_off_thread_space() &&
- heap()->incremental_marking()->black_allocation()) {
- Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
- }
-}
-
-void PagedSpace::DecreaseLimit(Address new_limit) {
- Address old_limit = limit();
- DCHECK_LE(top(), new_limit);
- DCHECK_GE(old_limit, new_limit);
- if (new_limit != old_limit) {
- SetTopAndLimit(top(), new_limit);
- Free(new_limit, old_limit - new_limit,
- SpaceAccountingMode::kSpaceAccounted);
- if (heap()->incremental_marking()->black_allocation()) {
- Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
- old_limit);
- }
- }
-}
-
Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
size_t min_size) {
DCHECK_GE(end - start, min_size);
@@ -1802,560 +353,6 @@ void SpaceWithLinearArea::PrintAllocationsOrigins() {
allocations_origins_[2]);
}
-void PagedSpace::MarkLinearAllocationAreaBlack() {
- DCHECK(heap()->incremental_marking()->black_allocation());
- Address current_top = top();
- Address current_limit = limit();
- if (current_top != kNullAddress && current_top != current_limit) {
- Page::FromAllocationAreaAddress(current_top)
- ->CreateBlackArea(current_top, current_limit);
- }
-}
-
-void PagedSpace::UnmarkLinearAllocationArea() {
- Address current_top = top();
- Address current_limit = limit();
- if (current_top != kNullAddress && current_top != current_limit) {
- Page::FromAllocationAreaAddress(current_top)
- ->DestroyBlackArea(current_top, current_limit);
- }
-}
-
-void PagedSpace::FreeLinearAllocationArea() {
- // Mark the old linear allocation area with a free space map so it can be
- // skipped when scanning the heap.
- Address current_top = top();
- Address current_limit = limit();
- if (current_top == kNullAddress) {
- DCHECK_EQ(kNullAddress, current_limit);
- return;
- }
-
- if (!is_off_thread_space() &&
- heap()->incremental_marking()->black_allocation()) {
- Page* page = Page::FromAllocationAreaAddress(current_top);
-
- // Clear the bits in the unused black area.
- if (current_top != current_limit) {
- IncrementalMarking::MarkingState* marking_state =
- heap()->incremental_marking()->marking_state();
- marking_state->bitmap(page)->ClearRange(
- page->AddressToMarkbitIndex(current_top),
- page->AddressToMarkbitIndex(current_limit));
- marking_state->IncrementLiveBytes(
- page, -static_cast<int>(current_limit - current_top));
- }
- }
-
- InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
- SetTopAndLimit(kNullAddress, kNullAddress);
- DCHECK_GE(current_limit, current_top);
-
- // The code page of the linear allocation area needs to be unprotected
- // because we are going to write a filler into that memory area below.
- if (identity() == CODE_SPACE) {
- heap()->UnprotectAndRegisterMemoryChunk(
- MemoryChunk::FromAddress(current_top));
- }
- Free(current_top, current_limit - current_top,
- SpaceAccountingMode::kSpaceAccounted);
-}
-
-void PagedSpace::ReleasePage(Page* page) {
- DCHECK_EQ(
- 0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
- page));
- DCHECK_EQ(page->owner(), this);
-
- free_list_->EvictFreeListItems(page);
-
- if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
- DCHECK(!top_on_previous_step_);
- allocation_info_.Reset(kNullAddress, kNullAddress);
- }
-
- heap()->isolate()->RemoveCodeMemoryChunk(page);
-
- AccountUncommitted(page->size());
- accounting_stats_.DecreaseCapacity(page->area_size());
- heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
-}
-
-void PagedSpace::SetReadable() {
- DCHECK(identity() == CODE_SPACE);
- for (Page* page : *this) {
- CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadable();
- }
-}
-
-void PagedSpace::SetReadAndExecutable() {
- DCHECK(identity() == CODE_SPACE);
- for (Page* page : *this) {
- CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadAndExecutable();
- }
-}
-
-void PagedSpace::SetReadAndWritable() {
- DCHECK(identity() == CODE_SPACE);
- for (Page* page : *this) {
- CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadAndWritable();
- }
-}
-
-std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator(Heap* heap) {
- return std::unique_ptr<ObjectIterator>(
- new PagedSpaceObjectIterator(heap, this));
-}
-
-bool PagedSpace::RefillLinearAllocationAreaFromFreeList(
- size_t size_in_bytes, AllocationOrigin origin) {
- DCHECK(IsAligned(size_in_bytes, kTaggedSize));
- DCHECK_LE(top(), limit());
-#ifdef DEBUG
- if (top() != limit()) {
- DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
- }
-#endif
- // Don't allocate from the free list if there is linear space available.
- DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);
-
- // Mark the old linear allocation area with a free space map so it can be
- // skipped when scanning the heap. This also puts it back in the free list
- // if it is big enough.
- FreeLinearAllocationArea();
-
- if (!is_local_space()) {
- heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
- heap()->GCFlagsForIncrementalMarking(),
- kGCCallbackScheduleIdleGarbageCollection);
- }
-
- size_t new_node_size = 0;
- FreeSpace new_node =
- free_list_->Allocate(size_in_bytes, &new_node_size, origin);
- if (new_node.is_null()) return false;
- DCHECK_GE(new_node_size, size_in_bytes);
-
- // The old-space-step might have finished sweeping and restarted marking.
- // Verify that it did not turn the page of the new node into an evacuation
- // candidate.
- DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
-
- // Memory in the linear allocation area is counted as allocated. We may free
- // a little of this again immediately - see below.
- Page* page = Page::FromHeapObject(new_node);
- IncreaseAllocatedBytes(new_node_size, page);
-
- Address start = new_node.address();
- Address end = new_node.address() + new_node_size;
- Address limit = ComputeLimit(start, end, size_in_bytes);
- DCHECK_LE(limit, end);
- DCHECK_LE(size_in_bytes, limit - start);
- if (limit != end) {
- if (identity() == CODE_SPACE) {
- heap()->UnprotectAndRegisterMemoryChunk(page);
- }
- Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
- }
- SetLinearAllocationArea(start, limit);
-
- return true;
-}
-
-base::Optional<std::pair<Address, size_t>>
-PagedSpace::SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
- size_t min_size_in_bytes,
- size_t max_size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- DCHECK(!is_local_space() && identity() == OLD_SPACE);
- DCHECK_EQ(origin, AllocationOrigin::kRuntime);
- base::MutexGuard lock(&allocation_mutex_);
-
- auto result = TryAllocationFromFreeListBackground(
- min_size_in_bytes, max_size_in_bytes, alignment, origin);
- if (result) return result;
-
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- // Sweeping is still in progress.
- if (collector->sweeping_in_progress()) {
- // First try to refill the free-list, concurrent sweeper threads
- // may have freed some objects in the meantime.
- RefillFreeList();
-
- // Retry the free list allocation.
- auto result = TryAllocationFromFreeListBackground(
- min_size_in_bytes, max_size_in_bytes, alignment, origin);
- if (result) return result;
-
- Sweeper::FreeSpaceMayContainInvalidatedSlots
- invalidated_slots_in_free_space =
- Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
-
- const int kMaxPagesToSweep = 1;
- int max_freed = collector->sweeper()->ParallelSweepSpace(
- identity(), static_cast<int>(min_size_in_bytes), kMaxPagesToSweep,
- invalidated_slots_in_free_space);
- RefillFreeList();
- if (static_cast<size_t>(max_freed) >= min_size_in_bytes)
- return TryAllocationFromFreeListBackground(
- min_size_in_bytes, max_size_in_bytes, alignment, origin);
- }
-
- if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
- Expand()) {
- DCHECK((CountTotalPages() > 1) ||
- (min_size_in_bytes <= free_list_->Available()));
- return TryAllocationFromFreeListBackground(
- min_size_in_bytes, max_size_in_bytes, alignment, origin);
- }
-
- // TODO(dinfuehr): Complete sweeping here and try allocation again.
-
- return {};
-}
-
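The background slow path above escalates through progressively more expensive fallbacks: retry the free list, help the concurrent sweeper and retry, then expand the space. A minimal standalone sketch of that escalation, with caller-supplied callables standing in for V8's free list, sweeper, and expansion logic (all names here are illustrative, not V8 API):

#include <cstddef>
#include <functional>
#include <optional>

// Illustration only: each step is a placeholder for "allocate from the free
// list", "sweep a little and refill the free list", and "expand the space".
std::optional<void*> AllocateWithFallbacks(
    size_t bytes,
    const std::function<std::optional<void*>(size_t)>& try_free_list,
    const std::function<bool(size_t)>& sweep_and_refill,
    const std::function<bool()>& expand) {
  if (auto result = try_free_list(bytes)) return result;  // cheap first try
  if (sweep_and_refill(bytes)) {                          // help the sweeper
    if (auto result = try_free_list(bytes)) return result;
  }
  if (expand()) {                                         // grow the space
    if (auto result = try_free_list(bytes)) return result;
  }
  return std::nullopt;  // caller falls back to a GC or fails the allocation
}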
-base::Optional<std::pair<Address, size_t>>
-PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
- size_t max_size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin) {
- DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
- DCHECK_EQ(identity(), OLD_SPACE);
-
- size_t new_node_size = 0;
- FreeSpace new_node =
- free_list_->Allocate(min_size_in_bytes, &new_node_size, origin);
- if (new_node.is_null()) return {};
- DCHECK_GE(new_node_size, min_size_in_bytes);
-
- // The old-space-step might have finished sweeping and restarted marking.
- // Verify that it did not turn the page of the new node into an evacuation
- // candidate.
- DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
-
- // Memory in the linear allocation area is counted as allocated. We may free
- // a little of this again immediately - see below.
- Page* page = Page::FromHeapObject(new_node);
- IncreaseAllocatedBytes(new_node_size, page);
-
- heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
-
- size_t used_size_in_bytes = Min(new_node_size, max_size_in_bytes);
-
- Address start = new_node.address();
- Address end = new_node.address() + new_node_size;
- Address limit = new_node.address() + used_size_in_bytes;
- DCHECK_LE(limit, end);
- DCHECK_LE(min_size_in_bytes, limit - start);
- if (limit != end) {
- Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
- }
-
- return std::make_pair(start, used_size_in_bytes);
-}
-
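TryAllocationFromFreeListBackground above carves a new allocation area out of a single free node: it needs at least min_size_in_bytes, keeps at most max_size_in_bytes, and hands the unused tail straight back to the free list. A self-contained sketch of that carving step, using plain addresses and a callback in place of Free() (names are illustrative):

#include <algorithm>
#include <cstdint>
#include <functional>
#include <optional>
#include <utility>

using Address = uintptr_t;

// Carve [start, start + used) out of a free node of node_size bytes; the tail
// beyond `used` is returned to the free list via free_remainder.
std::optional<std::pair<Address, size_t>> CarveAllocationArea(
    Address start, size_t node_size, size_t min_bytes, size_t max_bytes,
    const std::function<void(Address, size_t)>& free_remainder) {
  if (node_size < min_bytes) return std::nullopt;  // node is too small
  const size_t used = std::min(node_size, max_bytes);
  const Address limit = start + used;
  const Address end = start + node_size;
  if (limit != end) free_remainder(limit, end - limit);  // give back the tail
  return std::make_pair(start, used);
}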
-#ifdef DEBUG
-void PagedSpace::Print() {}
-#endif
-
-#ifdef VERIFY_HEAP
-void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
- bool allocation_pointer_found_in_space =
- (allocation_info_.top() == allocation_info_.limit());
- size_t external_space_bytes[kNumTypes];
- size_t external_page_bytes[kNumTypes];
-
- for (int i = 0; i < kNumTypes; i++) {
- external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
- }
-
- for (Page* page : *this) {
-#ifdef V8_SHARED_RO_HEAP
- if (identity() == RO_SPACE) {
- CHECK_NULL(page->owner());
- } else {
- CHECK_EQ(page->owner(), this);
- }
-#else
- CHECK_EQ(page->owner(), this);
-#endif
-
- for (int i = 0; i < kNumTypes; i++) {
- external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
- }
-
- if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
- allocation_pointer_found_in_space = true;
- }
- CHECK(page->SweepingDone());
- PagedSpaceObjectIterator it(isolate->heap(), this, page);
- Address end_of_previous_object = page->area_start();
- Address top = page->area_end();
-
- for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
- CHECK(end_of_previous_object <= object.address());
-
- // The first word should be a map, and we expect all map pointers to
- // be in map space or read-only space.
- Map map = object.map();
- CHECK(map.IsMap());
- CHECK(ReadOnlyHeap::Contains(map) ||
- isolate->heap()->map_space()->Contains(map));
-
- // Perform space-specific object verification.
- VerifyObject(object);
-
- // The object itself should look OK.
- object.ObjectVerify(isolate);
-
- if (!FLAG_verify_heap_skip_remembered_set) {
- isolate->heap()->VerifyRememberedSetFor(object);
- }
-
- // All the interior pointers should be contained in the heap.
- int size = object.Size();
- object.IterateBody(map, size, visitor);
- CHECK(object.address() + size <= top);
- end_of_previous_object = object.address() + size;
-
- if (object.IsExternalString()) {
- ExternalString external_string = ExternalString::cast(object);
- size_t size = external_string.ExternalPayloadSize();
- external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
- } else if (object.IsJSArrayBuffer()) {
- JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
- if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size =
- ArrayBufferTracker::Lookup(isolate->heap(), array_buffer)
- ->PerIsolateAccountingLength();
- external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
- }
- }
- }
- for (int i = 0; i < kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
- external_space_bytes[t] += external_page_bytes[t];
- }
- }
- for (int i = 0; i < kNumTypes; i++) {
- if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
- i == ExternalBackingStoreType::kArrayBuffer)
- continue;
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
- }
- CHECK(allocation_pointer_found_in_space);
-
- if (identity() == OLD_SPACE && V8_ARRAY_BUFFER_EXTENSION_BOOL) {
- size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
- CHECK_EQ(bytes,
- ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
- }
-
-#ifdef DEBUG
- VerifyCountersAfterSweeping(isolate->heap());
-#endif
-}
-
-void PagedSpace::VerifyLiveBytes() {
- DCHECK_NE(identity(), RO_SPACE);
- IncrementalMarking::MarkingState* marking_state =
- heap()->incremental_marking()->marking_state();
- for (Page* page : *this) {
- CHECK(page->SweepingDone());
- PagedSpaceObjectIterator it(heap(), this, page);
- int black_size = 0;
- for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
- // All the interior pointers should be contained in the heap.
- if (marking_state->IsBlack(object)) {
- black_size += object.Size();
- }
- }
- CHECK_LE(black_size, marking_state->live_bytes(page));
- }
-}
-#endif // VERIFY_HEAP
-
-#ifdef DEBUG
-void PagedSpace::VerifyCountersAfterSweeping(Heap* heap) {
- size_t total_capacity = 0;
- size_t total_allocated = 0;
- for (Page* page : *this) {
- DCHECK(page->SweepingDone());
- total_capacity += page->area_size();
- PagedSpaceObjectIterator it(heap, this, page);
- size_t real_allocated = 0;
- for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
- if (!object.IsFreeSpaceOrFiller()) {
- real_allocated += object.Size();
- }
- }
- total_allocated += page->allocated_bytes();
- // The real size can be smaller than the accounted size if array trimming
- // or object slack tracking happened after sweeping.
- DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
- DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
- }
- DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
- DCHECK_EQ(total_allocated, accounting_stats_.Size());
-}
-
-void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
- // We need to refine the counters on pages that are already swept and have
- // not been moved over to the actual space. Otherwise, the AccountingStats
- // are just an over-approximation.
- RefillFreeList();
-
- size_t total_capacity = 0;
- size_t total_allocated = 0;
- auto marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
- for (Page* page : *this) {
- size_t page_allocated =
- page->SweepingDone()
- ? page->allocated_bytes()
- : static_cast<size_t>(marking_state->live_bytes(page));
- total_capacity += page->area_size();
- total_allocated += page_allocated;
- DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
- }
- DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
- DCHECK_EQ(total_allocated, accounting_stats_.Size());
-}
-#endif
-
-// -----------------------------------------------------------------------------
-// NewSpace implementation
-
-NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
- size_t initial_semispace_capacity,
- size_t max_semispace_capacity)
- : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList()),
- to_space_(heap, kToSpace),
- from_space_(heap, kFromSpace) {
- DCHECK(initial_semispace_capacity <= max_semispace_capacity);
-
- to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
- from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
- if (!to_space_.Commit()) {
- V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
- }
- DCHECK(!from_space_.is_committed()); // No need to use memory yet.
- ResetLinearAllocationArea();
-}
-
-void NewSpace::TearDown() {
- allocation_info_.Reset(kNullAddress, kNullAddress);
-
- to_space_.TearDown();
- from_space_.TearDown();
-}
-
-void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
-
-
-void NewSpace::Grow() {
- // Double the semispace size but only up to maximum capacity.
- DCHECK(TotalCapacity() < MaximumCapacity());
- size_t new_capacity =
- Min(MaximumCapacity(),
- static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
- if (to_space_.GrowTo(new_capacity)) {
- // Only grow from space if we managed to grow to-space.
- if (!from_space_.GrowTo(new_capacity)) {
- // If we managed to grow to-space but couldn't grow from-space,
- // attempt to shrink to-space.
- if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
- // We are in an inconsistent state because we could not
- // commit/uncommit memory from new space.
- FATAL("inconsistent state");
- }
- }
- }
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-
-void NewSpace::Shrink() {
- size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
- size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
- if (rounded_new_capacity < TotalCapacity() &&
- to_space_.ShrinkTo(rounded_new_capacity)) {
- // Only shrink from-space if we managed to shrink to-space.
- from_space_.Reset();
- if (!from_space_.ShrinkTo(rounded_new_capacity)) {
- // If we managed to shrink to-space but couldn't shrink from-space,
- // attempt to grow to-space again.
- if (!to_space_.GrowTo(from_space_.current_capacity())) {
- // We are in an inconsistent state because we could not
- // commit/uncommit memory from new space.
- FATAL("inconsistent state");
- }
- }
- }
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
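Grow() and Shrink() above resize the two semispaces in lock-step: the second resize is attempted only after the first succeeds, and a failure of the second rolls the first back (with a fatal error if even the rollback fails). A generic sketch of that pattern, with callables standing in for the to-space and from-space resizes:

#include <functional>

// Returns true if the semispace pair is left in a consistent state: either
// both steps succeeded, nothing changed, or the first step was rolled back.
// A false return corresponds to the FATAL("inconsistent state") above.
bool ResizePairOrRollback(const std::function<bool()>& resize_to_space,
                          const std::function<bool()>& resize_from_space,
                          const std::function<bool()>& rollback_to_space) {
  if (!resize_to_space()) return true;   // nothing changed; still consistent
  if (resize_from_space()) return true;  // both resizes succeeded
  return rollback_to_space();            // undo the first resize if possible
}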
-bool NewSpace::Rebalance() {
- // Order here is important to make use of the page pool.
- return to_space_.EnsureCurrentCapacity() &&
- from_space_.EnsureCurrentCapacity();
-}
-
-bool SemiSpace::EnsureCurrentCapacity() {
- if (is_committed()) {
- const int expected_pages =
- static_cast<int>(current_capacity_ / Page::kPageSize);
- MemoryChunk* current_page = first_page();
- int actual_pages = 0;
-
- // First iterate through the page list until we reach the expected number
- // of pages, if that many pages exist.
- while (current_page != nullptr && actual_pages < expected_pages) {
- actual_pages++;
- current_page = current_page->list_node().next();
- }
-
- // Free all overallocated pages which are behind current_page.
- while (current_page) {
- MemoryChunk* next_current = current_page->list_node().next();
- memory_chunk_list_.Remove(current_page);
- // Clear new space flags to avoid this page being treated as a new
- // space page that is potentially being swept.
- current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
- current_page);
- current_page = next_current;
- }
-
- // Add more pages if we have less than expected_pages.
- IncrementalMarking::NonAtomicMarkingState* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
- while (actual_pages < expected_pages) {
- actual_pages++;
- current_page =
- heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
- NOT_EXECUTABLE);
- if (current_page == nullptr) return false;
- DCHECK_NOT_NULL(current_page);
- memory_chunk_list_.PushBack(current_page);
- marking_state->ClearLiveness(current_page);
- current_page->SetFlags(first_page()->GetFlags(),
- static_cast<uintptr_t>(Page::kCopyAllFlags));
- heap()->CreateFillerObjectAt(current_page->area_start(),
- static_cast<int>(current_page->area_size()),
- ClearRecordedSlots::kNo);
- }
- }
- return true;
-}
-
LinearAllocationArea LocalAllocationBuffer::CloseAndMakeIterable() {
if (IsValid()) {
MakeIterable();
@@ -2400,110 +397,6 @@ LocalAllocationBuffer& LocalAllocationBuffer::operator=(
other.allocation_info_.Reset(kNullAddress, kNullAddress);
return *this;
}
-
-void NewSpace::UpdateLinearAllocationArea() {
- // Make sure there are no unaccounted allocations.
- DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());
-
- Address new_top = to_space_.page_low();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.Reset(new_top, to_space_.page_high());
- // The order of the following two stores is important.
- // See the corresponding loads in ConcurrentMarking::Run.
- original_limit_.store(limit(), std::memory_order_relaxed);
- original_top_.store(top(), std::memory_order_release);
- StartNextInlineAllocationStep();
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-void NewSpace::ResetLinearAllocationArea() {
- // Do a step to account for memory allocated so far before resetting.
- InlineAllocationStep(top(), top(), kNullAddress, 0);
- to_space_.Reset();
- UpdateLinearAllocationArea();
- // Clear all mark-bits in the to-space.
- IncrementalMarking::NonAtomicMarkingState* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
- for (Page* p : to_space_) {
- marking_state->ClearLiveness(p);
- // Concurrent marking may have local live bytes for this page.
- heap()->concurrent_marking()->ClearMemoryChunkData(p);
- }
-}
-
-void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
- Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
- allocation_info_.set_limit(new_limit);
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
- Address new_limit = ComputeLimit(top(), limit(), min_size);
- DCHECK_LE(new_limit, limit());
- DecreaseLimit(new_limit);
-}
-
-bool NewSpace::AddFreshPage() {
- Address top = allocation_info_.top();
- DCHECK(!OldSpace::IsAtPageStart(top));
-
- // Do a step to account for memory allocated on previous page.
- InlineAllocationStep(top, top, kNullAddress, 0);
-
- if (!to_space_.AdvancePage()) {
- // No more pages left to advance.
- return false;
- }
-
- // Clear remainder of current page.
- Address limit = Page::FromAllocationAreaAddress(top)->area_end();
- int remaining_in_page = static_cast<int>(limit - top);
- heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
- UpdateLinearAllocationArea();
-
- return true;
-}
-
-
-bool NewSpace::AddFreshPageSynchronized() {
- base::MutexGuard guard(&mutex_);
- return AddFreshPage();
-}
-
-
-bool NewSpace::EnsureAllocation(int size_in_bytes,
- AllocationAlignment alignment) {
- Address old_top = allocation_info_.top();
- Address high = to_space_.page_high();
- int filler_size = Heap::GetFillToAlign(old_top, alignment);
- int aligned_size_in_bytes = size_in_bytes + filler_size;
-
- if (old_top + aligned_size_in_bytes > high) {
- // Not enough room in the page, try to allocate a new one.
- if (!AddFreshPage()) {
- return false;
- }
-
- old_top = allocation_info_.top();
- high = to_space_.page_high();
- filler_size = Heap::GetFillToAlign(old_top, alignment);
- }
-
- DCHECK(old_top + aligned_size_in_bytes <= high);
-
- if (allocation_info_.limit() < high) {
- // Either the limit has been lowered because linear allocation was disabled
- // or because incremental marking wants to get a chance to do a step,
- // or because the idle scavenge job wants to get a chance to post a task.
- // Set the new limit accordingly.
- Address new_top = old_top + aligned_size_in_bytes;
- Address soon_object = old_top + filler_size;
- InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
- UpdateInlineAllocationLimit(aligned_size_in_bytes);
- }
- return true;
-}
-
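EnsureAllocation above is the heart of new-space bump-pointer allocation: pad the current top for alignment and check that the padded request still fits below the page end, otherwise fall back to a fresh page. A minimal standalone sketch of the bump-pointer step over a fixed buffer (the power-of-two alignment math only mirrors Heap::GetFillToAlign in spirit):

#include <cstddef>
#include <cstdint>

struct BumpArena {
  uintptr_t top;    // next free byte
  uintptr_t limit;  // end of the current chunk

  // Returns the aligned object start, or 0 when the request does not fit and
  // the caller must fall back (comparable to AddFreshPage above).
  // `alignment` must be a power of two.
  uintptr_t Allocate(size_t size, size_t alignment) {
    const uintptr_t mask = static_cast<uintptr_t>(alignment) - 1;
    const uintptr_t aligned = (top + mask) & ~mask;
    if (aligned + size > limit) return 0;  // not enough room before the limit
    top = aligned + size;                  // bump the pointer past the object
    return aligned;
  }
};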
void SpaceWithLinearArea::StartNextInlineAllocationStep() {
if (heap()->allocation_step_in_progress()) {
// If we are mid-way through an existing step, don't start a new one.
@@ -2570,1043 +463,6 @@ void SpaceWithLinearArea::InlineAllocationStep(Address top,
}
}
-std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) {
- return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
-}
-
-#ifdef VERIFY_HEAP
-// We do not use the SemiSpaceObjectIterator because verification doesn't assume
-// that it works (it depends on the invariants we are checking).
-void NewSpace::Verify(Isolate* isolate) {
- // The allocation pointer should be in the space or at the very end.
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
- // There should be objects packed in from the low address up to the
- // allocation pointer.
- Address current = to_space_.first_page()->area_start();
- CHECK_EQ(current, to_space_.space_start());
-
- size_t external_space_bytes[kNumTypes];
- for (int i = 0; i < kNumTypes; i++) {
- external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
- }
-
- while (current != top()) {
- if (!Page::IsAlignedToPageSize(current)) {
- // The allocation pointer should not be in the middle of an object.
- CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
- current < top());
-
- HeapObject object = HeapObject::FromAddress(current);
-
- // The first word should be a map, and we expect all map pointers to
- // be in map space or read-only space.
- Map map = object.map();
- CHECK(map.IsMap());
- CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
-
- // The object should not be code or a map.
- CHECK(!object.IsMap());
- CHECK(!object.IsAbstractCode());
-
- // The object itself should look OK.
- object.ObjectVerify(isolate);
-
- // All the interior pointers should be contained in the heap.
- VerifyPointersVisitor visitor(heap());
- int size = object.Size();
- object.IterateBody(map, size, &visitor);
-
- if (object.IsExternalString()) {
- ExternalString external_string = ExternalString::cast(object);
- size_t size = external_string.ExternalPayloadSize();
- external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
- } else if (object.IsJSArrayBuffer()) {
- JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
- if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size = ArrayBufferTracker::Lookup(heap(), array_buffer)
- ->PerIsolateAccountingLength();
- external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
- }
- }
-
- current += size;
- } else {
- // At end of page, switch to next page.
- Page* page = Page::FromAllocationAreaAddress(current)->next_page();
- current = page->area_start();
- }
- }
-
- for (int i = 0; i < kNumTypes; i++) {
- if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
- i == ExternalBackingStoreType::kArrayBuffer)
- continue;
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
- }
-
- if (V8_ARRAY_BUFFER_EXTENSION_BOOL) {
- size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
- CHECK_EQ(bytes,
- ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
- }
-
- // Check semi-spaces.
- CHECK_EQ(from_space_.id(), kFromSpace);
- CHECK_EQ(to_space_.id(), kToSpace);
- from_space_.Verify();
- to_space_.Verify();
-}
-#endif
-
-// -----------------------------------------------------------------------------
-// SemiSpace implementation
-
-void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
- DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
- minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
- current_capacity_ = minimum_capacity_;
- maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
- committed_ = false;
-}
-
-
-void SemiSpace::TearDown() {
- // Properly uncommit memory to keep the allocator counters in sync.
- if (is_committed()) {
- Uncommit();
- }
- current_capacity_ = maximum_capacity_ = 0;
-}
-
-
-bool SemiSpace::Commit() {
- DCHECK(!is_committed());
- const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
- for (int pages_added = 0; pages_added < num_pages; pages_added++) {
- // Pages in the new space can be moved to the old space by the full
- // collector. Therefore, they must be initialized with the same FreeList as
- // old pages.
- Page* new_page =
- heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
- NOT_EXECUTABLE);
- if (new_page == nullptr) {
- if (pages_added) RewindPages(pages_added);
- return false;
- }
- memory_chunk_list_.PushBack(new_page);
- }
- Reset();
- AccountCommitted(current_capacity_);
- if (age_mark_ == kNullAddress) {
- age_mark_ = first_page()->area_start();
- }
- committed_ = true;
- return true;
-}
-
-
-bool SemiSpace::Uncommit() {
- DCHECK(is_committed());
- while (!memory_chunk_list_.Empty()) {
- MemoryChunk* chunk = memory_chunk_list_.front();
- memory_chunk_list_.Remove(chunk);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
- }
- current_page_ = nullptr;
- AccountUncommitted(current_capacity_);
- committed_ = false;
- heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
- return true;
-}
-
-
-size_t SemiSpace::CommittedPhysicalMemory() {
- if (!is_committed()) return 0;
- size_t size = 0;
- for (Page* p : *this) {
- size += p->CommittedPhysicalMemory();
- }
- return size;
-}
-
-bool SemiSpace::GrowTo(size_t new_capacity) {
- if (!is_committed()) {
- if (!Commit()) return false;
- }
- DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
- DCHECK_LE(new_capacity, maximum_capacity_);
- DCHECK_GT(new_capacity, current_capacity_);
- const size_t delta = new_capacity - current_capacity_;
- DCHECK(IsAligned(delta, AllocatePageSize()));
- const int delta_pages = static_cast<int>(delta / Page::kPageSize);
- DCHECK(last_page());
- IncrementalMarking::NonAtomicMarkingState* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
- for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
- Page* new_page =
- heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
- NOT_EXECUTABLE);
- if (new_page == nullptr) {
- if (pages_added) RewindPages(pages_added);
- return false;
- }
- memory_chunk_list_.PushBack(new_page);
- marking_state->ClearLiveness(new_page);
- // Duplicate the flags that were set on the old page.
- new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
- }
- AccountCommitted(delta);
- current_capacity_ = new_capacity;
- return true;
-}
-
-void SemiSpace::RewindPages(int num_pages) {
- DCHECK_GT(num_pages, 0);
- DCHECK(last_page());
- while (num_pages > 0) {
- MemoryChunk* last = last_page();
- memory_chunk_list_.Remove(last);
- heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
- num_pages--;
- }
-}
-
-bool SemiSpace::ShrinkTo(size_t new_capacity) {
- DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
- DCHECK_GE(new_capacity, minimum_capacity_);
- DCHECK_LT(new_capacity, current_capacity_);
- if (is_committed()) {
- const size_t delta = current_capacity_ - new_capacity;
- DCHECK(IsAligned(delta, Page::kPageSize));
- int delta_pages = static_cast<int>(delta / Page::kPageSize);
- RewindPages(delta_pages);
- AccountUncommitted(delta);
- heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
- }
- current_capacity_ = new_capacity;
- return true;
-}
-
-void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
- for (Page* page : *this) {
- page->set_owner(this);
- page->SetFlags(flags, mask);
- if (id_ == kToSpace) {
- page->ClearFlag(MemoryChunk::FROM_PAGE);
- page->SetFlag(MemoryChunk::TO_PAGE);
- page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
- heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
- page, 0);
- } else {
- page->SetFlag(MemoryChunk::FROM_PAGE);
- page->ClearFlag(MemoryChunk::TO_PAGE);
- }
- DCHECK(page->InYoungGeneration());
- }
-}
-
-
-void SemiSpace::Reset() {
- DCHECK(first_page());
- DCHECK(last_page());
- current_page_ = first_page();
- pages_used_ = 0;
-}
-
-void SemiSpace::RemovePage(Page* page) {
- if (current_page_ == page) {
- if (page->prev_page()) {
- current_page_ = page->prev_page();
- }
- }
- memory_chunk_list_.Remove(page);
- for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
- }
-}
-
-void SemiSpace::PrependPage(Page* page) {
- page->SetFlags(current_page()->GetFlags(),
- static_cast<uintptr_t>(Page::kCopyAllFlags));
- page->set_owner(this);
- memory_chunk_list_.PushFront(page);
- pages_used_++;
- for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
- }
-}
-
-void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
- // We won't be swapping semispaces without data in them.
- DCHECK(from->first_page());
- DCHECK(to->first_page());
-
- intptr_t saved_to_space_flags = to->current_page()->GetFlags();
-
- // We swap all properties but id_.
- std::swap(from->current_capacity_, to->current_capacity_);
- std::swap(from->maximum_capacity_, to->maximum_capacity_);
- std::swap(from->minimum_capacity_, to->minimum_capacity_);
- std::swap(from->age_mark_, to->age_mark_);
- std::swap(from->committed_, to->committed_);
- std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
- std::swap(from->current_page_, to->current_page_);
- std::swap(from->external_backing_store_bytes_,
- to->external_backing_store_bytes_);
-
- to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
- from->FixPagesFlags(0, 0);
-}
-
-void SemiSpace::set_age_mark(Address mark) {
- DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
- age_mark_ = mark;
- // Mark all pages up to the one containing mark.
- for (Page* p : PageRange(space_start(), mark)) {
- p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
- }
-}
-
-std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator(Heap* heap) {
- // Use the NewSpace::NewObjectIterator to iterate the ToSpace.
- UNREACHABLE();
-}
-
-#ifdef DEBUG
-void SemiSpace::Print() {}
-#endif
-
-#ifdef VERIFY_HEAP
-void SemiSpace::Verify() {
- bool is_from_space = (id_ == kFromSpace);
- size_t external_backing_store_bytes[kNumTypes];
-
- for (int i = 0; i < kNumTypes; i++) {
- external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
- }
-
- for (Page* page : *this) {
- CHECK_EQ(page->owner(), this);
- CHECK(page->InNewSpace());
- CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
- : MemoryChunk::TO_PAGE));
- CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::TO_PAGE
- : MemoryChunk::FROM_PAGE));
- CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
- if (!is_from_space) {
- // The pointers-from-here-are-interesting flag isn't updated dynamically
- // on from-space pages, so it might be out of sync with the marking state.
- if (page->heap()->incremental_marking()->IsMarking()) {
- CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
- } else {
- CHECK(
- !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
- }
- }
- for (int i = 0; i < kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
- }
-
- CHECK_IMPLIES(page->list_node().prev(),
- page->list_node().prev()->list_node().next() == page);
- }
- for (int i = 0; i < kNumTypes; i++) {
- ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
- CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
- }
-}
-#endif
-
-#ifdef DEBUG
-void SemiSpace::AssertValidRange(Address start, Address end) {
- // Addresses must belong to the same semi-space.
- Page* page = Page::FromAllocationAreaAddress(start);
- Page* end_page = Page::FromAllocationAreaAddress(end);
- SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
- DCHECK_EQ(space, end_page->owner());
- // Start address is before end address, either on same page,
- // or end address is on a later page in the linked list of
- // semi-space pages.
- if (page == end_page) {
- DCHECK_LE(start, end);
- } else {
- while (page != end_page) {
- page = page->next_page();
- }
- DCHECK(page);
- }
-}
-#endif
-
-// -----------------------------------------------------------------------------
-// SemiSpaceObjectIterator implementation.
-
-SemiSpaceObjectIterator::SemiSpaceObjectIterator(NewSpace* space) {
- Initialize(space->first_allocatable_address(), space->top());
-}
-
-void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
- SemiSpace::AssertValidRange(start, end);
- current_ = start;
- limit_ = end;
-}
-
-size_t NewSpace::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- size_t size = to_space_.CommittedPhysicalMemory();
- if (from_space_.is_committed()) {
- size += from_space_.CommittedPhysicalMemory();
- }
- return size;
-}
-
-
-// -----------------------------------------------------------------------------
-// Free lists for old object spaces implementation
-
-void FreeListCategory::Reset(FreeList* owner) {
- if (is_linked(owner) && !top().is_null()) {
- owner->DecreaseAvailableBytes(available_);
- }
- set_top(FreeSpace());
- set_prev(nullptr);
- set_next(nullptr);
- available_ = 0;
-}
-
-FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
- size_t* node_size) {
- FreeSpace node = top();
- DCHECK(!node.is_null());
- DCHECK(Page::FromHeapObject(node)->CanAllocate());
- if (static_cast<size_t>(node.Size()) < minimum_size) {
- *node_size = 0;
- return FreeSpace();
- }
- set_top(node.next());
- *node_size = node.Size();
- UpdateCountersAfterAllocation(*node_size);
- return node;
-}
-
-FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
- size_t* node_size) {
- FreeSpace prev_non_evac_node;
- for (FreeSpace cur_node = top(); !cur_node.is_null();
- cur_node = cur_node.next()) {
- DCHECK(Page::FromHeapObject(cur_node)->CanAllocate());
- size_t size = cur_node.size();
- if (size >= minimum_size) {
- DCHECK_GE(available_, size);
- UpdateCountersAfterAllocation(size);
- if (cur_node == top()) {
- set_top(cur_node.next());
- }
- if (!prev_non_evac_node.is_null()) {
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
- if (chunk->owner_identity() == CODE_SPACE) {
- chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
- }
- prev_non_evac_node.set_next(cur_node.next());
- }
- *node_size = size;
- return cur_node;
- }
-
- prev_non_evac_node = cur_node;
- }
- return FreeSpace();
-}
-
-void FreeListCategory::Free(Address start, size_t size_in_bytes, FreeMode mode,
- FreeList* owner) {
- FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
- free_space.set_next(top());
- set_top(free_space);
- available_ += size_in_bytes;
- if (mode == kLinkCategory) {
- if (is_linked(owner)) {
- owner->IncreaseAvailableBytes(size_in_bytes);
- } else {
- owner->AddCategory(this);
- }
- }
-}
-
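FreeListCategory::Free above stores the list link inside the freed block itself: a FreeSpace header is written over the first words of the dead object, so no side allocation is needed to track it. A minimal sketch of such an intrusive free list over raw memory (plain structs, not V8's object layout):

#include <cstddef>
#include <new>

// The link lives in the first bytes of the freed block, so pushing a block
// costs one placement-new and no auxiliary memory.
struct FreeNode {
  FreeNode* next;
  size_t size;
};

class IntrusiveFreeList {
 public:
  // `start` must point to at least sizeof(FreeNode) writable bytes; smaller
  // blocks would be accounted as waste instead (see min_block_size_ above).
  void Free(void* start, size_t size) {
    top_ = new (start) FreeNode{top_, size};
    available_ += size;
  }

  size_t available() const { return available_; }

 private:
  FreeNode* top_ = nullptr;
  size_t available_ = 0;
};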
-void FreeListCategory::RepairFreeList(Heap* heap) {
- Map free_space_map = ReadOnlyRoots(heap).free_space_map();
- FreeSpace n = top();
- while (!n.is_null()) {
- ObjectSlot map_slot = n.map_slot();
- if (map_slot.contains_value(kNullAddress)) {
- map_slot.store(free_space_map);
- } else {
- DCHECK(map_slot.contains_value(free_space_map.ptr()));
- }
- n = n.next();
- }
-}
-
-void FreeListCategory::Relink(FreeList* owner) {
- DCHECK(!is_linked(owner));
- owner->AddCategory(this);
-}
-
-// ------------------------------------------------
-// Generic FreeList methods (alloc/free related)
-
-FreeList* FreeList::CreateFreeList() {
- switch (FLAG_gc_freelist_strategy) {
- case 0:
- return new FreeListLegacy();
- case 1:
- return new FreeListFastAlloc();
- case 2:
- return new FreeListMany();
- case 3:
- return new FreeListManyCached();
- case 4:
- return new FreeListManyCachedFastPath();
- case 5:
- return new FreeListManyCachedOrigin();
- default:
- FATAL("Invalid FreeList strategy");
- }
-}
-
-FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
- size_t minimum_size, size_t* node_size) {
- FreeListCategory* category = categories_[type];
- if (category == nullptr) return FreeSpace();
- FreeSpace node = category->PickNodeFromList(minimum_size, node_size);
- if (!node.is_null()) {
- DecreaseAvailableBytes(*node_size);
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- }
- if (category->is_empty()) {
- RemoveCategory(category);
- }
- return node;
-}
-
-FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
- size_t minimum_size,
- size_t* node_size) {
- FreeListCategoryIterator it(this, type);
- FreeSpace node;
- while (it.HasNext()) {
- FreeListCategory* current = it.Next();
- node = current->SearchForNodeInList(minimum_size, node_size);
- if (!node.is_null()) {
- DecreaseAvailableBytes(*node_size);
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- if (current->is_empty()) {
- RemoveCategory(current);
- }
- return node;
- }
- }
- return node;
-}
-
-size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
- Page* page = Page::FromAddress(start);
- page->DecreaseAllocatedBytes(size_in_bytes);
-
- // Blocks have to be a minimum size to hold free list items.
- if (size_in_bytes < min_block_size_) {
- page->add_wasted_memory(size_in_bytes);
- wasted_bytes_ += size_in_bytes;
- return size_in_bytes;
- }
-
- // Insert other blocks at the head of a free list of the appropriate
- // magnitude.
- FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- page->free_list_category(type)->Free(start, size_in_bytes, mode, this);
- DCHECK_EQ(page->AvailableInFreeList(),
- page->AvailableInFreeListFromAllocatedBytes());
- return 0;
-}
-
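FreeList::Free above shows the size-class scheme: blocks below the minimum block size are counted as waste, everything else goes into the category selected by its size. A compact toy version with made-up class boundaries (V8's real categories and thresholds differ):

#include <array>
#include <cstddef>
#include <vector>

// A toy segregated free list: freed blocks are binned by size class; blocks
// too small to hold a list node are tracked as wasted bytes instead.
class ToyFreeList {
 public:
  struct Block { void* start; size_t size; };

  // Returns the number of wasted bytes, mirroring the return value above.
  size_t Free(void* start, size_t size) {
    if (size < kMinBlockSize) {
      wasted_bytes_ += size;
      return size;
    }
    bins_[SelectCategory(size)].push_back({start, size});
    return 0;
  }

 private:
  static constexpr size_t kMinBlockSize = 16;
  // Illustrative boundaries only.
  static constexpr std::array<size_t, 4> kBoundaries = {32, 128, 1024, 16384};

  static size_t SelectCategory(size_t size) {
    for (size_t i = 0; i < kBoundaries.size(); ++i) {
      if (size < kBoundaries[i]) return i;
    }
    return kBoundaries.size();  // the "huge" bin
  }

  std::array<std::vector<Block>, kBoundaries.size() + 1> bins_;
  size_t wasted_bytes_ = 0;
};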
-// ------------------------------------------------
-// FreeListLegacy implementation
-
-FreeListLegacy::FreeListLegacy() {
- // Initializing base (FreeList) fields
- number_of_categories_ = kHuge + 1;
- last_category_ = kHuge;
- min_block_size_ = kMinBlockSize;
- categories_ = new FreeListCategory*[number_of_categories_]();
-
- Reset();
-}
-
-FreeListLegacy::~FreeListLegacy() { delete[] categories_; }
-
-FreeSpace FreeListLegacy::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
- FreeSpace node;
- // First try the allocation fast path: try to allocate the minimum element
- // size of a free list category. This operation is constant time.
- FreeListCategoryType type =
- SelectFastAllocationFreeListCategoryType(size_in_bytes);
- for (int i = type; i < kHuge && node.is_null(); i++) {
- node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
- node_size);
- }
-
- if (node.is_null()) {
- // Next search the huge list for free list nodes. This takes linear time in
- // the number of huge elements.
- node = SearchForNodeInList(kHuge, size_in_bytes, node_size);
- }
-
- if (node.is_null() && type != kHuge) {
- // We didn't find anything in the huge list.
- type = SelectFreeListCategoryType(size_in_bytes);
-
- if (type == kTiniest) {
- // For this tiniest object, the tiny list hasn't been searched yet.
- // Now searching the tiny list.
- node = TryFindNodeIn(kTiny, size_in_bytes, node_size);
- }
-
- if (node.is_null()) {
- // Now search the best fitting free list for a node that has at least the
- // requested size.
- node = TryFindNodeIn(type, size_in_bytes, node_size);
- }
- }
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
-}
-
-// ------------------------------------------------
-// FreeListFastAlloc implementation
-
-FreeListFastAlloc::FreeListFastAlloc() {
- // Initializing base (FreeList) fields
- number_of_categories_ = kHuge + 1;
- last_category_ = kHuge;
- min_block_size_ = kMinBlockSize;
- categories_ = new FreeListCategory*[number_of_categories_]();
-
- Reset();
-}
-
-FreeListFastAlloc::~FreeListFastAlloc() { delete[] categories_; }
-
-FreeSpace FreeListFastAlloc::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
- FreeSpace node;
- // Try to allocate the biggest element possible (to make the most of later
- // bump-pointer allocations).
- FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- for (int i = kHuge; i >= type && node.is_null(); i--) {
- node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
- node_size);
- }
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
-}
-
-// ------------------------------------------------
-// FreeListMany implementation
-
-constexpr unsigned int FreeListMany::categories_min[kNumberOfCategories];
-
-FreeListMany::FreeListMany() {
- // Initializing base (FreeList) fields
- number_of_categories_ = kNumberOfCategories;
- last_category_ = number_of_categories_ - 1;
- min_block_size_ = kMinBlockSize;
- categories_ = new FreeListCategory*[number_of_categories_]();
-
- Reset();
-}
-
-FreeListMany::~FreeListMany() { delete[] categories_; }
-
-size_t FreeListMany::GuaranteedAllocatable(size_t maximum_freed) {
- if (maximum_freed < categories_min[0]) {
- return 0;
- }
- for (int cat = kFirstCategory + 1; cat <= last_category_; cat++) {
- if (maximum_freed < categories_min[cat]) {
- return categories_min[cat - 1];
- }
- }
- return maximum_freed;
-}
-
-Page* FreeListMany::GetPageForSize(size_t size_in_bytes) {
- FreeListCategoryType minimum_category =
- SelectFreeListCategoryType(size_in_bytes);
- Page* page = nullptr;
- for (int cat = minimum_category + 1; !page && cat <= last_category_; cat++) {
- page = GetPageForCategoryType(cat);
- }
- if (!page) {
- // Might return a page in which |size_in_bytes| will not fit.
- page = GetPageForCategoryType(minimum_category);
- }
- return page;
-}
-
-FreeSpace FreeListMany::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
- FreeSpace node;
- FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- for (int i = type; i < last_category_ && node.is_null(); i++) {
- node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
- node_size);
- }
-
- if (node.is_null()) {
- // Searching each element of the last category.
- node = SearchForNodeInList(last_category_, size_in_bytes, node_size);
- }
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
-}
-
-// ------------------------------------------------
-// FreeListManyCached implementation
-
-FreeListManyCached::FreeListManyCached() { ResetCache(); }
-
-void FreeListManyCached::Reset() {
- ResetCache();
- FreeListMany::Reset();
-}
-
-bool FreeListManyCached::AddCategory(FreeListCategory* category) {
- bool was_added = FreeList::AddCategory(category);
-
- // Updating cache
- if (was_added) {
- UpdateCacheAfterAddition(category->type_);
- }
-
-#ifdef DEBUG
- CheckCacheIntegrity();
-#endif
-
- return was_added;
-}
-
-void FreeListManyCached::RemoveCategory(FreeListCategory* category) {
- FreeList::RemoveCategory(category);
-
- // Updating cache
- int type = category->type_;
- if (categories_[type] == nullptr) {
- UpdateCacheAfterRemoval(type);
- }
-
-#ifdef DEBUG
- CheckCacheIntegrity();
-#endif
-}
-
-size_t FreeListManyCached::Free(Address start, size_t size_in_bytes,
- FreeMode mode) {
- Page* page = Page::FromAddress(start);
- page->DecreaseAllocatedBytes(size_in_bytes);
-
- // Blocks have to be a minimum size to hold free list items.
- if (size_in_bytes < min_block_size_) {
- page->add_wasted_memory(size_in_bytes);
- wasted_bytes_ += size_in_bytes;
- return size_in_bytes;
- }
-
- // Insert other blocks at the head of a free list of the appropriate
- // magnitude.
- FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- page->free_list_category(type)->Free(start, size_in_bytes, mode, this);
-
- // Updating cache
- if (mode == kLinkCategory) {
- UpdateCacheAfterAddition(type);
-
-#ifdef DEBUG
- CheckCacheIntegrity();
-#endif
- }
-
- DCHECK_EQ(page->AvailableInFreeList(),
- page->AvailableInFreeListFromAllocatedBytes());
- return 0;
-}
-
-FreeSpace FreeListManyCached::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- USE(origin);
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
-
- FreeSpace node;
- FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- type = next_nonempty_category[type];
- for (; type < last_category_; type = next_nonempty_category[type + 1]) {
- node = TryFindNodeIn(type, size_in_bytes, node_size);
- if (!node.is_null()) break;
- }
-
- if (node.is_null()) {
- // Searching each element of the last category.
- type = last_category_;
- node = SearchForNodeInList(type, size_in_bytes, node_size);
- }
-
- // Updating cache
- if (!node.is_null() && categories_[type] == nullptr) {
- UpdateCacheAfterRemoval(type);
- }
-
-#ifdef DEBUG
- CheckCacheIntegrity();
-#endif
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
-}
-
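FreeListManyCached::Allocate above uses the next_nonempty_category table to jump straight to the first category that actually has free blocks instead of probing each one. A small standalone sketch of that skip-table idea; for simplicity it rebuilds the table on every add/remove, whereas V8 updates it incrementally:

#include <array>
#include <cstddef>

// Skip table over N buckets: next_nonempty_[i] is the smallest index >= i
// whose bucket is non-empty, or N when every bucket from i onward is empty.
template <size_t N>
class NonEmptyBucketCache {
 public:
  NonEmptyBucketCache() { Rebuild(); }

  void Add(size_t bucket) { ++counts_[bucket]; Rebuild(); }
  void Remove(size_t bucket) { --counts_[bucket]; Rebuild(); }

  size_t NextNonEmpty(size_t from) const { return next_nonempty_[from]; }

 private:
  void Rebuild() {
    next_nonempty_[N] = N;  // sentinel: nothing beyond the last bucket
    size_t next = N;
    for (size_t i = N; i-- > 0;) {
      if (counts_[i] > 0) next = i;
      next_nonempty_[i] = next;
    }
  }

  std::array<size_t, N> counts_{};
  std::array<size_t, N + 1> next_nonempty_{};
};

With such a table, the allocation loop above becomes a scan of the form: for (size_t b = cache.NextNonEmpty(first); b < N; b = cache.NextNonEmpty(b + 1)), visiting only non-empty categories.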
-// ------------------------------------------------
-// FreeListManyCachedFastPath implementation
-
-FreeSpace FreeListManyCachedFastPath::Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) {
- USE(origin);
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
- FreeSpace node;
-
- // Fast path part 1: searching the last categories
- FreeListCategoryType first_category =
- SelectFastAllocationFreeListCategoryType(size_in_bytes);
- FreeListCategoryType type = first_category;
- for (type = next_nonempty_category[type]; type <= last_category_;
- type = next_nonempty_category[type + 1]) {
- node = TryFindNodeIn(type, size_in_bytes, node_size);
- if (!node.is_null()) break;
- }
-
- // Fast path part 2: searching the medium categories for tiny objects
- if (node.is_null()) {
- if (size_in_bytes <= kTinyObjectMaxSize) {
- for (type = next_nonempty_category[kFastPathFallBackTiny];
- type < kFastPathFirstCategory;
- type = next_nonempty_category[type + 1]) {
- node = TryFindNodeIn(type, size_in_bytes, node_size);
- if (!node.is_null()) break;
- }
- }
- }
-
- // Searching the last category
- if (node.is_null()) {
- // Searching each element of the last category.
- type = last_category_;
- node = SearchForNodeInList(type, size_in_bytes, node_size);
- }
-
- // Finally, search the most precise category
- if (node.is_null()) {
- type = SelectFreeListCategoryType(size_in_bytes);
- for (type = next_nonempty_category[type]; type < first_category;
- type = next_nonempty_category[type + 1]) {
- node = TryFindNodeIn(type, size_in_bytes, node_size);
- if (!node.is_null()) break;
- }
- }
-
- // Updating cache
- if (!node.is_null() && categories_[type] == nullptr) {
- UpdateCacheAfterRemoval(type);
- }
-
-#ifdef DEBUG
- CheckCacheIntegrity();
-#endif
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
-}
-
-// ------------------------------------------------
-// FreeListManyCachedOrigin implementation
-
-FreeSpace FreeListManyCachedOrigin::Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) {
- if (origin == AllocationOrigin::kGC) {
- return FreeListManyCached::Allocate(size_in_bytes, node_size, origin);
- } else {
- return FreeListManyCachedFastPath::Allocate(size_in_bytes, node_size,
- origin);
- }
-}
-
-// ------------------------------------------------
-// FreeListMap implementation
-
-FreeListMap::FreeListMap() {
- // Initializing base (FreeList) fields
- number_of_categories_ = 1;
- last_category_ = kOnlyCategory;
- min_block_size_ = kMinBlockSize;
- categories_ = new FreeListCategory*[number_of_categories_]();
-
- Reset();
-}
-
-size_t FreeListMap::GuaranteedAllocatable(size_t maximum_freed) {
- return maximum_freed;
-}
-
-Page* FreeListMap::GetPageForSize(size_t size_in_bytes) {
- return GetPageForCategoryType(kOnlyCategory);
-}
-
-FreeListMap::~FreeListMap() { delete[] categories_; }
-
-FreeSpace FreeListMap::Allocate(size_t size_in_bytes, size_t* node_size,
- AllocationOrigin origin) {
- DCHECK_GE(kMaxBlockSize, size_in_bytes);
-
- // The following DCHECK ensures that maps are allocated one by one (i.e.,
- // without folding). This assumption currently holds. If it stops holding in
- // the future, this DCHECK will fail; to fix that, remove the DCHECK and
- // replace TryFindNodeIn with SearchForNodeInList below.
- DCHECK_EQ(size_in_bytes, Map::kSize);
-
- FreeSpace node = TryFindNodeIn(kOnlyCategory, size_in_bytes, node_size);
-
- if (!node.is_null()) {
- Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
- }
-
- DCHECK_IMPLIES(node.is_null(), IsEmpty());
- return node;
-}
-
-// ------------------------------------------------
-// Generic FreeList methods (non alloc/free related)
-
-void FreeList::Reset() {
- ForAllFreeListCategories(
- [this](FreeListCategory* category) { category->Reset(this); });
- for (int i = kFirstCategory; i < number_of_categories_; i++) {
- categories_[i] = nullptr;
- }
- wasted_bytes_ = 0;
- available_ = 0;
-}
-
-size_t FreeList::EvictFreeListItems(Page* page) {
- size_t sum = 0;
- page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
- sum += category->available();
- RemoveCategory(category);
- category->Reset(this);
- });
- return sum;
-}
-
-void FreeList::RepairLists(Heap* heap) {
- ForAllFreeListCategories(
- [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
-}
-
-bool FreeList::AddCategory(FreeListCategory* category) {
- FreeListCategoryType type = category->type_;
- DCHECK_LT(type, number_of_categories_);
- FreeListCategory* top = categories_[type];
-
- if (category->is_empty()) return false;
- DCHECK_NE(top, category);
-
- // Common double-linked list insertion.
- if (top != nullptr) {
- top->set_prev(category);
- }
- category->set_next(top);
- categories_[type] = category;
-
- IncreaseAvailableBytes(category->available());
- return true;
-}
-
-void FreeList::RemoveCategory(FreeListCategory* category) {
- FreeListCategoryType type = category->type_;
- DCHECK_LT(type, number_of_categories_);
- FreeListCategory* top = categories_[type];
-
- if (category->is_linked(this)) {
- DecreaseAvailableBytes(category->available());
- }
-
- // Common double-linked list removal.
- if (top == category) {
- categories_[type] = category->next();
- }
- if (category->prev() != nullptr) {
- category->prev()->set_next(category->next());
- }
- if (category->next() != nullptr) {
- category->next()->set_prev(category->prev());
- }
- category->set_next(nullptr);
- category->set_prev(nullptr);
-}
-
-void FreeList::PrintCategories(FreeListCategoryType type) {
- FreeListCategoryIterator it(this, type);
- PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
- static_cast<void*>(categories_[type]), type);
- while (it.HasNext()) {
- FreeListCategory* current = it.Next();
- PrintF("%p -> ", static_cast<void*>(current));
- }
- PrintF("null\n");
-}
int MemoryChunk::FreeListsLength() {
int length = 0;
@@ -3619,250 +475,5 @@ int MemoryChunk::FreeListsLength() {
return length;
}
-size_t FreeListCategory::SumFreeList() {
- size_t sum = 0;
- FreeSpace cur = top();
- while (!cur.is_null()) {
- // We can't use "cur->map()" here because both cur's map and the
- // root can be null during bootstrapping.
- DCHECK(cur.map_slot().contains_value(Page::FromHeapObject(cur)
- ->heap()
- ->isolate()
- ->root(RootIndex::kFreeSpaceMap)
- .ptr()));
- sum += cur.relaxed_read_size();
- cur = cur.next();
- }
- return sum;
-}
-int FreeListCategory::FreeListLength() {
- int length = 0;
- FreeSpace cur = top();
- while (!cur.is_null()) {
- length++;
- cur = cur.next();
- }
- return length;
-}
-
-#ifdef DEBUG
-bool FreeList::IsVeryLong() {
- int len = 0;
- for (int i = kFirstCategory; i < number_of_categories_; i++) {
- FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
- while (it.HasNext()) {
- len += it.Next()->FreeListLength();
- if (len >= FreeListCategory::kVeryLongFreeList) return true;
- }
- }
- return false;
-}
-
-
-// This can take a very long time because it is linear in the number of entries
-// on the free list, so it should not be called if FreeListLength returns
-// kVeryLongFreeList.
-size_t FreeList::SumFreeLists() {
- size_t sum = 0;
- ForAllFreeListCategories(
- [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
- return sum;
-}
-#endif
-
-
-// -----------------------------------------------------------------------------
-// OldSpace implementation
-
-void PagedSpace::PrepareForMarkCompact() {
- // We don't have a linear allocation area while sweeping. It will be restored
- // on the first allocation after the sweep.
- FreeLinearAllocationArea();
-
- // Clear the free list before a full GC---it will be rebuilt afterward.
- free_list_->Reset();
-}
-
-size_t PagedSpace::SizeOfObjects() {
- CHECK_GE(limit(), top());
- DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
- return Size() - (limit() - top());
-}
-
-bool PagedSpace::EnsureSweptAndRetryAllocation(int size_in_bytes,
- AllocationOrigin origin) {
- DCHECK(!is_local_space());
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- if (collector->sweeping_in_progress()) {
- // Wait for the sweeper threads here and complete the sweeping phase.
- collector->EnsureSweepingCompleted();
-
- // After waiting for the sweeper threads, there may be new free-list
- // entries.
- return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
- }
- return false;
-}
-
-bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin) {
- VMState<GC> state(heap()->isolate());
- RuntimeCallTimerScope runtime_timer(
- heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
- base::Optional<base::MutexGuard> optional_mutex;
-
- if (FLAG_concurrent_allocation && origin != AllocationOrigin::kGC &&
- identity() == OLD_SPACE) {
- optional_mutex.emplace(&allocation_mutex_);
- }
-
- return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
-}
-
-bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin) {
- return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
-}
-
-bool OffThreadSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin) {
- if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
- return true;
-
- if (Expand()) {
- DCHECK((CountTotalPages() > 1) ||
- (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
- return RefillLinearAllocationAreaFromFreeList(
- static_cast<size_t>(size_in_bytes), origin);
- }
-
- return false;
-}
-
-bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin) {
- // Non-compaction local spaces are not supported.
- DCHECK_IMPLIES(is_local_space(), is_compaction_space());
-
- // Allocation in this space has failed.
- DCHECK_GE(size_in_bytes, 0);
- const int kMaxPagesToSweep = 1;
-
- if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
- return true;
-
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- // Sweeping is still in progress.
- if (collector->sweeping_in_progress()) {
- if (FLAG_concurrent_sweeping && !is_compaction_space() &&
- !collector->sweeper()->AreSweeperTasksRunning()) {
- collector->EnsureSweepingCompleted();
- }
-
- // First try to refill the free-list, concurrent sweeper threads
- // may have freed some objects in the meantime.
- RefillFreeList();
-
- // Retry the free list allocation.
- if (RefillLinearAllocationAreaFromFreeList(
- static_cast<size_t>(size_in_bytes), origin))
- return true;
-
- if (SweepAndRetryAllocation(size_in_bytes, kMaxPagesToSweep, size_in_bytes,
- origin))
- return true;
- }
-
- if (is_compaction_space()) {
- // The main thread may have acquired all swept pages. Try to steal from
- // it. This can only happen during young generation evacuation.
- PagedSpace* main_space = heap()->paged_space(identity());
- Page* page = main_space->RemovePageSafe(size_in_bytes);
- if (page != nullptr) {
- AddPage(page);
- if (RefillLinearAllocationAreaFromFreeList(
- static_cast<size_t>(size_in_bytes), origin))
- return true;
- }
- }
-
- if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
- DCHECK((CountTotalPages() > 1) ||
- (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
- return RefillLinearAllocationAreaFromFreeList(
- static_cast<size_t>(size_in_bytes), origin);
- }
-
- if (is_compaction_space()) {
- return SweepAndRetryAllocation(0, 0, size_in_bytes, origin);
-
- } else {
- // If sweeper threads are active, wait for them at that point and steal
- // elements from their free-lists. Allocation may still fail here which
- // would indicate that there is not enough memory for the given allocation.
- return EnsureSweptAndRetryAllocation(size_in_bytes, origin);
- }
-}
-
-bool PagedSpace::SweepAndRetryAllocation(int required_freed_bytes,
- int max_pages, int size_in_bytes,
- AllocationOrigin origin) {
- // Cleanup invalidated old-to-new refs for compaction space in the
- // final atomic pause.
- Sweeper::FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
- is_compaction_space() ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
- : Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
-
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- if (collector->sweeping_in_progress()) {
- int max_freed = collector->sweeper()->ParallelSweepSpace(
- identity(), required_freed_bytes, max_pages,
- invalidated_slots_in_free_space);
- RefillFreeList();
- if (max_freed >= size_in_bytes)
- return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
- }
- return false;
-}
-
-// -----------------------------------------------------------------------------
-// MapSpace implementation
-
-// TODO(dmercadier): use a heap instead of sorting like that.
-// Using a heap will have multiple benefits:
-// - for now, SortFreeList is only called after sweeping, which is somewhat
-// late. Using a heap, sorting could be done online: FreeListCategories would
- // be inserted in a heap (i.e., in a sorted manner).
-// - SortFreeList is a bit fragile: any change to FreeListMap (or to
-// MapSpace::free_list_) could break it.
-void MapSpace::SortFreeList() {
- using LiveBytesPagePair = std::pair<size_t, Page*>;
- std::vector<LiveBytesPagePair> pages;
- pages.reserve(CountTotalPages());
-
- for (Page* p : *this) {
- free_list()->RemoveCategory(p->free_list_category(kFirstCategory));
- pages.push_back(std::make_pair(p->allocated_bytes(), p));
- }
-
- // Sorting by least-allocated-bytes first.
- std::sort(pages.begin(), pages.end(),
- [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
- return a.first < b.first;
- });
-
- for (LiveBytesPagePair const& p : pages) {
- // Since AddCategory inserts at the head, it reverses the order produced by
- // the sort above: the least-allocated page is added first and therefore ends
- // up last, while the most-allocated page ends up first.
- free_list()->AddCategory(p.second->free_list_category(kFirstCategory));
- }
-}
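The comment above can be made concrete with a minimal standalone sketch (plain C++, not V8 code; FakePage is a hypothetical stand-in for Page): sorting ascending by allocated bytes and then re-inserting each page at the head of a list leaves the most-allocated page first.

#include <algorithm>
#include <cstddef>
#include <forward_list>
#include <iostream>
#include <vector>

// Hypothetical stand-in for a Page; only the live-bytes counter matters here.
struct FakePage {
  size_t allocated_bytes;
};

int main() {
  std::vector<FakePage> pages = {{300}, {100}, {200}};

  // Sort by least-allocated-bytes first, as SortFreeList does.
  std::sort(pages.begin(), pages.end(),
            [](const FakePage& a, const FakePage& b) {
              return a.allocated_bytes < b.allocated_bytes;
            });

  // Re-adding each page at the head of a list (as AddCategory does) reverses
  // the order: the most-allocated page ends up first.
  std::forward_list<FakePage> category_list;
  for (const FakePage& p : pages) category_list.push_front(p);

  for (const FakePage& p : category_list)
    std::cout << p.allocated_bytes << " ";  // Prints: 300 200 100
  std::cout << "\n";
  return 0;
}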
-
-#ifdef VERIFY_HEAP
-void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
-#endif
-
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/spaces.h b/chromium/v8/src/heap/spaces.h
index 72ae96cadd2..31fb4b22f5b 100644
--- a/chromium/v8/src/heap/spaces.h
+++ b/chromium/v8/src/heap/spaces.h
@@ -6,34 +6,19 @@
#define V8_HEAP_SPACES_H_
#include <atomic>
-#include <list>
-#include <map>
#include <memory>
-#include <unordered_map>
-#include <unordered_set>
#include <vector>
-#include "src/base/atomic-utils.h"
-#include "src/base/bounded-page-allocator.h"
-#include "src/base/export-template.h"
#include "src/base/iterator.h"
#include "src/base/macros.h"
-#include "src/base/optional.h"
-#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
-#include "src/flags/flags.h"
+#include "src/heap/base-space.h"
#include "src/heap/basic-memory-chunk.h"
+#include "src/heap/free-list.h"
#include "src/heap/heap.h"
-#include "src/heap/invalidated-slots.h"
#include "src/heap/list.h"
-#include "src/heap/marking.h"
#include "src/heap/memory-chunk.h"
-#include "src/heap/slot-set.h"
-#include "src/objects/free-space.h"
-#include "src/objects/heap-object.h"
-#include "src/objects/map.h"
#include "src/objects/objects.h"
-#include "src/tasks/cancelable-task.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
@@ -47,26 +32,15 @@ class TestCodePageAllocatorScope;
} // namespace heap
class AllocationObserver;
-class CompactionSpace;
-class CompactionSpaceCollection;
class FreeList;
class Isolate;
class LargeObjectSpace;
class LargePage;
class LinearAllocationArea;
class LocalArrayBufferTracker;
-class LocalSpace;
-class MemoryAllocator;
-class MemoryChunk;
-class MemoryChunkLayout;
-class OffThreadSpace;
class Page;
class PagedSpace;
class SemiSpace;
-class SlotsBuffer;
-class SlotSet;
-class TypedSlotSet;
-class Space;
// -----------------------------------------------------------------------------
// Heap structures:
@@ -130,272 +104,14 @@ class Space;
#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
DCHECK((0 < size) && (size <= code_space->AreaSize()))
-using FreeListCategoryType = int32_t;
-
-static const FreeListCategoryType kFirstCategory = 0;
-static const FreeListCategoryType kInvalidCategory = -1;
-
-enum FreeMode { kLinkCategory, kDoNotLinkCategory };
-
-enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
-
-// A free list category maintains a linked list of free memory blocks.
-class FreeListCategory {
- public:
- void Initialize(FreeListCategoryType type) {
- type_ = type;
- available_ = 0;
- prev_ = nullptr;
- next_ = nullptr;
- }
-
- void Reset(FreeList* owner);
-
- void RepairFreeList(Heap* heap);
-
- // Relinks the category into the currently owning free list. Requires that the
- // category is currently unlinked.
- void Relink(FreeList* owner);
-
- void Free(Address address, size_t size_in_bytes, FreeMode mode,
- FreeList* owner);
-
- // Performs a single try to pick a node of at least |minimum_size| from the
- // category. Stores the actual size in |node_size|. Returns nullptr if no
- // node is found.
- FreeSpace PickNodeFromList(size_t minimum_size, size_t* node_size);
-
- // Picks a node of at least |minimum_size| from the category. Stores the
- // actual size in |node_size|. Returns nullptr if no node is found.
- FreeSpace SearchForNodeInList(size_t minimum_size, size_t* node_size);
-
- inline bool is_linked(FreeList* owner) const;
- bool is_empty() { return top().is_null(); }
- uint32_t available() const { return available_; }
-
- size_t SumFreeList();
- int FreeListLength();
-
- private:
- // For debug builds we accurately compute free list lengths up until
- // {kVeryLongFreeList} by manually walking the list.
- static const int kVeryLongFreeList = 500;
-
- // Updates |available_|, |length_| and free_list_->Available() after an
- // allocation of size |allocation_size|.
- inline void UpdateCountersAfterAllocation(size_t allocation_size);
-
- FreeSpace top() { return top_; }
- void set_top(FreeSpace top) { top_ = top; }
- FreeListCategory* prev() { return prev_; }
- void set_prev(FreeListCategory* prev) { prev_ = prev; }
- FreeListCategory* next() { return next_; }
- void set_next(FreeListCategory* next) { next_ = next; }
-
- // |type_|: The type of this free list category.
- FreeListCategoryType type_ = kInvalidCategory;
-
- // |available_|: Total available bytes in all blocks of this free list
- // category.
- uint32_t available_ = 0;
-
- // |top_|: Points to the top FreeSpace in the free list category.
- FreeSpace top_;
-
- FreeListCategory* prev_ = nullptr;
- FreeListCategory* next_ = nullptr;
-
- friend class FreeList;
- friend class FreeListManyCached;
- friend class PagedSpace;
- friend class MapSpace;
-};
-
-// A free list maintains free blocks of memory. The free list is organized in
-// a way to encourage objects allocated around the same time to be near each
-// other. The normal way to allocate is intended to be by bumping a 'top'
-// pointer until it hits a 'limit' pointer. When the limit is hit we need to
-// find a new space to allocate from. This is done with the free list, which is
-// divided up into rough categories to cut down on waste. Having finer
-// categories would scatter allocation more.
-class FreeList {
- public:
- // Creates a Freelist of the default class (FreeListLegacy for now).
- V8_EXPORT_PRIVATE static FreeList* CreateFreeList();
-
- virtual ~FreeList() = default;
-
- // Returns how much memory can be allocated after freeing maximum_freed
- // memory.
- virtual size_t GuaranteedAllocatable(size_t maximum_freed) = 0;
-
- // Adds a node on the free list. The block of size {size_in_bytes} starting
- // at {start} is placed on the free list. The return value is the number of
- // bytes that were not added to the free list, because the freed memory block
- // was too small. Bookkeeping information will be written to the block, i.e.,
- // its contents will be destroyed. The start address should be word aligned,
- // and the size should be a non-zero multiple of the word size.
- virtual size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
-
- // Allocates a free space node from the free list of at least size_in_bytes
- // bytes. Returns the actual node size in node_size which can be bigger than
- // size_in_bytes. This method returns null if the allocation request cannot be
- // handled by the free list.
- virtual V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) = 0;
-
- // Returns a page containing an entry for a given type, or nullptr otherwise.
- V8_EXPORT_PRIVATE virtual Page* GetPageForSize(size_t size_in_bytes) = 0;
-
- virtual void Reset();
-
- // Return the number of bytes available on the free list.
- size_t Available() {
- DCHECK(available_ == SumFreeLists());
- return available_;
- }
-
- // Update number of available bytes on the Freelists.
- void IncreaseAvailableBytes(size_t bytes) { available_ += bytes; }
- void DecreaseAvailableBytes(size_t bytes) { available_ -= bytes; }
-
- bool IsEmpty() {
- bool empty = true;
- ForAllFreeListCategories([&empty](FreeListCategory* category) {
- if (!category->is_empty()) empty = false;
- });
- return empty;
- }
-
- // Used after booting the VM.
- void RepairLists(Heap* heap);
-
- V8_EXPORT_PRIVATE size_t EvictFreeListItems(Page* page);
-
- int number_of_categories() { return number_of_categories_; }
- FreeListCategoryType last_category() { return last_category_; }
-
- size_t wasted_bytes() { return wasted_bytes_; }
-
- template <typename Callback>
- void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
- FreeListCategory* current = categories_[type];
- while (current != nullptr) {
- FreeListCategory* next = current->next();
- callback(current);
- current = next;
- }
- }
-
- template <typename Callback>
- void ForAllFreeListCategories(Callback callback) {
- for (int i = kFirstCategory; i < number_of_categories(); i++) {
- ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
- }
- }
-
- virtual bool AddCategory(FreeListCategory* category);
- virtual V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
- void PrintCategories(FreeListCategoryType type);
-
- protected:
- class FreeListCategoryIterator final {
- public:
- FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
- : current_(free_list->categories_[type]) {}
-
- bool HasNext() const { return current_ != nullptr; }
-
- FreeListCategory* Next() {
- DCHECK(HasNext());
- FreeListCategory* tmp = current_;
- current_ = current_->next();
- return tmp;
- }
-
- private:
- FreeListCategory* current_;
- };
-
-#ifdef DEBUG
- V8_EXPORT_PRIVATE size_t SumFreeLists();
- bool IsVeryLong();
-#endif
-
- // Tries to retrieve a node from the first category in a given |type|.
- // Returns nullptr if the category is empty or the top entry is smaller
- // than minimum_size.
- FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
- size_t* node_size);
-
- // Searches a given |type| for a node of at least |minimum_size|.
- FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t minimum_size,
- size_t* node_size);
-
- // Returns the smallest category in which an object of |size_in_bytes| could
- // fit.
- virtual FreeListCategoryType SelectFreeListCategoryType(
- size_t size_in_bytes) = 0;
-
- FreeListCategory* top(FreeListCategoryType type) const {
- return categories_[type];
- }
-
- inline Page* GetPageForCategoryType(FreeListCategoryType type);
-
- int number_of_categories_ = 0;
- FreeListCategoryType last_category_ = 0;
- size_t min_block_size_ = 0;
-
- std::atomic<size_t> wasted_bytes_{0};
- FreeListCategory** categories_ = nullptr;
-
- // |available_|: The number of bytes in this freelist.
- size_t available_ = 0;
-
- friend class FreeListCategory;
- friend class Page;
- friend class MemoryChunk;
- friend class ReadOnlyPage;
- friend class MapSpace;
-};
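The "bump a top pointer until it hits a limit" scheme described in the class comment above can be sketched in isolation. The following is a minimal illustration in plain C++ (names such as BumpAllocator are hypothetical; this is not V8's LinearAllocationArea):

#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Hypothetical linear allocation area: objects are carved off by bumping
// |top| towards |limit|; when the request does not fit, the caller must
// refill the area from a free list (not shown here).
class BumpAllocator {
 public:
  BumpAllocator(void* start, size_t size)
      : top_(reinterpret_cast<uintptr_t>(start)), limit_(top_ + size) {}

  void* Allocate(size_t bytes) {
    // Keep allocations pointer-aligned.
    bytes = (bytes + sizeof(void*) - 1) & ~(sizeof(void*) - 1);
    if (top_ + bytes > limit_) return nullptr;  // Limit hit: needs a refill.
    void* result = reinterpret_cast<void*>(top_);
    top_ += bytes;
    return result;
  }

 private:
  uintptr_t top_;
  uintptr_t limit_;
};

int main() {
  void* backing = std::malloc(1024);
  BumpAllocator lab(backing, 1024);
  void* a = lab.Allocate(48);    // Succeeds: top is bumped by 48 bytes.
  void* b = lab.Allocate(2000);  // Returns nullptr: would cross the limit.
  (void)a;
  (void)b;
  std::free(backing);
  return 0;
}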
-
-// FreeList used for spaces that don't have freelists
-// (only the LargeObject space for now).
-class NoFreeList final : public FreeList {
- public:
- size_t GuaranteedAllocatable(size_t maximum_freed) final {
- FATAL("NoFreeList can't be used as a standard FreeList. ");
- }
- size_t Free(Address start, size_t size_in_bytes, FreeMode mode) final {
- FATAL("NoFreeList can't be used as a standard FreeList.");
- }
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) final {
- FATAL("NoFreeList can't be used as a standard FreeList.");
- }
- Page* GetPageForSize(size_t size_in_bytes) final {
- FATAL("NoFreeList can't be used as a standard FreeList.");
- }
-
- private:
- FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) final {
- FATAL("NoFreeList can't be used as a standard FreeList.");
- }
-};
-
// ----------------------------------------------------------------------------
-// Space is the abstract superclass for all allocation spaces.
-class V8_EXPORT_PRIVATE Space : public Malloced {
+// Space is the abstract superclass for all allocation spaces that are not
+// sealed after startup (i.e. not ReadOnlySpace).
+class V8_EXPORT_PRIVATE Space : public BaseSpace {
public:
Space(Heap* heap, AllocationSpace id, FreeList* free_list)
- : allocation_observers_paused_(false),
- heap_(heap),
- id_(id),
- committed_(0),
- max_committed_(0),
+ : BaseSpace(heap, id),
+ allocation_observers_paused_(false),
free_list_(std::unique_ptr<FreeList>(free_list)) {
external_backing_store_bytes_ =
new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
@@ -407,22 +123,11 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
static inline void MoveExternalBackingStoreBytes(
ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
- virtual ~Space() {
+ ~Space() override {
delete[] external_backing_store_bytes_;
external_backing_store_bytes_ = nullptr;
}
- Heap* heap() const {
- DCHECK_NOT_NULL(heap_);
- return heap_;
- }
-
- bool IsDetached() const { return heap_ == nullptr; }
-
- AllocationSpace identity() { return id_; }
-
- const char* name() { return Heap::GetSpaceName(id_); }
-
virtual void AddAllocationObserver(AllocationObserver* observer);
virtual void RemoveAllocationObserver(AllocationObserver* observer);
@@ -440,22 +145,10 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
// single allocation-folding group.
void AllocationStepAfterMerge(Address first_object_in_chunk, int size);
- // Return the total amount committed memory for this space, i.e., allocatable
- // memory and page headers.
- virtual size_t CommittedMemory() { return committed_; }
-
- virtual size_t MaximumCommittedMemory() { return max_committed_; }
-
- // Returns allocated size.
- virtual size_t Size() = 0;
-
// Returns size of objects. Can differ from the allocated size
// (e.g. see OldLargeObjectSpace).
virtual size_t SizeOfObjects() { return Size(); }
- // Approximate amount of physical memory committed for this space.
- virtual size_t CommittedPhysicalMemory() = 0;
-
// Return the available bytes without growing.
virtual size_t Available() = 0;
@@ -469,19 +162,6 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
virtual std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) = 0;
- void AccountCommitted(size_t bytes) {
- DCHECK_GE(committed_ + bytes, committed_);
- committed_ += bytes;
- if (committed_ > max_committed_) {
- max_committed_ = committed_;
- }
- }
-
- void AccountUncommitted(size_t bytes) {
- DCHECK_GE(committed_, committed_ - bytes);
- committed_ -= bytes;
- }
-
inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount);
@@ -494,15 +174,18 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
return external_backing_store_bytes_[type];
}
- void* GetRandomMmapAddr();
-
MemoryChunk* first_page() { return memory_chunk_list_.front(); }
MemoryChunk* last_page() { return memory_chunk_list_.back(); }
+ const MemoryChunk* first_page() const { return memory_chunk_list_.front(); }
+ const MemoryChunk* last_page() const { return memory_chunk_list_.back(); }
+
heap::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
FreeList* free_list() { return free_list_.get(); }
+ Address FirstPageAddress() const { return first_page()->address(); }
+
#ifdef DEBUG
virtual void Print() = 0;
#endif
@@ -513,8 +196,6 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
return !allocation_observers_paused_ && !allocation_observers_.empty();
}
- void DetachFromHeap() { heap_ = nullptr; }
-
std::vector<AllocationObserver*> allocation_observers_;
// The List manages the pages that belong to the given space.
@@ -524,36 +205,12 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
std::atomic<size_t>* external_backing_store_bytes_;
bool allocation_observers_paused_;
- Heap* heap_;
- AllocationSpace id_;
-
- // Keeps track of committed memory in a space.
- std::atomic<size_t> committed_;
- size_t max_committed_;
std::unique_ptr<FreeList> free_list_;
DISALLOW_COPY_AND_ASSIGN(Space);
};
-// The CodeObjectRegistry holds all start addresses of code objects of a given
-// MemoryChunk. Each MemoryChunk owns a separate CodeObjectRegistry. The
-// CodeObjectRegistry allows fast lookup from an inner pointer of a code object
-// to the actual code object.
-class V8_EXPORT_PRIVATE CodeObjectRegistry {
- public:
- void RegisterNewlyAllocatedCodeObject(Address code);
- void RegisterAlreadyExistingCodeObject(Address code);
- void Clear();
- void Finalize();
- bool Contains(Address code) const;
- Address GetCodeObjectStartFromInnerAddress(Address address) const;
-
- private:
- std::vector<Address> code_object_registry_already_existing_;
- std::set<Address> code_object_registry_newly_allocated_;
-};
-
STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);
// -----------------------------------------------------------------------------
@@ -609,6 +266,13 @@ class Page : public MemoryChunk {
Page* next_page() { return static_cast<Page*>(list_node_.next()); }
Page* prev_page() { return static_cast<Page*>(list_node_.prev()); }
+ const Page* next_page() const {
+ return static_cast<const Page*>(list_node_.next());
+ }
+ const Page* prev_page() const {
+ return static_cast<const Page*>(list_node_.prev());
+ }
+
template <typename Callback>
inline void ForAllFreeListCategories(Callback callback) {
for (int i = kFirstCategory;
@@ -617,17 +281,6 @@ class Page : public MemoryChunk {
}
}
- // Returns the offset of a given address to this page.
- inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
-
- // Returns the address for a given offset in this page.
- Address OffsetToAddress(size_t offset) {
- Address address_in_page = address() + offset;
- DCHECK_GE(address_in_page, area_start());
- DCHECK_LT(address_in_page, area_end());
- return address_in_page;
- }
-
void AllocateLocalTracker();
inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
bool contains_array_buffers();
@@ -643,25 +296,12 @@ class Page : public MemoryChunk {
return categories_[type];
}
- size_t wasted_memory() { return wasted_memory_; }
- void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
- size_t allocated_bytes() { return allocated_bytes_; }
- void IncreaseAllocatedBytes(size_t bytes) {
- DCHECK_LE(bytes, area_size());
- allocated_bytes_ += bytes;
- }
- void DecreaseAllocatedBytes(size_t bytes) {
- DCHECK_LE(bytes, area_size());
- DCHECK_GE(allocated_bytes(), bytes);
- allocated_bytes_ -= bytes;
- }
-
- void ResetAllocationStatistics();
-
size_t ShrinkToHighWaterMark();
V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
+ V8_EXPORT_PRIVATE void CreateBlackAreaBackground(Address start, Address end);
void DestroyBlackArea(Address start, Address end);
+ void DestroyBlackAreaBackground(Address start, Address end);
void InitializeFreeListCategories();
void AllocateFreeListCategories();
@@ -679,403 +319,6 @@ STATIC_ASSERT(sizeof(BasicMemoryChunk) <= BasicMemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
-// The process-wide singleton that keeps track of code range regions with the
-// intention to reuse free code range regions as a workaround for CFG memory
-// leaks (see crbug.com/870054).
-class CodeRangeAddressHint {
- public:
- // Returns the most recently freed code range start address for the given
- // size. If there is no such entry, then a random address is returned.
- V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
-
- V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
- size_t code_range_size);
-
- private:
- base::Mutex mutex_;
- // A map from code range size to an array of recently freed code range
- // addresses. There should be O(1) different code range sizes.
- // The length of each array is limited by the peak number of code ranges,
- // which should be also O(1).
- std::unordered_map<size_t, std::vector<Address>> recently_freed_;
-};
-
-// ----------------------------------------------------------------------------
-// A space acquires chunks of memory from the operating system. The memory
-// allocator allocates and deallocates pages for the paged heap spaces and large
-// pages for large object space.
-class MemoryAllocator {
- public:
- // Unmapper takes care of concurrently unmapping and uncommitting memory
- // chunks.
- class Unmapper {
- public:
- class UnmapFreeMemoryTask;
-
- Unmapper(Heap* heap, MemoryAllocator* allocator)
- : heap_(heap),
- allocator_(allocator),
- pending_unmapping_tasks_semaphore_(0),
- pending_unmapping_tasks_(0),
- active_unmapping_tasks_(0) {
- chunks_[kRegular].reserve(kReservedQueueingSlots);
- chunks_[kPooled].reserve(kReservedQueueingSlots);
- }
-
- void AddMemoryChunkSafe(MemoryChunk* chunk) {
- if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
- AddMemoryChunkSafe<kRegular>(chunk);
- } else {
- AddMemoryChunkSafe<kNonRegular>(chunk);
- }
- }
-
- MemoryChunk* TryGetPooledMemoryChunkSafe() {
- // Procedure:
- // (1) Try to get a chunk that was declared as pooled and already has
- // been uncommitted.
- // (2) Try to steal any memory chunk of kPageSize that would've been
- // unmapped.
- MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
- if (chunk == nullptr) {
- chunk = GetMemoryChunkSafe<kRegular>();
- if (chunk != nullptr) {
- // For stolen chunks we need to manually free any allocated memory.
- chunk->ReleaseAllAllocatedMemory();
- }
- }
- return chunk;
- }
-
- V8_EXPORT_PRIVATE void FreeQueuedChunks();
- void CancelAndWaitForPendingTasks();
- void PrepareForGC();
- V8_EXPORT_PRIVATE void EnsureUnmappingCompleted();
- V8_EXPORT_PRIVATE void TearDown();
- size_t NumberOfCommittedChunks();
- V8_EXPORT_PRIVATE int NumberOfChunks();
- size_t CommittedBufferedMemory();
-
- private:
- static const int kReservedQueueingSlots = 64;
- static const int kMaxUnmapperTasks = 4;
-
- enum ChunkQueueType {
- kRegular, // Pages of kPageSize that do not live in a CodeRange and
- // can thus be used for stealing.
- kNonRegular, // Large chunks and executable chunks.
- kPooled, // Pooled chunks, already uncommitted and ready for reuse.
- kNumberOfChunkQueues,
- };
-
- enum class FreeMode {
- kUncommitPooled,
- kReleasePooled,
- };
-
- template <ChunkQueueType type>
- void AddMemoryChunkSafe(MemoryChunk* chunk) {
- base::MutexGuard guard(&mutex_);
- chunks_[type].push_back(chunk);
- }
-
- template <ChunkQueueType type>
- MemoryChunk* GetMemoryChunkSafe() {
- base::MutexGuard guard(&mutex_);
- if (chunks_[type].empty()) return nullptr;
- MemoryChunk* chunk = chunks_[type].back();
- chunks_[type].pop_back();
- return chunk;
- }
-
- bool MakeRoomForNewTasks();
-
- template <FreeMode mode>
- void PerformFreeMemoryOnQueuedChunks();
-
- void PerformFreeMemoryOnQueuedNonRegularChunks();
-
- Heap* const heap_;
- MemoryAllocator* const allocator_;
- base::Mutex mutex_;
- std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
- CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
- base::Semaphore pending_unmapping_tasks_semaphore_;
- intptr_t pending_unmapping_tasks_;
- std::atomic<intptr_t> active_unmapping_tasks_;
-
- friend class MemoryAllocator;
- };
-
- enum AllocationMode {
- kRegular,
- kPooled,
- };
-
- enum FreeMode {
- kFull,
- kAlreadyPooled,
- kPreFreeAndQueue,
- kPooledAndQueue,
- };
-
- V8_EXPORT_PRIVATE static intptr_t GetCommitPageSize();
-
- // Computes the memory area of discardable memory within a given memory area
- // [addr, addr+size) and returns the result as base::AddressRegion. If the
- // memory is not discardable, the returned base::AddressRegion is empty.
- V8_EXPORT_PRIVATE static base::AddressRegion ComputeDiscardMemoryArea(
- Address addr, size_t size);
-
- V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate, size_t max_capacity,
- size_t code_range_size);
-
- V8_EXPORT_PRIVATE void TearDown();
-
- // Allocates a Page from the allocator. AllocationMode is used to indicate
- // whether pooled allocation, which only works for MemoryChunk::kPageSize,
- // should be tried first.
- template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
- typename SpaceType>
- EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
-
- LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
- Executability executable);
-
- template <MemoryAllocator::FreeMode mode = kFull>
- EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- void Free(MemoryChunk* chunk);
-
- // Returns allocated spaces in bytes.
- size_t Size() { return size_; }
-
- // Returns allocated executable spaces in bytes.
- size_t SizeExecutable() { return size_executable_; }
-
- // Returns the maximum available bytes of heaps.
- size_t Available() {
- const size_t size = Size();
- return capacity_ < size ? 0 : capacity_ - size;
- }
-
- // Returns an indication of whether a pointer is in a space that has
- // been allocated by this MemoryAllocator.
- V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
- return address < lowest_ever_allocated_ ||
- address >= highest_ever_allocated_;
- }
-
- // Returns a MemoryChunk in which the memory region from commit_area_size to
- // reserve_area_size of the chunk area is reserved but not committed; it can
- // be committed later by calling MemoryChunk::CommitArea.
- V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
- size_t commit_area_size,
- Executability executable,
- Space* space);
-
- Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
- size_t alignment, Executability executable,
- void* hint, VirtualMemory* controller);
-
- void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);
-
- // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
- // internally memory is freed from |start_free| to the end of the reservation.
- // Additional memory beyond the page is not accounted for, though, so
- // |bytes_to_free| is computed by the caller.
- void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
- size_t bytes_to_free, Address new_area_end);
-
- // Checks if an allocated MemoryChunk was intended to be used for executable
- // memory.
- bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
- return executable_memory_.find(chunk) != executable_memory_.end();
- }
-
- // Commit memory region owned by given reservation object. Returns true if
- // it succeeded and false otherwise.
- bool CommitMemory(VirtualMemory* reservation);
-
- // Uncommit memory region owned by given reservation object. Returns true if
- // it succeeded and false otherwise.
- bool UncommitMemory(VirtualMemory* reservation);
-
- // Zaps a contiguous block of memory [start..(start+size)[ with
- // a given zap value.
- void ZapBlock(Address start, size_t size, uintptr_t zap_value);
-
- V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
- Address start,
- size_t commit_size,
- size_t reserved_size);
-
- // Page allocator instance for allocating non-executable pages.
- // Guaranteed to be a valid pointer.
- v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }
-
- // Page allocator instance for allocating executable pages.
- // Guaranteed to be a valid pointer.
- v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }
-
- // Returns page allocator suitable for allocating pages with requested
- // executability.
- v8::PageAllocator* page_allocator(Executability executable) {
- return executable == EXECUTABLE ? code_page_allocator_
- : data_page_allocator_;
- }
-
- // A region of memory that may contain executable code including reserved
- // OS page with read-write access in the beginning.
- const base::AddressRegion& code_range() const {
- // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
- DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
- DCHECK_IMPLIES(!code_range_.is_empty(),
- code_range_.contains(code_page_allocator_instance_->begin(),
- code_page_allocator_instance_->size()));
- return code_range_;
- }
-
- Unmapper* unmapper() { return &unmapper_; }
-
- // Performs all necessary bookkeeping to free the memory, but does not free
- // it.
- void UnregisterMemory(MemoryChunk* chunk);
-
- private:
- void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
- size_t requested);
-
- // PreFreeMemory logically frees the object, i.e., it unregisters the memory,
- // logs a delete event and adds the chunk to remembered unmapped pages.
- void PreFreeMemory(MemoryChunk* chunk);
-
- // PerformFreeMemory can be called concurrently when PreFree was executed
- // before.
- void PerformFreeMemory(MemoryChunk* chunk);
-
- // See AllocatePage for public interface. Note that currently we only support
- // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
- template <typename SpaceType>
- MemoryChunk* AllocatePagePooled(SpaceType* owner);
-
- // Initializes pages in a chunk. Returns the first page address.
- // This function and GetChunkId() are provided for the mark-compact
- // collector to rebuild page headers in the from space, which is used as a
- // marking stack and therefore has its page headers destroyed.
- Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
- PagedSpace* owner);
-
- void UpdateAllocatedSpaceLimits(Address low, Address high) {
- // The use of atomic primitives does not guarantee correctness (wrt.
- // desired semantics) by default. The loop here ensures that we update the
- // values only if they did not change in between.
- Address ptr = lowest_ever_allocated_.load(std::memory_order_relaxed);
- while ((low < ptr) && !lowest_ever_allocated_.compare_exchange_weak(
- ptr, low, std::memory_order_acq_rel)) {
- }
- ptr = highest_ever_allocated_.load(std::memory_order_relaxed);
- while ((high > ptr) && !highest_ever_allocated_.compare_exchange_weak(
- ptr, high, std::memory_order_acq_rel)) {
- }
- }
-
- void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
- DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
- executable_memory_.insert(chunk);
- }
-
- void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
- DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
- executable_memory_.erase(chunk);
- chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
- }
-
- Isolate* isolate_;
-
- // This object controls virtual space reserved for code on the V8 heap. This
- // is only valid for 64-bit architectures where kRequiresCodeRange.
- VirtualMemory code_reservation_;
-
- // Page allocator used for allocating data pages. Depending on the
- // configuration it may be a page allocator instance provided by v8::Platform
- // or a BoundedPageAllocator (when pointer compression is enabled).
- v8::PageAllocator* data_page_allocator_;
-
- // Page allocator used for allocating code pages. Depending on the
- // configuration it may be a page allocator instance provided by v8::Platform
- // or a BoundedPageAllocator (when pointer compression is enabled or
- // on those 64-bit architectures where pc-relative 32-bit displacement
- // can be used for call and jump instructions).
- v8::PageAllocator* code_page_allocator_;
-
- // A part of the |code_reservation_| that may contain executable code
- // including reserved page with read-write access in the beginning.
- // See details below.
- base::AddressRegion code_range_;
-
- // This unique pointer owns the instance of bounded code allocator
- // that controls executable pages allocation. It does not control the
- // optionally existing page in the beginning of the |code_range_|.
- // So, summarizing all above, the following conditions hold:
- // 1) |code_reservation_| >= |code_range_|
- // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|.
- // 3) |code_reservation_| is AllocatePageSize()-aligned
- // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
- // 5) |code_range_| is CommitPageSize()-aligned
- std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;
-
- // Maximum space size in bytes.
- size_t capacity_;
-
- // Allocated space size in bytes.
- std::atomic<size_t> size_;
- // Allocated executable space size in bytes.
- std::atomic<size_t> size_executable_;
-
- // We keep the lowest and highest addresses allocated as a quick way
- // of determining that pointers are outside the heap. The estimate is
- // conservative, i.e. not all addresses in 'allocated' space are allocated
- // to our heap. The range is [lowest, highest[, inclusive on the low end
- // and exclusive on the high end.
- std::atomic<Address> lowest_ever_allocated_;
- std::atomic<Address> highest_ever_allocated_;
-
- VirtualMemory last_chunk_;
- Unmapper unmapper_;
-
- // Data structure to remember allocated executable memory chunks.
- std::unordered_set<MemoryChunk*> executable_memory_;
-
- friend class heap::TestCodePageAllocatorScope;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
-};
-
-extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
- size_t size, PagedSpace* owner, Executability executable);
-extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-
-extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void MemoryAllocator::
- Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
-extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void MemoryAllocator::
- Free<MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
-extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void MemoryAllocator::
- Free<MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
-extern template EXPORT_TEMPLATE_DECLARE(
- V8_EXPORT_PRIVATE) void MemoryAllocator::
- Free<MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
-
// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
@@ -1107,6 +350,7 @@ class PageIteratorImpl
};
using PageIterator = PageIteratorImpl<Page>;
+using ConstPageIterator = PageIteratorImpl<const Page>;
using LargePageIterator = PageIteratorImpl<LargePage>;
class PageRange {
@@ -1125,44 +369,6 @@ class PageRange {
};
// -----------------------------------------------------------------------------
-// Heap object iterator in new/old/map spaces.
-//
-// A PagedSpaceObjectIterator iterates objects from the bottom of the given
-// space to its top or from the bottom of the given page to its top.
-//
-// If objects are allocated in the page during iteration the iterator may
-// or may not iterate over those objects. The caller must create a new
-// iterator in order to be sure to visit these new objects.
-class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
- public:
- // Creates a new object iterator in a given space.
- PagedSpaceObjectIterator(Heap* heap, PagedSpace* space);
- PagedSpaceObjectIterator(Heap* heap, PagedSpace* space, Page* page);
-
- // Creates a new object iterator in a given off-thread space.
- explicit PagedSpaceObjectIterator(OffThreadSpace* space);
-
- // Advance to the next object, skipping free spaces and other fillers and
- // skipping the special garbage section of which there is one per space.
- // Returns nullptr when the iteration has ended.
- inline HeapObject Next() override;
-
- private:
- // Fast (inlined) path of next().
- inline HeapObject FromCurrentPage();
-
- // Slow path of next(), goes into the next page. Returns false if the
- // iteration has ended.
- bool AdvanceToNextPage();
-
- Address cur_addr_; // Current iteration point.
- Address cur_end_; // End iteration point.
- PagedSpace* space_;
- PageRange page_range_;
- PageRange::iterator current_page_;
-};
-
-// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.
@@ -1211,477 +417,6 @@ class LinearAllocationArea {
Address limit_;
};
-// An abstraction of the accounting statistics of a page-structured space.
-//
-// The stats are only set by functions that ensure they stay balanced. These
-// functions increase or decrease one of the non-capacity stats in conjunction
-// with capacity, or else they always balance increases and decreases to the
-// non-capacity stats.
-class AllocationStats {
- public:
- AllocationStats() { Clear(); }
-
- AllocationStats& operator=(const AllocationStats& stats) V8_NOEXCEPT {
- capacity_ = stats.capacity_.load();
- max_capacity_ = stats.max_capacity_;
- size_.store(stats.size_);
-#ifdef DEBUG
- allocated_on_page_ = stats.allocated_on_page_;
-#endif
- return *this;
- }
-
- // Zero out all the allocation statistics (i.e., no capacity).
- void Clear() {
- capacity_ = 0;
- max_capacity_ = 0;
- ClearSize();
- }
-
- void ClearSize() {
- size_ = 0;
-#ifdef DEBUG
- allocated_on_page_.clear();
-#endif
- }
-
- // Accessors for the allocation statistics.
- size_t Capacity() { return capacity_; }
- size_t MaxCapacity() { return max_capacity_; }
- size_t Size() { return size_; }
-#ifdef DEBUG
- size_t AllocatedOnPage(Page* page) { return allocated_on_page_[page]; }
-#endif
-
- void IncreaseAllocatedBytes(size_t bytes, Page* page) {
-#ifdef DEBUG
- size_t size = size_;
- DCHECK_GE(size + bytes, size);
-#endif
- size_.fetch_add(bytes);
-#ifdef DEBUG
- allocated_on_page_[page] += bytes;
-#endif
- }
-
- void DecreaseAllocatedBytes(size_t bytes, Page* page) {
- DCHECK_GE(size_, bytes);
- size_.fetch_sub(bytes);
-#ifdef DEBUG
- DCHECK_GE(allocated_on_page_[page], bytes);
- allocated_on_page_[page] -= bytes;
-#endif
- }
-
- void DecreaseCapacity(size_t bytes) {
- DCHECK_GE(capacity_, bytes);
- DCHECK_GE(capacity_ - bytes, size_);
- capacity_ -= bytes;
- }
-
- void IncreaseCapacity(size_t bytes) {
- DCHECK_GE(capacity_ + bytes, capacity_);
- capacity_ += bytes;
- if (capacity_ > max_capacity_) {
- max_capacity_ = capacity_;
- }
- }
-
- private:
- // |capacity_|: The number of object-area bytes (i.e., not including page
- // bookkeeping structures) currently in the space.
- // During evacuation capacity of the main spaces is accessed from multiple
- // threads to check the old generation hard limit.
- std::atomic<size_t> capacity_;
-
- // |max_capacity_|: The maximum capacity ever observed.
- size_t max_capacity_;
-
- // |size_|: The number of allocated bytes.
- std::atomic<size_t> size_;
-
-#ifdef DEBUG
- std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
-#endif
-};
-
-// The free list is organized in categories as follows:
-// kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
-// allocation, when categories >= small do not have entries anymore.
-// 11-31 words (tiny): The tiny blocks are only used for allocation, when
-// categories >= small do not have entries anymore.
-// 32-255 words (small): Used for allocating free space between 1-31 words in
-// size.
-// 256-2047 words (medium): Used for allocating free space between 32-255 words
-// in size.
- // 2048-16383 words (large): Used for allocating free space between 256-2047
-// words in size.
-// At least 16384 words (huge): This list is for objects of 2048 words or
-// larger. Empty pages are also added to this list.
-class V8_EXPORT_PRIVATE FreeListLegacy : public FreeList {
- public:
- size_t GuaranteedAllocatable(size_t maximum_freed) override {
- if (maximum_freed <= kTiniestListMax) {
- // Since we are not iterating over all list entries, we cannot guarantee
- // that we can find the maximum freed block in that free list.
- return 0;
- } else if (maximum_freed <= kTinyListMax) {
- return kTinyAllocationMax;
- } else if (maximum_freed <= kSmallListMax) {
- return kSmallAllocationMax;
- } else if (maximum_freed <= kMediumListMax) {
- return kMediumAllocationMax;
- } else if (maximum_freed <= kLargeListMax) {
- return kLargeAllocationMax;
- }
- return maximum_freed;
- }
-
- inline Page* GetPageForSize(size_t size_in_bytes) override;
-
- FreeListLegacy();
- ~FreeListLegacy() override;
-
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) override;
-
- private:
- enum { kTiniest, kTiny, kSmall, kMedium, kLarge, kHuge };
-
- static const size_t kMinBlockSize = 3 * kTaggedSize;
-
- // This is a conservative upper bound. The actual maximum block size takes
- // padding and alignment of data and code pages into account.
- static const size_t kMaxBlockSize = Page::kPageSize;
-
- static const size_t kTiniestListMax = 0xa * kTaggedSize;
- static const size_t kTinyListMax = 0x1f * kTaggedSize;
- static const size_t kSmallListMax = 0xff * kTaggedSize;
- static const size_t kMediumListMax = 0x7ff * kTaggedSize;
- static const size_t kLargeListMax = 0x1fff * kTaggedSize;
- static const size_t kTinyAllocationMax = kTiniestListMax;
- static const size_t kSmallAllocationMax = kTinyListMax;
- static const size_t kMediumAllocationMax = kSmallListMax;
- static const size_t kLargeAllocationMax = kMediumListMax;
-
- FreeListCategoryType SelectFreeListCategoryType(
- size_t size_in_bytes) override {
- if (size_in_bytes <= kTiniestListMax) {
- return kTiniest;
- } else if (size_in_bytes <= kTinyListMax) {
- return kTiny;
- } else if (size_in_bytes <= kSmallListMax) {
- return kSmall;
- } else if (size_in_bytes <= kMediumListMax) {
- return kMedium;
- } else if (size_in_bytes <= kLargeListMax) {
- return kLarge;
- }
- return kHuge;
- }
-
- // Returns the category to be used to allocate |size_in_bytes| in the fast
- // path. The tiny categories are not used for fast allocation.
- FreeListCategoryType SelectFastAllocationFreeListCategoryType(
- size_t size_in_bytes) {
- if (size_in_bytes <= kSmallAllocationMax) {
- return kSmall;
- } else if (size_in_bytes <= kMediumAllocationMax) {
- return kMedium;
- } else if (size_in_bytes <= kLargeAllocationMax) {
- return kLarge;
- }
- return kHuge;
- }
-
- friend class FreeListCategory;
- friend class heap::HeapTester;
-};
-
-// Inspired by FreeListLegacy.
-// Only has 3 categories: Medium, Large and Huge.
-// Any block that would have belonged to tiniest, tiny or small in
-// FreeListLegacy is considered wasted.
-// Allocation is done only in Huge, Medium and Large (in that order),
-// using a first-fit strategy (only the first block of each freelist is ever
-// considered though). Performance is expected to be better than
-// FreeListLegacy, but memory usage should be higher (because fragmentation
-// will probably be higher).
-class V8_EXPORT_PRIVATE FreeListFastAlloc : public FreeList {
- public:
- size_t GuaranteedAllocatable(size_t maximum_freed) override {
- if (maximum_freed <= kMediumListMax) {
- // Since we are not iterating over all list entries, we cannot guarantee
- // that we can find the maximum freed block in that free list.
- return 0;
- } else if (maximum_freed <= kLargeListMax) {
- return kLargeAllocationMax;
- }
- return kHugeAllocationMax;
- }
-
- inline Page* GetPageForSize(size_t size_in_bytes) override;
-
- FreeListFastAlloc();
- ~FreeListFastAlloc() override;
-
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) override;
-
- private:
- enum { kMedium, kLarge, kHuge };
-
- static const size_t kMinBlockSize = 0xff * kTaggedSize;
-
- // This is a conservative upper bound. The actual maximum block size takes
- // padding and alignment of data and code pages into account.
- static const size_t kMaxBlockSize = Page::kPageSize;
-
- static const size_t kMediumListMax = 0x7ff * kTaggedSize;
- static const size_t kLargeListMax = 0x1fff * kTaggedSize;
- static const size_t kMediumAllocationMax = kMinBlockSize;
- static const size_t kLargeAllocationMax = kMediumListMax;
- static const size_t kHugeAllocationMax = kLargeListMax;
-
- // Returns the category used to hold an object of size |size_in_bytes|.
- FreeListCategoryType SelectFreeListCategoryType(
- size_t size_in_bytes) override {
- if (size_in_bytes <= kMediumListMax) {
- return kMedium;
- } else if (size_in_bytes <= kLargeListMax) {
- return kLarge;
- }
- return kHuge;
- }
-};
-
-// Uses 24 freelists: one per 16 bytes between 24 and 256, and then a few more
-// for larger sizes. See the variable |categories_min| for the size of each
-// freelist. Allocation is done using a best-fit strategy (considering only the
-// first element of each category though).
-// Performance is expected to be worse than FreeListLegacy, but memory
-// consumption should be lower (since fragmentation should be lower).
-class V8_EXPORT_PRIVATE FreeListMany : public FreeList {
- public:
- size_t GuaranteedAllocatable(size_t maximum_freed) override;
-
- Page* GetPageForSize(size_t size_in_bytes) override;
-
- FreeListMany();
- ~FreeListMany() override;
-
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) override;
-
- protected:
- static const size_t kMinBlockSize = 3 * kTaggedSize;
-
- // This is a conservative upper bound. The actual maximum block size takes
- // padding and alignment of data and code pages into account.
- static const size_t kMaxBlockSize = Page::kPageSize;
- // Largest size for which categories are still precise, and for which we can
- // therefore compute the category in constant time.
- static const size_t kPreciseCategoryMaxSize = 256;
-
- // Categories boundaries generated with:
- // perl -E '
- // @cat = (24, map {$_*16} 2..16, 48, 64);
- // while ($cat[-1] <= 32768) {
- // push @cat, $cat[-1]*2
- // }
- // say join ", ", @cat;
- // say "\n", scalar @cat'
- static const int kNumberOfCategories = 24;
- static constexpr unsigned int categories_min[kNumberOfCategories] = {
- 24, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192,
- 208, 224, 240, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};
-
- // Return the smallest category that could hold |size_in_bytes| bytes.
- FreeListCategoryType SelectFreeListCategoryType(
- size_t size_in_bytes) override {
- if (size_in_bytes <= kPreciseCategoryMaxSize) {
- if (size_in_bytes < categories_min[1]) return 0;
- return static_cast<FreeListCategoryType>(size_in_bytes >> 4) - 1;
- }
- for (int cat = (kPreciseCategoryMaxSize >> 4) - 1; cat < last_category_;
- cat++) {
- if (size_in_bytes < categories_min[cat + 1]) {
- return cat;
- }
- }
- return last_category_;
- }
-
- FRIEND_TEST(SpacesTest, FreeListManySelectFreeListCategoryType);
- FRIEND_TEST(SpacesTest, FreeListManyGuaranteedAllocatable);
-};
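The constant-time category selection for precise sizes can be checked with a small standalone sketch. The boundary table below is copied from |categories_min| above; everything else (the function and constant names) is hypothetical:

#include <cassert>
#include <cstddef>

// Same boundaries as FreeListMany::categories_min above; illustration only.
static const unsigned int kCategoriesMin[24] = {
    24,  32,  48,  64,  80,  96,  112,  128,  144,  160,   176,   192,
    208, 224, 240, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};

// Mirrors SelectFreeListCategoryType: constant time for sizes <= 256 bytes
// via (size >> 4) - 1, linear scan over the few coarser categories otherwise.
int SelectCategory(size_t size_in_bytes) {
  const size_t kPreciseMax = 256;
  const int kLast = 23;
  if (size_in_bytes <= kPreciseMax) {
    if (size_in_bytes < kCategoriesMin[1]) return 0;
    return static_cast<int>(size_in_bytes >> 4) - 1;
  }
  for (int cat = static_cast<int>(kPreciseMax >> 4) - 1; cat < kLast; cat++) {
    if (size_in_bytes < kCategoriesMin[cat + 1]) return cat;
  }
  return kLast;
}

int main() {
  assert(SelectCategory(24) == 0);    // Tiniest precise category (24-31).
  assert(SelectCategory(100) == 5);   // 96-111 bytes map to index 5.
  assert(SelectCategory(256) == 15);  // Last precise category.
  assert(SelectCategory(300) == 15);  // Falls into the 256-511 bucket.
  assert(SelectCategory(70000) == 23);
  return 0;
}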
-
-// Same as FreeListMany but uses a cache to know which categories are empty.
-// The cache (|next_nonempty_category|) is maintained such that, for each
-// category c, next_nonempty_category[c] contains the first non-empty category
-// greater than or equal to c that may hold an object of size c.
-// Allocation is done using the same strategy as FreeListMany (i.e., best fit).
-class V8_EXPORT_PRIVATE FreeListManyCached : public FreeListMany {
- public:
- FreeListManyCached();
-
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) override;
-
- size_t Free(Address start, size_t size_in_bytes, FreeMode mode) override;
-
- void Reset() override;
-
- bool AddCategory(FreeListCategory* category) override;
- void RemoveCategory(FreeListCategory* category) override;
-
- protected:
- // Updates the cache after adding something in the category |cat|.
- void UpdateCacheAfterAddition(FreeListCategoryType cat) {
- for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] > cat;
- i--) {
- next_nonempty_category[i] = cat;
- }
- }
-
- // Updates the cache after emptying category |cat|.
- void UpdateCacheAfterRemoval(FreeListCategoryType cat) {
- for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] == cat;
- i--) {
- next_nonempty_category[i] = next_nonempty_category[cat + 1];
- }
- }
-
-#ifdef DEBUG
- void CheckCacheIntegrity() {
- for (int i = 0; i <= last_category_; i++) {
- DCHECK(next_nonempty_category[i] == last_category_ + 1 ||
- categories_[next_nonempty_category[i]] != nullptr);
- for (int j = i; j < next_nonempty_category[i]; j++) {
- DCHECK(categories_[j] == nullptr);
- }
- }
- }
-#endif
-
- // The cache is overallocated by one so that the last element is always
- // defined, and when updating the cache, we can always use cache[i+1] as long
- // as i is < kNumberOfCategories.
- int next_nonempty_category[kNumberOfCategories + 1];
-
- private:
- void ResetCache() {
- for (int i = 0; i < kNumberOfCategories; i++) {
- next_nonempty_category[i] = kNumberOfCategories;
- }
- // Setting the after-last element as well, as explained in the cache's
- // declaration.
- next_nonempty_category[kNumberOfCategories] = kNumberOfCategories;
- }
-};
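The invariant on |next_nonempty_category| (each slot points at the first non-empty category at or above it) and the two update loops above can be exercised in isolation. A minimal sketch with a toy five-category cache; the names and the particular add/remove sequence are illustrative:

    #include <cstdio>

    const int kNumCategories = 5;
    // Overallocated by one so that slot [cat + 1] is always defined.
    int next_nonempty[kNumCategories + 1];

    void Reset() {
      for (int i = 0; i <= kNumCategories; i++) next_nonempty[i] = kNumCategories;
    }

    // Mirrors UpdateCacheAfterAddition: category |cat| just became non-empty.
    void AfterAddition(int cat) {
      for (int i = cat; i >= 0 && next_nonempty[i] > cat; i--)
        next_nonempty[i] = cat;
    }

    // Mirrors UpdateCacheAfterRemoval: category |cat| just became empty.
    void AfterRemoval(int cat) {
      for (int i = cat; i >= 0 && next_nonempty[i] == cat; i--)
        next_nonempty[i] = next_nonempty[cat + 1];
    }

    void Dump(const char* when) {
      std::printf("%-14s:", when);
      for (int i = 0; i < kNumCategories; i++) std::printf(" %d", next_nonempty[i]);
      std::printf("\n");
    }

    int main() {
      Reset();           Dump("reset");         // 5 5 5 5 5 (all empty)
      AfterAddition(3);  Dump("add cat 3");     // 3 3 3 3 5
      AfterAddition(1);  Dump("add cat 1");     // 1 1 3 3 5
      AfterRemoval(1);   Dump("remove cat 1");  // 3 3 3 3 5
      AfterRemoval(3);   Dump("remove cat 3");  // 5 5 5 5 5
      return 0;
    }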
-
-// Same as FreeListManyCached but uses a fast path.
-// The fast path overallocates by at least 1.85k bytes. The idea of this 1.85k
-// is: we want the fast path to always overallocate, even for larger
-// categories. Therefore, we have two choices: either overallocate by
-// "size_in_bytes * something" or overallocate by "size_in_bytes +
-// something". We choose the later, as the former will tend to overallocate too
-// much for larger objects. The 1.85k (= 2048 - 128) has been chosen such that
-// for tiny objects (size <= 128 bytes), the first category considered is the
-// 36th (which holds objects of 2k to 3k), while for larger objects, the first
-// category considered will be one that guarantees a 1.85k+ bytes
-// overallocation. Using 2k rather than 1.85k would have resulted in either a
-// more complex logic for SelectFastAllocationFreeListCategoryType, or the 36th
-// category (2k to 3k) not being used; both of which are undesirable.
-// A secondary fast path is used for tiny objects (size <= 128), in order to
-// consider categories from 256 to 2048 bytes for them.
-// Note that this class uses a precise GetPageForSize (inherited from
-// FreeListMany), which makes its fast path less fast in the Scavenger. This is
-// done on purpose, since this class's only purpose is to be used by
-// FreeListManyCachedOrigin, which is precise for the scavenger.
-class V8_EXPORT_PRIVATE FreeListManyCachedFastPath : public FreeListManyCached {
- public:
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) override;
-
- protected:
- // Objects in the 18th category are at least 2048 bytes
- static const FreeListCategoryType kFastPathFirstCategory = 18;
- static const size_t kFastPathStart = 2048;
- static const size_t kTinyObjectMaxSize = 128;
- static const size_t kFastPathOffset = kFastPathStart - kTinyObjectMaxSize;
- // Objects in the 15th category are at least 256 bytes
- static const FreeListCategoryType kFastPathFallBackTiny = 15;
-
- STATIC_ASSERT(categories_min[kFastPathFirstCategory] == kFastPathStart);
- STATIC_ASSERT(categories_min[kFastPathFallBackTiny] ==
- kTinyObjectMaxSize * 2);
-
- FreeListCategoryType SelectFastAllocationFreeListCategoryType(
- size_t size_in_bytes) {
- DCHECK(size_in_bytes < kMaxBlockSize);
-
- if (size_in_bytes >= categories_min[last_category_]) return last_category_;
-
- size_in_bytes += kFastPathOffset;
- for (int cat = kFastPathFirstCategory; cat < last_category_; cat++) {
- if (size_in_bytes <= categories_min[cat]) {
- return cat;
- }
- }
- return last_category_;
- }
-
- FRIEND_TEST(
- SpacesTest,
- FreeListManyCachedFastPathSelectFastAllocationFreeListCategoryType);
-};
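How the +1920-byte offset (kFastPathOffset) maps a request onto a category that guarantees overallocation is easiest to see with concrete numbers. The sketch below mirrors SelectFastAllocationFreeListCategoryType only; the secondary tiny-object fallback and the cache lookups in Allocate are not modeled, and the helper name and sample sizes are illustrative:

    #include <cstddef>
    #include <cstdio>

    // Same boundary table as FreeListMany above; only the tail from index 18
    // onwards is ever reached by the fast path.
    static const unsigned categories_min[24] = {
        24,  32,  48,  64,  80,  96,   112,  128,  144,  160,   176,   192,
        208, 224, 240, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};

    static int SelectFastCategory(size_t size_in_bytes) {
      const int kLast = 23;
      if (size_in_bytes >= categories_min[kLast]) return kLast;
      size_in_bytes += 2048 - 128;              // kFastPathOffset (1920 bytes)
      for (int cat = 18; cat < kLast; cat++) {  // kFastPathFirstCategory
        if (size_in_bytes <= categories_min[cat]) return cat;
      }
      return kLast;
    }

    int main() {
      // 64   -> 18 (blocks of 2048+ bytes, >= 1984 bytes overallocation)
      // 500  -> 19 (blocks of 4096+ bytes, >= 3596 bytes overallocation)
      // 3000 -> 20 (blocks of 8192+ bytes, >= 5192 bytes overallocation)
      const size_t samples[] = {64, 500, 3000};
      for (size_t s : samples)
        std::printf("%zu -> category %d (blocks of at least %u bytes)\n", s,
                    SelectFastCategory(s), categories_min[SelectFastCategory(s)]);
      return 0;
    }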
-
-// Uses FreeListManyCached if in the GC; FreeListManyCachedFastPath otherwise.
-// The reasoning behind this FreeList is the following: the GC runs in
-// parallel, and therefore, more expensive allocations there are less
-// noticeable. On the other hand, the generated code and runtime need to be
-// very fast. Therefore, the strategy for the former is one that is not very
-// efficient, but reduces fragmentation (FreeListManyCached), while the
-// strategy for the latter is one that is very efficient, but introduces some
-// fragmentation (FreeListManyCachedFastPath).
-class V8_EXPORT_PRIVATE FreeListManyCachedOrigin
- : public FreeListManyCachedFastPath {
- public:
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) override;
-};
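The override itself lives in spaces.cc and is not part of this hunk; based on the comment above, the dispatch amounts to something like the sketch below. This is a sketch only, assuming an AllocationOrigin::kGC enumerator; the real definition may differ in detail:

    // Sketch, not the actual definition from spaces.cc.
    FreeSpace FreeListManyCachedOrigin::Allocate(size_t size_in_bytes,
                                                 size_t* node_size,
                                                 AllocationOrigin origin) {
      if (origin == AllocationOrigin::kGC) {
        // GC work runs in parallel: favor low fragmentation over raw speed.
        return FreeListManyCached::Allocate(size_in_bytes, node_size, origin);
      }
      // Runtime and generated code: favor speed, accept some fragmentation.
      return FreeListManyCachedFastPath::Allocate(size_in_bytes, node_size,
                                                  origin);
    }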
-
-// FreeList for maps: since maps are all the same size, uses a single freelist.
-class V8_EXPORT_PRIVATE FreeListMap : public FreeList {
- public:
- size_t GuaranteedAllocatable(size_t maximum_freed) override;
-
- Page* GetPageForSize(size_t size_in_bytes) override;
-
- FreeListMap();
- ~FreeListMap() override;
-
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size,
- AllocationOrigin origin) override;
-
- private:
- static const size_t kMinBlockSize = Map::kSize;
- static const size_t kMaxBlockSize = Page::kPageSize;
- static const FreeListCategoryType kOnlyCategory = 0;
-
- FreeListCategoryType SelectFreeListCategoryType(
- size_t size_in_bytes) override {
- return kOnlyCategory;
- }
-};
// LocalAllocationBuffer represents a linear allocation area that is created
// from a given {AllocationResult} and can be used to allocate memory without
@@ -1740,6 +475,9 @@ class LocalAllocationBuffer {
V8_EXPORT_PRIVATE LinearAllocationArea CloseAndMakeIterable();
void MakeIterable();
+ Address top() const { return allocation_info_.top(); }
+ Address limit() const { return allocation_info_.limit(); }
+
private:
V8_EXPORT_PRIVATE LocalAllocationBuffer(
Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT;
@@ -1811,794 +549,6 @@ class SpaceWithLinearArea : public Space {
AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
};
-class V8_EXPORT_PRIVATE PagedSpace
- : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
- public:
- using iterator = PageIterator;
-
- static const size_t kCompactionMemoryWanted = 500 * KB;
-
- // Creates a space with an id.
- PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
- FreeList* free_list,
- LocalSpaceKind local_space_kind = LocalSpaceKind::kNone);
-
- ~PagedSpace() override { TearDown(); }
-
- // Checks whether an object/address is in this space.
- inline bool Contains(Address a);
- inline bool Contains(Object o);
- bool ContainsSlow(Address addr);
-
- // Does the space need executable memory?
- Executability executable() { return executable_; }
-
- // Prepares for a mark-compact GC.
- void PrepareForMarkCompact();
-
- // Current capacity without growing (Size() + Available()).
- size_t Capacity() { return accounting_stats_.Capacity(); }
-
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory() override;
-
- // Sets the capacity, the available space and the wasted space to zero.
- // The stats are rebuilt during sweeping by adding each page to the
- // capacity and the size when it is encountered. As free spaces are
- // discovered during the sweeping they are subtracted from the size and added
- // to the available and wasted totals. The free list is cleared as well.
- void ClearAllocatorState() {
- accounting_stats_.ClearSize();
- free_list_->Reset();
- }
-
- // Available bytes without growing. These are the bytes on the free list.
- // The bytes in the linear allocation area are not included in this total
- // because updating the stats would slow down allocation. New pages are
- // immediately added to the free list so they show up here.
- size_t Available() override { return free_list_->Available(); }
-
- // Allocated bytes in this space. Garbage bytes that were not found due to
- // concurrent sweeping are counted as being allocated! The bytes in the
- // current linear allocation area (between top and limit) are also counted
- // here.
- size_t Size() override { return accounting_stats_.Size(); }
-
- // As size, but the bytes in lazily swept pages are estimated and the bytes
- // in the current linear allocation area are not included.
- size_t SizeOfObjects() override;
-
- // Wasted bytes in this space. These are just the bytes that were thrown away
- // due to being too small to use for allocation.
- virtual size_t Waste() { return free_list_->wasted_bytes(); }
-
- // Allocate the requested number of bytes in the space if possible, return a
- // failure object if not.
- V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
- int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- // Allocate the requested number of bytes in the space double aligned if
- // possible, return a failure object if not.
- V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
- int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- // Allocate the requested number of bytes in the space and consider allocation
- // alignment if needed.
- V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
- int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- // Allocate the requested number of bytes in the space from a background
- // thread.
- V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
- SlowGetLinearAllocationAreaBackground(LocalHeap* local_heap,
- size_t min_size_in_bytes,
- size_t max_size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin);
-
- size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
- if (size_in_bytes == 0) return 0;
- heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
- ClearRecordedSlots::kNo);
- if (mode == SpaceAccountingMode::kSpaceAccounted) {
- return AccountedFree(start, size_in_bytes);
- } else {
- return UnaccountedFree(start, size_in_bytes);
- }
- }
-
-  // Give a block of memory to the space's free list. It might be added to
-  // the free list or accounted as waste.
-  // AccountedFree() additionally updates the space's accounting stats, while
-  // UnaccountedFree() leaves them untouched.
- size_t AccountedFree(Address start, size_t size_in_bytes) {
- size_t wasted = free_list_->Free(start, size_in_bytes, kLinkCategory);
- Page* page = Page::FromAddress(start);
- accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
- DCHECK_GE(size_in_bytes, wasted);
- return size_in_bytes - wasted;
- }
-
- size_t UnaccountedFree(Address start, size_t size_in_bytes) {
- size_t wasted = free_list_->Free(start, size_in_bytes, kDoNotLinkCategory);
- DCHECK_GE(size_in_bytes, wasted);
- return size_in_bytes - wasted;
- }
-
- inline bool TryFreeLast(HeapObject object, int object_size);
-
- void ResetFreeList();
-
- // Empty space linear allocation area, returning unused area to free list.
- void FreeLinearAllocationArea();
-
- void MarkLinearAllocationAreaBlack();
- void UnmarkLinearAllocationArea();
-
- void DecreaseAllocatedBytes(size_t bytes, Page* page) {
- accounting_stats_.DecreaseAllocatedBytes(bytes, page);
- }
- void IncreaseAllocatedBytes(size_t bytes, Page* page) {
- accounting_stats_.IncreaseAllocatedBytes(bytes, page);
- }
- void DecreaseCapacity(size_t bytes) {
- accounting_stats_.DecreaseCapacity(bytes);
- }
- void IncreaseCapacity(size_t bytes) {
- accounting_stats_.IncreaseCapacity(bytes);
- }
-
- void RefineAllocatedBytesAfterSweeping(Page* page);
-
- Page* InitializePage(MemoryChunk* chunk);
-
- void ReleasePage(Page* page);
-
- // Adds the page to this space and returns the number of bytes added to the
- // free list of the space.
- size_t AddPage(Page* page);
- void RemovePage(Page* page);
- // Remove a page if it has at least |size_in_bytes| bytes available that can
- // be used for allocation.
- Page* RemovePageSafe(int size_in_bytes);
-
- void SetReadable();
- void SetReadAndExecutable();
- void SetReadAndWritable();
-
- void SetDefaultCodePermissions() {
- if (FLAG_jitless) {
- SetReadable();
- } else {
- SetReadAndExecutable();
- }
- }
-
-#ifdef VERIFY_HEAP
- // Verify integrity of this space.
- virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);
-
- void VerifyLiveBytes();
-
- // Overridden by subclasses to verify space-specific object
- // properties (e.g., only maps or free-list nodes are in map space).
- virtual void VerifyObject(HeapObject obj) {}
-#endif
-
-#ifdef DEBUG
- void VerifyCountersAfterSweeping(Heap* heap);
- void VerifyCountersBeforeConcurrentSweeping();
- // Print meta info and objects in this space.
- void Print() override;
-
- // Report code object related statistics
- static void ReportCodeStatistics(Isolate* isolate);
- static void ResetCodeStatistics(Isolate* isolate);
-#endif
-
- bool CanExpand(size_t size);
-
- // Returns the number of total pages in this space.
- int CountTotalPages();
-
- // Return size of allocatable area on a page in this space.
- inline int AreaSize() { return static_cast<int>(area_size_); }
-
- bool is_local_space() { return local_space_kind_ != LocalSpaceKind::kNone; }
-
- bool is_off_thread_space() {
- return local_space_kind_ == LocalSpaceKind::kOffThreadSpace;
- }
-
- bool is_compaction_space() {
- return base::IsInRange(local_space_kind_,
- LocalSpaceKind::kFirstCompactionSpace,
- LocalSpaceKind::kLastCompactionSpace);
- }
-
- LocalSpaceKind local_space_kind() { return local_space_kind_; }
-
- // Merges {other} into the current space. Note that this modifies {other},
- // e.g., removes its bump pointer area and resets statistics.
- void MergeLocalSpace(LocalSpace* other);
-
- // Refills the free list from the corresponding free list filled by the
- // sweeper.
- virtual void RefillFreeList();
-
- base::Mutex* mutex() { return &space_mutex_; }
-
- inline void UnlinkFreeListCategories(Page* page);
- inline size_t RelinkFreeListCategories(Page* page);
-
- Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
-
- iterator begin() { return iterator(first_page()); }
- iterator end() { return iterator(nullptr); }
-
- // Shrink immortal immovable pages of the space to be exactly the size needed
- // using the high water mark.
- void ShrinkImmortalImmovablePages();
-
- size_t ShrinkPageToHighWaterMark(Page* page);
-
- std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
-
- void SetLinearAllocationArea(Address top, Address limit);
-
- private:
- // Set space linear allocation area.
- void SetTopAndLimit(Address top, Address limit) {
- DCHECK(top == limit ||
- Page::FromAddress(top) == Page::FromAddress(limit - 1));
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.Reset(top, limit);
- }
- void DecreaseLimit(Address new_limit);
- void UpdateInlineAllocationLimit(size_t min_size) override;
- bool SupportsInlineAllocation() override {
- return identity() == OLD_SPACE && !is_local_space();
- }
-
- protected:
- // PagedSpaces that should be included in snapshots have different, i.e.,
- // smaller, initial pages.
- virtual bool snapshotable() { return true; }
-
- bool HasPages() { return first_page() != nullptr; }
-
- // Cleans up the space, frees all pages in this space except those belonging
- // to the initial chunk, uncommits addresses in the initial chunk.
- void TearDown();
-
- // Expands the space by allocating a fixed number of pages. Returns false if
-  // it cannot allocate the requested number of pages from the OS, or if the
-  // hard heap size limit has been hit.
- bool Expand();
-
- // Sets up a linear allocation area that fits the given number of bytes.
- // Returns false if there is not enough space and the caller has to retry
- // after collecting garbage.
- inline bool EnsureLinearAllocationArea(int size_in_bytes,
- AllocationOrigin origin);
- // Allocates an object from the linear allocation area. Assumes that the
-  // linear allocation area is large enough to fit the object.
- inline HeapObject AllocateLinearly(int size_in_bytes);
- // Tries to allocate an aligned object from the linear allocation area.
- // Returns nullptr if the linear allocation area does not fit the object.
- // Otherwise, returns the object pointer and writes the allocation size
-  // (object size + alignment filler size) to |size_in_bytes|.
- inline HeapObject TryAllocateLinearlyAligned(int* size_in_bytes,
- AllocationAlignment alignment);
-
- V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
- size_t size_in_bytes, AllocationOrigin origin);
-
- // If sweeping is still in progress try to sweep unswept pages. If that is
- // not successful, wait for the sweeper threads and retry free-list
- // allocation. Returns false if there is not enough space and the caller
- // has to retry after collecting garbage.
- V8_WARN_UNUSED_RESULT bool EnsureSweptAndRetryAllocation(
- int size_in_bytes, AllocationOrigin origin);
-
- V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(int required_freed_bytes,
- int max_pages,
- int size_in_bytes,
- AllocationOrigin origin);
-
- // Slow path of AllocateRaw. This function is space-dependent. Returns false
- // if there is not enough space and the caller has to retry after
- // collecting garbage.
- V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
- int size_in_bytes, AllocationOrigin origin);
-
- // Implementation of SlowAllocateRaw. Returns false if there is not enough
- // space and the caller has to retry after collecting garbage.
- V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
- int size_in_bytes, AllocationOrigin origin);
-
- V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
- TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
- size_t max_size_in_bytes,
- AllocationAlignment alignment,
- AllocationOrigin origin);
-
- Executability executable_;
-
- LocalSpaceKind local_space_kind_;
-
- size_t area_size_;
-
- // Accounting information for this space.
- AllocationStats accounting_stats_;
-
- // Mutex guarding any concurrent access to the space.
- base::Mutex space_mutex_;
-
- // Mutex guarding concurrent allocation.
- base::Mutex allocation_mutex_;
-
- friend class IncrementalMarking;
- friend class MarkCompactCollector;
-
- // Used in cctest.
- friend class heap::HeapTester;
-};
-
-enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
-
-// -----------------------------------------------------------------------------
-// SemiSpace in young generation
-//
-// A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
-// The mark-compact collector uses the memory of the first page in the from
-// space as a marking stack when tracing live objects.
-class SemiSpace : public Space {
- public:
- using iterator = PageIterator;
-
- static void Swap(SemiSpace* from, SemiSpace* to);
-
- SemiSpace(Heap* heap, SemiSpaceId semispace)
- : Space(heap, NEW_SPACE, new NoFreeList()),
- current_capacity_(0),
- maximum_capacity_(0),
- minimum_capacity_(0),
- age_mark_(kNullAddress),
- committed_(false),
- id_(semispace),
- current_page_(nullptr),
- pages_used_(0) {}
-
- inline bool Contains(HeapObject o);
- inline bool Contains(Object o);
- inline bool ContainsSlow(Address a);
-
- void SetUp(size_t initial_capacity, size_t maximum_capacity);
- void TearDown();
-
- bool Commit();
- bool Uncommit();
- bool is_committed() { return committed_; }
-
- // Grow the semispace to the new capacity. The new capacity requested must
- // be larger than the current capacity and less than the maximum capacity.
- bool GrowTo(size_t new_capacity);
-
- // Shrinks the semispace to the new capacity. The new capacity requested
- // must be more than the amount of used memory in the semispace and less
- // than the current capacity.
- bool ShrinkTo(size_t new_capacity);
-
- bool EnsureCurrentCapacity();
-
- Address space_end() { return memory_chunk_list_.back()->area_end(); }
-
- // Returns the start address of the first page of the space.
- Address space_start() {
- DCHECK_NE(memory_chunk_list_.front(), nullptr);
- return memory_chunk_list_.front()->area_start();
- }
-
- Page* current_page() { return current_page_; }
- int pages_used() { return pages_used_; }
-
- // Returns the start address of the current page of the space.
- Address page_low() { return current_page_->area_start(); }
-
- // Returns one past the end address of the current page of the space.
- Address page_high() { return current_page_->area_end(); }
-
- bool AdvancePage() {
- Page* next_page = current_page_->next_page();
-    // We cannot expand if we reached the maximum number of pages already. Note
-    // that this check already has to account for the next page, as we could
-    // potentially fill the whole page after advancing.
- const bool reached_max_pages = (pages_used_ + 1) == max_pages();
- if (next_page == nullptr || reached_max_pages) {
- return false;
- }
- current_page_ = next_page;
- pages_used_++;
- return true;
- }
-
- // Resets the space to using the first page.
- void Reset();
-
- void RemovePage(Page* page);
- void PrependPage(Page* page);
-
- Page* InitializePage(MemoryChunk* chunk);
-
- // Age mark accessors.
- Address age_mark() { return age_mark_; }
- void set_age_mark(Address mark);
-
- // Returns the current capacity of the semispace.
- size_t current_capacity() { return current_capacity_; }
-
- // Returns the maximum capacity of the semispace.
- size_t maximum_capacity() { return maximum_capacity_; }
-
- // Returns the initial capacity of the semispace.
- size_t minimum_capacity() { return minimum_capacity_; }
-
- SemiSpaceId id() { return id_; }
-
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory() override;
-
- // If we don't have these here then SemiSpace will be abstract. However
- // they should never be called:
-
- size_t Size() override { UNREACHABLE(); }
-
- size_t SizeOfObjects() override { return Size(); }
-
- size_t Available() override { UNREACHABLE(); }
-
- Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
- Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
-
- iterator begin() { return iterator(first_page()); }
- iterator end() { return iterator(nullptr); }
-
- std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
-
-#ifdef DEBUG
- V8_EXPORT_PRIVATE void Print() override;
-  // Validate a range of addresses in a SemiSpace.
- // The "from" address must be on a page prior to the "to" address,
- // in the linked page order, or it must be earlier on the same page.
- static void AssertValidRange(Address from, Address to);
-#else
- // Do nothing.
- inline static void AssertValidRange(Address from, Address to) {}
-#endif
-
-#ifdef VERIFY_HEAP
- virtual void Verify();
-#endif
-
- private:
- void RewindPages(int num_pages);
-
- inline int max_pages() {
- return static_cast<int>(current_capacity_ / Page::kPageSize);
- }
-
- // Copies the flags into the masked positions on all pages in the space.
- void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
-
- // The currently committed space capacity.
- size_t current_capacity_;
-
- // The maximum capacity that can be used by this space. A space cannot grow
- // beyond that size.
- size_t maximum_capacity_;
-
- // The minimum capacity for the space. A space cannot shrink below this size.
- size_t minimum_capacity_;
-
- // Used to govern object promotion during mark-compact collection.
- Address age_mark_;
-
- bool committed_;
- SemiSpaceId id_;
-
- Page* current_page_;
-
- int pages_used_;
-
- friend class NewSpace;
- friend class SemiSpaceObjectIterator;
-};
-
-// A SemiSpaceObjectIterator is an ObjectIterator that iterates over the active
-// semispace of the heap's new space. It iterates over the objects in the
-// semispace from a given start address (defaulting to the bottom of the
-// semispace) to the top of the semispace. New objects allocated after the
-// iterator is created are not iterated.
-class SemiSpaceObjectIterator : public ObjectIterator {
- public:
- // Create an iterator over the allocated objects in the given to-space.
- explicit SemiSpaceObjectIterator(NewSpace* space);
-
- inline HeapObject Next() override;
-
- private:
- void Initialize(Address start, Address end);
-
- // The current iteration point.
- Address current_;
- // The end of iteration.
- Address limit_;
-};
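A typical (hypothetical) call site, following the usual V8 object-iterator pattern in which Next() yields a null HeapObject once the current top of to-space is reached; ProcessObject and the surrounding |heap| pointer are placeholders:

    // Assumes |heap| is a Heap* with an initialized new space.
    SemiSpaceObjectIterator it(heap->new_space());
    for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
      // Objects allocated after the iterator was created are not visited.
      ProcessObject(obj);  // placeholder for caller-specific work
    }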
-
-// -----------------------------------------------------------------------------
-// The young generation space.
-//
-// The new space consists of a contiguous pair of semispaces. It simply
-// forwards most functions to the appropriate semispace.
-
-class V8_EXPORT_PRIVATE NewSpace
- : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
- public:
- using iterator = PageIterator;
-
- NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
- size_t initial_semispace_capacity, size_t max_semispace_capacity);
-
- ~NewSpace() override { TearDown(); }
-
- inline bool ContainsSlow(Address a);
- inline bool Contains(Object o);
- inline bool Contains(HeapObject o);
-
- // Tears down the space. Heap memory was not allocated by the space, so it
- // is not deallocated here.
- void TearDown();
-
- // Flip the pair of spaces.
- void Flip();
-
- // Grow the capacity of the semispaces. Assumes that they are not at
- // their maximum capacity.
- void Grow();
-
- // Shrink the capacity of the semispaces.
- void Shrink();
-
- // Return the allocated bytes in the active semispace.
- size_t Size() final {
- DCHECK_GE(top(), to_space_.page_low());
- return to_space_.pages_used() *
- MemoryChunkLayout::AllocatableMemoryInDataPage() +
- static_cast<size_t>(top() - to_space_.page_low());
- }
-
- size_t SizeOfObjects() final { return Size(); }
-
- // Return the allocatable capacity of a semispace.
- size_t Capacity() {
- SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
- return (to_space_.current_capacity() / Page::kPageSize) *
- MemoryChunkLayout::AllocatableMemoryInDataPage();
- }
-
- // Return the current size of a semispace, allocatable and non-allocatable
- // memory.
- size_t TotalCapacity() {
- DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
- return to_space_.current_capacity();
- }
-
- // Committed memory for NewSpace is the committed memory of both semi-spaces
- // combined.
- size_t CommittedMemory() final {
- return from_space_.CommittedMemory() + to_space_.CommittedMemory();
- }
-
- size_t MaximumCommittedMemory() final {
- return from_space_.MaximumCommittedMemory() +
- to_space_.MaximumCommittedMemory();
- }
-
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory() final;
-
- // Return the available bytes without growing.
- size_t Available() final {
- DCHECK_GE(Capacity(), Size());
- return Capacity() - Size();
- }
-
- size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
- if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
- type == ExternalBackingStoreType::kArrayBuffer)
- return heap()->YoungArrayBufferBytes();
- DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
- return to_space_.ExternalBackingStoreBytes(type);
- }
-
- size_t ExternalBackingStoreBytes() {
- size_t result = 0;
- for (int i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
- result +=
- ExternalBackingStoreBytes(static_cast<ExternalBackingStoreType>(i));
- }
- return result;
- }
-
- size_t AllocatedSinceLastGC() {
- const Address age_mark = to_space_.age_mark();
- DCHECK_NE(age_mark, kNullAddress);
- DCHECK_NE(top(), kNullAddress);
- Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
- Page* const last_page = Page::FromAllocationAreaAddress(top());
- Page* current_page = age_mark_page;
- size_t allocated = 0;
- if (current_page != last_page) {
- DCHECK_EQ(current_page, age_mark_page);
- DCHECK_GE(age_mark_page->area_end(), age_mark);
- allocated += age_mark_page->area_end() - age_mark;
- current_page = current_page->next_page();
- } else {
- DCHECK_GE(top(), age_mark);
- return top() - age_mark;
- }
- while (current_page != last_page) {
- DCHECK_NE(current_page, age_mark_page);
- allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
- current_page = current_page->next_page();
- }
- DCHECK_GE(top(), current_page->area_start());
- allocated += top() - current_page->area_start();
- DCHECK_LE(allocated, Size());
- return allocated;
- }
-
- void MovePageFromSpaceToSpace(Page* page) {
- DCHECK(page->IsFromPage());
- from_space_.RemovePage(page);
- to_space_.PrependPage(page);
- }
-
- bool Rebalance();
-
- // Return the maximum capacity of a semispace.
- size_t MaximumCapacity() {
- DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
- return to_space_.maximum_capacity();
- }
-
- bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
-
- // Returns the initial capacity of a semispace.
- size_t InitialTotalCapacity() {
- DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
- return to_space_.minimum_capacity();
- }
-
- void ResetOriginalTop() {
- DCHECK_GE(top(), original_top_);
- DCHECK_LE(top(), original_limit_);
- original_top_.store(top(), std::memory_order_release);
- }
-
- Address original_top_acquire() {
- return original_top_.load(std::memory_order_acquire);
- }
- Address original_limit_relaxed() {
- return original_limit_.load(std::memory_order_relaxed);
- }
-
- // Return the address of the first allocatable address in the active
- // semispace. This may be the address where the first object resides.
- Address first_allocatable_address() { return to_space_.space_start(); }
-
- // Get the age mark of the inactive semispace.
- Address age_mark() { return from_space_.age_mark(); }
- // Set the age mark in the active semispace.
- void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
-
- V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
- AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRawUnaligned(
- int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
- AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
- int size_in_bytes, AllocationAlignment alignment,
- AllocationOrigin origin = AllocationOrigin::kRuntime);
-
- // Reset the allocation pointer to the beginning of the active semispace.
- void ResetLinearAllocationArea();
-
- // When inline allocation stepping is active, either because of incremental
- // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
- // inline allocation every once in a while. This is done by setting
-  // allocation_info_.limit to be lower than the actual limit and increasing
- // it in steps to guarantee that the observers are notified periodically.
- void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
-
- inline bool ToSpaceContainsSlow(Address a);
- inline bool ToSpaceContains(Object o);
- inline bool FromSpaceContains(Object o);
-
- // Try to switch the active semispace to a new, empty, page.
- // Returns false if this isn't possible or reasonable (i.e., there
- // are no pages, or the current page is already empty), or true
- // if successful.
- bool AddFreshPage();
- bool AddFreshPageSynchronized();
-
-#ifdef VERIFY_HEAP
- // Verify the active semispace.
- virtual void Verify(Isolate* isolate);
-#endif
-
-#ifdef DEBUG
- // Print the active semispace.
- void Print() override { to_space_.Print(); }
-#endif
-
- // Return whether the operation succeeded.
- bool CommitFromSpaceIfNeeded() {
- if (from_space_.is_committed()) return true;
- return from_space_.Commit();
- }
-
- bool UncommitFromSpace() {
- if (!from_space_.is_committed()) return true;
- return from_space_.Uncommit();
- }
-
- bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
-
- SemiSpace* active_space() { return &to_space_; }
-
- Page* first_page() { return to_space_.first_page(); }
- Page* last_page() { return to_space_.last_page(); }
-
- iterator begin() { return to_space_.begin(); }
- iterator end() { return to_space_.end(); }
-
- std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
-
- SemiSpace& from_space() { return from_space_; }
- SemiSpace& to_space() { return to_space_; }
-
- private:
- // Update linear allocation area to match the current to-space page.
- void UpdateLinearAllocationArea();
-
- base::Mutex mutex_;
-
- // The top and the limit at the time of setting the linear allocation area.
- // These values can be accessed by background tasks.
- std::atomic<Address> original_top_;
- std::atomic<Address> original_limit_;
-
- // The semispaces.
- SemiSpace to_space_;
- SemiSpace from_space_;
- VirtualMemory reservation_;
-
- bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
- bool SupportsInlineAllocation() override { return true; }
-
- friend class SemiSpaceObjectIterator;
-};
-
class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
public:
explicit PauseAllocationObserversScope(Heap* heap);
@@ -2609,180 +559,6 @@ class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
};
-// -----------------------------------------------------------------------------
-// Base class for compaction space and off-thread space.
-
-class V8_EXPORT_PRIVATE LocalSpace : public PagedSpace {
- public:
- LocalSpace(Heap* heap, AllocationSpace id, Executability executable,
- LocalSpaceKind local_space_kind)
- : PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
- local_space_kind) {
- DCHECK_NE(local_space_kind, LocalSpaceKind::kNone);
- }
-
- protected:
- // The space is temporary and not included in any snapshots.
- bool snapshotable() override { return false; }
-};
-
-// -----------------------------------------------------------------------------
-// Compaction space that is used temporarily during compaction.
-
-class V8_EXPORT_PRIVATE CompactionSpace : public LocalSpace {
- public:
- CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
- LocalSpaceKind local_space_kind)
- : LocalSpace(heap, id, executable, local_space_kind) {
- DCHECK(is_compaction_space());
- }
-
- protected:
- V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
- int size_in_bytes, AllocationOrigin origin) override;
-};
-
-// A collection of |CompactionSpace|s used by a single compaction task.
-class CompactionSpaceCollection : public Malloced {
- public:
- explicit CompactionSpaceCollection(Heap* heap,
- LocalSpaceKind local_space_kind)
- : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
- local_space_kind),
- code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
- local_space_kind) {}
-
- CompactionSpace* Get(AllocationSpace space) {
- switch (space) {
- case OLD_SPACE:
- return &old_space_;
- case CODE_SPACE:
- return &code_space_;
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- }
-
- private:
- CompactionSpace old_space_;
- CompactionSpace code_space_;
-};
-
-// -----------------------------------------------------------------------------
-// Old generation regular object space.
-
-class OldSpace : public PagedSpace {
- public:
- // Creates an old space object. The constructor does not allocate pages
- // from OS.
- explicit OldSpace(Heap* heap)
- : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
- FreeList::CreateFreeList()) {}
-
- static bool IsAtPageStart(Address addr) {
- return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
- MemoryChunkLayout::ObjectStartOffsetInDataPage();
- }
-
- size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
- if (V8_ARRAY_BUFFER_EXTENSION_BOOL &&
- type == ExternalBackingStoreType::kArrayBuffer)
- return heap()->OldArrayBufferBytes();
- return external_backing_store_bytes_[type];
- }
-};
-
-// -----------------------------------------------------------------------------
-// Old generation code object space.
-
-class CodeSpace : public PagedSpace {
- public:
- // Creates an old space object. The constructor does not allocate pages
- // from OS.
- explicit CodeSpace(Heap* heap)
- : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
-};
-
-// For contiguous spaces, top should be in the space (or at the end) and limit
-// should be the end of the space.
-#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
- SLOW_DCHECK((space).page_low() <= (info).top() && \
- (info).top() <= (space).page_high() && \
- (info).limit() <= (space).page_high())
-
-// -----------------------------------------------------------------------------
-// Old space for all map objects
-
-class MapSpace : public PagedSpace {
- public:
- // Creates a map space object.
- explicit MapSpace(Heap* heap)
- : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, new FreeListMap()) {}
-
- int RoundSizeDownToObjectAlignment(int size) override {
- if (base::bits::IsPowerOfTwo(Map::kSize)) {
- return RoundDown(size, Map::kSize);
- } else {
- return (size / Map::kSize) * Map::kSize;
- }
- }
-
- void SortFreeList();
-
-#ifdef VERIFY_HEAP
- void VerifyObject(HeapObject obj) override;
-#endif
-};
-
-// -----------------------------------------------------------------------------
-// Off-thread space that is used for folded allocation on a different thread.
-
-class V8_EXPORT_PRIVATE OffThreadSpace : public LocalSpace {
- public:
- explicit OffThreadSpace(Heap* heap)
- : LocalSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
- LocalSpaceKind::kOffThreadSpace) {
-#ifdef V8_ENABLE_THIRD_PARTY_HEAP
- // OffThreadSpace doesn't work with third-party heap.
- UNREACHABLE();
-#endif
- }
-
- protected:
- V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
- int size_in_bytes, AllocationOrigin origin) override;
-
- void RefillFreeList() override;
-};
-
-// Iterates over the chunks (pages and large object pages) that can contain
-// pointers to new space or to evacuation candidates.
-class OldGenerationMemoryChunkIterator {
- public:
- inline explicit OldGenerationMemoryChunkIterator(Heap* heap);
-
- // Return nullptr when the iterator is done.
- inline MemoryChunk* next();
-
- private:
- enum State {
- kOldSpaceState,
- kMapState,
- kCodeState,
- kLargeObjectState,
- kCodeLargeObjectState,
- kFinishedState
- };
- Heap* heap_;
- State state_;
- PageIterator old_iterator_;
- PageIterator code_iterator_;
- PageIterator map_iterator_;
- LargePageIterator lo_iterator_;
- LargePageIterator code_lo_iterator_;
-};
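A typical (hypothetical) call site; per the declaration above, next() walks the old, map, and code space pages, followed by the regular and code large-object pages, and returns nullptr once it reaches kFinishedState. VisitChunk and |heap| are placeholders:

    OldGenerationMemoryChunkIterator chunk_iterator(heap);
    while (MemoryChunk* chunk = chunk_iterator.next()) {
      VisitChunk(chunk);  // placeholder for caller-specific work
    }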
-
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/sweeper.cc b/chromium/v8/src/heap/sweeper.cc
index 155b970ef64..c6019b0c086 100644
--- a/chromium/v8/src/heap/sweeper.cc
+++ b/chromium/v8/src/heap/sweeper.cc
@@ -6,10 +6,12 @@
#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-tracker-inl.h"
+#include "src/heap/code-object-registry.h"
+#include "src/heap/free-list-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
-#include "src/heap/remembered-set-inl.h"
+#include "src/heap/remembered-set.h"
#include "src/objects/objects-inl.h"
namespace v8 {
@@ -245,6 +247,13 @@ void Sweeper::EnsureCompleted() {
sweeping_in_progress_ = false;
}
+void Sweeper::SupportConcurrentSweeping() {
+ ForAllSweepingSpaces([this](AllocationSpace space) {
+ const int kMaxPagesToSweepPerSpace = 1;
+ ParallelSweepSpace(space, 0, kMaxPagesToSweepPerSpace);
+ });
+}
+
bool Sweeper::AreSweeperTasksRunning() { return num_sweeping_tasks_ != 0; }
V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
@@ -257,14 +266,15 @@ V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
if (free_space_mode == ZAP_FREE_SPACE) {
ZapCode(free_start, size);
}
+ ClearFreedMemoryMode clear_memory_mode =
+ (free_list_mode == REBUILD_FREE_LIST)
+ ? ClearFreedMemoryMode::kDontClearFreedMemory
+ : ClearFreedMemoryMode::kClearFreedMemory;
+ page->heap()->CreateFillerObjectAtBackground(
+ free_start, static_cast<int>(size), clear_memory_mode);
if (free_list_mode == REBUILD_FREE_LIST) {
- freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
- free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
-
- } else {
- Heap::CreateFillerObjectAt(ReadOnlyRoots(page->heap()), free_start,
- static_cast<int>(size),
- ClearFreedMemoryMode::kClearFreedMemory);
+ freed_bytes =
+ reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(free_start, size);
}
if (should_reduce_memory_) page->DiscardUnusedMemory(free_start, size);
diff --git a/chromium/v8/src/heap/sweeper.h b/chromium/v8/src/heap/sweeper.h
index 3bc199a92d2..7cd1bafd4fb 100644
--- a/chromium/v8/src/heap/sweeper.h
+++ b/chromium/v8/src/heap/sweeper.h
@@ -47,7 +47,7 @@ class Sweeper {
// after exiting this scope.
class FilterSweepingPagesScope final {
public:
- explicit FilterSweepingPagesScope(
+ FilterSweepingPagesScope(
Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope);
~FilterSweepingPagesScope();
@@ -108,6 +108,9 @@ class Sweeper {
void EnsureCompleted();
bool AreSweeperTasksRunning();
+  // Support concurrent sweepers from the main thread.
+ void SupportConcurrentSweeping();
+
Page* GetSweptPageSafe(PagedSpace* space);
void AddPageForIterability(Page* page);