path: root/deps/v8/src/heap/cppgc
author    Michaël Zasso <targos@protonmail.com>  2020-07-13 10:39:42 +0200
committer Michaël Zasso <targos@protonmail.com>  2020-07-13 14:41:41 +0200
commit    12478684aab233942e0d5dc24f195930c8a5e59d (patch)
tree      97dbee955ab91d4df480bcb82274d710a2195e64 /deps/v8/src/heap/cppgc
parent    913d36d97da187a3804f6cfa96b4d24a8b7be78a (diff)
download  node-new-12478684aab233942e0d5dc24f195930c8a5e59d.tar.gz
deps: update V8 to 8.4.371.19
PR-URL: https://github.com/nodejs/node/pull/33579
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Michael Dawson <michael_dawson@ca.ibm.com>
Reviewed-By: Rich Trott <rtrott@gmail.com>
Reviewed-By: Shelley Vohr <codebytere@gmail.com>
Diffstat (limited to 'deps/v8/src/heap/cppgc')
-rw-r--r--  deps/v8/src/heap/cppgc/allocation.cc | 11
-rw-r--r--  deps/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc | 39
-rw-r--r--  deps/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc | 52
-rw-r--r--  deps/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S | 32
-rw-r--r--  deps/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc | 53
-rw-r--r--  deps/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S | 48
-rw-r--r--  deps/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc | 48
-rw-r--r--  deps/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc | 48
-rw-r--r--  deps/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc | 94
-rw-r--r--  deps/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc | 35
-rw-r--r--  deps/v8/src/heap/cppgc/asm/x64/push_registers.S | 52
-rw-r--r--  deps/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc | 94
-rw-r--r--  deps/v8/src/heap/cppgc/asm/x64/push_registers_masm.S (renamed from deps/v8/src/heap/cppgc/asm/x64/push_registers_win.S) | 0
-rw-r--r--  deps/v8/src/heap/cppgc/free-list.cc | 190
-rw-r--r--  deps/v8/src/heap/cppgc/free-list.h | 62
-rw-r--r--  deps/v8/src/heap/cppgc/gc-info-table.cc | 2
-rw-r--r--  deps/v8/src/heap/cppgc/gc-info-table.h | 2
-rw-r--r--  deps/v8/src/heap/cppgc/gc-info.cc | 2
-rw-r--r--  deps/v8/src/heap/cppgc/globals.h | 9
-rw-r--r--  deps/v8/src/heap/cppgc/heap-inl.h | 35
-rw-r--r--  deps/v8/src/heap/cppgc/heap-object-header-inl.h | 15
-rw-r--r--  deps/v8/src/heap/cppgc/heap-object-header.cc | 4
-rw-r--r--  deps/v8/src/heap/cppgc/heap-object-header.h | 14
-rw-r--r--  deps/v8/src/heap/cppgc/heap-page.cc | 201
-rw-r--r--  deps/v8/src/heap/cppgc/heap-page.h | 181
-rw-r--r--  deps/v8/src/heap/cppgc/heap-space.cc | 58
-rw-r--r--  deps/v8/src/heap/cppgc/heap-space.h | 127
-rw-r--r--  deps/v8/src/heap/cppgc/heap-visitor.h | 88
-rw-r--r--  deps/v8/src/heap/cppgc/heap.cc | 122
-rw-r--r--  deps/v8/src/heap/cppgc/heap.h | 123
-rw-r--r--  deps/v8/src/heap/cppgc/liveness-broker.cc | 15
-rw-r--r--  deps/v8/src/heap/cppgc/logging.cc | 29
-rw-r--r--  deps/v8/src/heap/cppgc/marker.cc | 152
-rw-r--r--  deps/v8/src/heap/cppgc/marker.h | 121
-rw-r--r--  deps/v8/src/heap/cppgc/marking-visitor.cc | 143
-rw-r--r--  deps/v8/src/heap/cppgc/marking-visitor.h | 70
-rw-r--r--  deps/v8/src/heap/cppgc/object-allocator-inl.h | 74
-rw-r--r--  deps/v8/src/heap/cppgc/object-allocator.cc | 87
-rw-r--r--  deps/v8/src/heap/cppgc/object-allocator.h | 40
-rw-r--r--  deps/v8/src/heap/cppgc/object-start-bitmap-inl.h | 95
-rw-r--r--  deps/v8/src/heap/cppgc/object-start-bitmap.h | 80
-rw-r--r--  deps/v8/src/heap/cppgc/page-memory-inl.h | 57
-rw-r--r--  deps/v8/src/heap/cppgc/page-memory.cc | 211
-rw-r--r--  deps/v8/src/heap/cppgc/page-memory.h | 237
-rw-r--r--  deps/v8/src/heap/cppgc/persistent-node.cc | 60
-rw-r--r--  deps/v8/src/heap/cppgc/pointer-policies.cc | 35
-rw-r--r--  deps/v8/src/heap/cppgc/prefinalizer-handler.cc | 66
-rw-r--r--  deps/v8/src/heap/cppgc/prefinalizer-handler.h | 44
-rw-r--r--  deps/v8/src/heap/cppgc/raw-heap.cc | 32
-rw-r--r--  deps/v8/src/heap/cppgc/raw-heap.h | 106
-rw-r--r--  deps/v8/src/heap/cppgc/sanitizers.h | 39
-rw-r--r--  deps/v8/src/heap/cppgc/source-location.cc | 16
-rw-r--r--  deps/v8/src/heap/cppgc/stack.cc | 49
-rw-r--r--  deps/v8/src/heap/cppgc/stack.h | 12
-rw-r--r--  deps/v8/src/heap/cppgc/sweeper.cc | 213
-rw-r--r--  deps/v8/src/heap/cppgc/sweeper.h | 38
-rw-r--r--  deps/v8/src/heap/cppgc/visitor.h | 23
-rw-r--r--  deps/v8/src/heap/cppgc/worklist.h | 473
58 files changed, 4328 insertions, 130 deletions
diff --git a/deps/v8/src/heap/cppgc/allocation.cc b/deps/v8/src/heap/cppgc/allocation.cc
index 7e98d1eec9..32f917da5a 100644
--- a/deps/v8/src/heap/cppgc/allocation.cc
+++ b/deps/v8/src/heap/cppgc/allocation.cc
@@ -11,6 +11,9 @@
namespace cppgc {
namespace internal {
+STATIC_ASSERT(api_constants::kLargeObjectSizeThreshold ==
+ kLargeObjectSizeThreshold);
+
// static
void* MakeGarbageCollectedTraitInternal::Allocate(cppgc::Heap* heap,
size_t size,
@@ -19,5 +22,13 @@ void* MakeGarbageCollectedTraitInternal::Allocate(cppgc::Heap* heap,
return Heap::From(heap)->Allocate(size, index);
}
+// static
+void* MakeGarbageCollectedTraitInternal::Allocate(
+ cppgc::Heap* heap, size_t size, GCInfoIndex index,
+ CustomSpaceIndex space_index) {
+ DCHECK_NOT_NULL(heap);
+ return Heap::From(heap)->Allocate(size, index, space_index);
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc
new file mode 100644
index 0000000000..5246c3f6c3
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc
@@ -0,0 +1,39 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+
+// We maintain 8-byte alignment at calls by pushing an additional
+// non-callee-saved register (r3).
+//
+// Calling convention source:
+// https://en.wikipedia.org/wiki/Calling_convention#ARM_(A32)
+// http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka4127.html
+asm(".globl PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers and save return address.
+ // Only {r4-r11} are callee-saved registers. Push r3 in addition to align
+ // the stack back to 8 bytes.
+ " push {r3-r11, lr} \n"
+ // Pass 1st parameter (r0) unchanged (Stack*).
+ // Pass 2nd parameter (r1) unchanged (StackVisitor*).
+ // Save 3rd parameter (r2; IterateStackCallback).
+ " mov r3, r2 \n"
+ // Pass 3rd parameter as sp (stack pointer).
+ " mov r2, sp \n"
+ // Call the callback.
+ " blx r3 \n"
+ // Discard all the registers.
+ " add sp, sp, #36 \n"
+ // Pop lr into pc which returns and switches mode if needed.
+ " pop {pc} \n");
diff --git a/deps/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc
new file mode 100644
index 0000000000..30d4de1f30
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc
@@ -0,0 +1,52 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+
+// We maintain 16-byte alignment.
+//
+// Calling convention source:
+// https://en.wikipedia.org/wiki/Calling_convention#ARM_(A64)
+
+asm(
+#if defined(__APPLE__)
+ ".globl _PushAllRegistersAndIterateStack \n"
+ ".private_extern _PushAllRegistersAndIterateStack \n"
+ "_PushAllRegistersAndIterateStack: \n"
+#else // !defined(__APPLE__)
+ ".globl PushAllRegistersAndIterateStack \n"
+#if !defined(_WIN64)
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+#endif // !defined(_WIN64)
+ "PushAllRegistersAndIterateStack: \n"
+#endif // !defined(__APPLE__)
+ // x19-x29 are callee-saved.
+ " stp x19, x20, [sp, #-16]! \n"
+ " stp x21, x22, [sp, #-16]! \n"
+ " stp x23, x24, [sp, #-16]! \n"
+ " stp x25, x26, [sp, #-16]! \n"
+ " stp x27, x28, [sp, #-16]! \n"
+ " stp fp, lr, [sp, #-16]! \n"
+ // Maintain frame pointer.
+ " mov fp, sp \n"
+ // Pass 1st parameter (x0) unchanged (Stack*).
+ // Pass 2nd parameter (x1) unchanged (StackVisitor*).
+ // Save 3rd parameter (x2; IterateStackCallback)
+ " mov x7, x2 \n"
+ // Pass 3rd parameter as sp (stack pointer).
+ " mov x2, sp \n"
+ " blr x7 \n"
+ // Load return address.
+ " ldr lr, [sp, #8] \n"
+ // Restore frame pointer and pop all callee-saved registers.
+ " ldr fp, [sp], #96 \n"
+ " ret \n");
diff --git a/deps/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S b/deps/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S
new file mode 100644
index 0000000000..9773654ffc
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S
@@ -0,0 +1,32 @@
+; Copyright 2020 the V8 project authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+; This file is exactly the same as push_registers_asm.cc, just formatted for
+; the Microsoft Arm Assembler.
+
+ AREA |.text|, CODE, ALIGN=4, READONLY
+ EXPORT PushAllRegistersAndIterateStack
+PushAllRegistersAndIterateStack
+ ; x19-x29 are callee-saved
+ STP x19, x20, [sp, #-16]!
+ STP x21, x22, [sp, #-16]!
+ STP x23, x24, [sp, #-16]!
+ STP x25, x26, [sp, #-16]!
+ STP x27, x28, [sp, #-16]!
+ STP fp, lr, [sp, #-16]!
+ ; Maintain frame pointer
+ MOV fp, sp
+ ; Pass 1st parameter (x0) unchanged (Stack*).
+ ; Pass 2nd parameter (x1) unchanged (StackVisitor*).
+ ; Save 3rd parameter (x2; IterateStackCallback)
+ MOV x7, x2
+ ; Pass 3rd parameter as sp (stack pointer)
+ MOV x2, sp
+ BLR x7
+ ; Load return address
+ LDR lr, [sp, #8]
+ ; Restore frame pointer and pop all callee-saved registers.
+ LDR fp, [sp], #96
+ RET
+	END
\ No newline at end of file
diff --git a/deps/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc
new file mode 100644
index 0000000000..ed9c14a50e
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc
@@ -0,0 +1,53 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+
+// We maintain 16-byte alignment at calls. There is a 4-byte return address
+// on the stack and we push 28 bytes which maintains 16-byte stack alignment
+// at the call.
+//
+// The following assumes cdecl calling convention.
+// Source: https://en.wikipedia.org/wiki/X86_calling_conventions#cdecl
+asm(
+#ifdef _WIN32
+ ".globl _PushAllRegistersAndIterateStack \n"
+ "_PushAllRegistersAndIterateStack: \n"
+#else // !_WIN32
+ ".globl PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+#endif // !_WIN32
+ // [ IterateStackCallback ]
+ // [ StackVisitor* ]
+ // [ Stack* ]
+ // [ ret ]
+ // ebp is callee-saved. Maintain proper frame pointer for debugging.
+ " push %ebp \n"
+ " movl %esp, %ebp \n"
+ " push %ebx \n"
+ " push %esi \n"
+ " push %edi \n"
+ // Save 3rd parameter (IterateStackCallback).
+ " movl 28(%esp), %ecx \n"
+ // Pass 3rd parameter as esp (stack pointer).
+ " push %esp \n"
+ // Pass 2nd parameter (StackVisitor*).
+ " push 28(%esp) \n"
+ // Pass 1st parameter (Stack*).
+ " push 28(%esp) \n"
+ " call *%ecx \n"
+ // Pop the callee-saved registers.
+ " addl $24, %esp \n"
+ // Restore ebp as it was used as frame pointer.
+ " pop %ebp \n"
+ " ret \n");
diff --git a/deps/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S b/deps/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S
new file mode 100644
index 0000000000..a35fd6e527
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S
@@ -0,0 +1,48 @@
+;; Copyright 2020 the V8 project authors. All rights reserved.
+;; Use of this source code is governed by a BSD-style license that can be
+;; found in the LICENSE file.
+
+;; MASM syntax
+;; https://docs.microsoft.com/en-us/cpp/assembler/masm/microsoft-macro-assembler-reference?view=vs-2019
+
+.model flat, C
+
+public PushAllRegistersAndIterateStack
+
+.code
+PushAllRegistersAndIterateStack:
+ ;; Push all callee-saved registers to get them on the stack for conservative
+ ;; stack scanning.
+ ;;
+ ;; We maintain 16-byte alignment at calls. There is a 4-byte return address
+ ;; on the stack and we push 28 bytes which maintains 16-byte stack alignment
+ ;; at the call.
+ ;;
+ ;; The following assumes cdecl calling convention.
+ ;; Source: https://docs.microsoft.com/en-us/cpp/cpp/cdecl?view=vs-2019
+ ;;
+ ;; [ IterateStackCallback ]
+ ;; [ StackVisitor* ]
+ ;; [ Stack* ]
+ ;; [ ret ]
+ push ebp
+ mov ebp, esp
+ push ebx
+ push esi
+ push edi
+ ;; Save 3rd parameter (IterateStackCallback).
+ mov ecx, [ esp + 28 ]
+ ;; Pass 3rd parameter as esp (stack pointer).
+ push esp
+ ;; Pass 2nd parameter (StackVisitor*).
+ push [ esp + 28 ]
+ ;; Pass 1st parameter (Stack*).
+ push [ esp + 28 ]
+ call ecx
+ ;; Pop the callee-saved registers.
+ add esp, 24
+ ;; Restore ebp as it was used as frame pointer.
+ pop ebp
+ ret
+
+end
diff --git a/deps/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc
new file mode 100644
index 0000000000..4a46caa6c5
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc
@@ -0,0 +1,48 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+asm(".set noreorder \n"
+ ".global PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers and save return address.
+ " addiu $sp, $sp, -48 \n"
+ " sw $ra, 44($sp) \n"
+ " sw $s8, 40($sp) \n"
+ " sw $sp, 36($sp) \n"
+ " sw $gp, 32($sp) \n"
+ " sw $s7, 28($sp) \n"
+ " sw $s6, 24($sp) \n"
+ " sw $s5, 20($sp) \n"
+ " sw $s4, 16($sp) \n"
+ " sw $s3, 12($sp) \n"
+ " sw $s2, 8($sp) \n"
+ " sw $s1, 4($sp) \n"
+ " sw $s0, 0($sp) \n"
+ // Maintain frame pointer.
+ " move $s8, $sp \n"
+ // Pass 1st parameter (a0) unchanged (Stack*).
+ // Pass 2nd parameter (a1) unchanged (StackVisitor*).
+ // Save 3rd parameter (a2; IterateStackCallback).
+ " move $a3, $a2 \n"
+ // Call the callback.
+ " jalr $a3 \n"
+ // Delay slot: Pass 3rd parameter as sp (stack pointer).
+ " move $a2, $sp \n"
+ // Load return address.
+ " lw $ra, 44($sp) \n"
+ // Restore frame pointer.
+ " lw $s8, 40($sp) \n"
+ " jr $ra \n"
+ // Delay slot: Discard all callee-saved registers.
+ " addiu $sp, $sp, 48 \n");
diff --git a/deps/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc
new file mode 100644
index 0000000000..6befa3bcc0
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc
@@ -0,0 +1,48 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+asm(".set noreorder \n"
+ ".global PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers and save return address.
+ " daddiu $sp, $sp, -96 \n"
+ " sd $ra, 88($sp) \n"
+ " sd $s8, 80($sp) \n"
+ " sd $sp, 72($sp) \n"
+ " sd $gp, 64($sp) \n"
+ " sd $s7, 56($sp) \n"
+ " sd $s6, 48($sp) \n"
+ " sd $s5, 40($sp) \n"
+ " sd $s4, 32($sp) \n"
+ " sd $s3, 24($sp) \n"
+ " sd $s2, 16($sp) \n"
+ " sd $s1, 8($sp) \n"
+ " sd $s0, 0($sp) \n"
+ // Maintain frame pointer.
+ " move $s8, $sp \n"
+ // Pass 1st parameter (a0) unchanged (Stack*).
+ // Pass 2nd parameter (a1) unchanged (StackVisitor*).
+ // Save 3rd parameter (a2; IterateStackCallback).
+ " move $a3, $a2 \n"
+ // Call the callback.
+ " jalr $a3 \n"
+ // Delay slot: Pass 3rd parameter as sp (stack pointer).
+ " move $a2, $sp \n"
+ // Load return address.
+ " ld $ra, 88($sp) \n"
+ // Restore frame pointer.
+ " ld $s8, 80($sp) \n"
+ " jr $ra \n"
+ // Delay slot: Discard all callee-saved registers.
+ " daddiu $sp, $sp, 96 \n");
diff --git a/deps/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc
new file mode 100644
index 0000000000..6936819ba2
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc
@@ -0,0 +1,94 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+
+// PPC ABI source:
+// http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi.html
+
+// AIX Runtime process stack:
+// https://www.ibm.com/support/knowledgecenter/ssw_aix_71/assembler/idalangref_runtime_process.html
+asm(
+#if defined(_AIX)
+ ".globl .PushAllRegistersAndIterateStack, hidden \n"
+ ".csect .text[PR] \n"
+ ".PushAllRegistersAndIterateStack: \n"
+#else
+ ".globl PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+#endif
+ // Push all callee-saved registers.
+ // lr, TOC pointer, r16 to r31. 160 bytes.
+ // The parameter save area shall be allocated by the caller. 112 bytes.
+ // At any time, SP (r1) needs to be a multiple of 16 (i.e. 16-byte aligned).
+ " mflr 0 \n"
+ " std 0, 16(1) \n"
+#if defined(_AIX)
+ " std 2, 40(1) \n"
+#else
+ " std 2, 24(1) \n"
+#endif
+ " stdu 1, -256(1) \n"
+ " std 14, 112(1) \n"
+ " std 15, 120(1) \n"
+ " std 16, 128(1) \n"
+ " std 17, 136(1) \n"
+ " std 18, 144(1) \n"
+ " std 19, 152(1) \n"
+ " std 20, 160(1) \n"
+ " std 21, 168(1) \n"
+ " std 22, 176(1) \n"
+ " std 23, 184(1) \n"
+ " std 24, 192(1) \n"
+ " std 25, 200(1) \n"
+ " std 26, 208(1) \n"
+ " std 27, 216(1) \n"
+ " std 28, 224(1) \n"
+ " std 29, 232(1) \n"
+ " std 30, 240(1) \n"
+ " std 31, 248(1) \n"
+ // Pass 1st parameter (r3) unchanged (Stack*).
+ // Pass 2nd parameter (r4) unchanged (StackVisitor*).
+ // Save 3rd parameter (r5; IterateStackCallback).
+ " mr 6, 5 \n"
+#if defined(_AIX)
+ // Set up TOC for callee.
+ " ld 2,8(5) \n"
+ // AIX uses function decorators, which means that
+ // pointers to functions do not point to code, but
+ // instead point to metadata about them, hence
+ // need to deterrence.
+ " ld 6,0(6) \n"
+#endif
+ // Pass 3rd parameter as sp (stack pointer).
+ " mr 5, 1 \n"
+#if !defined(_AIX)
+ // Set up r12 to be equal to the callee address (in order for TOC
+ // relocation). Only needed on LE Linux.
+ " mr 12, 6 \n"
+#endif
+ // Call the callback.
+ " mtctr 6 \n"
+ " bctrl \n"
+ // Discard all the registers.
+ " addi 1, 1, 256 \n"
+ // Restore lr.
+ " ld 0, 16(1) \n"
+ " mtlr 0 \n"
+#if defined(_AIX)
+ // Restore TOC pointer.
+ " ld 2, 40(1) \n"
+#else
+ " ld 2, 24(1) \n"
+#endif
+ " blr \n");
diff --git a/deps/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc
new file mode 100644
index 0000000000..6b9b2c0853
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc
@@ -0,0 +1,35 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+
+// S390 ABI source:
+// http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_zSeries.html
+asm(".globl PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers.
+ // r6-r13, r14 and sp(r15)
+ " stmg %r6, %sp, 48(%sp) \n"
+ // Allocate frame.
+ " lay %sp, -160(%sp) \n"
+ // Pass 1st parameter (r2) unchanged (Stack*).
+ // Pass 2nd parameter (r3) unchanged (StackVisitor*).
+ // Save 3rd parameter (r4; IterateStackCallback).
+ " lgr %r5, %r4 \n"
+ // Pass sp as 3rd parameter. 160+48 to point
+ // to callee saved region stored above.
+ " lay %r4, 208(%sp) \n"
+ // Call the callback.
+ " basr %r14, %r5 \n"
+ " lmg %r14,%sp, 272(%sp) \n"
+ " br %r14 \n");
diff --git a/deps/v8/src/heap/cppgc/asm/x64/push_registers.S b/deps/v8/src/heap/cppgc/asm/x64/push_registers.S
deleted file mode 100644
index 018859d5c0..0000000000
--- a/deps/v8/src/heap/cppgc/asm/x64/push_registers.S
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-.att_syntax
-
-.text
-
-#ifdef V8_TARGET_OS_MACOSX
-
-.globl _PushAllRegistersAndIterateStack
-_PushAllRegistersAndIterateStack:
-
-#else // !V8_TARGET_OS_MACOSX
-
-.type PushAllRegistersAndIterateStack, %function
-.global PushAllRegistersAndIterateStack
-.hidden PushAllRegistersAndIterateStack
-PushAllRegistersAndIterateStack:
-
-#endif // !V8_TARGET_OS_MACOSX
-
- // Push all callee-saved registers to get them on the stack for conservative
- // stack scanning.
- //
- // We maintain 16-byte alignment at calls. There is an 8-byte return address
- // on the stack and we push 56 bytes which maintains 16-byte stack alignment
- // at the call.
- // Source: https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf
- //
- // rbp is callee-saved. Maintain proper frame pointer for debugging.
- push %rbp
- mov %rsp, %rbp
- push $0xCDCDCD // Dummy for alignment.
- push %rbx
- push %r12
- push %r13
- push %r14
- push %r15
- // Pass 1st parameter (rdi) unchanged (Stack*).
- // Pass 2nd parameter (rsi) unchanged (StackVisitor*).
- // Save 3rd parameter (rdx; IterateStackCallback)
- mov %rdx, %r8
- // Pass 3rd parameter as rsp (stack pointer).
- mov %rsp, %rdx
- // Call the callback.
- call *%r8
- // Pop the callee-saved registers.
- add $48, %rsp
- // Restore rbp as it was used as frame pointer.
- pop %rbp
- ret
diff --git a/deps/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc b/deps/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc
new file mode 100644
index 0000000000..68f7918c93
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc
@@ -0,0 +1,94 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// We cannot rely on clang generating the function and right symbol mangling
+// as `__attribute__((naked))` does not prevent clang from generating TSAN
+// function entry stubs (`__tsan_func_entry`). Even with
+// `__attribute__((no_sanitize_thread))` annotation clang generates the entry
+// stub.
+// See https://bugs.llvm.org/show_bug.cgi?id=45400.
+
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+// _WIN64 Defined as 1 when the compilation target is 64-bit ARM or x64.
+// Otherwise, undefined.
+#ifdef _WIN64
+
+// We maintain 16-byte alignment at calls. There is an 8-byte return address
+// on the stack and we push 72 bytes which maintains 16-byte stack alignment
+// at the call.
+// Source: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention
+asm(".globl PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // rbp is callee-saved. Maintain proper frame pointer for debugging.
+ " push %rbp \n"
+ " mov %rsp, %rbp \n"
+ // Dummy for alignment.
+ " push $0xCDCDCD \n"
+ " push %rsi \n"
+ " push %rdi \n"
+ " push %rbx \n"
+ " push %r12 \n"
+ " push %r13 \n"
+ " push %r14 \n"
+ " push %r15 \n"
+ // Pass 1st parameter (rcx) unchanged (Stack*).
+ // Pass 2nd parameter (rdx) unchanged (StackVisitor*).
+ // Save 3rd parameter (r8; IterateStackCallback)
+ " mov %r8, %r9 \n"
+ // Pass 3rd parameter as rsp (stack pointer).
+ " mov %rsp, %r8 \n"
+ // Call the callback.
+ " call *%r9 \n"
+ // Pop the callee-saved registers.
+ " add $64, %rsp \n"
+ // Restore rbp as it was used as frame pointer.
+ " pop %rbp \n"
+ " ret \n");
+
+#else // !_WIN64
+
+// We maintain 16-byte alignment at calls. There is an 8-byte return address
+// on the stack and we push 56 bytes which maintains 16-byte stack alignment
+// at the call.
+// Source: https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf
+asm(
+#ifdef __APPLE__
+ ".globl _PushAllRegistersAndIterateStack \n"
+ ".private_extern _PushAllRegistersAndIterateStack \n"
+ "_PushAllRegistersAndIterateStack: \n"
+#else // !__APPLE__
+ ".globl PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+#endif // !__APPLE__
+ // rbp is callee-saved. Maintain proper frame pointer for debugging.
+ " push %rbp \n"
+ " mov %rsp, %rbp \n"
+ // Dummy for alignment.
+ " push $0xCDCDCD \n"
+ " push %rbx \n"
+ " push %r12 \n"
+ " push %r13 \n"
+ " push %r14 \n"
+ " push %r15 \n"
+ // Pass 1st parameter (rdi) unchanged (Stack*).
+ // Pass 2nd parameter (rsi) unchanged (StackVisitor*).
+ // Save 3rd parameter (rdx; IterateStackCallback)
+ " mov %rdx, %r8 \n"
+ // Pass 3rd parameter as rsp (stack pointer).
+ " mov %rsp, %rdx \n"
+ // Call the callback.
+ " call *%r8 \n"
+ // Pop the callee-saved registers.
+ " add $48, %rsp \n"
+ // Restore rbp as it was used as frame pointer.
+ " pop %rbp \n"
+ " ret \n");
+
+#endif // !_WIN64
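[Editorial note] The alignment bookkeeping in the two comments above can be restated as plain arithmetic: the return address plus everything pushed before the indirect call must total a multiple of 16. A compile-time sketch using the byte counts quoted in those comments:

    // Sketch: checking the stack-alignment arithmetic stated above.
    constexpr unsigned kReturnAddressSize = 8;
    constexpr unsigned kBytesPushedWin64 = 72;  // rbp, dummy, rsi, rdi, rbx, r12-r15
    constexpr unsigned kBytesPushedSysV = 56;   // rbp, dummy, rbx, r12-r15
    static_assert((kReturnAddressSize + kBytesPushedWin64) % 16 == 0,
                  "call site stays 16-byte aligned on Windows x64");
    static_assert((kReturnAddressSize + kBytesPushedSysV) % 16 == 0,
                  "call site stays 16-byte aligned on System V x64");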
diff --git a/deps/v8/src/heap/cppgc/asm/x64/push_registers_win.S b/deps/v8/src/heap/cppgc/asm/x64/push_registers_masm.S
index 627843830f..627843830f 100644
--- a/deps/v8/src/heap/cppgc/asm/x64/push_registers_win.S
+++ b/deps/v8/src/heap/cppgc/asm/x64/push_registers_masm.S
diff --git a/deps/v8/src/heap/cppgc/free-list.cc b/deps/v8/src/heap/cppgc/free-list.cc
new file mode 100644
index 0000000000..e5e6b70793
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/free-list.cc
@@ -0,0 +1,190 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/free-list.h"
+
+#include <algorithm>
+
+#include "include/cppgc/internal/logging.h"
+#include "src/base/bits.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/sanitizers.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+uint32_t BucketIndexForSize(uint32_t size) {
+ return v8::base::bits::WhichPowerOfTwo(
+ v8::base::bits::RoundDownToPowerOfTwo32(size));
+}
+} // namespace
+
+class FreeList::Entry : public HeapObjectHeader {
+ public:
+ explicit Entry(size_t size) : HeapObjectHeader(size, kFreeListGCInfoIndex) {
+ static_assert(sizeof(Entry) == kFreeListEntrySize, "Sizes must match");
+ }
+
+ Entry* Next() const { return next_; }
+ void SetNext(Entry* next) { next_ = next; }
+
+ void Link(Entry** previous_next) {
+ next_ = *previous_next;
+ *previous_next = this;
+ }
+ void Unlink(Entry** previous_next) {
+ *previous_next = next_;
+ next_ = nullptr;
+ }
+
+ private:
+ Entry* next_ = nullptr;
+};
+
+FreeList::FreeList() { Clear(); }
+
+FreeList::FreeList(FreeList&& other) V8_NOEXCEPT
+ : free_list_heads_(std::move(other.free_list_heads_)),
+ free_list_tails_(std::move(other.free_list_tails_)),
+ biggest_free_list_index_(std::move(other.biggest_free_list_index_)) {
+ other.Clear();
+}
+
+FreeList& FreeList::operator=(FreeList&& other) V8_NOEXCEPT {
+ Clear();
+ Append(std::move(other));
+ DCHECK(other.IsEmpty());
+ return *this;
+}
+
+void FreeList::Add(FreeList::Block block) {
+ const size_t size = block.size;
+ DCHECK_GT(kPageSize, size);
+ DCHECK_LE(sizeof(HeapObjectHeader), size);
+
+ if (block.size < sizeof(Entry)) {
+ // Create wasted entry. This can happen when an almost emptied linear
+ // allocation buffer is returned to the freelist.
+ new (block.address) HeapObjectHeader(size, kFreeListGCInfoIndex);
+ return;
+ }
+
+ // Make sure the freelist header is writable.
+ SET_MEMORY_ACCESIBLE(block.address, sizeof(Entry));
+ Entry* entry = new (block.address) Entry(size);
+ const size_t index = BucketIndexForSize(static_cast<uint32_t>(size));
+ entry->Link(&free_list_heads_[index]);
+ biggest_free_list_index_ = std::max(biggest_free_list_index_, index);
+ if (!entry->Next()) {
+ free_list_tails_[index] = entry;
+ }
+}
+
+void FreeList::Append(FreeList&& other) {
+#if DEBUG
+ const size_t expected_size = Size() + other.Size();
+#endif
+ // Newly created entries get added to the head.
+ for (size_t index = 0; index < free_list_tails_.size(); ++index) {
+ Entry* other_tail = other.free_list_tails_[index];
+ Entry*& this_head = this->free_list_heads_[index];
+ if (other_tail) {
+ other_tail->SetNext(this_head);
+ if (!this_head) {
+ this->free_list_tails_[index] = other_tail;
+ }
+ this_head = other.free_list_heads_[index];
+ other.free_list_heads_[index] = nullptr;
+ other.free_list_tails_[index] = nullptr;
+ }
+ }
+
+ biggest_free_list_index_ =
+ std::max(biggest_free_list_index_, other.biggest_free_list_index_);
+ other.biggest_free_list_index_ = 0;
+#if DEBUG
+ DCHECK_EQ(expected_size, Size());
+#endif
+ DCHECK(other.IsEmpty());
+}
+
+FreeList::Block FreeList::Allocate(size_t allocation_size) {
+ // Try reusing a block from the largest bin. The underlying reasoning
+ // being that we want to amortize this slow allocation call by carving
+ // off as large a free block as possible in one go; a block that will
+ // service this allocation and let following allocations be serviced quickly
+ // by bump allocation.
+ // bucket_size represents minimal size of entries in a bucket.
+ size_t bucket_size = static_cast<size_t>(1) << biggest_free_list_index_;
+ size_t index = biggest_free_list_index_;
+ for (; index > 0; --index, bucket_size >>= 1) {
+ DCHECK(IsConsistent(index));
+ Entry* entry = free_list_heads_[index];
+ if (allocation_size > bucket_size) {
+ // Final bucket candidate; check initial entry if it is able
+ // to service this allocation. Do not perform a linear scan,
+ // as it is considered too costly.
+ if (!entry || entry->GetSize() < allocation_size) break;
+ }
+ if (entry) {
+ if (!entry->Next()) {
+ DCHECK_EQ(entry, free_list_tails_[index]);
+ free_list_tails_[index] = nullptr;
+ }
+ entry->Unlink(&free_list_heads_[index]);
+ biggest_free_list_index_ = index;
+ return {entry, entry->GetSize()};
+ }
+ }
+ biggest_free_list_index_ = index;
+ return {nullptr, 0u};
+}
+
+void FreeList::Clear() {
+ std::fill(free_list_heads_.begin(), free_list_heads_.end(), nullptr);
+ std::fill(free_list_tails_.begin(), free_list_tails_.end(), nullptr);
+ biggest_free_list_index_ = 0;
+}
+
+size_t FreeList::Size() const {
+ size_t size = 0;
+ for (auto* entry : free_list_heads_) {
+ while (entry) {
+ size += entry->GetSize();
+ entry = entry->Next();
+ }
+ }
+ return size;
+}
+
+bool FreeList::IsEmpty() const {
+ return std::all_of(free_list_heads_.cbegin(), free_list_heads_.cend(),
+ [](const auto* entry) { return !entry; });
+}
+
+bool FreeList::Contains(Block block) const {
+ for (Entry* list : free_list_heads_) {
+ for (Entry* entry = list; entry; entry = entry->Next()) {
+ if (entry <= block.address &&
+ (reinterpret_cast<Address>(block.address) + block.size <=
+ reinterpret_cast<Address>(entry) + entry->GetSize()))
+ return true;
+ }
+ }
+ return false;
+}
+
+bool FreeList::IsConsistent(size_t index) const {
+ // Check that freelist head and tail pointers are consistent, i.e.
+ // - either both are nulls (no entries in the bucket);
+ // - or both are non-nulls and the tail points to the end.
+ return (!free_list_heads_[index] && !free_list_tails_[index]) ||
+ (free_list_heads_[index] && free_list_tails_[index] &&
+ !free_list_tails_[index]->Next());
+}
+
+} // namespace internal
+} // namespace cppgc
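[Editorial note] BucketIndexForSize above maps a block to bucket floor(log2(size)), which is what makes the head/tail bookkeeping in Add and the largest-bucket-first search in Allocate work: every entry in bucket n is at least 2^n bytes. A standalone sketch of the same rule (the real code uses the v8::base::bits helpers):

    // Sketch of the bucketing rule: the index is the position of the highest
    // set bit, so bucket n only ever holds entries of size >= 2^n.
    #include <cstdint>

    uint32_t BucketIndexForSizeSketch(uint32_t size) {
      uint32_t index = 0;
      while (size >>= 1) ++index;  // floor(log2(size))
      return index;
    }

    // Example: a 48-byte block lands in bucket 5 (32 <= 48 < 64). A request
    // for 40 bytes can take it straight from bucket 5; a request for 64 bytes
    // must look at bucket 6 or higher, since bucket 5 only guarantees >= 32.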
diff --git a/deps/v8/src/heap/cppgc/free-list.h b/deps/v8/src/heap/cppgc/free-list.h
new file mode 100644
index 0000000000..ba578f3820
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/free-list.h
@@ -0,0 +1,62 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_FREE_LIST_H_
+#define V8_HEAP_CPPGC_FREE_LIST_H_
+
+#include <array>
+
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header.h"
+
+namespace cppgc {
+namespace internal {
+
+class V8_EXPORT_PRIVATE FreeList {
+ public:
+ struct Block {
+ void* address;
+ size_t size;
+ };
+
+ FreeList();
+
+ FreeList(const FreeList&) = delete;
+ FreeList& operator=(const FreeList&) = delete;
+
+ FreeList(FreeList&& freelist) V8_NOEXCEPT;
+ FreeList& operator=(FreeList&& freelist) V8_NOEXCEPT;
+
+ // Allocates entries which are at least of the provided size.
+ Block Allocate(size_t);
+
+ // Adds block to the freelist. The minimal block size is two words.
+ void Add(Block);
+
+ // Append other freelist into this.
+ void Append(FreeList&&);
+
+ void Clear();
+
+ size_t Size() const;
+ bool IsEmpty() const;
+
+ bool Contains(Block) const;
+
+ private:
+ class Entry;
+
+ bool IsConsistent(size_t) const;
+
+ // All |Entry|s in the nth list have size >= 2^n.
+ std::array<Entry*, kPageSizeLog2> free_list_heads_;
+ std::array<Entry*, kPageSizeLog2> free_list_tails_;
+ size_t biggest_free_list_index_ = 0;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_FREE_LIST_H_
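[Editorial note] The "minimal block size is two words" comment above follows from the Entry layout in free-list.cc: an entry is a HeapObjectHeader plus a single next pointer, and globals.h pins kFreeListEntrySize to 2 * sizeof(uintptr_t). A sketch of that invariant with stand-in types, assuming a 64-bit target:

    // Sketch only: stand-in types mirroring the two-word entry invariant on a
    // 64-bit target; the real types are HeapObjectHeader and FreeList::Entry.
    #include <cstdint>

    struct HeaderSketch {
      uint32_t padding;   // present on 64-bit targets in the real header
      uint16_t encoded_high;
      uint16_t encoded_low;
    };

    struct EntrySketch : HeaderSketch {
      EntrySketch* next;  // links entries within one size bucket
    };

    static_assert(sizeof(EntrySketch) == 2 * sizeof(uintptr_t),
                  "blocks smaller than two words cannot be linked and are "
                  "recorded as header-only 'wasted' entries by Add()");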
diff --git a/deps/v8/src/heap/cppgc/gc-info-table.cc b/deps/v8/src/heap/cppgc/gc-info-table.cc
index 580ff4d069..dda5f0a7e8 100644
--- a/deps/v8/src/heap/cppgc/gc-info-table.cc
+++ b/deps/v8/src/heap/cppgc/gc-info-table.cc
@@ -8,7 +8,7 @@
#include <limits>
#include <memory>
-#include "include/cppgc/gc-info.h"
+#include "include/cppgc/internal/gc-info.h"
#include "include/cppgc/platform.h"
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
diff --git a/deps/v8/src/heap/cppgc/gc-info-table.h b/deps/v8/src/heap/cppgc/gc-info-table.h
index c5ccec2a38..25141f5d1c 100644
--- a/deps/v8/src/heap/cppgc/gc-info-table.h
+++ b/deps/v8/src/heap/cppgc/gc-info-table.h
@@ -7,7 +7,7 @@
#include <stdint.h>
-#include "include/cppgc/gc-info.h"
+#include "include/cppgc/internal/gc-info.h"
#include "include/cppgc/platform.h"
#include "include/v8config.h"
#include "src/base/logging.h"
diff --git a/deps/v8/src/heap/cppgc/gc-info.cc b/deps/v8/src/heap/cppgc/gc-info.cc
index 21492825cc..007eab3a33 100644
--- a/deps/v8/src/heap/cppgc/gc-info.cc
+++ b/deps/v8/src/heap/cppgc/gc-info.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/cppgc/gc-info.h"
+#include "include/cppgc/internal/gc-info.h"
#include "include/v8config.h"
#include "src/heap/cppgc/gc-info-table.h"
diff --git a/deps/v8/src/heap/cppgc/globals.h b/deps/v8/src/heap/cppgc/globals.h
index 18a7e3189e..734abd508e 100644
--- a/deps/v8/src/heap/cppgc/globals.h
+++ b/deps/v8/src/heap/cppgc/globals.h
@@ -8,6 +8,8 @@
#include <stddef.h>
#include <stdint.h>
+#include "include/cppgc/internal/gc-info.h"
+
namespace cppgc {
namespace internal {
@@ -31,8 +33,15 @@ constexpr size_t kPageSize = 1 << kPageSizeLog2;
constexpr size_t kPageOffsetMask = kPageSize - 1;
constexpr size_t kPageBaseMask = ~kPageOffsetMask;
+// Guard pages are always put into memory. Whether they are actually protected
+// depends on the allocator provided to the garbage collector.
+constexpr size_t kGuardPageSize = 4096;
+
constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
+constexpr GCInfoIndex kFreeListGCInfoIndex = 0;
+constexpr size_t kFreeListEntrySize = 2 * sizeof(uintptr_t);
+
} // namespace internal
} // namespace cppgc
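[Editorial note] For orientation, the constants added here relate to the existing ones as follows. A compile-time sketch, assuming kPageSizeLog2 == 17 (the value used elsewhere in cppgc); the bound mirrors the DCHECK in the NormalPage constructor further down:

    // Sketch restating the constant relationships; not part of the header.
    #include <cstddef>

    constexpr size_t kPageSizeLog2Sketch = 17;  // assumption
    constexpr size_t kPageSizeSketch = size_t{1} << kPageSizeLog2Sketch;
    constexpr size_t kGuardPageSizeSketch = 4096;
    constexpr size_t kLargeObjectSizeThresholdSketch = kPageSizeSketch / 2;

    // Objects at or above the threshold go to large pages; everything below
    // must fit inside a normal page payload (page minus the two guard pages
    // and the page header).
    static_assert(kLargeObjectSizeThresholdSketch <
                      kPageSizeSketch - 2 * kGuardPageSizeSketch,
                  "threshold leaves room for a normal-page payload");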
diff --git a/deps/v8/src/heap/cppgc/heap-inl.h b/deps/v8/src/heap/cppgc/heap-inl.h
index 28a4a14139..4fe3186230 100644
--- a/deps/v8/src/heap/cppgc/heap-inl.h
+++ b/deps/v8/src/heap/cppgc/heap-inl.h
@@ -2,32 +2,29 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/heap/cppgc/heap.h"
-
-#include "src/heap/cppgc/globals.h"
-#include "src/heap/cppgc/heap-object-header-inl.h"
-
#ifndef V8_HEAP_CPPGC_HEAP_INL_H_
#define V8_HEAP_CPPGC_HEAP_INL_H_
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/object-allocator-inl.h"
+
namespace cppgc {
namespace internal {
void* Heap::Allocate(size_t size, GCInfoIndex index) {
- // TODO(chromium:1056170): This is merely a dummy implementation and will be
- // replaced with proper allocation code throughout the migration.
- size_t allocation_size = size + sizeof(HeapObjectHeader);
- // The allocation size calculation can overflow for large sizes.
- CHECK_GT(allocation_size, size);
- // calloc() provides stricter alignment guarantees than the GC. Allocate
- // a multiple of kAllocationGranularity to follow restrictions of
- // HeapObjectHeader.
- allocation_size = (allocation_size + kAllocationMask) & ~kAllocationMask;
- void* memory = calloc(1, allocation_size);
- HeapObjectHeader* header =
- new (memory) HeapObjectHeader(allocation_size, index);
- objects_.push_back(header);
- return header->Payload();
+ DCHECK(is_allocation_allowed());
+ void* result = object_allocator_.AllocateObject(size, index);
+ objects_.push_back(&HeapObjectHeader::FromPayload(result));
+ return result;
+}
+
+void* Heap::Allocate(size_t size, GCInfoIndex index,
+ CustomSpaceIndex space_index) {
+ DCHECK(is_allocation_allowed());
+ void* result = object_allocator_.AllocateObject(size, index, space_index);
+ objects_.push_back(&HeapObjectHeader::FromPayload(result));
+ return result;
}
} // namespace internal
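[Editorial note] The rewritten Allocate hands back the payload pointer and immediately recovers the header via HeapObjectHeader::FromPayload. A hedged sketch of that mapping, assuming (as Payload()/FromPayload() in heap-object-header-inl.h imply) that the header sits directly in front of the payload:

    // Sketch: payload <-> header is plain pointer arithmetic because the
    // header is laid out immediately before the object payload.
    #include <cstdint>

    struct HeaderSketch { uint16_t encoded[2]; uint32_t padding; };

    HeaderSketch* HeaderFromPayloadSketch(void* payload) {
      return reinterpret_cast<HeaderSketch*>(
          static_cast<uint8_t*>(payload) - sizeof(HeaderSketch));
    }

    void* PayloadFromHeaderSketch(HeaderSketch* header) {
      return reinterpret_cast<uint8_t*>(header) + sizeof(HeaderSketch);
    }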
diff --git a/deps/v8/src/heap/cppgc/heap-object-header-inl.h b/deps/v8/src/heap/cppgc/heap-object-header-inl.h
index a0bcda464b..cba7b24a4c 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header-inl.h
+++ b/deps/v8/src/heap/cppgc/heap-object-header-inl.h
@@ -6,11 +6,12 @@
#define V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_INL_H_
#include "include/cppgc/allocation.h"
-#include "include/cppgc/gc-info.h"
+#include "include/cppgc/internal/gc-info.h"
#include "src/base/atomic-utils.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
namespace cppgc {
@@ -33,7 +34,7 @@ HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
USE(padding_);
#endif // defined(V8_TARGET_ARCH_64_BIT)
DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
- DCHECK_EQ(0u, size & kAllocationMask);
+ DCHECK_EQ(0u, size & (sizeof(HeapObjectHeader) - 1));
DCHECK_GE(kMaxSize, size);
encoded_high_ = GCInfoIndexField::encode(gc_info_index);
encoded_low_ = EncodeSize(size);
@@ -111,6 +112,16 @@ bool HeapObjectHeader::TryMarkAtomic() {
std::memory_order_relaxed);
}
+template <HeapObjectHeader::AccessMode mode>
+bool HeapObjectHeader::IsFree() const {
+ return GetGCInfoIndex() == kFreeListGCInfoIndex;
+}
+
+bool HeapObjectHeader::IsFinalizable() const {
+ const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
+ return gc_info.finalize;
+}
+
template <HeapObjectHeader::AccessMode mode, HeapObjectHeader::EncodedHalf part,
std::memory_order memory_order>
uint16_t HeapObjectHeader::LoadEncoded() const {
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.cc b/deps/v8/src/heap/cppgc/heap-object-header.cc
index bd90d5930c..ccc660fcee 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.cc
+++ b/deps/v8/src/heap/cppgc/heap-object-header.cc
@@ -4,7 +4,7 @@
#include "src/heap/cppgc/heap-object-header.h"
-#include "include/cppgc/internals.h"
+#include "include/cppgc/internal/api-constants.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
@@ -12,6 +12,8 @@
namespace cppgc {
namespace internal {
+STATIC_ASSERT((kAllocationGranularity % sizeof(HeapObjectHeader)) == 0);
+
void HeapObjectHeader::CheckApiConstants() {
STATIC_ASSERT(api_constants::kFullyConstructedBitMask ==
FullyConstructedField::kMask);
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.h b/deps/v8/src/heap/cppgc/heap-object-header.h
index 738f9d9ab9..b517617dd1 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.h
+++ b/deps/v8/src/heap/cppgc/heap-object-header.h
@@ -6,9 +6,10 @@
#define V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_
#include <stdint.h>
+
#include <atomic>
-#include "include/cppgc/gc-info.h"
+#include "include/cppgc/internal/gc-info.h"
#include "src/base/bit-field.h"
#include "src/heap/cppgc/globals.h"
@@ -41,12 +42,13 @@ namespace internal {
// stored in |LargeObjectPage::PayloadSize()|.
// - |mark bit| and |in construction| bits are located in separate 16-bit halves
// to allow potentially accessing them non-atomically.
-class HeapObjectHeader final {
+class HeapObjectHeader {
public:
enum class AccessMode : uint8_t { kNonAtomic, kAtomic };
static constexpr size_t kSizeLog2 = 17;
static constexpr size_t kMaxSize = (size_t{1} << kSizeLog2) - 1;
+ static constexpr uint16_t kLargeObjectSizeInHeader = 0;
inline static HeapObjectHeader& FromPayload(void* address);
inline static const HeapObjectHeader& FromPayload(const void* address);
@@ -77,13 +79,15 @@ class HeapObjectHeader final {
void Unmark();
inline bool TryMarkAtomic();
+ template <AccessMode = AccessMode::kNonAtomic>
+ bool IsFree() const;
+
+ inline bool IsFinalizable() const;
void Finalize();
private:
enum class EncodedHalf : uint8_t { kLow, kHigh };
- static constexpr uint16_t kLargeObjectSizeInHeader = 0;
-
// Used in |encoded_high_|.
using FullyConstructedField = v8::base::BitField16<bool, 0, 1>;
using UnusedField1 = FullyConstructedField::Next<bool, 1>;
@@ -102,7 +106,7 @@ class HeapObjectHeader final {
static constexpr uint16_t EncodeSize(size_t size) {
// Essentially, gets optimized to >> 1.
using SizeField = UnusedField2::Next<size_t, 14>;
- return SizeField::encode(size) / kAllocationGranularity;
+ return SizeField::encode(size / kAllocationGranularity);
}
V8_EXPORT_PRIVATE void CheckApiConstants();
diff --git a/deps/v8/src/heap/cppgc/heap-page.cc b/deps/v8/src/heap/cppgc/heap-page.cc
new file mode 100644
index 0000000000..e8afbafbd2
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-page.cc
@@ -0,0 +1,201 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap-page.h"
+
+#include <algorithm>
+
+#include "include/cppgc/internal/api-constants.h"
+#include "src/base/logging.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/object-start-bitmap-inl.h"
+#include "src/heap/cppgc/object-start-bitmap.h"
+#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/raw-heap.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+Address AlignAddress(Address address, size_t alignment) {
+ return reinterpret_cast<Address>(
+ RoundUp(reinterpret_cast<uintptr_t>(address), alignment));
+}
+
+} // namespace
+
+STATIC_ASSERT(kPageSize == api_constants::kPageAlignment);
+
+// static
+BasePage* BasePage::FromPayload(void* payload) {
+ return reinterpret_cast<BasePage*>(
+ (reinterpret_cast<uintptr_t>(payload) & kPageBaseMask) + kGuardPageSize);
+}
+
+// static
+const BasePage* BasePage::FromPayload(const void* payload) {
+ return reinterpret_cast<const BasePage*>(
+ (reinterpret_cast<uintptr_t>(const_cast<void*>(payload)) &
+ kPageBaseMask) +
+ kGuardPageSize);
+}
+
+HeapObjectHeader* BasePage::ObjectHeaderFromInnerAddress(void* address) {
+ return const_cast<HeapObjectHeader*>(
+ ObjectHeaderFromInnerAddress(const_cast<const void*>(address)));
+}
+
+const HeapObjectHeader* BasePage::ObjectHeaderFromInnerAddress(
+ const void* address) {
+ if (is_large()) {
+ return LargePage::From(this)->ObjectHeader();
+ }
+ ObjectStartBitmap& bitmap = NormalPage::From(this)->object_start_bitmap();
+ HeapObjectHeader* header =
+ bitmap.FindHeader(static_cast<ConstAddress>(address));
+ DCHECK_LT(address,
+ reinterpret_cast<ConstAddress>(header) +
+ header->GetSize<HeapObjectHeader::AccessMode::kAtomic>());
+ DCHECK_NE(kFreeListGCInfoIndex,
+ header->GetGCInfoIndex<HeapObjectHeader::AccessMode::kAtomic>());
+ return header;
+}
+
+BasePage::BasePage(Heap* heap, BaseSpace* space, PageType type)
+ : heap_(heap), space_(space), type_(type) {
+ DCHECK_EQ(0u, (reinterpret_cast<uintptr_t>(this) - kGuardPageSize) &
+ kPageOffsetMask);
+ DCHECK_EQ(reinterpret_cast<void*>(&heap_),
+ FromPayload(this) + api_constants::kHeapOffset);
+ DCHECK_EQ(&heap_->raw_heap(), space_->raw_heap());
+}
+
+// static
+NormalPage* NormalPage::Create(NormalPageSpace* space) {
+ DCHECK(space);
+ Heap* heap = space->raw_heap()->heap();
+ DCHECK(heap);
+ void* memory = heap->page_backend()->AllocateNormalPageMemory(space->index());
+ auto* normal_page = new (memory) NormalPage(heap, space);
+ space->AddPage(normal_page);
+ space->AddToFreeList(normal_page->PayloadStart(), normal_page->PayloadSize());
+ return normal_page;
+}
+
+// static
+void NormalPage::Destroy(NormalPage* page) {
+ DCHECK(page);
+ BaseSpace* space = page->space();
+ DCHECK_EQ(space->end(), std::find(space->begin(), space->end(), page));
+ page->~NormalPage();
+ PageBackend* backend = page->heap()->page_backend();
+ backend->FreeNormalPageMemory(space->index(),
+ reinterpret_cast<Address>(page));
+}
+
+NormalPage::NormalPage(Heap* heap, BaseSpace* space)
+ : BasePage(heap, space, PageType::kNormal),
+ object_start_bitmap_(PayloadStart()) {
+ DCHECK_LT(kLargeObjectSizeThreshold,
+ static_cast<size_t>(PayloadEnd() - PayloadStart()));
+}
+
+NormalPage::~NormalPage() = default;
+
+NormalPage::iterator NormalPage::begin() {
+ const auto& lab = NormalPageSpace::From(space())->linear_allocation_buffer();
+ return iterator(reinterpret_cast<HeapObjectHeader*>(PayloadStart()),
+ lab.start(), lab.size());
+}
+
+NormalPage::const_iterator NormalPage::begin() const {
+ const auto& lab = NormalPageSpace::From(space())->linear_allocation_buffer();
+ return const_iterator(
+ reinterpret_cast<const HeapObjectHeader*>(PayloadStart()), lab.start(),
+ lab.size());
+}
+
+Address NormalPage::PayloadStart() {
+ return AlignAddress((reinterpret_cast<Address>(this + 1)),
+ kAllocationGranularity);
+}
+
+ConstAddress NormalPage::PayloadStart() const {
+ return const_cast<NormalPage*>(this)->PayloadStart();
+}
+
+Address NormalPage::PayloadEnd() { return PayloadStart() + PayloadSize(); }
+
+ConstAddress NormalPage::PayloadEnd() const {
+ return const_cast<NormalPage*>(this)->PayloadEnd();
+}
+
+// static
+size_t NormalPage::PayloadSize() {
+ const size_t header_size =
+ RoundUp(sizeof(NormalPage), kAllocationGranularity);
+ return kPageSize - 2 * kGuardPageSize - header_size;
+}
+
+LargePage::LargePage(Heap* heap, BaseSpace* space, size_t size)
+ : BasePage(heap, space, PageType::kLarge), payload_size_(size) {}
+
+LargePage::~LargePage() = default;
+
+// static
+LargePage* LargePage::Create(LargePageSpace* space, size_t size) {
+ DCHECK(space);
+ DCHECK_LE(kLargeObjectSizeThreshold, size);
+ const size_t page_header_size =
+ RoundUp(sizeof(LargePage), kAllocationGranularity);
+ const size_t allocation_size = page_header_size + size;
+
+ Heap* heap = space->raw_heap()->heap();
+ void* memory = heap->page_backend()->AllocateLargePageMemory(allocation_size);
+ LargePage* page = new (memory) LargePage(heap, space, size);
+ space->AddPage(page);
+ return page;
+}
+
+// static
+void LargePage::Destroy(LargePage* page) {
+ DCHECK(page);
+#if DEBUG
+ BaseSpace* space = page->space();
+ DCHECK_EQ(space->end(), std::find(space->begin(), space->end(), page));
+#endif
+ page->~LargePage();
+ PageBackend* backend = page->heap()->page_backend();
+ backend->FreeLargePageMemory(reinterpret_cast<Address>(page));
+}
+
+HeapObjectHeader* LargePage::ObjectHeader() {
+ return reinterpret_cast<HeapObjectHeader*>(PayloadStart());
+}
+
+const HeapObjectHeader* LargePage::ObjectHeader() const {
+ return reinterpret_cast<const HeapObjectHeader*>(PayloadStart());
+}
+
+Address LargePage::PayloadStart() {
+ return AlignAddress((reinterpret_cast<Address>(this + 1)),
+ kAllocationGranularity);
+}
+
+ConstAddress LargePage::PayloadStart() const {
+ return const_cast<LargePage*>(this)->PayloadStart();
+}
+
+Address LargePage::PayloadEnd() { return PayloadStart() + PayloadSize(); }
+
+ConstAddress LargePage::PayloadEnd() const {
+ return const_cast<LargePage*>(this)->PayloadEnd();
+}
+
+} // namespace internal
+} // namespace cppgc
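[Editorial note] BasePage::FromPayload above works because page reservations are kPageSize-aligned and the BasePage object starts right after the front guard page: masking away the in-page offset and adding kGuardPageSize recovers the page from any interior pointer. A small sketch of that address arithmetic, assuming kPageSize == 1 << 17 and kGuardPageSize == 4096 as in globals.h:

    // Sketch of the payload -> page mapping used by BasePage::FromPayload.
    #include <cstdint>

    uintptr_t PageFromInnerPointerSketch(uintptr_t inner) {
      constexpr uintptr_t kPageSizeSketch = uintptr_t{1} << 17;  // assumption
      constexpr uintptr_t kPageBaseMaskSketch = ~(kPageSizeSketch - 1);
      constexpr uintptr_t kGuardPageSizeSketch = 4096;
      // Drop the in-page offset to get the reservation base, then skip the
      // front guard page to land on the BasePage object itself.
      return (inner & kPageBaseMaskSketch) + kGuardPageSizeSketch;
    }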
diff --git a/deps/v8/src/heap/cppgc/heap-page.h b/deps/v8/src/heap/cppgc/heap-page.h
new file mode 100644
index 0000000000..c676bc4bde
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-page.h
@@ -0,0 +1,181 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_PAGE_H_
+#define V8_HEAP_CPPGC_HEAP_PAGE_H_
+
+#include "src/base/iterator.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/object-start-bitmap.h"
+
+namespace cppgc {
+namespace internal {
+
+class BaseSpace;
+class NormalPageSpace;
+class LargePageSpace;
+class Heap;
+class PageBackend;
+
+class V8_EXPORT_PRIVATE BasePage {
+ public:
+ static BasePage* FromPayload(void*);
+ static const BasePage* FromPayload(const void*);
+
+ BasePage(const BasePage&) = delete;
+ BasePage& operator=(const BasePage&) = delete;
+
+ Heap* heap() { return heap_; }
+ const Heap* heap() const { return heap_; }
+
+ BaseSpace* space() { return space_; }
+ const BaseSpace* space() const { return space_; }
+ void set_space(BaseSpace* space) { space_ = space; }
+
+ bool is_large() const { return type_ == PageType::kLarge; }
+
+ // |address| must refer to a real object.
+ HeapObjectHeader* ObjectHeaderFromInnerAddress(void* address);
+ const HeapObjectHeader* ObjectHeaderFromInnerAddress(const void* address);
+
+ protected:
+ enum class PageType { kNormal, kLarge };
+ BasePage(Heap*, BaseSpace*, PageType);
+
+ private:
+ Heap* heap_;
+ BaseSpace* space_;
+ PageType type_;
+};
+
+class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
+ template <typename T>
+ class IteratorImpl : v8::base::iterator<std::forward_iterator_tag, T> {
+ public:
+ explicit IteratorImpl(T* p, ConstAddress lab_start = nullptr,
+ size_t lab_size = 0)
+ : p_(p), lab_start_(lab_start), lab_size_(lab_size) {
+ DCHECK(p);
+ DCHECK_EQ(0, (lab_size & (sizeof(T) - 1)));
+ if (reinterpret_cast<ConstAddress>(p_) == lab_start_) {
+ p_ += (lab_size_ / sizeof(T));
+ }
+ }
+
+ T& operator*() { return *p_; }
+ const T& operator*() const { return *p_; }
+
+ bool operator==(IteratorImpl other) const { return p_ == other.p_; }
+ bool operator!=(IteratorImpl other) const { return !(*this == other); }
+
+ IteratorImpl& operator++() {
+ const size_t size = p_->GetSize();
+ DCHECK_EQ(0, (size & (sizeof(T) - 1)));
+ p_ += (size / sizeof(T));
+ if (reinterpret_cast<ConstAddress>(p_) == lab_start_) {
+ p_ += (lab_size_ / sizeof(T));
+ }
+ return *this;
+ }
+ IteratorImpl operator++(int) {
+ IteratorImpl temp(*this);
+ ++(*this);
+ return temp;
+ }
+
+ T* base() const { return p_; }
+
+ private:
+ T* p_;
+ ConstAddress lab_start_;
+ size_t lab_size_;
+ };
+
+ public:
+ using iterator = IteratorImpl<HeapObjectHeader>;
+ using const_iterator = IteratorImpl<const HeapObjectHeader>;
+
+ // Allocates a new page.
+ static NormalPage* Create(NormalPageSpace*);
+ // Destroys and frees the page. The page must be detached from the
+ // corresponding space (i.e. be swept when called).
+ static void Destroy(NormalPage*);
+
+ static NormalPage* From(BasePage* page) {
+ DCHECK(!page->is_large());
+ return static_cast<NormalPage*>(page);
+ }
+ static const NormalPage* From(const BasePage* page) {
+ return From(const_cast<BasePage*>(page));
+ }
+
+ iterator begin();
+ const_iterator begin() const;
+
+ iterator end() {
+ return iterator(reinterpret_cast<HeapObjectHeader*>(PayloadEnd()));
+ }
+ const_iterator end() const {
+ return const_iterator(
+ reinterpret_cast<const HeapObjectHeader*>(PayloadEnd()));
+ }
+
+ Address PayloadStart();
+ ConstAddress PayloadStart() const;
+ Address PayloadEnd();
+ ConstAddress PayloadEnd() const;
+
+ static size_t PayloadSize();
+
+ ObjectStartBitmap& object_start_bitmap() { return object_start_bitmap_; }
+ const ObjectStartBitmap& object_start_bitmap() const {
+ return object_start_bitmap_;
+ }
+
+ private:
+ NormalPage(Heap* heap, BaseSpace* space);
+ ~NormalPage();
+
+ ObjectStartBitmap object_start_bitmap_;
+};
+
+class V8_EXPORT_PRIVATE LargePage final : public BasePage {
+ public:
+ // Allocates a new page.
+ static LargePage* Create(LargePageSpace*, size_t);
+ // Destroys and frees the page. The page must be detached from the
+ // corresponding space (i.e. be swept when called).
+ static void Destroy(LargePage*);
+
+ static LargePage* From(BasePage* page) {
+ DCHECK(page->is_large());
+ return static_cast<LargePage*>(page);
+ }
+ static const LargePage* From(const BasePage* page) {
+ return From(const_cast<BasePage*>(page));
+ }
+
+ HeapObjectHeader* ObjectHeader();
+ const HeapObjectHeader* ObjectHeader() const;
+
+ Address PayloadStart();
+ ConstAddress PayloadStart() const;
+ Address PayloadEnd();
+ ConstAddress PayloadEnd() const;
+
+ size_t PayloadSize() const { return payload_size_; }
+
+ private:
+ LargePage(Heap* heap, BaseSpace* space, size_t);
+ ~LargePage();
+
+ size_t payload_size_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_PAGE_H_
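[Editorial note] NormalPage's iterator walks the page header by header, using each header's encoded size to find the next one, and jumps over the space's current linear allocation buffer so that unused tail memory is never misread as an object. A hedged usage sketch built only on the declarations above:

    // Sketch: counting free-list entries on a page. Only valid while no
    // allocation is mutating the page or its linear allocation buffer.
    #include <cstddef>

    size_t CountFreeHeadersSketch(cppgc::internal::NormalPage* page) {
      size_t free_count = 0;
      for (cppgc::internal::HeapObjectHeader& header : *page) {
        if (header.IsFree()) ++free_count;
      }
      return free_count;
    }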
diff --git a/deps/v8/src/heap/cppgc/heap-space.cc b/deps/v8/src/heap/cppgc/heap-space.cc
new file mode 100644
index 0000000000..70ddb93531
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-space.cc
@@ -0,0 +1,58 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap-space.h"
+
+#include <algorithm>
+
+#include "src/base/logging.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/object-start-bitmap-inl.h"
+
+namespace cppgc {
+namespace internal {
+
+BaseSpace::BaseSpace(RawHeap* heap, size_t index, PageType type)
+ : heap_(heap), index_(index), type_(type) {}
+
+void BaseSpace::AddPage(BasePage* page) {
+ DCHECK_EQ(pages_.cend(), std::find(pages_.cbegin(), pages_.cend(), page));
+ pages_.push_back(page);
+}
+
+void BaseSpace::RemovePage(BasePage* page) {
+ auto it = std::find(pages_.cbegin(), pages_.cend(), page);
+ DCHECK_NE(pages_.cend(), it);
+ pages_.erase(it);
+}
+
+BaseSpace::Pages BaseSpace::RemoveAllPages() {
+ Pages pages = std::move(pages_);
+ pages_.clear();
+ return pages;
+}
+
+NormalPageSpace::NormalPageSpace(RawHeap* heap, size_t index)
+ : BaseSpace(heap, index, PageType::kNormal) {}
+
+void NormalPageSpace::AddToFreeList(void* address, size_t size) {
+ free_list_.Add({address, size});
+ NormalPage::From(BasePage::FromPayload(address))
+ ->object_start_bitmap()
+ .SetBit(static_cast<Address>(address));
+}
+
+void NormalPageSpace::ResetLinearAllocationBuffer() {
+ if (current_lab_.size()) {
+ DCHECK_NOT_NULL(current_lab_.start());
+ AddToFreeList(current_lab_.start(), current_lab_.size());
+ current_lab_.Set(nullptr, 0);
+ }
+}
+
+LargePageSpace::LargePageSpace(RawHeap* heap, size_t index)
+ : BaseSpace(heap, index, PageType::kLarge) {}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/heap-space.h b/deps/v8/src/heap/cppgc/heap-space.h
new file mode 100644
index 0000000000..d84207c2cd
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-space.h
@@ -0,0 +1,127 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_SPACE_H_
+#define V8_HEAP_CPPGC_HEAP_SPACE_H_
+
+#include <vector>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/free-list.h"
+
+namespace cppgc {
+namespace internal {
+
+class RawHeap;
+class BasePage;
+
+// BaseSpace is responsible for page management.
+class V8_EXPORT_PRIVATE BaseSpace {
+ public:
+ using Pages = std::vector<BasePage*>;
+
+ using iterator = Pages::iterator;
+ using const_iterator = Pages::const_iterator;
+
+ BaseSpace(const BaseSpace&) = delete;
+ BaseSpace& operator=(const BaseSpace&) = delete;
+
+ iterator begin() { return pages_.begin(); }
+ const_iterator begin() const { return pages_.begin(); }
+ iterator end() { return pages_.end(); }
+ const_iterator end() const { return pages_.end(); }
+
+ size_t size() const { return pages_.size(); }
+
+ bool is_large() const { return type_ == PageType::kLarge; }
+ size_t index() const { return index_; }
+
+ RawHeap* raw_heap() { return heap_; }
+ const RawHeap* raw_heap() const { return heap_; }
+
+ // Page manipulation functions.
+ void AddPage(BasePage*);
+ void RemovePage(BasePage*);
+ Pages RemoveAllPages();
+
+ protected:
+ enum class PageType { kNormal, kLarge };
+ explicit BaseSpace(RawHeap* heap, size_t index, PageType type);
+
+ private:
+ RawHeap* heap_;
+ Pages pages_;
+ const size_t index_;
+ const PageType type_;
+};
+
+class V8_EXPORT_PRIVATE NormalPageSpace final : public BaseSpace {
+ public:
+ class LinearAllocationBuffer {
+ public:
+ Address Allocate(size_t alloc_size) {
+ DCHECK_GE(size_, alloc_size);
+ Address result = start_;
+ start_ += alloc_size;
+ size_ -= alloc_size;
+ return result;
+ }
+
+ void Set(Address ptr, size_t size) {
+ start_ = ptr;
+ size_ = size;
+ }
+
+ Address start() const { return start_; }
+ size_t size() const { return size_; }
+
+ private:
+ Address start_ = nullptr;
+ size_t size_ = 0;
+ };
+
+ static NormalPageSpace* From(BaseSpace* space) {
+ DCHECK(!space->is_large());
+ return static_cast<NormalPageSpace*>(space);
+ }
+ static const NormalPageSpace* From(const BaseSpace* space) {
+ return From(const_cast<BaseSpace*>(space));
+ }
+
+ NormalPageSpace(RawHeap* heap, size_t index);
+
+ void AddToFreeList(void*, size_t);
+ void ResetLinearAllocationBuffer();
+
+ LinearAllocationBuffer& linear_allocation_buffer() { return current_lab_; }
+ const LinearAllocationBuffer& linear_allocation_buffer() const {
+ return current_lab_;
+ }
+
+ FreeList& free_list() { return free_list_; }
+ const FreeList& free_list() const { return free_list_; }
+
+ private:
+ LinearAllocationBuffer current_lab_;
+ FreeList free_list_;
+};
+
+class V8_EXPORT_PRIVATE LargePageSpace final : public BaseSpace {
+ public:
+ static LargePageSpace* From(BaseSpace* space) {
+ DCHECK(space->is_large());
+ return static_cast<LargePageSpace*>(space);
+ }
+ static const LargePageSpace* From(const BaseSpace* space) {
+ return From(const_cast<BaseSpace*>(space));
+ }
+
+ LargePageSpace(RawHeap* heap, size_t index);
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_SPACE_H_
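
The LinearAllocationBuffer above is a plain bump pointer: callers check size() first, Allocate() then returns the current start and shrinks the remaining size, and ResetLinearAllocationBuffer() hands any unused remainder back to the free list. A standalone sketch of the same contract, with illustrative names rather than the cppgc classes:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Minimal bump-pointer buffer mirroring the Allocate()/Set() contract.
    class BumpBuffer {
     public:
      void Set(char* start, size_t size) {
        start_ = start;
        size_ = size;
      }
      char* Allocate(size_t alloc_size) {
        assert(size_ >= alloc_size);  // Caller must check size() first.
        char* result = start_;
        start_ += alloc_size;
        size_ -= alloc_size;
        return result;
      }
      size_t size() const { return size_; }

     private:
      char* start_ = nullptr;
      size_t size_ = 0;
    };

    int main() {
      static char backing[128];
      BumpBuffer lab;
      lab.Set(backing, sizeof(backing));
      char* a = lab.Allocate(32);  // First object at offset 0.
      char* b = lab.Allocate(16);  // Next object directly behind it.
      std::printf("a=%p b=%p remaining=%zu\n", static_cast<void*>(a),
                  static_cast<void*>(b), lab.size());
      return 0;
    }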
diff --git a/deps/v8/src/heap/cppgc/heap-visitor.h b/deps/v8/src/heap/cppgc/heap-visitor.h
new file mode 100644
index 0000000000..7fcbc1b980
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-visitor.h
@@ -0,0 +1,88 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_VISITOR_H_
+#define V8_HEAP_CPPGC_HEAP_VISITOR_H_
+
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/raw-heap.h"
+
+namespace cppgc {
+namespace internal {
+
+// Visitor for heap, which also implements the accept (traverse) interface.
+// Implements preorder traversal of the heap: spaces, then pages, then objects.
+// Implemented as a CRTP visitor to avoid virtual calls and support better
+// inlining.
+template <typename Derived>
+class HeapVisitor {
+ public:
+ void Traverse(RawHeap* heap) {
+ if (VisitHeapImpl(heap)) return;
+ for (auto& space : *heap) {
+ Traverse(space.get());
+ }
+ }
+
+ void Traverse(BaseSpace* space) {
+ const bool is_stopped =
+ space->is_large()
+ ? VisitLargePageSpaceImpl(LargePageSpace::From(space))
+ : VisitNormalPageSpaceImpl(NormalPageSpace::From(space));
+ if (is_stopped) return;
+ for (auto* page : *space) {
+ Traverse(page);
+ }
+ }
+
+ void Traverse(BasePage* page) {
+ if (page->is_large()) {
+ auto* large_page = LargePage::From(page);
+ if (VisitLargePageImpl(large_page)) return;
+ VisitHeapObjectHeaderImpl(large_page->ObjectHeader());
+ } else {
+ auto* normal_page = NormalPage::From(page);
+ if (VisitNormalPageImpl(normal_page)) return;
+ for (auto& header : *normal_page) {
+ VisitHeapObjectHeaderImpl(&header);
+ }
+ }
+ }
+
+ protected:
+ // Visitor functions return true if no deeper processing is required.
+ // Users are supposed to override functions that need special treatment.
+ bool VisitHeap(RawHeap*) { return false; }
+ bool VisitNormalPageSpace(NormalPageSpace*) { return false; }
+ bool VisitLargePageSpace(LargePageSpace*) { return false; }
+ bool VisitNormalPage(NormalPage*) { return false; }
+ bool VisitLargePage(LargePage*) { return false; }
+ bool VisitHeapObjectHeader(HeapObjectHeader*) { return false; }
+
+ private:
+ Derived& ToDerived() { return static_cast<Derived&>(*this); }
+
+ bool VisitHeapImpl(RawHeap* heap) { return ToDerived().VisitHeap(heap); }
+ bool VisitNormalPageSpaceImpl(NormalPageSpace* space) {
+ return ToDerived().VisitNormalPageSpace(space);
+ }
+ bool VisitLargePageSpaceImpl(LargePageSpace* space) {
+ return ToDerived().VisitLargePageSpace(space);
+ }
+ bool VisitNormalPageImpl(NormalPage* page) {
+ return ToDerived().VisitNormalPage(page);
+ }
+ bool VisitLargePageImpl(LargePage* page) {
+ return ToDerived().VisitLargePage(page);
+ }
+ bool VisitHeapObjectHeaderImpl(HeapObjectHeader* hoh) {
+ return ToDerived().VisitHeapObjectHeader(hoh);
+ }
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_VISITOR_H_
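
HeapVisitor dispatches through static_cast<Derived&> instead of virtual functions, so a concrete visitor only shadows the Visit* hooks it cares about and every call can inline. A self-contained sketch of the CRTP pattern with placeholder node types (not the cppgc classes):

    #include <cstdio>
    #include <vector>

    struct Item { int value; };

    // CRTP visitor: Traverse() calls VisitItemImpl(), which statically
    // dispatches to Derived::VisitItem(). Returning true stops deeper work.
    template <typename Derived>
    class Visitor {
     public:
      void Traverse(const std::vector<Item>& items) {
        for (const Item& item : items) {
          if (VisitItemImpl(item)) continue;
          // Deeper processing would go here.
        }
      }

     protected:
      bool VisitItem(const Item&) { return false; }  // Default: keep going.

     private:
      Derived& ToDerived() { return static_cast<Derived&>(*this); }
      bool VisitItemImpl(const Item& item) { return ToDerived().VisitItem(item); }
    };

    // A concrete visitor shadows VisitItem; no virtual call is involved.
    class Summer : public Visitor<Summer> {
      friend class Visitor<Summer>;

     public:
      int sum() const { return sum_; }

     private:
      bool VisitItem(const Item& item) {
        sum_ += item.value;
        return true;
      }
      int sum_ = 0;
    };

    int main() {
      Summer summer;
      summer.Traverse({{1}, {2}, {3}});
      std::printf("sum=%d\n", summer.sum());  // Prints 6.
      return 0;
    }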
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index e60cb15573..ee400cee28 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -6,23 +6,131 @@
#include <memory>
+#include "src/base/platform/platform.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-visitor.h"
+#include "src/heap/cppgc/stack.h"
+#include "src/heap/cppgc/sweeper.h"
namespace cppgc {
-std::unique_ptr<Heap> Heap::Create() {
- return std::make_unique<internal::Heap>();
+namespace {
+
+void VerifyCustomSpaces(
+ const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces) {
+ // Ensures that user-provided custom spaces have indices that form a sequence
+ // starting at 0.
+#ifdef DEBUG
+ for (size_t i = 0; i < custom_spaces.size(); ++i) {
+ DCHECK_EQ(i, custom_spaces[i]->GetCustomSpaceIndex().value);
+ }
+#endif // DEBUG
+}
+
+} // namespace
+
+std::unique_ptr<Heap> Heap::Create(cppgc::Heap::HeapOptions options) {
+ VerifyCustomSpaces(options.custom_spaces);
+ return std::make_unique<internal::Heap>(options.custom_spaces.size());
+}
+
+void Heap::ForceGarbageCollectionSlow(const char* source, const char* reason,
+ Heap::StackState stack_state) {
+ internal::Heap::From(this)->CollectGarbage({stack_state});
}
namespace internal {
-void Heap::CollectGarbage() {
- for (HeapObjectHeader* header : objects_) {
- header->Finalize();
- free(header);
+namespace {
+
+class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
+ friend class HeapVisitor<ObjectSizeCounter>;
+
+ public:
+ size_t GetSize(RawHeap* heap) {
+ Traverse(heap);
+ return accumulated_size_;
+ }
+
+ private:
+ static size_t ObjectSize(const HeapObjectHeader* header) {
+ const size_t size =
+ header->IsLargeObject()
+ ? static_cast<const LargePage*>(BasePage::FromPayload(header))
+ ->PayloadSize()
+ : header->GetSize();
+ DCHECK_GE(size, sizeof(HeapObjectHeader));
+ return size - sizeof(HeapObjectHeader);
+ }
+
+ bool VisitHeapObjectHeader(HeapObjectHeader* header) {
+ if (header->IsFree()) return true;
+ accumulated_size_ += ObjectSize(header);
+ return true;
}
- objects_.clear();
+
+ size_t accumulated_size_ = 0;
+};
+
+} // namespace
+
+// static
+cppgc::LivenessBroker LivenessBrokerFactory::Create() {
+ return cppgc::LivenessBroker();
+}
+
+Heap::Heap(size_t custom_spaces)
+ : raw_heap_(this, custom_spaces),
+ page_backend_(std::make_unique<PageBackend>(&system_allocator_)),
+ object_allocator_(&raw_heap_),
+ sweeper_(&raw_heap_),
+ stack_(std::make_unique<Stack>(v8::base::Stack::GetStackStart())),
+ prefinalizer_handler_(std::make_unique<PreFinalizerHandler>()) {}
+
+Heap::~Heap() {
+ NoGCScope no_gc(this);
+ // Finish already running GC if any, but don't finalize live objects.
+ sweeper_.Finish();
+}
+
+void Heap::CollectGarbage(GCConfig config) {
+ if (in_no_gc_scope()) return;
+
+ epoch_++;
+
+ // TODO(chromium:1056170): Replace with proper mark-sweep algorithm.
+ // "Marking".
+ marker_ = std::make_unique<Marker>(this);
+ marker_->StartMarking(Marker::MarkingConfig(config.stack_state));
+ marker_->FinishMarking();
+ // "Sweeping and finalization".
+ {
+ // Pre finalizers are forbidden from allocating objects
+ NoAllocationScope no_allocation_scope_(this);
+ marker_->ProcessWeakness();
+ prefinalizer_handler_->InvokePreFinalizers();
+ }
+ marker_.reset();
+ {
+ NoGCScope no_gc(this);
+ sweeper_.Start(Sweeper::Config::kAtomic);
+ }
+}
+
+size_t Heap::ObjectPayloadSize() const {
+ return ObjectSizeCounter().GetSize(const_cast<RawHeap*>(&raw_heap()));
+}
+
+Heap::NoGCScope::NoGCScope(Heap* heap) : heap_(heap) { heap_->no_gc_scope_++; }
+
+Heap::NoGCScope::~NoGCScope() { heap_->no_gc_scope_--; }
+
+Heap::NoAllocationScope::NoAllocationScope(Heap* heap) : heap_(heap) {
+ heap_->no_allocation_scope_++;
}
+Heap::NoAllocationScope::~NoAllocationScope() { heap_->no_allocation_scope_--; }
} // namespace internal
} // namespace cppgc
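
The NoGCScope and NoAllocationScope constructors and destructors above simply increment and decrement counters on the heap, so scopes nest naturally and predicates such as in_no_gc_scope() reduce to a counter test. A minimal RAII sketch of the same pattern, with illustrative types rather than the cppgc classes:

    #include <cassert>
    #include <cstddef>

    // Stand-in for the heap object that carries the counter.
    struct FakeHeap {
      size_t no_gc_scope = 0;
      bool in_no_gc_scope() const { return no_gc_scope > 0; }
    };

    // RAII guard mirroring NoGCScope; nesting works because it is a counter,
    // not a boolean flag.
    class NoGCGuard {
     public:
      explicit NoGCGuard(FakeHeap* heap) : heap_(heap) { ++heap_->no_gc_scope; }
      ~NoGCGuard() { --heap_->no_gc_scope; }
      NoGCGuard(const NoGCGuard&) = delete;
      NoGCGuard& operator=(const NoGCGuard&) = delete;

     private:
      FakeHeap* const heap_;
    };

    int main() {
      FakeHeap heap;
      {
        NoGCGuard outer(&heap);
        {
          NoGCGuard inner(&heap);
          assert(heap.in_no_gc_scope());
        }
        assert(heap.in_no_gc_scope());  // Still active: outer guard remains.
      }
      assert(!heap.in_no_gc_scope());
      return 0;
    }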
diff --git a/deps/v8/src/heap/cppgc/heap.h b/deps/v8/src/heap/cppgc/heap.h
index baf70d8f4e..fa19b74be5 100644
--- a/deps/v8/src/heap/cppgc/heap.h
+++ b/deps/v8/src/heap/cppgc/heap.h
@@ -5,28 +5,143 @@
#ifndef V8_HEAP_CPPGC_HEAP_H_
#define V8_HEAP_CPPGC_HEAP_H_
+#include <memory>
#include <vector>
-#include "include/cppgc/gc-info.h"
#include "include/cppgc/heap.h"
+#include "include/cppgc/internal/gc-info.h"
+#include "include/cppgc/internal/persistent-node.h"
+#include "include/cppgc/liveness-broker.h"
+#include "src/base/page-allocator.h"
#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/object-allocator.h"
+#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/prefinalizer-handler.h"
+#include "src/heap/cppgc/raw-heap.h"
+#include "src/heap/cppgc/sweeper.h"
namespace cppgc {
namespace internal {
+class Stack;
+
+class V8_EXPORT_PRIVATE LivenessBrokerFactory {
+ public:
+ static LivenessBroker Create();
+};
+
class V8_EXPORT_PRIVATE Heap final : public cppgc::Heap {
public:
+  // NoGCScope allows going over heap limits and suppresses garbage
+  // collection triggered through allocations or requested explicitly.
+ class V8_EXPORT_PRIVATE NoGCScope final {
+ CPPGC_STACK_ALLOCATED();
+
+ public:
+ explicit NoGCScope(Heap* heap);
+ ~NoGCScope();
+
+ NoGCScope(const NoGCScope&) = delete;
+ NoGCScope& operator=(const NoGCScope&) = delete;
+
+ private:
+ Heap* const heap_;
+ };
+
+  // NoAllocationScope is used in debug mode to catch unwanted allocations,
+  // e.g. allocations during GC.
+ class V8_EXPORT_PRIVATE NoAllocationScope final {
+ CPPGC_STACK_ALLOCATED();
+
+ public:
+ explicit NoAllocationScope(Heap* heap);
+ ~NoAllocationScope();
+
+ NoAllocationScope(const NoAllocationScope&) = delete;
+ NoAllocationScope& operator=(const NoAllocationScope&) = delete;
+
+ private:
+ Heap* const heap_;
+ };
+
+ struct GCConfig {
+ using StackState = Heap::StackState;
+
+ static GCConfig Default() { return {StackState::kMayContainHeapPointers}; }
+
+ StackState stack_state = StackState::kMayContainHeapPointers;
+ };
+
static Heap* From(cppgc::Heap* heap) { return static_cast<Heap*>(heap); }
- Heap() = default;
- ~Heap() final = default;
+ explicit Heap(size_t custom_spaces);
+ ~Heap() final;
inline void* Allocate(size_t size, GCInfoIndex index);
+ inline void* Allocate(size_t size, GCInfoIndex index,
+ CustomSpaceIndex space_index);
- void CollectGarbage();
+ void CollectGarbage(GCConfig config = GCConfig::Default());
+
+ PreFinalizerHandler* prefinalizer_handler() {
+ return prefinalizer_handler_.get();
+ }
+
+ PersistentRegion& GetStrongPersistentRegion() {
+ return strong_persistent_region_;
+ }
+ const PersistentRegion& GetStrongPersistentRegion() const {
+ return strong_persistent_region_;
+ }
+ PersistentRegion& GetWeakPersistentRegion() {
+ return weak_persistent_region_;
+ }
+ const PersistentRegion& GetWeakPersistentRegion() const {
+ return weak_persistent_region_;
+ }
+
+ RawHeap& raw_heap() { return raw_heap_; }
+ const RawHeap& raw_heap() const { return raw_heap_; }
+
+ Stack* stack() { return stack_.get(); }
+
+ PageBackend* page_backend() { return page_backend_.get(); }
+ const PageBackend* page_backend() const { return page_backend_.get(); }
+
+ Sweeper& sweeper() { return sweeper_; }
+
+ size_t epoch() const { return epoch_; }
+
+ size_t ObjectPayloadSize() const;
+
+ // Temporary getter until proper visitation of on-stack objects is
+ // implemented.
+ std::vector<HeapObjectHeader*>& objects() { return objects_; }
private:
+ bool in_no_gc_scope() const { return no_gc_scope_ > 0; }
+ bool is_allocation_allowed() const { return no_allocation_scope_ == 0; }
+
+ RawHeap raw_heap_;
+
+ v8::base::PageAllocator system_allocator_;
+ std::unique_ptr<PageBackend> page_backend_;
+ ObjectAllocator object_allocator_;
+ Sweeper sweeper_;
+
+ std::unique_ptr<Stack> stack_;
+ std::unique_ptr<PreFinalizerHandler> prefinalizer_handler_;
+ std::unique_ptr<Marker> marker_;
std::vector<HeapObjectHeader*> objects_;
+
+ PersistentRegion strong_persistent_region_;
+ PersistentRegion weak_persistent_region_;
+
+ size_t epoch_ = 0;
+
+ size_t no_gc_scope_ = 0;
+ size_t no_allocation_scope_ = 0;
};
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/liveness-broker.cc b/deps/v8/src/heap/cppgc/liveness-broker.cc
new file mode 100644
index 0000000000..bb912eb329
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/liveness-broker.cc
@@ -0,0 +1,15 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/liveness-broker.h"
+
+#include "src/heap/cppgc/heap-object-header-inl.h"
+
+namespace cppgc {
+
+bool LivenessBroker::IsHeapObjectAliveImpl(const void* payload) const {
+ return internal::HeapObjectHeader::FromPayload(payload).IsMarked();
+}
+
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/logging.cc b/deps/v8/src/heap/cppgc/logging.cc
new file mode 100644
index 0000000000..e98ca28dfb
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/logging.cc
@@ -0,0 +1,29 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/logging.h"
+#include "include/cppgc/source-location.h"
+
+#include "src/base/logging.h"
+
+namespace cppgc {
+namespace internal {
+
+void DCheckImpl(const char* message, const SourceLocation& loc) {
+ V8_Dcheck(loc.FileName(), static_cast<int>(loc.Line()), message);
+}
+
+void FatalImpl(const char* message, const SourceLocation& loc) {
+#if DEBUG
+ V8_Fatal(loc.FileName(), static_cast<int>(loc.Line()), "Check failed: %s.",
+ message);
+#elif !defined(OFFICIAL_BUILD)
+ V8_Fatal("Check failed: %s.", message);
+#else
+ V8_Fatal("ignored");
+#endif
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
new file mode 100644
index 0000000000..5a30c89f0d
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -0,0 +1,152 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/marker.h"
+
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/marking-visitor.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+template <typename Worklist, typename Callback>
+bool DrainWorklistWithDeadline(v8::base::TimeTicks deadline, Worklist* worklist,
+ Callback callback, int task_id) {
+ const size_t kDeadlineCheckInterval = 1250;
+
+ size_t processed_callback_count = 0;
+ typename Worklist::View view(worklist, task_id);
+ typename Worklist::EntryType item;
+ while (view.Pop(&item)) {
+ callback(item);
+ if (++processed_callback_count == kDeadlineCheckInterval) {
+ if (deadline <= v8::base::TimeTicks::Now()) {
+ return false;
+ }
+ processed_callback_count = 0;
+ }
+ }
+ return true;
+}
+} // namespace
+
+constexpr int Marker::kMutatorThreadId;
+
+Marker::Marker(Heap* heap)
+ : heap_(heap), marking_visitor_(CreateMutatorThreadMarkingVisitor()) {}
+
+Marker::~Marker() {
+ // The fixed point iteration may have found not-fully-constructed objects.
+ // Such objects should have already been found through the stack scan though
+ // and should thus already be marked.
+ if (!not_fully_constructed_worklist_.IsEmpty()) {
+#if DEBUG
+ DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state_);
+ NotFullyConstructedItem item;
+ NotFullyConstructedWorklist::View view(&not_fully_constructed_worklist_,
+ kMutatorThreadId);
+ while (view.Pop(&item)) {
+ // TODO(chromium:1056170): uncomment following check after implementing
+ // FromInnerAddress.
+ //
+ // HeapObjectHeader* const header = HeapObjectHeader::FromInnerAddress(
+ // reinterpret_cast<Address>(const_cast<void*>(item)));
+ // DCHECK(header->IsMarked())
+ }
+#else
+ not_fully_constructed_worklist_.Clear();
+#endif
+ }
+}
+
+void Marker::StartMarking(MarkingConfig config) {
+ config_ = config;
+ VisitRoots();
+}
+
+void Marker::FinishMarking() {
+ if (config_.stack_state_ == MarkingConfig::StackState::kNoHeapPointers) {
+ FlushNotFullyConstructedObjects();
+ }
+ AdvanceMarkingWithDeadline(v8::base::TimeDelta::Max());
+}
+
+void Marker::ProcessWeakness() {
+ heap_->GetWeakPersistentRegion().Trace(marking_visitor_.get());
+
+ // Call weak callbacks on objects that may now be pointing to dead objects.
+ WeakCallbackItem item;
+ LivenessBroker broker = LivenessBrokerFactory::Create();
+ WeakCallbackWorklist::View view(&weak_callback_worklist_, kMutatorThreadId);
+ while (view.Pop(&item)) {
+ item.callback(broker, item.parameter);
+ }
+ // Weak callbacks should not add any new objects for marking.
+ DCHECK(marking_worklist_.IsEmpty());
+}
+
+void Marker::VisitRoots() {
+ heap_->GetStrongPersistentRegion().Trace(marking_visitor_.get());
+ if (config_.stack_state_ != MarkingConfig::StackState::kNoHeapPointers)
+ heap_->stack()->IteratePointers(marking_visitor_.get());
+}
+
+std::unique_ptr<MutatorThreadMarkingVisitor>
+Marker::CreateMutatorThreadMarkingVisitor() {
+ return std::make_unique<MutatorThreadMarkingVisitor>(this);
+}
+
+bool Marker::AdvanceMarkingWithDeadline(v8::base::TimeDelta duration) {
+ MutatorThreadMarkingVisitor* visitor = marking_visitor_.get();
+ v8::base::TimeTicks deadline = v8::base::TimeTicks::Now() + duration;
+
+ do {
+ // Convert |previously_not_fully_constructed_worklist_| to
+ // |marking_worklist_|. This merely re-adds items with the proper
+ // callbacks.
+ if (!DrainWorklistWithDeadline(
+ deadline, &previously_not_fully_constructed_worklist_,
+ [visitor](NotFullyConstructedItem& item) {
+ visitor->DynamicallyMarkAddress(
+ reinterpret_cast<ConstAddress>(item));
+ },
+ kMutatorThreadId))
+ return false;
+
+ if (!DrainWorklistWithDeadline(
+ deadline, &marking_worklist_,
+ [visitor](const MarkingItem& item) {
+ const HeapObjectHeader& header =
+ HeapObjectHeader::FromPayload(item.base_object_payload);
+ DCHECK(!MutatorThreadMarkingVisitor::IsInConstruction(header));
+ item.callback(visitor, item.base_object_payload);
+ visitor->AccountMarkedBytes(header);
+ },
+ kMutatorThreadId))
+ return false;
+ } while (!marking_worklist_.IsLocalViewEmpty(kMutatorThreadId));
+
+ return true;
+}
+
+void Marker::FlushNotFullyConstructedObjects() {
+ if (!not_fully_constructed_worklist_.IsLocalViewEmpty(kMutatorThreadId)) {
+ not_fully_constructed_worklist_.FlushToGlobal(kMutatorThreadId);
+ previously_not_fully_constructed_worklist_.MergeGlobalPool(
+ &not_fully_constructed_worklist_);
+ }
+ DCHECK(not_fully_constructed_worklist_.IsLocalViewEmpty(kMutatorThreadId));
+}
+
+void Marker::ClearAllWorklistsForTesting() {
+ marking_worklist_.Clear();
+ not_fully_constructed_worklist_.Clear();
+ previously_not_fully_constructed_worklist_.Clear();
+ weak_callback_worklist_.Clear();
+}
+
+} // namespace internal
+} // namespace cppgc
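
DrainWorklistWithDeadline above pops items and only consults the clock every kDeadlineCheckInterval callbacks, because querying the time once per item would dominate the loop. A standalone sketch of the same structure with std::deque and std::chrono; the names and the interval are illustrative, not part of the patch:

    #include <chrono>
    #include <cstdio>
    #include <deque>
    #include <functional>

    using Clock = std::chrono::steady_clock;

    // Drains |work| until empty or |deadline| passes; the deadline is only
    // checked every |check_interval| items to keep per-item cost low.
    // Returns true if the worklist was fully drained.
    bool DrainWithDeadline(std::deque<int>* work,
                           const std::function<void(int)>& callback,
                           Clock::time_point deadline,
                           size_t check_interval = 1250) {
      size_t processed = 0;
      while (!work->empty()) {
        int item = work->front();
        work->pop_front();
        callback(item);
        if (++processed == check_interval) {
          if (Clock::now() >= deadline) return false;
          processed = 0;
        }
      }
      return true;
    }

    int main() {
      std::deque<int> work;
      for (int i = 0; i < 10000; ++i) work.push_back(i);
      long sum = 0;
      const bool done = DrainWithDeadline(
          &work, [&sum](int v) { sum += v; },
          Clock::now() + std::chrono::milliseconds(5));
      std::printf("done=%d sum=%ld remaining=%zu\n", done, sum, work.size());
      return 0;
    }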
diff --git a/deps/v8/src/heap/cppgc/marker.h b/deps/v8/src/heap/cppgc/marker.h
new file mode 100644
index 0000000000..c18c23df2c
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/marker.h
@@ -0,0 +1,121 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_MARKER_H_
+#define V8_HEAP_CPPGC_MARKER_H_
+
+#include <memory>
+
+#include "include/cppgc/heap.h"
+#include "include/cppgc/trace-trait.h"
+#include "include/cppgc/visitor.h"
+#include "src/base/platform/time.h"
+#include "src/heap/cppgc/worklist.h"
+
+namespace cppgc {
+namespace internal {
+
+class Heap;
+class MutatorThreadMarkingVisitor;
+
+class V8_EXPORT_PRIVATE Marker {
+ static constexpr int kNumConcurrentMarkers = 0;
+ static constexpr int kNumMarkers = 1 + kNumConcurrentMarkers;
+
+ public:
+ static constexpr int kMutatorThreadId = 0;
+
+ using MarkingItem = cppgc::TraceDescriptor;
+ using NotFullyConstructedItem = const void*;
+ struct WeakCallbackItem {
+ cppgc::WeakCallback callback;
+ const void* parameter;
+ };
+
+  // A segment size of 512 entries is necessary to avoid throughput regressions.
+  // Since the work list is currently a temporary object, this is not a problem.
+ using MarkingWorklist =
+ Worklist<MarkingItem, 512 /* local entries */, kNumMarkers>;
+ using NotFullyConstructedWorklist =
+ Worklist<NotFullyConstructedItem, 16 /* local entries */, kNumMarkers>;
+ using WeakCallbackWorklist =
+ Worklist<WeakCallbackItem, 64 /* local entries */, kNumMarkers>;
+
+ struct MarkingConfig {
+ using StackState = cppgc::Heap::StackState;
+ enum class IncrementalMarking : uint8_t { kDisabled };
+ enum class ConcurrentMarking : uint8_t { kDisabled };
+
+ static MarkingConfig Default() {
+ return {StackState::kMayContainHeapPointers,
+ IncrementalMarking::kDisabled, ConcurrentMarking::kDisabled};
+ }
+
+ explicit MarkingConfig(StackState stack_state)
+ : MarkingConfig(stack_state, IncrementalMarking::kDisabled,
+ ConcurrentMarking::kDisabled) {}
+
+ MarkingConfig(StackState stack_state,
+ IncrementalMarking incremental_marking_state,
+ ConcurrentMarking concurrent_marking_state)
+ : stack_state_(stack_state),
+ incremental_marking_state_(incremental_marking_state),
+ concurrent_marking_state_(concurrent_marking_state) {}
+
+ StackState stack_state_;
+ IncrementalMarking incremental_marking_state_;
+ ConcurrentMarking concurrent_marking_state_;
+ };
+
+ explicit Marker(Heap* heap);
+ virtual ~Marker();
+
+ Marker(const Marker&) = delete;
+ Marker& operator=(const Marker&) = delete;
+
+ // Initialize marking according to the given config. This method will
+ // trigger incremental/concurrent marking if needed.
+ void StartMarking(MarkingConfig config);
+ // Finalize marking. This method stops incremental/concurrent marking
+  // if it exists and performs atomic pause marking.
+ void FinishMarking();
+
+ void ProcessWeakness();
+
+ Heap* heap() { return heap_; }
+ MarkingWorklist* marking_worklist() { return &marking_worklist_; }
+ NotFullyConstructedWorklist* not_fully_constructed_worklist() {
+ return &not_fully_constructed_worklist_;
+ }
+ WeakCallbackWorklist* weak_callback_worklist() {
+ return &weak_callback_worklist_;
+ }
+
+ void ClearAllWorklistsForTesting();
+
+ protected:
+ virtual std::unique_ptr<MutatorThreadMarkingVisitor>
+ CreateMutatorThreadMarkingVisitor();
+
+ private:
+ void VisitRoots();
+
+ bool AdvanceMarkingWithDeadline(v8::base::TimeDelta);
+ void FlushNotFullyConstructedObjects();
+
+ Heap* const heap_;
+ MarkingConfig config_ = MarkingConfig::Default();
+
+ std::unique_ptr<MutatorThreadMarkingVisitor> marking_visitor_;
+
+ MarkingWorklist marking_worklist_;
+ NotFullyConstructedWorklist not_fully_constructed_worklist_;
+ NotFullyConstructedWorklist previously_not_fully_constructed_worklist_;
+ WeakCallbackWorklist weak_callback_worklist_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_MARKER_H_
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.cc b/deps/v8/src/heap/cppgc/marking-visitor.cc
new file mode 100644
index 0000000000..9647f9b3ca
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/marking-visitor.cc
@@ -0,0 +1,143 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/marking-visitor.h"
+
+#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/internal/accessors.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap.h"
+
+namespace cppgc {
+namespace internal {
+
+// static
+bool MarkingVisitor::IsInConstruction(const HeapObjectHeader& header) {
+ return header.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>();
+}
+
+MarkingVisitor::MarkingVisitor(Marker* marking_handler, int task_id)
+ : marker_(marking_handler),
+ marking_worklist_(marking_handler->marking_worklist(), task_id),
+ not_fully_constructed_worklist_(
+ marking_handler->not_fully_constructed_worklist(), task_id),
+ weak_callback_worklist_(marking_handler->weak_callback_worklist(),
+ task_id) {}
+
+void MarkingVisitor::AccountMarkedBytes(const HeapObjectHeader& header) {
+ marked_bytes_ +=
+ header.IsLargeObject()
+ ? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
+ ->PayloadSize()
+ : header.GetSize();
+}
+
+void MarkingVisitor::Visit(const void* object, TraceDescriptor desc) {
+ DCHECK_NOT_NULL(object);
+ if (desc.base_object_payload ==
+ cppgc::GarbageCollectedMixin::kNotFullyConstructedObject) {
+ // This means that the objects are not-yet-fully-constructed. See comments
+    // This means that the object is not yet fully constructed. See comments
+ not_fully_constructed_worklist_.Push(object);
+ return;
+ }
+ MarkHeader(&HeapObjectHeader::FromPayload(
+ const_cast<void*>(desc.base_object_payload)),
+ desc);
+}
+
+void MarkingVisitor::VisitWeak(const void* object, TraceDescriptor desc,
+ WeakCallback weak_callback,
+ const void* weak_member) {
+ // Filter out already marked values. The write barrier for WeakMember
+ // ensures that any newly set value after this point is kept alive and does
+ // not require the callback.
+ if (desc.base_object_payload !=
+ cppgc::GarbageCollectedMixin::kNotFullyConstructedObject &&
+ HeapObjectHeader::FromPayload(desc.base_object_payload)
+ .IsMarked<HeapObjectHeader::AccessMode::kAtomic>())
+ return;
+ RegisterWeakCallback(weak_callback, weak_member);
+}
+
+void MarkingVisitor::VisitRoot(const void* object, TraceDescriptor desc) {
+ Visit(object, desc);
+}
+
+void MarkingVisitor::VisitWeakRoot(const void* object, TraceDescriptor desc,
+ WeakCallback weak_callback,
+ const void* weak_root) {
+ if (desc.base_object_payload ==
+ cppgc::GarbageCollectedMixin::kNotFullyConstructedObject) {
+ // This method is only called at the end of marking. If the object is in
+ // construction, then it should be reachable from the stack.
+ return;
+ }
+  // Since weak roots are only traced at the end of marking, we can execute
+ // the callback instead of registering it.
+ weak_callback(LivenessBrokerFactory::Create(), weak_root);
+}
+
+void MarkingVisitor::MarkHeader(HeapObjectHeader* header,
+ TraceDescriptor desc) {
+ DCHECK(header);
+ DCHECK_NOT_NULL(desc.callback);
+
+ if (IsInConstruction(*header)) {
+ not_fully_constructed_worklist_.Push(header->Payload());
+ } else if (MarkHeaderNoTracing(header)) {
+ marking_worklist_.Push(desc);
+ }
+}
+
+bool MarkingVisitor::MarkHeaderNoTracing(HeapObjectHeader* header) {
+ DCHECK(header);
+ // A GC should only mark the objects that belong in its heap.
+ DCHECK_EQ(marker_->heap(), BasePage::FromPayload(header)->heap());
+ // Never mark free space objects. This would e.g. hint to marking a promptly
+ // freed backing store.
+ DCHECK(!header->IsFree());
+
+ return header->TryMarkAtomic();
+}
+
+void MarkingVisitor::RegisterWeakCallback(WeakCallback callback,
+ const void* object) {
+ weak_callback_worklist_.Push({callback, object});
+}
+
+void MarkingVisitor::FlushWorklists() {
+ marking_worklist_.FlushToGlobal();
+ not_fully_constructed_worklist_.FlushToGlobal();
+ weak_callback_worklist_.FlushToGlobal();
+}
+
+void MarkingVisitor::DynamicallyMarkAddress(ConstAddress address) {
+ for (auto* header : marker_->heap()->objects()) {
+ if (address >= header->Payload() &&
+ address < (header->Payload() + header->GetSize())) {
+ header->TryMarkAtomic();
+ }
+ }
+ // TODO(chromium:1056170): Implement dynamically getting HeapObjectHeader
+ // for handling previously_not_fully_constructed objects. Requires object
+ // start bitmap.
+}
+
+void MarkingVisitor::VisitPointer(const void* address) {
+ for (auto* header : marker_->heap()->objects()) {
+ if (address >= header->Payload() &&
+ address < (header->Payload() + header->GetSize())) {
+ header->TryMarkAtomic();
+ }
+ }
+ // TODO(chromium:1056170): Implement proper conservative scanning for
+ // on-stack objects. Requires page bloom filter.
+}
+
+MutatorThreadMarkingVisitor::MutatorThreadMarkingVisitor(Marker* marker)
+ : MarkingVisitor(marker, Marker::kMutatorThreadId) {}
+
+} // namespace internal
+} // namespace cppgc
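
VisitPointer and DynamicallyMarkAddress above currently fall back to a linear scan: an address conservatively marks an object if it lies anywhere inside that object's payload, and the TODOs note that this will be replaced by object-start-bitmap and page-bloom-filter lookups. A tiny sketch of that containment test with placeholder types:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct FakeObject {
      const char* payload;
      size_t size;
    };

    // Returns the object whose payload range contains |address|, or nullptr.
    const FakeObject* FindContaining(const std::vector<FakeObject>& objects,
                                     const char* address) {
      for (const FakeObject& object : objects) {
        if (address >= object.payload &&
            address < object.payload + object.size)
          return &object;
      }
      return nullptr;
    }

    int main() {
      static char heap[64];
      std::vector<FakeObject> objects = {{heap, 16}, {heap + 32, 16}};
      const FakeObject* hit = FindContaining(objects, heap + 40);
      std::printf("hit object starting at offset %td\n", hit->payload - heap);
      return 0;
    }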
diff --git a/deps/v8/src/heap/cppgc/marking-visitor.h b/deps/v8/src/heap/cppgc/marking-visitor.h
new file mode 100644
index 0000000000..33616b3784
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/marking-visitor.h
@@ -0,0 +1,70 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_MARKING_VISITOR_H_
+#define V8_HEAP_CPPGC_MARKING_VISITOR_H_
+
+#include "include/cppgc/source-location.h"
+#include "include/cppgc/trace-trait.h"
+#include "include/v8config.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/stack.h"
+#include "src/heap/cppgc/visitor.h"
+
+namespace cppgc {
+namespace internal {
+
+class MarkingVisitor : public VisitorBase, public StackVisitor {
+ public:
+ MarkingVisitor(Marker*, int);
+ virtual ~MarkingVisitor() = default;
+
+ MarkingVisitor(const MarkingVisitor&) = delete;
+ MarkingVisitor& operator=(const MarkingVisitor&) = delete;
+
+ void FlushWorklists();
+
+ void DynamicallyMarkAddress(ConstAddress);
+
+ void AccountMarkedBytes(const HeapObjectHeader&);
+ size_t marked_bytes() const { return marked_bytes_; }
+
+ static bool IsInConstruction(const HeapObjectHeader&);
+
+ protected:
+ void Visit(const void*, TraceDescriptor) override;
+ void VisitWeak(const void*, TraceDescriptor, WeakCallback,
+ const void*) override;
+ void VisitRoot(const void*, TraceDescriptor) override;
+ void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
+ const void*) override;
+
+ void VisitPointer(const void*) override;
+
+ private:
+ void MarkHeader(HeapObjectHeader*, TraceDescriptor);
+ bool MarkHeaderNoTracing(HeapObjectHeader*);
+ void RegisterWeakCallback(WeakCallback, const void*) override;
+
+ Marker* const marker_;
+ Marker::MarkingWorklist::View marking_worklist_;
+ Marker::NotFullyConstructedWorklist::View not_fully_constructed_worklist_;
+ Marker::WeakCallbackWorklist::View weak_callback_worklist_;
+
+ size_t marked_bytes_;
+};
+
+class V8_EXPORT_PRIVATE MutatorThreadMarkingVisitor : public MarkingVisitor {
+ public:
+ explicit MutatorThreadMarkingVisitor(Marker*);
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_MARKING_VISITOR_H_
diff --git a/deps/v8/src/heap/cppgc/object-allocator-inl.h b/deps/v8/src/heap/cppgc/object-allocator-inl.h
new file mode 100644
index 0000000000..7d8d126d63
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/object-allocator-inl.h
@@ -0,0 +1,74 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_OBJECT_ALLOCATOR_INL_H_
+#define V8_HEAP_CPPGC_OBJECT_ALLOCATOR_INL_H_
+
+#include <new>
+
+#include "src/base/logging.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/object-allocator.h"
+#include "src/heap/cppgc/object-start-bitmap-inl.h"
+#include "src/heap/cppgc/object-start-bitmap.h"
+#include "src/heap/cppgc/sanitizers.h"
+
+namespace cppgc {
+namespace internal {
+
+void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
+ const size_t allocation_size =
+ RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
+ const RawHeap::RegularSpaceType type =
+ GetInitialSpaceIndexForSize(allocation_size);
+ return AllocateObjectOnSpace(NormalPageSpace::From(raw_heap_->Space(type)),
+ allocation_size, gcinfo);
+}
+
+void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo,
+ CustomSpaceIndex space_index) {
+ const size_t allocation_size =
+ RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
+ return AllocateObjectOnSpace(
+ NormalPageSpace::From(raw_heap_->CustomSpace(space_index)),
+ allocation_size, gcinfo);
+}
+
+// static
+RawHeap::RegularSpaceType ObjectAllocator::GetInitialSpaceIndexForSize(
+ size_t size) {
+ if (size < 64) {
+ if (size < 32) return RawHeap::RegularSpaceType::kNormal1;
+ return RawHeap::RegularSpaceType::kNormal2;
+ }
+ if (size < 128) return RawHeap::RegularSpaceType::kNormal3;
+ return RawHeap::RegularSpaceType::kNormal4;
+}
+
+void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace* space,
+ size_t size, GCInfoIndex gcinfo) {
+ DCHECK_LT(0u, gcinfo);
+
+ NormalPageSpace::LinearAllocationBuffer& current_lab =
+ space->linear_allocation_buffer();
+ if (current_lab.size() < size) {
+ return OutOfLineAllocate(space, size, gcinfo);
+ }
+
+ void* raw = current_lab.Allocate(size);
+ SET_MEMORY_ACCESIBLE(raw, size);
+ auto* header = new (raw) HeapObjectHeader(size, gcinfo);
+
+ NormalPage::From(BasePage::FromPayload(header))
+ ->object_start_bitmap()
+ .SetBit(reinterpret_cast<ConstAddress>(header));
+
+ return header->Payload();
+}
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_OBJECT_ALLOCATOR_INL_H_
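
AllocateObject above rounds the requested size plus the header up to the allocation granularity and then picks a normal-page space bucket by size (under 32, 64, or 128 bytes, else the largest bucket). A small sketch of that size classification; the granularity and header size here are assumed values for illustration, not the actual cppgc constants:

    #include <cstddef>
    #include <cstdio>

    constexpr size_t kGranularity = 8;  // Assumed allocation granularity (power of two).
    constexpr size_t kHeaderSize = 8;   // Assumed object header size.

    // Rounds |n| up to the next multiple of the granularity.
    constexpr size_t RoundUp(size_t n) {
      return (n + kGranularity - 1) & ~(kGranularity - 1);
    }

    // Mirrors the bucket choice in GetInitialSpaceIndexForSize().
    int BucketForSize(size_t size) {
      if (size < 32) return 1;
      if (size < 64) return 2;
      if (size < 128) return 3;
      return 4;  // Largest regular bucket; huge sizes use the large-object path.
    }

    int main() {
      const size_t requests[] = {1, 24, 56, 120, 4096};
      for (size_t requested : requests) {
        const size_t allocation_size = RoundUp(requested + kHeaderSize);
        std::printf("requested=%zu allocation=%zu bucket=kNormal%d\n",
                    requested, allocation_size, BucketForSize(allocation_size));
      }
      return 0;
    }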
diff --git a/deps/v8/src/heap/cppgc/object-allocator.cc b/deps/v8/src/heap/cppgc/object-allocator.cc
new file mode 100644
index 0000000000..df83d8ee9d
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/object-allocator.cc
@@ -0,0 +1,87 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/object-allocator.h"
+
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/object-allocator-inl.h"
+#include "src/heap/cppgc/object-start-bitmap.h"
+#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/sweeper.h"
+
+namespace cppgc {
+namespace internal {
+namespace {
+
+void* AllocateLargeObject(RawHeap* raw_heap, LargePageSpace* space, size_t size,
+ GCInfoIndex gcinfo) {
+ LargePage* page = LargePage::Create(space, size);
+ auto* header = new (page->ObjectHeader())
+ HeapObjectHeader(HeapObjectHeader::kLargeObjectSizeInHeader, gcinfo);
+
+ return header->Payload();
+}
+
+} // namespace
+
+ObjectAllocator::ObjectAllocator(RawHeap* heap) : raw_heap_(heap) {}
+
+void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace* space, size_t size,
+ GCInfoIndex gcinfo) {
+ DCHECK_EQ(0, size & kAllocationMask);
+ DCHECK_LE(kFreeListEntrySize, size);
+
+ // 1. If this allocation is big enough, allocate a large object.
+ if (size >= kLargeObjectSizeThreshold) {
+ auto* large_space = LargePageSpace::From(
+ raw_heap_->Space(RawHeap::RegularSpaceType::kLarge));
+ return AllocateLargeObject(raw_heap_, large_space, size, gcinfo);
+ }
+
+ // 2. Try to allocate from the freelist.
+ if (void* result = AllocateFromFreeList(space, size, gcinfo)) {
+ return result;
+ }
+
+ // 3. Lazily sweep pages of this heap until we find a freed area for
+ // this allocation or we finish sweeping all pages of this heap.
+ // TODO(chromium:1056170): Add lazy sweep.
+
+ // 4. Complete sweeping.
+ raw_heap_->heap()->sweeper().Finish();
+
+ // 5. Add a new page to this heap.
+ NormalPage::Create(space);
+
+ // 6. Try to allocate from the freelist. This allocation must succeed.
+ void* result = AllocateFromFreeList(space, size, gcinfo);
+ CPPGC_CHECK(result);
+
+ return result;
+}
+
+void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace* space, size_t size,
+ GCInfoIndex gcinfo) {
+ const FreeList::Block entry = space->free_list().Allocate(size);
+ if (!entry.address) return nullptr;
+
+ auto& current_lab = space->linear_allocation_buffer();
+ if (current_lab.size()) {
+ space->AddToFreeList(current_lab.start(), current_lab.size());
+ }
+
+ current_lab.Set(static_cast<Address>(entry.address), entry.size);
+ NormalPage::From(BasePage::FromPayload(current_lab.start()))
+ ->object_start_bitmap()
+ .ClearBit(current_lab.start());
+ return AllocateObjectOnSpace(space, size, gcinfo);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/object-allocator.h b/deps/v8/src/heap/cppgc/object-allocator.h
new file mode 100644
index 0000000000..510a935f56
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/object-allocator.h
@@ -0,0 +1,40 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_OBJECT_ALLOCATOR_H_
+#define V8_HEAP_CPPGC_OBJECT_ALLOCATOR_H_
+
+#include "include/cppgc/internal/gc-info.h"
+#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/raw-heap.h"
+
+namespace cppgc {
+namespace internal {
+
+class V8_EXPORT_PRIVATE ObjectAllocator final {
+ public:
+ explicit ObjectAllocator(RawHeap* heap);
+
+ inline void* AllocateObject(size_t size, GCInfoIndex gcinfo);
+ inline void* AllocateObject(size_t size, GCInfoIndex gcinfo,
+ CustomSpaceIndex space_index);
+
+ private:
+ // Returns the initially tried SpaceType to allocate an object of |size| bytes
+ // on. Returns the largest regular object size bucket for large objects.
+ inline static RawHeap::RegularSpaceType GetInitialSpaceIndexForSize(
+ size_t size);
+
+ inline void* AllocateObjectOnSpace(NormalPageSpace* space, size_t size,
+ GCInfoIndex gcinfo);
+ void* OutOfLineAllocate(NormalPageSpace*, size_t, GCInfoIndex);
+ void* AllocateFromFreeList(NormalPageSpace*, size_t, GCInfoIndex);
+
+ RawHeap* raw_heap_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_OBJECT_ALLOCATOR_H_
diff --git a/deps/v8/src/heap/cppgc/object-start-bitmap-inl.h b/deps/v8/src/heap/cppgc/object-start-bitmap-inl.h
new file mode 100644
index 0000000000..93243979aa
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/object-start-bitmap-inl.h
@@ -0,0 +1,95 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_OBJECT_START_BITMAP_INL_H_
+#define V8_HEAP_CPPGC_OBJECT_START_BITMAP_INL_H_
+
+#include <algorithm>
+
+#include "src/base/bits.h"
+#include "src/heap/cppgc/object-start-bitmap.h"
+
+namespace cppgc {
+namespace internal {
+
+ObjectStartBitmap::ObjectStartBitmap(Address offset) : offset_(offset) {
+ Clear();
+}
+
+HeapObjectHeader* ObjectStartBitmap::FindHeader(
+ ConstAddress address_maybe_pointing_to_the_middle_of_object) const {
+ size_t object_offset =
+ address_maybe_pointing_to_the_middle_of_object - offset_;
+ size_t object_start_number = object_offset / kAllocationGranularity;
+ size_t cell_index = object_start_number / kBitsPerCell;
+ DCHECK_GT(object_start_bit_map_.size(), cell_index);
+ const size_t bit = object_start_number & kCellMask;
+ uint8_t byte = object_start_bit_map_[cell_index] & ((1 << (bit + 1)) - 1);
+ while (!byte && cell_index) {
+ DCHECK_LT(0u, cell_index);
+ byte = object_start_bit_map_[--cell_index];
+ }
+ const int leading_zeroes = v8::base::bits::CountLeadingZeros(byte);
+ object_start_number =
+ (cell_index * kBitsPerCell) + (kBitsPerCell - 1) - leading_zeroes;
+ object_offset = object_start_number * kAllocationGranularity;
+ return reinterpret_cast<HeapObjectHeader*>(object_offset + offset_);
+}
+
+void ObjectStartBitmap::SetBit(ConstAddress header_address) {
+ size_t cell_index, object_bit;
+ ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
+ object_start_bit_map_[cell_index] |= (1 << object_bit);
+}
+
+void ObjectStartBitmap::ClearBit(ConstAddress header_address) {
+ size_t cell_index, object_bit;
+ ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
+ object_start_bit_map_[cell_index] &= ~(1 << object_bit);
+}
+
+bool ObjectStartBitmap::CheckBit(ConstAddress header_address) const {
+ size_t cell_index, object_bit;
+ ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
+ return object_start_bit_map_[cell_index] & (1 << object_bit);
+}
+
+void ObjectStartBitmap::ObjectStartIndexAndBit(ConstAddress header_address,
+ size_t* cell_index,
+ size_t* bit) const {
+ const size_t object_offset = header_address - offset_;
+ DCHECK(!(object_offset & kAllocationMask));
+ const size_t object_start_number = object_offset / kAllocationGranularity;
+ *cell_index = object_start_number / kBitsPerCell;
+ DCHECK_GT(kBitmapSize, *cell_index);
+ *bit = object_start_number & kCellMask;
+}
+
+template <typename Callback>
+inline void ObjectStartBitmap::Iterate(Callback callback) const {
+ for (size_t cell_index = 0; cell_index < kReservedForBitmap; cell_index++) {
+ if (!object_start_bit_map_[cell_index]) continue;
+
+ uint8_t value = object_start_bit_map_[cell_index];
+ while (value) {
+ const int trailing_zeroes = v8::base::bits::CountTrailingZeros(value);
+ const size_t object_start_number =
+ (cell_index * kBitsPerCell) + trailing_zeroes;
+ const Address object_address =
+ offset_ + (kAllocationGranularity * object_start_number);
+ callback(object_address);
+ // Clear current object bit in temporary value to advance iteration.
+ value &= ~(1 << (object_start_number & kCellMask));
+ }
+ }
+}
+
+void ObjectStartBitmap::Clear() {
+ std::fill(object_start_bit_map_.begin(), object_start_bit_map_.end(), 0);
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_OBJECT_START_BITMAP_INL_H_
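
FindHeader above answers "which object contains this address" with pure bit math: the query offset is converted to a bit index, bits above that index are masked off in the current cell, and cells are walked backwards until a set bit (the closest preceding object start) is found. A standalone sketch of that reverse scan; the granularity and array size are assumptions for illustration:

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr size_t kGranularity = 16;  // Assumed allocation granularity.
    constexpr size_t kBitsPerCell = 8;
    constexpr size_t kCells = 8;         // Covers 8 * 8 * 16 = 1024 bytes.

    std::array<uint8_t, kCells> bitmap{};

    void SetBit(size_t offset) {
      const size_t n = offset / kGranularity;
      bitmap[n / kBitsPerCell] |= static_cast<uint8_t>(1u << (n % kBitsPerCell));
    }

    // Returns the offset of the closest object start at or before |offset|:
    // mask off higher bits in the current cell, then walk cells backwards.
    size_t FindStart(size_t offset) {
      const size_t n = offset / kGranularity;
      size_t cell = n / kBitsPerCell;
      const size_t bit = n % kBitsPerCell;
      uint8_t byte = bitmap[cell] & static_cast<uint8_t>((1u << (bit + 1)) - 1);
      while (!byte && cell) byte = bitmap[--cell];
      size_t high = 0;  // Index of the highest set bit in |byte|.
      for (size_t i = 0; i < kBitsPerCell; ++i)
        if (byte & (1u << i)) high = i;
      return (cell * kBitsPerCell + high) * kGranularity;
    }

    int main() {
      SetBit(0);   // Object starts at offset 0.
      SetBit(64);  // Object starts at offset 64.
      std::printf("%zu\n", FindStart(72));  // -> 64
      std::printf("%zu\n", FindStart(48));  // -> 0
      return 0;
    }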
diff --git a/deps/v8/src/heap/cppgc/object-start-bitmap.h b/deps/v8/src/heap/cppgc/object-start-bitmap.h
new file mode 100644
index 0000000000..1a180a552e
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/object-start-bitmap.h
@@ -0,0 +1,80 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_OBJECT_START_BITMAP_H_
+#define V8_HEAP_CPPGC_OBJECT_START_BITMAP_H_
+
+#include <limits.h>
+#include <stdint.h>
+
+#include <array>
+
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+
+namespace cppgc {
+namespace internal {
+
+class HeapObjectHeader;
+
+// A bitmap for recording object starts. Objects have to be allocated at a
+// minimum granularity of kAllocationGranularity.
+//
+// Depends on internals such as:
+// - kPageSize
+// - kAllocationGranularity
+class V8_EXPORT_PRIVATE ObjectStartBitmap {
+ public:
+ // Granularity of addresses added to the bitmap.
+ static constexpr size_t Granularity() { return kAllocationGranularity; }
+
+ // Maximum number of entries in the bitmap.
+ static constexpr size_t MaxEntries() {
+ return kReservedForBitmap * kBitsPerCell;
+ }
+
+ explicit inline ObjectStartBitmap(Address offset);
+
+  // Finds an object header based on an
+ // address_maybe_pointing_to_the_middle_of_object. Will search for an object
+ // start in decreasing address order.
+ inline HeapObjectHeader* FindHeader(
+ ConstAddress address_maybe_pointing_to_the_middle_of_object) const;
+
+ inline void SetBit(ConstAddress);
+ inline void ClearBit(ConstAddress);
+ inline bool CheckBit(ConstAddress) const;
+
+ // Iterates all object starts recorded in the bitmap.
+ //
+ // The callback is of type
+ // void(Address)
+ // and is passed the object start address as parameter.
+ template <typename Callback>
+ inline void Iterate(Callback) const;
+
+ // Clear the object start bitmap.
+ inline void Clear();
+
+ private:
+ static constexpr size_t kBitsPerCell = sizeof(uint8_t) * CHAR_BIT;
+ static constexpr size_t kCellMask = kBitsPerCell - 1;
+ static constexpr size_t kBitmapSize =
+ (kPageSize + ((kBitsPerCell * kAllocationGranularity) - 1)) /
+ (kBitsPerCell * kAllocationGranularity);
+ static constexpr size_t kReservedForBitmap =
+ ((kBitmapSize + kAllocationMask) & ~kAllocationMask);
+
+ inline void ObjectStartIndexAndBit(ConstAddress, size_t*, size_t*) const;
+
+ Address offset_;
+  // The bitmap contains a bit for every kAllocationGranularity-aligned address
+  // on a NormalPage, i.e., for a page of size kPageSize.
+ std::array<uint8_t, kReservedForBitmap> object_start_bit_map_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_OBJECT_START_BITMAP_H_
diff --git a/deps/v8/src/heap/cppgc/page-memory-inl.h b/deps/v8/src/heap/cppgc/page-memory-inl.h
new file mode 100644
index 0000000000..23ce061b43
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/page-memory-inl.h
@@ -0,0 +1,57 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_PAGE_MEMORY_INL_H_
+#define V8_HEAP_CPPGC_PAGE_MEMORY_INL_H_
+
+#include "src/heap/cppgc/page-memory.h"
+
+namespace cppgc {
+namespace internal {
+
+// Returns true if the provided allocator supports committing at the required
+// granularity.
+inline bool SupportsCommittingGuardPages(PageAllocator* allocator) {
+ return kGuardPageSize % allocator->CommitPageSize() == 0;
+}
+
+Address NormalPageMemoryRegion::Lookup(Address address) const {
+ size_t index = GetIndex(address);
+ if (!page_memories_in_use_[index]) return nullptr;
+ const MemoryRegion writeable_region = GetPageMemory(index).writeable_region();
+ return writeable_region.Contains(address) ? writeable_region.base() : nullptr;
+}
+
+Address LargePageMemoryRegion::Lookup(Address address) const {
+ const MemoryRegion writeable_region = GetPageMemory().writeable_region();
+ return writeable_region.Contains(address) ? writeable_region.base() : nullptr;
+}
+
+Address PageMemoryRegion::Lookup(Address address) const {
+ DCHECK(reserved_region().Contains(address));
+ return is_large()
+ ? static_cast<const LargePageMemoryRegion*>(this)->Lookup(address)
+ : static_cast<const NormalPageMemoryRegion*>(this)->Lookup(
+ address);
+}
+
+PageMemoryRegion* PageMemoryRegionTree::Lookup(Address address) const {
+ auto it = set_.upper_bound(address);
+  // This check also covers set_.size() > 0, since for empty containers it is
+  // guaranteed that begin() == end().
+ if (it == set_.begin()) return nullptr;
+ auto* result = std::next(it, -1)->second;
+ if (address < result->reserved_region().end()) return result;
+ return nullptr;
+}
+
+Address PageBackend::Lookup(Address address) const {
+ PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(address);
+ return pmr ? pmr->Lookup(address) : nullptr;
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_PAGE_MEMORY_INL_H_
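
PageMemoryRegionTree::Lookup above keys regions by their base address in an ordered map: upper_bound finds the first region starting after the query address, stepping back one entry yields the candidate region, and a final end check confirms containment. A standalone sketch of that lookup with illustrative types:

    #include <cstdio>
    #include <iterator>
    #include <map>

    struct Region {
      long base;
      long size;
      long end() const { return base + size; }
    };

    // Finds the region containing |address|, or nullptr, using the same
    // upper_bound-then-step-back pattern as the tree lookup above.
    const Region* Lookup(const std::map<long, Region>& regions, long address) {
      auto it = regions.upper_bound(address);
      if (it == regions.begin()) return nullptr;  // Also covers the empty map.
      const Region& candidate = std::prev(it)->second;
      return address < candidate.end() ? &candidate : nullptr;
    }

    int main() {
      std::map<long, Region> regions;
      regions.emplace(0x1000, Region{0x1000, 0x1000});
      regions.emplace(0x4000, Region{0x4000, 0x2000});
      std::printf("%p\n", static_cast<const void*>(Lookup(regions, 0x4800)));
      std::printf("%p\n", static_cast<const void*>(Lookup(regions, 0x3000)));
      return 0;
    }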
diff --git a/deps/v8/src/heap/cppgc/page-memory.cc b/deps/v8/src/heap/cppgc/page-memory.cc
new file mode 100644
index 0000000000..66e2812f5c
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/page-memory.cc
@@ -0,0 +1,211 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/page-memory.h"
+
+#include "src/base/macros.h"
+#include "src/heap/cppgc/page-memory-inl.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+void Unprotect(PageAllocator* allocator, const PageMemory& page_memory) {
+ if (SupportsCommittingGuardPages(allocator)) {
+ CHECK(allocator->SetPermissions(page_memory.writeable_region().base(),
+ page_memory.writeable_region().size(),
+ PageAllocator::Permission::kReadWrite));
+ } else {
+ // No protection in case the allocator cannot commit at the required
+ // granularity. Only protect if the allocator supports committing at that
+ // granularity.
+ //
+ // The allocator needs to support committing the overall range.
+ CHECK_EQ(0u,
+ page_memory.overall_region().size() % allocator->CommitPageSize());
+ CHECK(allocator->SetPermissions(page_memory.overall_region().base(),
+ page_memory.overall_region().size(),
+ PageAllocator::Permission::kReadWrite));
+ }
+}
+
+void Protect(PageAllocator* allocator, const PageMemory& page_memory) {
+ if (SupportsCommittingGuardPages(allocator)) {
+ // Swap the same region, providing the OS with a chance for fast lookup and
+ // change.
+ CHECK(allocator->SetPermissions(page_memory.writeable_region().base(),
+ page_memory.writeable_region().size(),
+ PageAllocator::Permission::kNoAccess));
+ } else {
+ // See Unprotect().
+ CHECK_EQ(0u,
+ page_memory.overall_region().size() % allocator->CommitPageSize());
+ CHECK(allocator->SetPermissions(page_memory.overall_region().base(),
+ page_memory.overall_region().size(),
+ PageAllocator::Permission::kNoAccess));
+ }
+}
+
+MemoryRegion ReserveMemoryRegion(PageAllocator* allocator,
+ size_t allocation_size) {
+ void* region_memory =
+ allocator->AllocatePages(nullptr, allocation_size, kPageSize,
+ PageAllocator::Permission::kNoAccess);
+ const MemoryRegion reserved_region(static_cast<Address>(region_memory),
+ allocation_size);
+ DCHECK_EQ(reserved_region.base() + allocation_size, reserved_region.end());
+ return reserved_region;
+}
+
+void FreeMemoryRegion(PageAllocator* allocator,
+ const MemoryRegion& reserved_region) {
+ allocator->FreePages(reserved_region.base(), reserved_region.size());
+}
+
+} // namespace
+
+PageMemoryRegion::PageMemoryRegion(PageAllocator* allocator,
+ MemoryRegion reserved_region, bool is_large)
+ : allocator_(allocator),
+ reserved_region_(reserved_region),
+ is_large_(is_large) {}
+
+PageMemoryRegion::~PageMemoryRegion() {
+ FreeMemoryRegion(allocator_, reserved_region());
+}
+
+// static
+constexpr size_t NormalPageMemoryRegion::kNumPageRegions;
+
+NormalPageMemoryRegion::NormalPageMemoryRegion(PageAllocator* allocator)
+ : PageMemoryRegion(allocator,
+ ReserveMemoryRegion(
+ allocator, RoundUp(kPageSize * kNumPageRegions,
+ allocator->AllocatePageSize())),
+ false) {
+#ifdef DEBUG
+ for (size_t i = 0; i < kNumPageRegions; ++i) {
+ DCHECK_EQ(false, page_memories_in_use_[i]);
+ }
+#endif // DEBUG
+}
+
+NormalPageMemoryRegion::~NormalPageMemoryRegion() = default;
+
+void NormalPageMemoryRegion::Allocate(Address writeable_base) {
+ const size_t index = GetIndex(writeable_base);
+ ChangeUsed(index, true);
+ Unprotect(allocator_, GetPageMemory(index));
+}
+
+void NormalPageMemoryRegion::Free(Address writeable_base) {
+ const size_t index = GetIndex(writeable_base);
+ ChangeUsed(index, false);
+ Protect(allocator_, GetPageMemory(index));
+}
+
+void NormalPageMemoryRegion::UnprotectForTesting() {
+ for (size_t i = 0; i < kNumPageRegions; ++i) {
+ Unprotect(allocator_, GetPageMemory(i));
+ }
+}
+
+LargePageMemoryRegion::LargePageMemoryRegion(PageAllocator* allocator,
+ size_t length)
+ : PageMemoryRegion(allocator,
+ ReserveMemoryRegion(
+ allocator, RoundUp(length + 2 * kGuardPageSize,
+ allocator->AllocatePageSize())),
+ true) {}
+
+LargePageMemoryRegion::~LargePageMemoryRegion() = default;
+
+void LargePageMemoryRegion::UnprotectForTesting() {
+ Unprotect(allocator_, GetPageMemory());
+}
+
+PageMemoryRegionTree::PageMemoryRegionTree() = default;
+
+PageMemoryRegionTree::~PageMemoryRegionTree() = default;
+
+void PageMemoryRegionTree::Add(PageMemoryRegion* region) {
+ DCHECK(region);
+ auto result = set_.emplace(region->reserved_region().base(), region);
+ USE(result);
+ DCHECK(result.second);
+}
+
+void PageMemoryRegionTree::Remove(PageMemoryRegion* region) {
+ DCHECK(region);
+ auto size = set_.erase(region->reserved_region().base());
+ USE(size);
+ DCHECK_EQ(1u, size);
+}
+
+NormalPageMemoryPool::NormalPageMemoryPool() = default;
+
+NormalPageMemoryPool::~NormalPageMemoryPool() = default;
+
+void NormalPageMemoryPool::Add(size_t bucket, NormalPageMemoryRegion* pmr,
+ Address writeable_base) {
+ DCHECK_LT(bucket, kNumPoolBuckets);
+ pool_[bucket].push_back(std::make_pair(pmr, writeable_base));
+}
+
+std::pair<NormalPageMemoryRegion*, Address> NormalPageMemoryPool::Take(
+ size_t bucket) {
+ DCHECK_LT(bucket, kNumPoolBuckets);
+ if (pool_[bucket].empty()) return {nullptr, nullptr};
+ std::pair<NormalPageMemoryRegion*, Address> pair = pool_[bucket].back();
+ pool_[bucket].pop_back();
+ return pair;
+}
+
+PageBackend::PageBackend(PageAllocator* allocator) : allocator_(allocator) {}
+
+PageBackend::~PageBackend() = default;
+
+Address PageBackend::AllocateNormalPageMemory(size_t bucket) {
+ std::pair<NormalPageMemoryRegion*, Address> result = page_pool_.Take(bucket);
+ if (!result.first) {
+ auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator_);
+ for (size_t i = 0; i < NormalPageMemoryRegion::kNumPageRegions; ++i) {
+ page_pool_.Add(bucket, pmr.get(),
+ pmr->GetPageMemory(i).writeable_region().base());
+ }
+ page_memory_region_tree_.Add(pmr.get());
+ normal_page_memory_regions_.push_back(std::move(pmr));
+ return AllocateNormalPageMemory(bucket);
+ }
+ result.first->Allocate(result.second);
+ return result.second;
+}
+
+void PageBackend::FreeNormalPageMemory(size_t bucket, Address writeable_base) {
+ auto* pmr = static_cast<NormalPageMemoryRegion*>(
+ page_memory_region_tree_.Lookup(writeable_base));
+ pmr->Free(writeable_base);
+ page_pool_.Add(bucket, pmr, writeable_base);
+}
+
+Address PageBackend::AllocateLargePageMemory(size_t size) {
+ auto pmr = std::make_unique<LargePageMemoryRegion>(allocator_, size);
+ const PageMemory pm = pmr->GetPageMemory();
+ Unprotect(allocator_, pm);
+ page_memory_region_tree_.Add(pmr.get());
+ large_page_memory_regions_.insert(std::make_pair(pmr.get(), std::move(pmr)));
+ return pm.writeable_region().base();
+}
+
+void PageBackend::FreeLargePageMemory(Address writeable_base) {
+ PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(writeable_base);
+ page_memory_region_tree_.Remove(pmr);
+ auto size = large_page_memory_regions_.erase(pmr);
+ USE(size);
+ DCHECK_EQ(1u, size);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/page-memory.h b/deps/v8/src/heap/cppgc/page-memory.h
new file mode 100644
index 0000000000..f3bc685fa3
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/page-memory.h
@@ -0,0 +1,237 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_PAGE_MEMORY_H_
+#define V8_HEAP_CPPGC_PAGE_MEMORY_H_
+
+#include <array>
+#include <map>
+#include <memory>
+#include <unordered_map>
+#include <vector>
+
+#include "include/cppgc/platform.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+
+namespace cppgc {
+namespace internal {
+
+class V8_EXPORT_PRIVATE MemoryRegion final {
+ public:
+ MemoryRegion() = default;
+ MemoryRegion(Address base, size_t size) : base_(base), size_(size) {
+ DCHECK(base);
+ DCHECK_LT(0u, size);
+ }
+
+ Address base() const { return base_; }
+ size_t size() const { return size_; }
+ Address end() const { return base_ + size_; }
+
+ bool Contains(Address addr) const {
+ return (reinterpret_cast<uintptr_t>(addr) -
+ reinterpret_cast<uintptr_t>(base_)) < size_;
+ }
+
+ bool Contains(const MemoryRegion& other) const {
+ return base_ <= other.base() && other.end() <= end();
+ }
+
+ private:
+ Address base_ = nullptr;
+ size_t size_ = 0;
+};
+
+// PageMemory provides the backing of a single normal or large page.
+class V8_EXPORT_PRIVATE PageMemory final {
+ public:
+ PageMemory(MemoryRegion overall, MemoryRegion writeable)
+ : overall_(overall), writable_(writeable) {
+ DCHECK(overall.Contains(writeable));
+ }
+
+ const MemoryRegion writeable_region() const { return writable_; }
+ const MemoryRegion overall_region() const { return overall_; }
+
+ private:
+ MemoryRegion overall_;
+ MemoryRegion writable_;
+};
+
+class V8_EXPORT_PRIVATE PageMemoryRegion {
+ public:
+ virtual ~PageMemoryRegion();
+
+ const MemoryRegion reserved_region() const { return reserved_region_; }
+ bool is_large() const { return is_large_; }
+
+ // Lookup writeable base for an |address| that's contained in
+ // PageMemoryRegion. Filters out addresses that are contained in non-writeable
+ // regions (e.g. guard pages).
+ inline Address Lookup(Address address) const;
+
+ // Disallow copy/move.
+ PageMemoryRegion(const PageMemoryRegion&) = delete;
+ PageMemoryRegion& operator=(const PageMemoryRegion&) = delete;
+
+ virtual void UnprotectForTesting() = 0;
+
+ protected:
+ PageMemoryRegion(PageAllocator*, MemoryRegion, bool);
+
+ PageAllocator* const allocator_;
+ const MemoryRegion reserved_region_;
+ const bool is_large_;
+};
+
+// NormalPageMemoryRegion serves kNumPageRegions normal-sized PageMemory objects.
+class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
+ public:
+ static constexpr size_t kNumPageRegions = 10;
+
+ explicit NormalPageMemoryRegion(PageAllocator*);
+ ~NormalPageMemoryRegion() override;
+
+ const PageMemory GetPageMemory(size_t index) const {
+ DCHECK_LT(index, kNumPageRegions);
+ return PageMemory(
+ MemoryRegion(reserved_region().base() + kPageSize * index, kPageSize),
+ MemoryRegion(
+ reserved_region().base() + kPageSize * index + kGuardPageSize,
+ kPageSize - 2 * kGuardPageSize));
+ }
+
+ // Allocates a normal page at |writeable_base| address. Changes page
+ // protection.
+ void Allocate(Address writeable_base);
+
+ // Frees a normal page at |writeable_base| address. Changes page
+ // protection.
+ void Free(Address);
+
+ inline Address Lookup(Address) const;
+
+ void UnprotectForTesting() final;
+
+ private:
+ void ChangeUsed(size_t index, bool value) {
+ DCHECK_LT(index, kNumPageRegions);
+ DCHECK_EQ(value, !page_memories_in_use_[index]);
+ page_memories_in_use_[index] = value;
+ }
+
+ size_t GetIndex(Address address) const {
+ return static_cast<size_t>(address - reserved_region().base()) >>
+ kPageSizeLog2;
+ }
+
+ std::array<bool, kNumPageRegions> page_memories_in_use_ = {};
+};
+
+// LargePageMemoryRegion serves a single large PageMemory object.
+class V8_EXPORT_PRIVATE LargePageMemoryRegion final : public PageMemoryRegion {
+ public:
+ LargePageMemoryRegion(PageAllocator*, size_t);
+ ~LargePageMemoryRegion() override;
+
+ const PageMemory GetPageMemory() const {
+ return PageMemory(
+ MemoryRegion(reserved_region().base(), reserved_region().size()),
+ MemoryRegion(reserved_region().base() + kGuardPageSize,
+ reserved_region().size() - 2 * kGuardPageSize));
+ }
+
+ inline Address Lookup(Address) const;
+
+ void UnprotectForTesting() final;
+};
+
+// A PageMemoryRegionTree is a binary search tree of PageMemoryRegions sorted
+// by reserved base addresses.
+//
+// The tree does not keep its elements alive but merely provides indexing
+// capabilities.
+class V8_EXPORT_PRIVATE PageMemoryRegionTree final {
+ public:
+ PageMemoryRegionTree();
+ ~PageMemoryRegionTree();
+
+ void Add(PageMemoryRegion*);
+ void Remove(PageMemoryRegion*);
+
+ inline PageMemoryRegion* Lookup(Address) const;
+
+ private:
+ std::map<Address, PageMemoryRegion*> set_;
+};
+
+// A pool of PageMemory objects represented by the writeable base addresses.
+//
+// The pool does not keep its elements alive but merely provides pooling
+// capabilities.
+class V8_EXPORT_PRIVATE NormalPageMemoryPool final {
+ public:
+ static constexpr size_t kNumPoolBuckets = 16;
+
+ using Result = std::pair<NormalPageMemoryRegion*, Address>;
+
+ NormalPageMemoryPool();
+ ~NormalPageMemoryPool();
+
+ void Add(size_t, NormalPageMemoryRegion*, Address);
+ Result Take(size_t);
+
+ private:
+ std::vector<Result> pool_[kNumPoolBuckets];
+};
+
+// A backend that is used for allocating and freeing normal and large pages.
+//
+// Internally maintains a set of PageMemoryRegions. The backend keeps its used
+// regions alive.
+class V8_EXPORT_PRIVATE PageBackend final {
+ public:
+ explicit PageBackend(PageAllocator*);
+ ~PageBackend();
+
+ // Allocates a normal page from the backend.
+ //
+ // Returns the writeable base of the region.
+ Address AllocateNormalPageMemory(size_t);
+
+ // Returns normal page memory back to the backend. Expects the
+ // |writeable_base| returned by |AllocateNormalPageMemory()|.
+ void FreeNormalPageMemory(size_t, Address writeable_base);
+
+ // Allocates a large page from the backend.
+ //
+ // Returns the writeable base of the region.
+ Address AllocateLargePageMemory(size_t size);
+
+ // Returns large page memory back to the backend. Expects the |writeable_base|
+ // returned by |AllocateLargePageMemory()|.
+ void FreeLargePageMemory(Address writeable_base);
+
+ // Returns the writeable base if |address| is contained in a valid page
+ // memory.
+ inline Address Lookup(Address) const;
+
+ // Disallow copy/move.
+ PageBackend(const PageBackend&) = delete;
+ PageBackend& operator=(const PageBackend&) = delete;
+
+ private:
+ PageAllocator* allocator_;
+ NormalPageMemoryPool page_pool_;
+ PageMemoryRegionTree page_memory_region_tree_;
+ std::vector<std::unique_ptr<PageMemoryRegion>> normal_page_memory_regions_;
+ std::unordered_map<PageMemoryRegion*, std::unique_ptr<PageMemoryRegion>>
+ large_page_memory_regions_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_PAGE_MEMORY_H_
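A minimal sketch of how the backend above might be driven, assuming a cppgc::PageAllocator* (named platform_allocator here, purely illustrative) is available; bucket selection and page header setup are elided.

#include "src/heap/cppgc/page-memory.h"

namespace {

void UsePageBackend(cppgc::PageAllocator* platform_allocator) {
  using cppgc::internal::Address;
  cppgc::internal::PageBackend backend(platform_allocator);

  // Normal page memory is handed out per bucket; the returned address is the
  // writeable base, i.e. the region between the two guard pages.
  constexpr size_t kBucket = 0;
  Address writeable_base = backend.AllocateNormalPageMemory(kBucket);

  // ... construct a NormalPage at |writeable_base| ...

  // Returning the memory re-protects it and parks it in the pool bucket.
  backend.FreeNormalPageMemory(kBucket, writeable_base);

  // Large page memory is sized per request and freed by its writeable base.
  Address large_base = backend.AllocateLargePageMemory(1 << 20);
  backend.FreeLargePageMemory(large_base);
}

}  // namespace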
diff --git a/deps/v8/src/heap/cppgc/persistent-node.cc b/deps/v8/src/heap/cppgc/persistent-node.cc
new file mode 100644
index 0000000000..299cefc521
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/persistent-node.cc
@@ -0,0 +1,60 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/persistent-node.h"
+
+#include <algorithm>
+#include <numeric>
+
+namespace cppgc {
+namespace internal {
+
+size_t PersistentRegion::NodesInUse() const {
+ return std::accumulate(
+ nodes_.cbegin(), nodes_.cend(), 0u, [](size_t acc, const auto& slots) {
+ return acc + std::count_if(slots->cbegin(), slots->cend(),
+ [](const PersistentNode& node) {
+ return node.IsUsed();
+ });
+ });
+}
+
+void PersistentRegion::EnsureNodeSlots() {
+ nodes_.push_back(std::make_unique<PersistentNodeSlots>());
+ for (auto& node : *nodes_.back()) {
+ node.InitializeAsFreeNode(free_list_head_);
+ free_list_head_ = &node;
+ }
+}
+
+void PersistentRegion::Trace(Visitor* visitor) {
+ free_list_head_ = nullptr;
+ for (auto& slots : nodes_) {
+ bool is_empty = true;
+ for (auto& node : *slots) {
+ if (node.IsUsed()) {
+ node.Trace(visitor);
+ is_empty = false;
+ } else {
+ node.InitializeAsFreeNode(free_list_head_);
+ free_list_head_ = &node;
+ }
+ }
+ if (is_empty) {
+ PersistentNode* first_next = (*slots)[0].FreeListNext();
+ // The first node of this block was processed first in the loop above,
+ // guaranteeing that its free-list pointer is either null or points into a
+ // different node block.
+ CPPGC_DCHECK(!first_next || first_next < &slots->front() ||
+ first_next > &slots->back());
+ free_list_head_ = first_next;
+ slots.reset();
+ }
+ }
+ nodes_.erase(std::remove_if(nodes_.begin(), nodes_.end(),
+ [](const auto& ptr) { return !ptr; }),
+ nodes_.end());
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/pointer-policies.cc b/deps/v8/src/heap/cppgc/pointer-policies.cc
new file mode 100644
index 0000000000..e9dfcecdf3
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/pointer-policies.cc
@@ -0,0 +1,35 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/pointer-policies.h"
+#include "include/cppgc/internal/persistent-node.h"
+
+#include "src/base/macros.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap.h"
+
+namespace cppgc {
+namespace internal {
+
+EnabledCheckingPolicy::EnabledCheckingPolicy() {
+ USE(impl_);
+ // TODO(chromium:1056170): Save creating heap state.
+}
+
+void EnabledCheckingPolicy::CheckPointer(const void* ptr) {
+ // TODO(chromium:1056170): Provide implementation.
+}
+
+PersistentRegion& StrongPersistentPolicy::GetPersistentRegion(void* object) {
+ auto* heap = BasePage::FromPayload(object)->heap();
+ return heap->GetStrongPersistentRegion();
+}
+
+PersistentRegion& WeakPersistentPolicy::GetPersistentRegion(void* object) {
+ auto* heap = BasePage::FromPayload(object)->heap();
+ return heap->GetWeakPersistentRegion();
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
new file mode 100644
index 0000000000..40107c1526
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
@@ -0,0 +1,66 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/prefinalizer-handler.h"
+
+#include <algorithm>
+#include <memory>
+
+#include "src/base/platform/platform.h"
+#include "src/heap/cppgc/heap.h"
+
+namespace cppgc {
+namespace internal {
+
+// static
+void PreFinalizerRegistrationDispatcher::RegisterPrefinalizer(
+ cppgc::Heap* heap, PreFinalizer prefinalizer) {
+ internal::Heap::From(heap)->prefinalizer_handler()->RegisterPrefinalizer(
+ prefinalizer);
+}
+
+bool PreFinalizerRegistrationDispatcher::PreFinalizer::operator==(
+ const PreFinalizer& other) {
+ return (object_ == other.object_) && (callback_ == other.callback_);
+}
+
+PreFinalizerHandler::PreFinalizerHandler()
+#ifdef DEBUG
+ : creation_thread_id_(v8::base::OS::GetCurrentThreadId())
+#endif
+{
+}
+
+void PreFinalizerHandler::RegisterPrefinalizer(PreFinalizer prefinalizer) {
+ DCHECK(CurrentThreadIsCreationThread());
+ DCHECK_EQ(ordered_pre_finalizers_.end(),
+ std::find(ordered_pre_finalizers_.begin(),
+ ordered_pre_finalizers_.end(), prefinalizer));
+ ordered_pre_finalizers_.push_back(prefinalizer);
+}
+
+void PreFinalizerHandler::InvokePreFinalizers() {
+ DCHECK(CurrentThreadIsCreationThread());
+ LivenessBroker liveness_broker = LivenessBrokerFactory::Create();
+ ordered_pre_finalizers_.erase(
+ ordered_pre_finalizers_.begin(),
+ std::remove_if(ordered_pre_finalizers_.rbegin(),
+ ordered_pre_finalizers_.rend(),
+ [liveness_broker](const PreFinalizer& pf) {
+ return (pf.callback_)(liveness_broker, pf.object_);
+ })
+ .base());
+ ordered_pre_finalizers_.shrink_to_fit();
+}
+
+bool PreFinalizerHandler::CurrentThreadIsCreationThread() {
+#ifdef DEBUG
+ return creation_thread_id_ == v8::base::OS::GetCurrentThreadId();
+#else
+ return true;
+#endif
+}
+
+} // namespace internal
+} // namespace cppgc
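The erase in InvokePreFinalizers() combines std::remove_if with reverse iterators so that pre-finalizers run back-to-front (last registered first) while the surviving entries keep their order; a self-contained illustration of the same idiom on plain integers:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> v = {1, 2, 3, 4, 5};
  // Visit elements from the back and drop those matching the predicate. The
  // reverse iterator returned by std::remove_if yields, via .base(), the
  // position where the kept elements start, so the stale prefix before it
  // can be erased in one call.
  v.erase(v.begin(),
          std::remove_if(v.rbegin(), v.rend(),
                         [](int x) { return x % 2 == 0; })
              .base());
  for (int x : v) std::printf("%d ", x);  // Prints: 1 3 5
  return 0;
}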
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.h b/deps/v8/src/heap/cppgc/prefinalizer-handler.h
new file mode 100644
index 0000000000..a625553471
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.h
@@ -0,0 +1,44 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_PREFINALIZER_HANDLER_H_
+#define V8_HEAP_CPPGC_PREFINALIZER_HANDLER_H_
+
+#include <vector>
+
+#include "include/cppgc/prefinalizer.h"
+
+namespace cppgc {
+namespace internal {
+
+class PreFinalizerHandler final {
+ public:
+ using PreFinalizer =
+ cppgc::internal::PreFinalizerRegistrationDispatcher::PreFinalizer;
+
+ PreFinalizerHandler();
+
+ void RegisterPrefinalizer(PreFinalizer prefinalizer);
+
+ void InvokePreFinalizers();
+
+ private:
+ // Checks that the current thread is the thread that created the heap.
+ bool CurrentThreadIsCreationThread();
+
+ // Pre-finalizers are called in the reverse order in which they are
+ // registered by the constructors (including constructors of Mixin
+ // objects) for an object, by processing the ordered_pre_finalizers_
+ // back-to-front.
+ std::vector<PreFinalizer> ordered_pre_finalizers_;
+
+#ifdef DEBUG
+ int creation_thread_id_;
+#endif
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_PREFINALIZER_HANDLER_H_
diff --git a/deps/v8/src/heap/cppgc/raw-heap.cc b/deps/v8/src/heap/cppgc/raw-heap.cc
new file mode 100644
index 0000000000..cf7311b46f
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/raw-heap.cc
@@ -0,0 +1,32 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/raw-heap.h"
+
+#include "src/heap/cppgc/heap-space.h"
+
+namespace cppgc {
+namespace internal {
+
+// static
+constexpr size_t RawHeap::kNumberOfRegularSpaces;
+
+RawHeap::RawHeap(Heap* heap, size_t custom_spaces) : main_heap_(heap) {
+ size_t i = 0;
+ for (; i < static_cast<size_t>(RegularSpaceType::kLarge); ++i) {
+ spaces_.push_back(std::make_unique<NormalPageSpace>(this, i));
+ }
+ spaces_.push_back(std::make_unique<LargePageSpace>(
+ this, static_cast<size_t>(RegularSpaceType::kLarge)));
+ DCHECK_EQ(kNumberOfRegularSpaces, spaces_.size());
+ for (size_t j = 0; j < custom_spaces; j++) {
+ spaces_.push_back(
+ std::make_unique<NormalPageSpace>(this, kNumberOfRegularSpaces + j));
+ }
+}
+
+RawHeap::~RawHeap() = default;
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/raw-heap.h b/deps/v8/src/heap/cppgc/raw-heap.h
new file mode 100644
index 0000000000..0591fa87ab
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/raw-heap.h
@@ -0,0 +1,106 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_RAW_HEAP_H_
+#define V8_HEAP_CPPGC_RAW_HEAP_H_
+
+#include <iterator>
+#include <memory>
+#include <vector>
+
+#include "include/cppgc/heap.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+class Heap;
+class BaseSpace;
+
+// RawHeap is responsible for space management.
+class V8_EXPORT_PRIVATE RawHeap final {
+ public:
+ // Normal spaces are used to store objects of different size classes:
+ // - kNormal1: < 32 bytes
+ // - kNormal2: < 64 bytes
+ // - kNormal3: < 128 bytes
+ // - kNormal4: >= 128 bytes
+ //
+ // Objects of size greater than 2^16 get stored in the large space.
+ //
+ // Users can override where objects are allocated via cppgc::CustomSpace to
+ // force allocation in a custom space.
+ enum class RegularSpaceType : uint8_t {
+ kNormal1,
+ kNormal2,
+ kNormal3,
+ kNormal4,
+ kLarge,
+ };
+
+ static constexpr size_t kNumberOfRegularSpaces =
+ static_cast<size_t>(RegularSpaceType::kLarge) + 1;
+
+ using Spaces = std::vector<std::unique_ptr<BaseSpace>>;
+ using iterator = Spaces::iterator;
+ using const_iterator = Spaces::const_iterator;
+
+ explicit RawHeap(Heap* heap, size_t custom_spaces);
+ ~RawHeap();
+
+ // Space iteration support.
+ iterator begin() { return spaces_.begin(); }
+ const_iterator begin() const { return spaces_.begin(); }
+ iterator end() { return spaces_.end(); }
+ const_iterator end() const { return spaces_.end(); }
+
+ iterator custom_begin() { return std::next(begin(), kNumberOfRegularSpaces); }
+ iterator custom_end() { return end(); }
+
+ size_t size() const { return spaces_.size(); }
+
+ BaseSpace* Space(RegularSpaceType type) {
+ const size_t index = static_cast<size_t>(type);
+ DCHECK_GT(kNumberOfRegularSpaces, index);
+ return Space(index);
+ }
+ const BaseSpace* Space(RegularSpaceType space) const {
+ return const_cast<RawHeap&>(*this).Space(space);
+ }
+
+ BaseSpace* CustomSpace(CustomSpaceIndex space_index) {
+ return Space(SpaceIndexForCustomSpace(space_index));
+ }
+ const BaseSpace* CustomSpace(CustomSpaceIndex space_index) const {
+ return const_cast<RawHeap&>(*this).CustomSpace(space_index);
+ }
+
+ Heap* heap() { return main_heap_; }
+ const Heap* heap() const { return main_heap_; }
+
+ private:
+ size_t SpaceIndexForCustomSpace(CustomSpaceIndex space_index) const {
+ DCHECK_LT(space_index.value, spaces_.size() - kNumberOfRegularSpaces);
+ return kNumberOfRegularSpaces + space_index.value;
+ }
+
+ BaseSpace* Space(size_t space_index) {
+ DCHECK_GT(spaces_.size(), space_index);
+ BaseSpace* space = spaces_[space_index].get();
+ DCHECK(space);
+ return space;
+ }
+ const BaseSpace* Space(size_t space_index) const {
+ return const_cast<RawHeap&>(*this).Space(space_index);
+ }
+
+ Heap* main_heap_;
+ Spaces spaces_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_RAW_HEAP_H_
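As an illustration only (the real bucketing decision lives in the object allocator, not in this header), the size classes documented above translate into a mapping along these lines:

#include <cstddef>

#include "src/heap/cppgc/raw-heap.h"

// Sketch: assumes the thresholds from the RegularSpaceType comment are exact.
cppgc::internal::RawHeap::RegularSpaceType SpaceTypeForSize(size_t size) {
  using SpaceType = cppgc::internal::RawHeap::RegularSpaceType;
  if (size > (size_t{1} << 16)) return SpaceType::kLarge;
  if (size < 32) return SpaceType::kNormal1;
  if (size < 64) return SpaceType::kNormal2;
  if (size < 128) return SpaceType::kNormal3;
  return SpaceType::kNormal4;
}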
diff --git a/deps/v8/src/heap/cppgc/sanitizers.h b/deps/v8/src/heap/cppgc/sanitizers.h
index e3102b01ed..17f6cd7306 100644
--- a/deps/v8/src/heap/cppgc/sanitizers.h
+++ b/deps/v8/src/heap/cppgc/sanitizers.h
@@ -5,6 +5,9 @@
#ifndef V8_HEAP_CPPGC_SANITIZERS_H_
#define V8_HEAP_CPPGC_SANITIZERS_H_
+#include <stdint.h>
+#include <string.h>
+
#include "src/base/macros.h"
//
@@ -16,10 +19,15 @@
#include <sanitizer/asan_interface.h>
#define NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+#if !defined(ASAN_POISON_MEMORY_REGION) || !defined(ASAN_UNPOISON_MEMORY_REGION)
+#error "ASAN_POISON_MEMORY_REGION must be defined"
+#endif
#else // !V8_USE_ADDRESS_SANITIZER
#define NO_SANITIZE_ADDRESS
+#define ASAN_POISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
+#define ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
#endif // V8_USE_ADDRESS_SANITIZER
@@ -27,12 +35,43 @@
#include <sanitizer/msan_interface.h>
+#define MSAN_POISON(addr, size) __msan_allocated_memory(addr, size)
#define MSAN_UNPOISON(addr, size) __msan_unpoison(addr, size)
#else // !V8_USE_MEMORY_SANITIZER
+#define MSAN_POISON(addr, size) ((void)(addr), (void)(size))
#define MSAN_UNPOISON(addr, size) ((void)(addr), (void)(size))
#endif // V8_USE_MEMORY_SANITIZER
+// API for newly allocated or reclaimed memory.
+#if defined(V8_USE_MEMORY_SANITIZER)
+#define SET_MEMORY_ACCESIBLE(address, size) \
+ MSAN_UNPOISON(address, size); \
+ memset((address), 0, (size))
+#define SET_MEMORY_INACCESIBLE(address, size) MSAN_POISON((address), (size))
+#elif DEBUG || defined(V8_USE_ADDRESS_SANITIZER)
+#define SET_MEMORY_ACCESIBLE(address, size) \
+ ASAN_UNPOISON_MEMORY_REGION(address, size); \
+ memset((address), 0, (size))
+#define SET_MEMORY_INACCESIBLE(address, size) \
+ ::cppgc::internal::ZapMemory((address), (size)); \
+ ASAN_POISON_MEMORY_REGION(address, size)
+#else
+#define SET_MEMORY_ACCESIBLE(address, size) memset((address), 0, (size))
+#define SET_MEMORY_INACCESIBLE(address, size) ((void)(address), (void)(size))
+#endif
+
+namespace cppgc {
+namespace internal {
+
+inline void ZapMemory(void* address, size_t size) {
+ static constexpr uint8_t kZappedValue = 0xcd;
+ memset(address, kZappedValue, size);
+}
+
+} // namespace internal
+} // namespace cppgc
+
#endif // V8_HEAP_CPPGC_SANITIZERS_H_
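A small sketch of how the new SET_MEMORY_* macros bracket payload bytes that leave and re-enter use, mirroring the sweeper further below; the malloc'ed buffer is purely illustrative. Both macros expand to multiple statements, so they are only safe as standalone statements, not as the body of an unbraced if.

#include <cstddef>
#include <cstdlib>

#include "src/heap/cppgc/sanitizers.h"

void RecycleBlock() {
  constexpr size_t kSize = 64;
  void* block = std::malloc(kSize);

  // Memory leaving use: zapped in debug builds, poisoned under ASAN, marked
  // uninitialized under MSAN; a no-op in plain release builds.
  SET_MEMORY_INACCESIBLE(block, kSize);

  // Memory re-entering use: unpoisoned where applicable and zero-filled so
  // the caller always observes well-defined bytes.
  SET_MEMORY_ACCESIBLE(block, kSize);

  std::free(block);
}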
diff --git a/deps/v8/src/heap/cppgc/source-location.cc b/deps/v8/src/heap/cppgc/source-location.cc
new file mode 100644
index 0000000000..95154cf6d5
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/source-location.cc
@@ -0,0 +1,16 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/source-location.h"
+
+namespace cppgc {
+
+std::string SourceLocation::ToString() const {
+ if (!file_) {
+ return {};
+ }
+ return std::string(function_) + "@" + file_ + ":" + std::to_string(line_);
+}
+
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/stack.cc b/deps/v8/src/heap/cppgc/stack.cc
index a821768917..b99693708c 100644
--- a/deps/v8/src/heap/cppgc/stack.cc
+++ b/deps/v8/src/heap/cppgc/stack.cc
@@ -13,7 +13,7 @@
namespace cppgc {
namespace internal {
-using IterateStackCallback = void (Stack::*)(StackVisitor*, intptr_t*) const;
+using IterateStackCallback = void (*)(const Stack*, StackVisitor*, intptr_t*);
extern "C" void PushAllRegistersAndIterateStack(const Stack*, StackVisitor*,
IterateStackCallback);
@@ -63,8 +63,6 @@ void IterateAsanFakeFrameIfNecessary(StackVisitor* visitor,
#endif // V8_USE_ADDRESS_SANITIZER
-#ifdef CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
-
void IterateSafeStackIfNecessary(StackVisitor* visitor) {
#if defined(__has_feature)
#if __has_feature(safe_stack)
@@ -88,49 +86,44 @@ void IterateSafeStackIfNecessary(StackVisitor* visitor) {
#endif // defined(__has_feature)
}
-#endif // CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
-
-} // namespace
-
-#ifdef CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
-void Stack::IteratePointers(StackVisitor* visitor) const {
- PushAllRegistersAndIterateStack(this, visitor, &Stack::IteratePointersImpl);
- // No need to deal with callee-saved registers as they will be kept alive by
- // the regular conservative stack iteration.
- IterateSafeStackIfNecessary(visitor);
-}
-#endif // CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
-
+// Called by the trampoline that pushes registers on the stack. This method
+// should never be inlined to ensure that a possible redzone cannot contain
+// any data that needs to be scanned.
+V8_NOINLINE
// No ASAN support as method accesses redzones while walking the stack.
NO_SANITIZE_ADDRESS
-void Stack::IteratePointersImpl(StackVisitor* visitor,
- intptr_t* stack_end) const {
+void IteratePointersImpl(const Stack* stack, StackVisitor* visitor,
+ intptr_t* stack_end) {
#ifdef V8_USE_ADDRESS_SANITIZER
void* asan_fake_stack = __asan_get_current_fake_stack();
#endif // V8_USE_ADDRESS_SANITIZER
// All supported platforms should have their stack aligned to at least
// sizeof(void*).
constexpr size_t kMinStackAlignment = sizeof(void*);
- // Redzone should not contain any pointers as the iteration is always called
- // from the assembly trampoline. If inline assembly is ever inlined through
- // LTO this may become necessary.
- constexpr size_t kRedZoneBytes = 128;
- void** current = reinterpret_cast<void**>(
- reinterpret_cast<uintptr_t>(stack_end - kRedZoneBytes));
+ void** current = reinterpret_cast<void**>(stack_end);
CHECK_EQ(0u, reinterpret_cast<uintptr_t>(current) & (kMinStackAlignment - 1));
- for (; current < stack_start_; ++current) {
+ for (; current < stack->stack_start(); ++current) {
// MSAN: Instead of unpoisoning the whole stack, the slot's value is copied
// into a local which is unpoisoned.
void* address = *current;
- MSAN_UNPOISON(address, sizeof(address));
+ MSAN_UNPOISON(&address, sizeof(address));
if (address == nullptr) continue;
visitor->VisitPointer(address);
#ifdef V8_USE_ADDRESS_SANITIZER
- IterateAsanFakeFrameIfNecessary(visitor, asan_fake_stack, stack_start_,
- stack_end, address);
+ IterateAsanFakeFrameIfNecessary(visitor, asan_fake_stack,
+ stack->stack_start(), stack_end, address);
#endif // V8_USE_ADDRESS_SANITIZER
}
}
+} // namespace
+
+void Stack::IteratePointers(StackVisitor* visitor) const {
+ PushAllRegistersAndIterateStack(this, visitor, &IteratePointersImpl);
+ // No need to deal with callee-saved registers as they will be kept alive by
+ // the regular conservative stack iteration.
+ IterateSafeStackIfNecessary(visitor);
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/stack.h b/deps/v8/src/heap/cppgc/stack.h
index 599bf3a54a..3f561aed08 100644
--- a/deps/v8/src/heap/cppgc/stack.h
+++ b/deps/v8/src/heap/cppgc/stack.h
@@ -7,11 +7,6 @@
#include "src/base/macros.h"
-// TODO(chromium:1056170): Implement all platforms.
-#if defined(V8_TARGET_ARCH_X64)
-#define CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN 1
-#endif
-
namespace cppgc {
namespace internal {
@@ -33,13 +28,12 @@ class V8_EXPORT_PRIVATE Stack final {
// Word-aligned iteration of the stack. Slot values are passed on to
// |visitor|.
-#ifdef CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
void IteratePointers(StackVisitor* visitor) const;
-#endif // CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN
- private:
- void IteratePointersImpl(StackVisitor* visitor, intptr_t* stack_end) const;
+ // Returns the start of the stack.
+ const void* stack_start() const { return stack_start_; }
+ private:
const void* stack_start_;
};
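With the CPPGC_SUPPORTS_CONSERVATIVE_STACK_SCAN guard gone, IteratePointers() is available on every architecture that supplies a push-registers trampoline. A sketch of a conservative scan, assuming StackVisitor (declared earlier in stack.h, outside this hunk) exposes a virtual VisitPointer(const void*):

#include <cstddef>

#include "src/heap/cppgc/stack.h"

// Counts the candidate pointers reported during a conservative stack scan.
class CountingVisitor final : public cppgc::internal::StackVisitor {
 public:
  void VisitPointer(const void*) override { ++count_; }
  size_t count() const { return count_; }

 private:
  size_t count_ = 0;
};

size_t CountStackPointers(const cppgc::internal::Stack& stack) {
  CountingVisitor visitor;
  // Spills callee-saved registers via the per-architecture trampoline, then
  // walks the stack word by word, reporting every non-null slot value.
  stack.IteratePointers(&visitor);
  return visitor.count();
}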
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
new file mode 100644
index 0000000000..77d2d3c33e
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -0,0 +1,213 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/sweeper.h"
+
+#include <vector>
+
+#include "src/heap/cppgc/free-list.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/heap-visitor.h"
+#include "src/heap/cppgc/object-start-bitmap-inl.h"
+#include "src/heap/cppgc/object-start-bitmap.h"
+#include "src/heap/cppgc/raw-heap.h"
+#include "src/heap/cppgc/sanitizers.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+class ObjectStartBitmapVerifier
+ : private HeapVisitor<ObjectStartBitmapVerifier> {
+ friend class HeapVisitor<ObjectStartBitmapVerifier>;
+
+ public:
+ void Verify(RawHeap* heap) { Traverse(heap); }
+
+ private:
+ bool VisitNormalPage(NormalPage* page) {
+ // Remember bitmap and reset previous pointer.
+ bitmap_ = &page->object_start_bitmap();
+ prev_ = nullptr;
+ return false;
+ }
+
+ bool VisitHeapObjectHeader(HeapObjectHeader* header) {
+ if (header->IsLargeObject()) return true;
+
+ auto* raw_header = reinterpret_cast<ConstAddress>(header);
+ CHECK(bitmap_->CheckBit(raw_header));
+ if (prev_) {
+ CHECK_EQ(prev_, bitmap_->FindHeader(raw_header - 1));
+ }
+ prev_ = header;
+ return true;
+ }
+
+ ObjectStartBitmap* bitmap_ = nullptr;
+ HeapObjectHeader* prev_ = nullptr;
+};
+
+struct SpaceState {
+ BaseSpace::Pages unswept_pages;
+};
+using SpaceStates = std::vector<SpaceState>;
+
+bool SweepNormalPage(NormalPage* page) {
+ constexpr auto kAtomicAccess = HeapObjectHeader::AccessMode::kAtomic;
+
+ auto* space = NormalPageSpace::From(page->space());
+ ObjectStartBitmap& bitmap = page->object_start_bitmap();
+ bitmap.Clear();
+
+ Address start_of_gap = page->PayloadStart();
+ for (Address begin = page->PayloadStart(), end = page->PayloadEnd();
+ begin != end;) {
+ HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(begin);
+ const size_t size = header->GetSize();
+ // Check if this is a free list entry.
+ if (header->IsFree<kAtomicAccess>()) {
+ SET_MEMORY_INACCESIBLE(header, std::min(kFreeListEntrySize, size));
+ begin += size;
+ continue;
+ }
+ // Check if object is not marked (not reachable).
+ if (!header->IsMarked<kAtomicAccess>()) {
+ header->Finalize();
+ SET_MEMORY_INACCESIBLE(header, size);
+ begin += size;
+ continue;
+ }
+ // The object is alive.
+ const Address header_address = reinterpret_cast<Address>(header);
+ if (start_of_gap != header_address) {
+ space->AddToFreeList(start_of_gap,
+ static_cast<size_t>(header_address - start_of_gap));
+ }
+ header->Unmark<kAtomicAccess>();
+ bitmap.SetBit(begin);
+ begin += size;
+ start_of_gap = begin;
+ }
+
+ if (start_of_gap != page->PayloadStart() &&
+ start_of_gap != page->PayloadEnd()) {
+ space->AddToFreeList(
+ start_of_gap, static_cast<size_t>(page->PayloadEnd() - start_of_gap));
+ }
+
+ const bool is_empty = (start_of_gap == page->PayloadStart());
+ return is_empty;
+}
+
+// This visitor:
+// - resets linear allocation buffers and clears free lists for all spaces;
+// - moves all Heap pages to local Sweeper's state (SpaceStates).
+class PrepareForSweepVisitor final
+ : public HeapVisitor<PrepareForSweepVisitor> {
+ public:
+ explicit PrepareForSweepVisitor(SpaceStates* states) : states_(states) {}
+
+ bool VisitNormalPageSpace(NormalPageSpace* space) {
+ space->ResetLinearAllocationBuffer();
+ space->free_list().Clear();
+ (*states_)[space->index()].unswept_pages = space->RemoveAllPages();
+ return true;
+ }
+
+ bool VisitLargePageSpace(LargePageSpace* space) {
+ (*states_)[space->index()].unswept_pages = space->RemoveAllPages();
+
+ return true;
+ }
+
+ private:
+ SpaceStates* states_;
+};
+
+class MutatorThreadSweepVisitor final
+ : private HeapVisitor<MutatorThreadSweepVisitor> {
+ friend class HeapVisitor<MutatorThreadSweepVisitor>;
+
+ public:
+ explicit MutatorThreadSweepVisitor(SpaceStates* space_states) {
+ for (SpaceState& state : *space_states) {
+ for (BasePage* page : state.unswept_pages) {
+ Traverse(page);
+ }
+ state.unswept_pages.clear();
+ }
+ }
+
+ private:
+ bool VisitNormalPage(NormalPage* page) {
+ const bool is_empty = SweepNormalPage(page);
+ if (is_empty) {
+ NormalPage::Destroy(page);
+ } else {
+ page->space()->AddPage(page);
+ }
+ return true;
+ }
+
+ bool VisitLargePage(LargePage* page) {
+ if (page->ObjectHeader()->IsMarked()) {
+ page->space()->AddPage(page);
+ } else {
+ page->ObjectHeader()->Finalize();
+ LargePage::Destroy(page);
+ }
+ return true;
+ }
+};
+
+} // namespace
+
+class Sweeper::SweeperImpl final {
+ public:
+ explicit SweeperImpl(RawHeap* heap) : heap_(heap) {
+ space_states_.resize(heap_->size());
+ }
+
+ void Start(Config config) {
+ is_in_progress_ = true;
+#if DEBUG
+ ObjectStartBitmapVerifier().Verify(heap_);
+#endif
+ PrepareForSweepVisitor(&space_states_).Traverse(heap_);
+ if (config == Config::kAtomic) {
+ Finish();
+ } else {
+ DCHECK_EQ(Config::kIncrementalAndConcurrent, config);
+ // TODO(chromium:1056170): Schedule concurrent sweeping.
+ }
+ }
+
+ void Finish() {
+ if (!is_in_progress_) return;
+
+ MutatorThreadSweepVisitor s(&space_states_);
+
+ is_in_progress_ = false;
+ }
+
+ private:
+ SpaceStates space_states_;
+ RawHeap* heap_;
+ bool is_in_progress_ = false;
+};
+
+Sweeper::Sweeper(RawHeap* heap) : impl_(std::make_unique<SweeperImpl>(heap)) {}
+Sweeper::~Sweeper() = default;
+
+void Sweeper::Start(Config config) { impl_->Start(config); }
+void Sweeper::Finish() { impl_->Finish(); }
+
+} // namespace internal
+} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/sweeper.h b/deps/v8/src/heap/cppgc/sweeper.h
new file mode 100644
index 0000000000..3e38773168
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/sweeper.h
@@ -0,0 +1,38 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_SWEEPER_H_
+#define V8_HEAP_CPPGC_SWEEPER_H_
+
+#include <memory>
+
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+class RawHeap;
+
+class V8_EXPORT_PRIVATE Sweeper final {
+ public:
+ enum class Config { kAtomic, kIncrementalAndConcurrent };
+
+ explicit Sweeper(RawHeap*);
+ ~Sweeper();
+
+ Sweeper(const Sweeper&) = delete;
+ Sweeper& operator=(const Sweeper&) = delete;
+
+ void Start(Config);
+ void Finish();
+
+ private:
+ class SweeperImpl;
+ std::unique_ptr<SweeperImpl> impl_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_SWEEPER_H_
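A minimal sketch of driving the sweeper; in the real heap the Sweeper is owned by Heap and started after marking, so constructing one directly here is only for illustration.

#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/sweeper.h"

void SweepAfterMarking(cppgc::internal::RawHeap* raw_heap) {
  cppgc::internal::Sweeper sweeper(raw_heap);

  // kAtomic sweeps all spaces synchronously inside Start().
  sweeper.Start(cppgc::internal::Sweeper::Config::kAtomic);

  // Finish() forces completion for kIncrementalAndConcurrent; after an
  // atomic sweep it is a no-op because sweeping is no longer in progress.
  sweeper.Finish();
}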
diff --git a/deps/v8/src/heap/cppgc/visitor.h b/deps/v8/src/heap/cppgc/visitor.h
new file mode 100644
index 0000000000..caa840b4dc
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/visitor.h
@@ -0,0 +1,23 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_VISITOR_H_
+#define V8_HEAP_CPPGC_VISITOR_H_
+
+#include "include/cppgc/visitor.h"
+
+namespace cppgc {
+namespace internal {
+
+// Base visitor that is allowed to create a public cppgc::Visitor object and
+// use its internals.
+class VisitorBase : public cppgc::Visitor {
+ public:
+ VisitorBase() = default;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_VISITOR_H_
diff --git a/deps/v8/src/heap/cppgc/worklist.h b/deps/v8/src/heap/cppgc/worklist.h
new file mode 100644
index 0000000000..5993d6a04e
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/worklist.h
@@ -0,0 +1,473 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_WORKLIST_H_
+#define V8_HEAP_CPPGC_WORKLIST_H_
+
+#include <cstddef>
+#include <utility>
+
+#include "src/base/atomic-utils.h"
+#include "src/base/logging.h"
+#include "src/base/platform/mutex.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
+
+namespace cppgc {
+namespace internal {
+
+// A concurrent worklist based on segments. Each task gets private
+// push and pop segments. Empty pop segments are swapped with their
+// corresponding push segments. Full push segments are published to a global
+// pool of segments and replaced with empty segments.
+//
+// Work stealing is best effort, i.e., there is no way to inform other tasks
+// of the need for items.
+template <typename EntryType_, int SEGMENT_SIZE, int max_num_tasks = 8>
+class Worklist {
+ using WorklistType = Worklist<EntryType_, SEGMENT_SIZE, max_num_tasks>;
+
+ public:
+ using EntryType = EntryType_;
+ static constexpr int kMaxNumTasks = max_num_tasks;
+ static constexpr size_t kSegmentCapacity = SEGMENT_SIZE;
+
+ class View {
+ public:
+ View(WorklistType* worklist, int task_id)
+ : worklist_(worklist), task_id_(task_id) {}
+
+ // Pushes an entry onto the worklist.
+ bool Push(EntryType entry) { return worklist_->Push(task_id_, entry); }
+
+ // Pops an entry from the worklist.
+ bool Pop(EntryType* entry) { return worklist_->Pop(task_id_, entry); }
+
+ // Returns true if the local portion of the worklist is empty.
+ bool IsLocalEmpty() const { return worklist_->IsLocalEmpty(task_id_); }
+
+ // Returns true if the worklist is empty. Can only be used from the main
+ // thread without concurrent access.
+ bool IsEmpty() const { return worklist_->IsEmpty(); }
+
+ bool IsGlobalPoolEmpty() const { return worklist_->IsGlobalPoolEmpty(); }
+
+ // Returns true if the local portion and the global pool are empty (i.e.
+ // whether the current view cannot pop anymore).
+ bool IsLocalViewEmpty() const {
+ return worklist_->IsLocalViewEmpty(task_id_);
+ }
+
+ void FlushToGlobal() { worklist_->FlushToGlobal(task_id_); }
+
+ void* operator new(size_t, void* location) = delete;
+ void* operator new(size_t) = delete;
+
+ private:
+ WorklistType* const worklist_;
+ const int task_id_;
+ };
+
+ Worklist() : Worklist(kMaxNumTasks) {}
+
+ explicit Worklist(int num_tasks) : num_tasks_(num_tasks) {
+ DCHECK_LE(num_tasks_, kMaxNumTasks);
+ for (int i = 0; i < num_tasks_; i++) {
+ private_push_segment(i) = NewSegment();
+ private_pop_segment(i) = NewSegment();
+ }
+ }
+
+ ~Worklist() {
+ CHECK(IsEmpty());
+ for (int i = 0; i < num_tasks_; i++) {
+ DCHECK_NOT_NULL(private_push_segment(i));
+ DCHECK_NOT_NULL(private_pop_segment(i));
+ delete private_push_segment(i);
+ delete private_pop_segment(i);
+ }
+ }
+
+ // Swaps content with the given worklist. Local buffers need to
+ // be empty, not thread safe.
+ void Swap(Worklist<EntryType, SEGMENT_SIZE>& other) {
+ CHECK(AreLocalsEmpty());
+ CHECK(other.AreLocalsEmpty());
+
+ global_pool_.Swap(other.global_pool_);
+ }
+
+ bool Push(int task_id, EntryType entry) {
+ DCHECK_LT(task_id, num_tasks_);
+ DCHECK_NOT_NULL(private_push_segment(task_id));
+ if (!private_push_segment(task_id)->Push(entry)) {
+ PublishPushSegmentToGlobal(task_id);
+ bool success = private_push_segment(task_id)->Push(entry);
+ USE(success);
+ DCHECK(success);
+ }
+ return true;
+ }
+
+ bool Pop(int task_id, EntryType* entry) {
+ DCHECK_LT(task_id, num_tasks_);
+ DCHECK_NOT_NULL(private_pop_segment(task_id));
+ if (!private_pop_segment(task_id)->Pop(entry)) {
+ if (!private_push_segment(task_id)->IsEmpty()) {
+ Segment* tmp = private_pop_segment(task_id);
+ private_pop_segment(task_id) = private_push_segment(task_id);
+ private_push_segment(task_id) = tmp;
+ } else if (!StealPopSegmentFromGlobal(task_id)) {
+ return false;
+ }
+ bool success = private_pop_segment(task_id)->Pop(entry);
+ USE(success);
+ DCHECK(success);
+ }
+ return true;
+ }
+
+ size_t LocalPushSegmentSize(int task_id) const {
+ return private_push_segment(task_id)->Size();
+ }
+
+ bool IsLocalEmpty(int task_id) const {
+ return private_pop_segment(task_id)->IsEmpty() &&
+ private_push_segment(task_id)->IsEmpty();
+ }
+
+ bool IsGlobalPoolEmpty() const { return global_pool_.IsEmpty(); }
+
+ bool IsEmpty() const {
+ if (!AreLocalsEmpty()) return false;
+ return IsGlobalPoolEmpty();
+ }
+
+ bool AreLocalsEmpty() const {
+ for (int i = 0; i < num_tasks_; i++) {
+ if (!IsLocalEmpty(i)) return false;
+ }
+ return true;
+ }
+
+ bool IsLocalViewEmpty(int task_id) const {
+ return IsLocalEmpty(task_id) && IsGlobalPoolEmpty();
+ }
+
+ size_t LocalSize(int task_id) const {
+ return private_pop_segment(task_id)->Size() +
+ private_push_segment(task_id)->Size();
+ }
+
+ // Thread-safe but may return an outdated result.
+ size_t GlobalPoolSize() const { return global_pool_.Size(); }
+
+ // Clears all segments. Frees the global segment pool.
+ //
+ // Assumes that no other tasks are running.
+ void Clear() {
+ for (int i = 0; i < num_tasks_; i++) {
+ private_pop_segment(i)->Clear();
+ private_push_segment(i)->Clear();
+ }
+ global_pool_.Clear();
+ }
+
+ // Calls the specified callback on each element of the deques and replaces
+ // the element with the result of the callback.
+ // The signature of the callback is
+ // bool Callback(EntryType old, EntryType* new).
+ // If the callback returns |false| then the element is removed from the
+ // worklist. Otherwise the |new| entry is updated.
+ //
+ // Assumes that no other tasks are running.
+ template <typename Callback>
+ void Update(Callback callback) {
+ for (int i = 0; i < num_tasks_; i++) {
+ private_pop_segment(i)->Update(callback);
+ private_push_segment(i)->Update(callback);
+ }
+ global_pool_.Update(callback);
+ }
+
+ // Calls the specified callback on each element of the deques.
+ // The signature of the callback is:
+ // void Callback(EntryType entry).
+ //
+ // Assumes that no other tasks are running.
+ template <typename Callback>
+ void Iterate(Callback callback) {
+ for (int i = 0; i < num_tasks_; i++) {
+ private_pop_segment(i)->Iterate(callback);
+ private_push_segment(i)->Iterate(callback);
+ }
+ global_pool_.Iterate(callback);
+ }
+
+ template <typename Callback>
+ void IterateGlobalPool(Callback callback) {
+ global_pool_.Iterate(callback);
+ }
+
+ void FlushToGlobal(int task_id) {
+ PublishPushSegmentToGlobal(task_id);
+ PublishPopSegmentToGlobal(task_id);
+ }
+
+ void MergeGlobalPool(Worklist* other) {
+ global_pool_.Merge(&other->global_pool_);
+ }
+
+ private:
+ FRIEND_TEST(CppgcWorkListTest, SegmentCreate);
+ FRIEND_TEST(CppgcWorkListTest, SegmentPush);
+ FRIEND_TEST(CppgcWorkListTest, SegmentPushPop);
+ FRIEND_TEST(CppgcWorkListTest, SegmentIsEmpty);
+ FRIEND_TEST(CppgcWorkListTest, SegmentIsFull);
+ FRIEND_TEST(CppgcWorkListTest, SegmentClear);
+ FRIEND_TEST(CppgcWorkListTest, SegmentFullPushFails);
+ FRIEND_TEST(CppgcWorkListTest, SegmentEmptyPopFails);
+ FRIEND_TEST(CppgcWorkListTest, SegmentUpdateFalse);
+ FRIEND_TEST(CppgcWorkListTest, SegmentUpdate);
+
+ class Segment {
+ public:
+ static const size_t kCapacity = kSegmentCapacity;
+
+ Segment() : index_(0) {}
+
+ bool Push(EntryType entry) {
+ if (IsFull()) return false;
+ entries_[index_++] = entry;
+ return true;
+ }
+
+ bool Pop(EntryType* entry) {
+ if (IsEmpty()) return false;
+ *entry = entries_[--index_];
+ return true;
+ }
+
+ size_t Size() const { return index_; }
+ bool IsEmpty() const { return index_ == 0; }
+ bool IsFull() const { return index_ == kCapacity; }
+ void Clear() { index_ = 0; }
+
+ template <typename Callback>
+ void Update(Callback callback) {
+ size_t new_index = 0;
+ for (size_t i = 0; i < index_; i++) {
+ if (callback(entries_[i], &entries_[new_index])) {
+ new_index++;
+ }
+ }
+ index_ = new_index;
+ }
+
+ template <typename Callback>
+ void Iterate(Callback callback) const {
+ for (size_t i = 0; i < index_; i++) {
+ callback(entries_[i]);
+ }
+ }
+
+ Segment* next() const { return next_; }
+ void set_next(Segment* segment) { next_ = segment; }
+
+ private:
+ Segment* next_;
+ size_t index_;
+ EntryType entries_[kCapacity];
+ };
+
+ struct PrivateSegmentHolder {
+ Segment* private_push_segment;
+ Segment* private_pop_segment;
+ char cache_line_padding[64];
+ };
+
+ class GlobalPool {
+ public:
+ GlobalPool() : top_(nullptr) {}
+
+ // Swaps contents, not thread safe.
+ void Swap(GlobalPool& other) {
+ Segment* temp = top_;
+ set_top(other.top_);
+ other.set_top(temp);
+ size_t other_size = other.size_.exchange(
+ size_.load(std::memory_order_relaxed), std::memory_order_relaxed);
+ size_.store(other_size, std::memory_order_relaxed);
+ }
+
+ V8_INLINE void Push(Segment* segment) {
+ v8::base::MutexGuard guard(&lock_);
+ segment->set_next(top_);
+ set_top(segment);
+ size_.fetch_add(1, std::memory_order_relaxed);
+ }
+
+ V8_INLINE bool Pop(Segment** segment) {
+ v8::base::MutexGuard guard(&lock_);
+ if (top_) {
+ DCHECK_LT(0U, size_);
+ size_.fetch_sub(1, std::memory_order_relaxed);
+ *segment = top_;
+ set_top(top_->next());
+ return true;
+ }
+ return false;
+ }
+
+ V8_INLINE bool IsEmpty() const {
+ return v8::base::AsAtomicPtr(&top_)->load(std::memory_order_relaxed) ==
+ nullptr;
+ }
+
+ V8_INLINE size_t Size() const {
+ // It is safe to read |size_| without a lock since this variable is
+ // atomic, keeping in mind that threads may not immediately see the new
+ // value when it is updated.
+ return size_.load(std::memory_order_relaxed);
+ }
+
+ void Clear() {
+ v8::base::MutexGuard guard(&lock_);
+ size_.store(0, std::memory_order_relaxed);
+ Segment* current = top_;
+ while (current) {
+ Segment* tmp = current;
+ current = current->next();
+ delete tmp;
+ }
+ set_top(nullptr);
+ }
+
+ // See Worklist::Update.
+ template <typename Callback>
+ void Update(Callback callback) {
+ v8::base::MutexGuard guard(&lock_);
+ Segment* prev = nullptr;
+ Segment* current = top_;
+ while (current) {
+ current->Update(callback);
+ if (current->IsEmpty()) {
+ DCHECK_LT(0U, size_);
+ size_.fetch_sub(1, std::memory_order_relaxed);
+ if (!prev) {
+ top_ = current->next();
+ } else {
+ prev->set_next(current->next());
+ }
+ Segment* tmp = current;
+ current = current->next();
+ delete tmp;
+ } else {
+ prev = current;
+ current = current->next();
+ }
+ }
+ }
+
+ // See Worklist::Iterate.
+ template <typename Callback>
+ void Iterate(Callback callback) {
+ v8::base::MutexGuard guard(&lock_);
+ for (Segment* current = top_; current; current = current->next()) {
+ current->Iterate(callback);
+ }
+ }
+
+ void Merge(GlobalPool* other) {
+ Segment* top = nullptr;
+ size_t other_size = 0;
+ {
+ v8::base::MutexGuard guard(&other->lock_);
+ if (!other->top_) return;
+ top = other->top_;
+ other_size = other->size_.load(std::memory_order_relaxed);
+ other->size_.store(0, std::memory_order_relaxed);
+ other->set_top(nullptr);
+ }
+
+ // It's safe to iterate through these segments because the top was
+ // extracted from |other|.
+ Segment* end = top;
+ while (end->next()) end = end->next();
+
+ {
+ v8::base::MutexGuard guard(&lock_);
+ size_.fetch_add(other_size, std::memory_order_relaxed);
+ end->set_next(top_);
+ set_top(top);
+ }
+ }
+
+ void* operator new(size_t, void* location) = delete;
+ void* operator new(size_t) = delete;
+
+ private:
+ void set_top(Segment* segment) {
+ v8::base::AsAtomicPtr(&top_)->store(segment, std::memory_order_relaxed);
+ }
+
+ v8::base::Mutex lock_;
+ Segment* top_;
+ std::atomic<size_t> size_{0};
+ };
+
+ V8_INLINE Segment*& private_push_segment(int task_id) {
+ return private_segments_[task_id].private_push_segment;
+ }
+
+ V8_INLINE Segment* const& private_push_segment(int task_id) const {
+ return private_segments_[task_id].private_push_segment;
+ }
+
+ V8_INLINE Segment*& private_pop_segment(int task_id) {
+ return private_segments_[task_id].private_pop_segment;
+ }
+
+ V8_INLINE Segment* const& private_pop_segment(int task_id) const {
+ return private_segments_[task_id].private_pop_segment;
+ }
+
+ V8_INLINE void PublishPushSegmentToGlobal(int task_id) {
+ if (!private_push_segment(task_id)->IsEmpty()) {
+ global_pool_.Push(private_push_segment(task_id));
+ private_push_segment(task_id) = NewSegment();
+ }
+ }
+
+ V8_INLINE void PublishPopSegmentToGlobal(int task_id) {
+ if (!private_pop_segment(task_id)->IsEmpty()) {
+ global_pool_.Push(private_pop_segment(task_id));
+ private_pop_segment(task_id) = NewSegment();
+ }
+ }
+
+ V8_INLINE bool StealPopSegmentFromGlobal(int task_id) {
+ if (global_pool_.IsEmpty()) return false;
+ Segment* new_segment = nullptr;
+ if (global_pool_.Pop(&new_segment)) {
+ delete private_pop_segment(task_id);
+ private_pop_segment(task_id) = new_segment;
+ return true;
+ }
+ return false;
+ }
+
+ V8_INLINE Segment* NewSegment() {
+ // Bottleneck for filtering in crash dumps.
+ return new Segment();
+ }
+
+ PrivateSegmentHolder private_segments_[kMaxNumTasks];
+ GlobalPool global_pool_;
+ int num_tasks_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_WORKLIST_H_
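A small sketch of the worklist API for the single-task case; in practice each concurrent marking task uses its own task id and View:

#include "src/heap/cppgc/worklist.h"

void UseWorklist() {
  // 64 entries per segment, up to the default of 8 tasks.
  using Items = cppgc::internal::Worklist<void*, 64>;
  Items worklist;
  Items::View view(&worklist, /* task_id */ 0);

  int object = 0;
  view.Push(&object);    // Lands in the task's private push segment.
  view.FlushToGlobal();  // Publishes non-empty local segments to the pool.

  void* entry = nullptr;
  while (view.Pop(&entry)) {
    // Process |entry|; Pop() swaps in the push segment or steals from the
    // global pool when the private pop segment runs dry.
  }

  // Note: ~Worklist() CHECKs that the worklist has been fully drained.
}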